aboutsummaryrefslogtreecommitdiff
path: root/circuitpython/py
diff options
context:
space:
mode:
authorRaghuram Subramani <raghus2247@gmail.com>2022-06-19 19:47:51 +0530
committerRaghuram Subramani <raghus2247@gmail.com>2022-06-19 19:47:51 +0530
commit4fd287655a72b9aea14cdac715ad5b90ed082ed2 (patch)
tree65d393bc0e699dd12d05b29ba568e04cea666207 /circuitpython/py
parent0150f70ce9c39e9e6dd878766c0620c85e47bed0 (diff)
add circuitpython code
Diffstat (limited to '')
-rw-r--r--circuitpython/py/__pycache__/makeqstrdata.cpython-310.pycbin0 -> 16783 bytes
-rw-r--r--circuitpython/py/__pycache__/makeqstrdefs.cpython-310.pycbin0 -> 6053 bytes
-rw-r--r--circuitpython/py/argcheck.c209
-rw-r--r--circuitpython/py/asmarm.c372
-rw-r--r--circuitpython/py/asmarm.h216
-rw-r--r--circuitpython/py/asmbase.c102
-rw-r--r--circuitpython/py/asmbase.h69
-rw-r--r--circuitpython/py/asmthumb.c573
-rw-r--r--circuitpython/py/asmthumb.h427
-rw-r--r--circuitpython/py/asmx64.c634
-rw-r--r--circuitpython/py/asmx64.h220
-rw-r--r--circuitpython/py/asmx86.c535
-rw-r--r--circuitpython/py/asmx86.h215
-rw-r--r--circuitpython/py/asmxtensa.c253
-rw-r--r--circuitpython/py/asmxtensa.h408
-rw-r--r--circuitpython/py/bc.c339
-rw-r--r--circuitpython/py/bc.h281
-rw-r--r--circuitpython/py/bc0.h150
-rw-r--r--circuitpython/py/binary.c466
-rw-r--r--circuitpython/py/binary.h46
-rw-r--r--circuitpython/py/builtin.h127
-rw-r--r--circuitpython/py/builtinevex.c176
-rw-r--r--circuitpython/py/builtinhelp.c182
-rw-r--r--circuitpython/py/builtinimport.c607
-rw-r--r--circuitpython/py/circuitpy_defns.mk759
-rw-r--r--circuitpython/py/circuitpy_mpconfig.h578
-rw-r--r--circuitpython/py/circuitpy_mpconfig.mk531
-rw-r--r--circuitpython/py/compile.c3611
-rw-r--r--circuitpython/py/compile.h45
-rw-r--r--circuitpython/py/dynruntime.h287
-rw-r--r--circuitpython/py/dynruntime.mk143
-rw-r--r--circuitpython/py/emit.h288
-rw-r--r--circuitpython/py/emitbc.c937
-rw-r--r--circuitpython/py/emitcommon.c61
-rw-r--r--circuitpython/py/emitglue.c232
-rw-r--r--circuitpython/py/emitglue.h110
-rw-r--r--circuitpython/py/emitinlinethumb.c847
-rw-r--r--circuitpython/py/emitinlinextensa.c352
-rw-r--r--circuitpython/py/emitnarm.c20
-rw-r--r--circuitpython/py/emitnative.c3045
-rw-r--r--circuitpython/py/emitnthumb.c20
-rw-r--r--circuitpython/py/emitnx64.c20
-rw-r--r--circuitpython/py/emitnx86.c72
-rw-r--r--circuitpython/py/emitnxtensa.c20
-rw-r--r--circuitpython/py/emitnxtensawin.c23
-rw-r--r--circuitpython/py/enum.c52
-rw-r--r--circuitpython/py/enum.h65
-rw-r--r--circuitpython/py/formatfloat.c438
-rw-r--r--circuitpython/py/formatfloat.h35
-rw-r--r--circuitpython/py/frozenmod.c135
-rw-r--r--circuitpython/py/frozenmod.h40
-rw-r--r--circuitpython/py/gc.c1217
-rw-r--r--circuitpython/py/gc.h101
-rw-r--r--circuitpython/py/gc_long_lived.c149
-rw-r--r--circuitpython/py/gc_long_lived.h43
-rw-r--r--circuitpython/py/genlast.py90
-rw-r--r--circuitpython/py/grammar.h372
-rw-r--r--circuitpython/py/ioctl.h38
-rw-r--r--circuitpython/py/lexer.c921
-rw-r--r--circuitpython/py/lexer.h212
-rw-r--r--circuitpython/py/makecompresseddata.py205
-rw-r--r--circuitpython/py/makemoduledefs.py111
-rw-r--r--circuitpython/py/makeqstrdata.py786
-rw-r--r--circuitpython/py/makeqstrdefs.py226
-rw-r--r--circuitpython/py/makeversionhdr.py129
-rw-r--r--circuitpython/py/malloc.c222
-rw-r--r--circuitpython/py/map.c462
-rw-r--r--circuitpython/py/misc.h275
-rw-r--r--circuitpython/py/mkenv.mk84
-rw-r--r--circuitpython/py/mkrules.cmake161
-rw-r--r--circuitpython/py/mkrules.mk210
-rw-r--r--circuitpython/py/modarray.c45
-rw-r--r--circuitpython/py/modbuiltins.c797
-rw-r--r--circuitpython/py/modcmath.c152
-rw-r--r--circuitpython/py/modcollections.c49
-rw-r--r--circuitpython/py/modgc.c118
-rw-r--r--circuitpython/py/modio.c249
-rw-r--r--circuitpython/py/modmath.c435
-rw-r--r--circuitpython/py/modmicropython.c213
-rw-r--r--circuitpython/py/modstruct.c285
-rw-r--r--circuitpython/py/modsys.c238
-rw-r--r--circuitpython/py/modthread.c306
-rw-r--r--circuitpython/py/moduerrno.c180
-rw-r--r--circuitpython/py/mpconfig.h1897
-rw-r--r--circuitpython/py/mperrno.h148
-rw-r--r--circuitpython/py/mphal.h93
-rw-r--r--circuitpython/py/mpprint.c609
-rw-r--r--circuitpython/py/mpprint.h88
-rw-r--r--circuitpython/py/mpstate.c34
-rw-r--r--circuitpython/py/mpstate.h310
-rw-r--r--circuitpython/py/mpthread.h61
-rw-r--r--circuitpython/py/mpz.c1750
-rw-r--r--circuitpython/py/mpz.h161
-rw-r--r--circuitpython/py/nativeglue.c349
-rw-r--r--circuitpython/py/nativeglue.h178
-rw-r--r--circuitpython/py/nlr.c51
-rw-r--r--circuitpython/py/nlr.h190
-rw-r--r--circuitpython/py/nlraarch64.c83
-rw-r--r--circuitpython/py/nlrpowerpc.c121
-rw-r--r--circuitpython/py/nlrsetjmp.c43
-rw-r--r--circuitpython/py/nlrthumb.c141
-rw-r--r--circuitpython/py/nlrx64.c114
-rw-r--r--circuitpython/py/nlrx86.c104
-rw-r--r--circuitpython/py/nlrxtensa.c83
-rw-r--r--circuitpython/py/obj.c776
-rw-r--r--circuitpython/py/obj.h1141
-rw-r--r--circuitpython/py/objarray.c887
-rw-r--r--circuitpython/py/objarray.h62
-rw-r--r--circuitpython/py/objattrtuple.c98
-rw-r--r--circuitpython/py/objbool.c102
-rw-r--r--circuitpython/py/objboundmeth.c119
-rw-r--r--circuitpython/py/objcell.c71
-rw-r--r--circuitpython/py/objclosure.c100
-rw-r--r--circuitpython/py/objcomplex.c273
-rw-r--r--circuitpython/py/objdeque.c172
-rw-r--r--circuitpython/py/objdict.c658
-rw-r--r--circuitpython/py/objenumerate.c95
-rw-r--r--circuitpython/py/objexcept.c793
-rw-r--r--circuitpython/py/objexcept.h53
-rw-r--r--circuitpython/py/objfilter.c75
-rw-r--r--circuitpython/py/objfloat.c364
-rw-r--r--circuitpython/py/objfun.c557
-rw-r--r--circuitpython/py/objfun.h47
-rw-r--r--circuitpython/py/objgenerator.c423
-rw-r--r--circuitpython/py/objgenerator.h34
-rw-r--r--circuitpython/py/objgetitemiter.c78
-rw-r--r--circuitpython/py/objint.c582
-rw-r--r--circuitpython/py/objint.h72
-rw-r--r--circuitpython/py/objint_longlong.c309
-rw-r--r--circuitpython/py/objint_mpz.c469
-rw-r--r--circuitpython/py/objlist.c559
-rw-r--r--circuitpython/py/objlist.h42
-rw-r--r--circuitpython/py/objmap.c76
-rw-r--r--circuitpython/py/objmodule.c329
-rw-r--r--circuitpython/py/objmodule.h38
-rw-r--r--circuitpython/py/objnamedtuple.c193
-rw-r--r--circuitpython/py/objnamedtuple.h58
-rw-r--r--circuitpython/py/objnone.c58
-rw-r--r--circuitpython/py/objobject.c124
-rw-r--r--circuitpython/py/objpolyiter.c56
-rw-r--r--circuitpython/py/objproperty.c119
-rw-r--r--circuitpython/py/objproperty.h62
-rw-r--r--circuitpython/py/objrange.c235
-rw-r--r--circuitpython/py/objreversed.c83
-rw-r--r--circuitpython/py/objset.c605
-rw-r--r--circuitpython/py/objsingleton.c59
-rw-r--r--circuitpython/py/objslice.c213
-rw-r--r--circuitpython/py/objstr.c2274
-rw-r--r--circuitpython/py/objstr.h112
-rw-r--r--circuitpython/py/objstringio.c284
-rw-r--r--circuitpython/py/objstringio.h40
-rw-r--r--circuitpython/py/objstrunicode.c350
-rw-r--r--circuitpython/py/objtraceback.c42
-rw-r--r--circuitpython/py/objtraceback.h39
-rw-r--r--circuitpython/py/objtuple.c309
-rw-r--r--circuitpython/py/objtuple.h66
-rw-r--r--circuitpython/py/objtype.c1541
-rw-r--r--circuitpython/py/objtype.h59
-rw-r--r--circuitpython/py/objzip.c79
-rw-r--r--circuitpython/py/opmethods.c53
-rw-r--r--circuitpython/py/pairheap.c147
-rw-r--r--circuitpython/py/pairheap.h100
-rw-r--r--circuitpython/py/parse.c1256
-rw-r--r--circuitpython/py/parse.h107
-rw-r--r--circuitpython/py/parsenum.c362
-rw-r--r--circuitpython/py/parsenum.h37
-rw-r--r--circuitpython/py/parsenumbase.c71
-rw-r--r--circuitpython/py/parsenumbase.h33
-rw-r--r--circuitpython/py/persistentcode.c883
-rw-r--r--circuitpython/py/persistentcode.h114
-rw-r--r--circuitpython/py/profile.c970
-rw-r--r--circuitpython/py/profile.h79
-rw-r--r--circuitpython/py/proto.c53
-rw-r--r--circuitpython/py/proto.h44
-rw-r--r--circuitpython/py/py.cmake149
-rw-r--r--circuitpython/py/py.mk310
-rw-r--r--circuitpython/py/pystack.c56
-rw-r--r--circuitpython/py/pystack.h124
-rw-r--r--circuitpython/py/qstr.c342
-rw-r--r--circuitpython/py/qstr.h94
-rw-r--r--circuitpython/py/qstrdefs.h69
-rw-r--r--circuitpython/py/reader.c148
-rw-r--r--circuitpython/py/reader.h46
-rw-r--r--circuitpython/py/repl.c324
-rw-r--r--circuitpython/py/repl.h38
-rw-r--r--circuitpython/py/ringbuf.c170
-rw-r--r--circuitpython/py/ringbuf.h62
-rw-r--r--circuitpython/py/runtime.c1771
-rw-r--r--circuitpython/py/runtime.h249
-rw-r--r--circuitpython/py/runtime0.h161
-rw-r--r--circuitpython/py/runtime_utils.c53
-rw-r--r--circuitpython/py/scheduler.c162
-rw-r--r--circuitpython/py/scope.c153
-rw-r--r--circuitpython/py/scope.h100
-rw-r--r--circuitpython/py/sequence.c233
-rw-r--r--circuitpython/py/showbc.c548
-rw-r--r--circuitpython/py/smallint.c75
-rw-r--r--circuitpython/py/smallint.h68
-rw-r--r--circuitpython/py/stackctrl.c99
-rw-r--r--circuitpython/py/stackctrl.h56
-rw-r--r--circuitpython/py/stream.c583
-rw-r--r--circuitpython/py/stream.h139
-rw-r--r--circuitpython/py/unicode.c211
-rw-r--r--circuitpython/py/unicode.h35
-rw-r--r--circuitpython/py/usermod.cmake52
-rw-r--r--circuitpython/py/vm.c1471
-rw-r--r--circuitpython/py/vmentrytable.h138
-rw-r--r--circuitpython/py/vstr.c249
-rw-r--r--circuitpython/py/warning.c56
209 files changed, 64479 insertions, 0 deletions
diff --git a/circuitpython/py/__pycache__/makeqstrdata.cpython-310.pyc b/circuitpython/py/__pycache__/makeqstrdata.cpython-310.pyc
new file mode 100644
index 0000000..2fb7d9b
--- /dev/null
+++ b/circuitpython/py/__pycache__/makeqstrdata.cpython-310.pyc
Binary files differ
diff --git a/circuitpython/py/__pycache__/makeqstrdefs.cpython-310.pyc b/circuitpython/py/__pycache__/makeqstrdefs.cpython-310.pyc
new file mode 100644
index 0000000..ab47a5b
--- /dev/null
+++ b/circuitpython/py/__pycache__/makeqstrdefs.cpython-310.pyc
Binary files differ
diff --git a/circuitpython/py/argcheck.c b/circuitpython/py/argcheck.c
new file mode 100644
index 0000000..c2066a7
--- /dev/null
+++ b/circuitpython/py/argcheck.c
@@ -0,0 +1,209 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
// Check that the number of positional (n_args) and keyword (n_kw) arguments
// matches the function's packed signature word `sig`, raising TypeError on
// any mismatch.  `sig` is the packed form produced by MP_OBJ_FUN_MAKE_SIG.
void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig) {
    // TODO maybe take the function name as an argument so we can print nicer error messages

    // Unpack the signature (the reverse of MP_OBJ_FUN_MAKE_SIG):
    // bit 0 = accepts keywords, bits 1..16 = max positional, bits 17+ = min positional.
    const bool accepts_kw = (sig & 1) != 0;
    const size_t min_pos = sig >> 17;
    const size_t max_pos = (sig >> 1) & 0xffff;

    if (n_kw != 0 && !accepts_kw) {
        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
        mp_arg_error_terse_mismatch();
        #else
        mp_raise_TypeError(MP_ERROR_TEXT("function doesn't take keyword arguments"));
        #endif
    }

    if (min_pos == max_pos) {
        // Exact arity required.
        if (n_args != min_pos) {
            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
            mp_arg_error_terse_mismatch();
            #else
            mp_raise_TypeError_varg(MP_ERROR_TEXT("function takes %d positional arguments but %d were given"),
                min_pos, n_args);
            #endif
        }
    } else if (n_args < min_pos) {
        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
        mp_arg_error_terse_mismatch();
        #else
        mp_raise_TypeError_varg(
            MP_ERROR_TEXT("function missing %d required positional arguments"),
            min_pos - n_args);
        #endif
    } else if (n_args > max_pos) {
        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
        mp_arg_error_terse_mismatch();
        #else
        mp_raise_TypeError_varg(
            MP_ERROR_TEXT("function expected at most %d arguments, got %d"),
            max_pos, n_args);
        #endif
    }
}
+
// Convenience wrapper: packs (min, max, takes_kw) into the signature word
// format and defers to mp_arg_check_num_sig for the actual checking.
inline void mp_arg_check_num_kw_array(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) {
    mp_arg_check_num_sig(n_args, n_kw, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, takes_kw));
}
+
// Parse positional and keyword arguments against the `allowed` descriptor
// table, filling out_vals[i] for each allowed[i].  The first n_pos entries
// are matched positionally from `pos`; the rest are looked up by keyword in
// `kws` (which may be NULL).  Missing optional args get allowed[i].defval.
// Raises TypeError for extra positionals, extra keywords, a positionally-
// supplied kw-only parameter, or a missing required argument.
void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
    size_t pos_found = 0, kws_found = 0;
    for (size_t i = 0; i < n_allowed; i++) {
        mp_obj_t given_arg;
        if (i < n_pos) {
            // Positional slot: kw-only parameters may not be given positionally.
            if (allowed[i].flags & MP_ARG_KW_ONLY) {
                goto extra_positional;
            }
            pos_found++;
            given_arg = pos[i];
        } else {
            // Past the positional args: look this parameter up by keyword.
            mp_map_elem_t *kw = NULL;
            if (kws != NULL) {
                kw = mp_map_lookup(kws, MP_OBJ_NEW_QSTR(allowed[i].qst), MP_MAP_LOOKUP);
            }
            if (kw == NULL) {
                if (allowed[i].flags & MP_ARG_REQUIRED) {
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    mp_arg_error_terse_mismatch();
                    #else
                    mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' argument required"), allowed[i].qst);
                    #endif
                }
                // Optional and not supplied: use the declared default.
                out_vals[i] = allowed[i].defval;
                continue;
            } else {
                kws_found++;
                given_arg = kw->value;
            }
        }
        // Convert the given object according to the declared kind.
        if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_BOOL) {
            out_vals[i].u_bool = mp_obj_is_true(given_arg);
        } else if ((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_INT) {
            out_vals[i].u_int = mp_obj_get_int(given_arg);
        } else {
            assert((allowed[i].flags & MP_ARG_KIND_MASK) == MP_ARG_OBJ);
            out_vals[i].u_obj = given_arg;
        }
    }
    // More positional args supplied than there are positional slots.
    if (pos_found < n_pos) {
    extra_positional:
        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
        mp_arg_error_terse_mismatch();
        #else
        // TODO better error message
        mp_raise_TypeError(MP_ERROR_TEXT("extra positional arguments given"));
        #endif
    }
    // Any keyword that matched no descriptor entry was never counted in
    // kws_found, so a shortfall here means unknown keywords were passed.
    if (kws != NULL && kws_found < kws->used) {
        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
        mp_arg_error_terse_mismatch();
        #else
        // TODO better error message
        mp_raise_TypeError(MP_ERROR_TEXT("extra keyword arguments given"));
        #endif
    }
}
+
+void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals) {
+ mp_map_t kw_args;
+ mp_map_init_fixed_table(&kw_args, n_kw, args + n_pos);
+ mp_arg_parse_all(n_pos, args, &kw_args, n_allowed, allowed, out_vals);
+}
+
// Shortest-possible argument error, used when MICROPY_ERROR_REPORTING is TERSE.
NORETURN void mp_arg_error_terse_mismatch(void) {
    mp_raise_TypeError(MP_ERROR_TEXT("argument num/types mismatch"));
}

#if MICROPY_CPYTHON_COMPAT
// Raised by functions that accept keyword args in CPython but not (yet) here.
NORETURN void mp_arg_error_unimpl_kw(void) {
    mp_raise_NotImplementedError(MP_ERROR_TEXT("keyword argument(s) not yet implemented - use normal args instead"));
}
#endif
+
+
// Range-validation helpers.  Each returns its input unchanged on success so
// the call can be used inline, and raises ValueError naming `arg_name`
// (a qstr) on failure.

mp_int_t mp_arg_validate_int_min(mp_int_t i, mp_int_t min, qstr arg_name) {
    if (i < min) {
        mp_raise_ValueError_varg(translate("%q must be >= %d"), arg_name, min);
    }
    return i;
}

mp_int_t mp_arg_validate_int_max(mp_int_t i, mp_int_t max, qstr arg_name) {
    if (i > max) {
        mp_raise_ValueError_varg(translate("%q must be <= %d"), arg_name, max);
    }
    return i;
}

mp_int_t mp_arg_validate_int_range(mp_int_t i, mp_int_t min, mp_int_t max, qstr arg_name) {
    // Bounds are inclusive on both ends.
    if (i < min || i > max) {
        mp_raise_ValueError_varg(translate("%q must be %d-%d"), arg_name, min, max);
    }
    return i;
}

// Validate an optional float argument; MP_OBJ_NULL means "not supplied" and
// yields default_for_null instead.
mp_float_t mp_arg_validate_obj_float_non_negative(mp_obj_t float_in, mp_float_t default_for_null, qstr arg_name) {
    const mp_float_t f = (float_in == MP_OBJ_NULL)
        ? default_for_null
        : mp_obj_get_float(float_in);
    // NOTE(review): the comparison is <=, so exactly 0 is rejected even though
    // the message says ">= 0" (this actually enforces strictly positive).
    // Confirm whether callers depend on rejecting 0 before changing either side.
    if (f <= (mp_float_t)0.0) {
        mp_raise_ValueError_varg(translate("%q must be >= 0"), arg_name);
    }
    return f;
}

mp_uint_t mp_arg_validate_length_range(mp_uint_t length, mp_uint_t min, mp_uint_t max, qstr arg_name) {
    // Inclusive bounds on a length/size value.
    if (length < min || length > max) {
        mp_raise_ValueError_varg(translate("%q length must be %d-%d"), arg_name, min, max);
    }
    return length;
}
+
// Raise TypeError unless mp_obj_is_type(obj, type) holds; returns obj so the
// call can be used inline.
mp_obj_t mp_arg_validate_type(mp_obj_t obj, const mp_obj_type_t *type, qstr arg_name) {
    if (!mp_obj_is_type(obj, type)) {
        mp_raise_TypeError_varg(translate("%q must be of type %q"), arg_name, type->name);
    }
    return obj;
}

// Raise TypeError unless obj is a string object; returns obj unchanged.
mp_obj_t mp_arg_validate_string(mp_obj_t obj, qstr arg_name) {
    if (!mp_obj_is_str(obj)) {
        mp_raise_TypeError_varg(translate("%q must be a string"), arg_name);
    }
    return obj;
}
diff --git a/circuitpython/py/asmarm.c b/circuitpython/py/asmarm.c
new file mode 100644
index 0000000..3b637d3
--- /dev/null
+++ b/circuitpython/py/asmarm.c
@@ -0,0 +1,372 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Fabian Vogt
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_ARM
+
+#include "py/asmarm.h"
+
+#define SIGNED_FIT24(x) (((x) & 0xff800000) == 0) || (((x) & 0xff000000) == 0xff000000)
+
+// Insert word into instruction flow
+STATIC void emit(asm_arm_t *as, uint op) {
+ uint8_t *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
+ if (c != NULL) {
+ *(uint32_t *)c = op;
+ }
+}
+
// Insert word into instruction flow, add "ALWAYS" condition code
STATIC void emit_al(asm_arm_t *as, uint op) {
    emit(as, op | ASM_ARM_CC_AL);
}

// Basic instructions without condition code.
// Each helper returns the instruction word minus the 4-bit condition field
// (bits 28-31); callers OR a condition in, typically via emit_al().

STATIC uint asm_arm_op_push(uint reglist) {
    // stmfd sp!, {reglist}
    return 0x92d0000 | (reglist & 0xFFFF);
}

STATIC uint asm_arm_op_pop(uint reglist) {
    // ldmfd sp!, {reglist}
    return 0x8bd0000 | (reglist & 0xFFFF);
}

STATIC uint asm_arm_op_mov_reg(uint rd, uint rn) {
    // mov rd, rn
    return 0x1a00000 | (rd << 12) | rn;
}

STATIC uint asm_arm_op_mov_imm(uint rd, uint imm) {
    // mov rd, #imm
    return 0x3a00000 | (rd << 12) | imm;
}

STATIC uint asm_arm_op_mvn_imm(uint rd, uint imm) {
    // mvn rd, #imm  (moves the bitwise NOT of imm into rd)
    return 0x3e00000 | (rd << 12) | imm;
}

STATIC uint asm_arm_op_add_imm(uint rd, uint rn, uint imm) {
    // add rd, rn, #imm  (immediate silently truncated to 8 bits)
    return 0x2800000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

STATIC uint asm_arm_op_add_reg(uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    return 0x0800000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_sub_imm(uint rd, uint rn, uint imm) {
    // sub rd, rn, #imm  (immediate silently truncated to 8 bits)
    return 0x2400000 | (rn << 16) | (rd << 12) | (imm & 0xFF);
}

STATIC uint asm_arm_op_sub_reg(uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    return 0x0400000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_mul_reg(uint rd, uint rm, uint rs) {
    // mul rd, rm, rs
    // NOTE(review): the assert enforces the encoding restriction rd != rm;
    // confirm against the ARM ARM for the targeted architecture revision.
    assert(rd != rm);
    return 0x0000090 | (rd << 16) | (rs << 8) | rm;
}

STATIC uint asm_arm_op_and_reg(uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    return 0x0000000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_eor_reg(uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    return 0x0200000 | (rn << 16) | (rd << 12) | rm;
}

STATIC uint asm_arm_op_orr_reg(uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    return 0x1800000 | (rn << 16) | (rd << 12) | rm;
}

// Emit a breakpoint instruction (debugger trap).
void asm_arm_bkpt(asm_arm_t *as) {
    // bkpt #0
    emit_al(as, 0x1200070);
}
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through num_locals-1
+// - SP points to first local
+//
+// | SP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+
// Emit the function prologue: push the fixed register list plus LR, then
// reserve stack space for locals when they don't all fit in registers.
// See the diagram above for the resulting stack layout.
void asm_arm_entry(asm_arm_t *as, int num_locals) {
    assert(num_locals >= 0);

    as->stack_adjust = 0;
    as->push_reglist = 1 << ASM_ARM_REG_R1
        | 1 << ASM_ARM_REG_R2
        | 1 << ASM_ARM_REG_R3
        | 1 << ASM_ARM_REG_R4
        | 1 << ASM_ARM_REG_R5
        | 1 << ASM_ARM_REG_R6
        | 1 << ASM_ARM_REG_R7
        | 1 << ASM_ARM_REG_R8;

    // Only adjust the stack if there are more locals than usable registers
    if (num_locals > 3) {
        as->stack_adjust = num_locals * 4;
        // Align stack to 8 bytes
        if (num_locals & 1) {
            as->stack_adjust += 4;
        }
    }

    emit_al(as, asm_arm_op_push(as->push_reglist | 1 << ASM_ARM_REG_LR));
    if (as->stack_adjust > 0) {
        // NOTE(review): asm_arm_op_sub_imm masks its immediate to 8 bits, so a
        // stack_adjust above 255 (roughly 60+ locals) would mis-encode silently.
        emit_al(as, asm_arm_op_sub_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }
}

// Emit the function epilogue: undo the prologue's stack adjustment and pop
// the saved registers, restoring the saved LR directly into PC to return.
void asm_arm_exit(asm_arm_t *as) {
    if (as->stack_adjust > 0) {
        emit_al(as, asm_arm_op_add_imm(ASM_ARM_REG_SP, ASM_ARM_REG_SP, as->stack_adjust));
    }

    emit_al(as, asm_arm_op_pop(as->push_reglist | (1 << ASM_ARM_REG_PC)));
}

void asm_arm_push(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_push(reglist));
}

void asm_arm_pop(asm_arm_t *as, uint reglist) {
    emit_al(as, asm_arm_op_pop(reglist));
}

void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src) {
    emit_al(as, asm_arm_op_mov_reg(reg_dest, reg_src));
}

// Load an arbitrary 32-bit immediate via an inline literal word; returns the
// code offset of the embedded immediate (presumably so it can be patched
// later -- confirm against callers).
size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm) {
    // Insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (rd << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    size_t loc = mp_asm_base_get_code_pos(&as->base);
    emit(as, imm);
    return loc;
}

// Like asm_arm_mov_reg_i32 but uses a shorter single-instruction encoding
// when the immediate allows it (no patchable location is returned).
void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm) {
    // TODO: There are more variants of immediate values
    if ((imm & 0xFF) == imm) {
        emit_al(as, asm_arm_op_mov_imm(rd, imm));
    } else if (imm < 0 && imm >= -256) {
        // mvn is "move not", not "move negative"
        emit_al(as, asm_arm_op_mvn_imm(rd, ~imm));
    } else {
        asm_arm_mov_reg_i32(as, rd, imm);
    }
}
+
// Store rd into local variable slot `local_num` (SP-relative, 4 bytes each).
void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd) {
    // str rd, [sp, #local_num*4]
    emit_al(as, 0x58d0000 | (rd << 12) | (local_num << 2));
}

// Load local variable slot `local_num` into rd.
void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num) {
    // ldr rd, [sp, #local_num*4]
    // NOTE(review): local_num*4 must fit the 12-bit offset field; not checked.
    emit_al(as, 0x59d0000 | (rd << 12) | (local_num << 2));
}

void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm) {
    // cmp rd, #imm  (immediate truncated to 8 bits)
    emit_al(as, 0x3500000 | (rd << 16) | (imm & 0xFF));
}

void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // cmp rd, rn
    emit_al(as, 0x1500000 | (rd << 16) | rn);
}

// Materialise a condition flag as 0/1 in rd.
void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond) {
    emit(as, asm_arm_op_mov_imm(rd, 1) | cond); // movCOND rd, #1
    // Flipping bit 28 of the condition field yields the inverse condition.
    emit(as, asm_arm_op_mov_imm(rd, 0) | (cond ^ (1 << 28))); // mov!COND rd, #0
}

void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // add rd, rn, rm
    emit_al(as, asm_arm_op_add_reg(rd, rn, rm));
}

void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // sub rd, rn, rm
    emit_al(as, asm_arm_op_sub_reg(rd, rn, rm));
}

void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rs, uint rm) {
    // rs and rm are swapped because of restriction rd!=rm
    // mul rd, rm, rs
    emit_al(as, asm_arm_op_mul_reg(rd, rm, rs));
}

void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // and rd, rn, rm
    emit_al(as, asm_arm_op_and_reg(rd, rn, rm));
}

void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // eor rd, rn, rm
    emit_al(as, asm_arm_op_eor_reg(rd, rn, rm));
}

void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm) {
    // orr rd, rn, rm
    emit_al(as, asm_arm_op_orr_reg(rd, rn, rm));
}

// Compute the address of local variable slot `local_num` into rd.
void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num) {
    // add rd, sp, #local_num*4
    emit_al(as, asm_arm_op_add_imm(rd, ASM_ARM_REG_SP, local_num << 2));
}

// Load reg_dest with the runtime address of `label`, computed PC-relative so
// the generated code is position independent.
void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 12 + 8; // adjust for load of rel, and then PC+8 prefetch of add_reg_reg_reg

    // To load rel into reg_dest, insert immediate into code and jump over it
    emit_al(as, 0x59f0000 | (reg_dest << 12)); // ldr rd, [pc]
    emit_al(as, 0xa000000); // b pc
    emit(as, rel);

    // Do reg_dest += PC
    asm_arm_add_reg_reg_reg(as, reg_dest, reg_dest, ASM_ARM_REG_PC);
}

void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsl rs  (logical shift left by register amount)
    emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, lsr rs  (logical shift right by register amount)
    emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
}

void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
    // mov rd, rd, asr rs  (arithmetic shift right by register amount)
    emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
}
+
// Immediate-offset loads and stores.  byte_offset goes straight into the
// 12-bit offset field; callers are responsible for keeping it in range.

void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset) {
    // ldr rd, [rn, #off]
    emit_al(as, 0x5900000 | (rn << 16) | (rd << 12) | byte_offset);
}

void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrh rd, [rn]
    emit_al(as, 0x1d000b0 | (rn << 16) | (rd << 12));
}

void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn) {
    // ldrb rd, [rn]
    emit_al(as, 0x5d00000 | (rn << 16) | (rd << 12));
}

void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset) {
    // str rd, [rm, #off]
    emit_al(as, 0x5800000 | (rm << 16) | (rd << 12) | byte_offset);
}

void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strh rd, [rm]
    emit_al(as, 0x1c000b0 | (rm << 16) | (rd << 12));
}

void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm) {
    // strb rd, [rm]
    emit_al(as, 0x5c00000 | (rm << 16) | (rd << 12));
}

// Word store with a scaled (x4) register index.
void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // str rd, [rm, rn, lsl #2]
    emit_al(as, 0x7800100 | (rm << 16) | (rd << 12) | rn);
}

void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strh doesn't support scaled register index, so pre-scale the index into
    // r8 first.  NOTE(review): this clobbers r8 unconditionally.
    emit_al(as, 0x1a00080 | (ASM_ARM_REG_R8 << 12) | rn); // mov r8, rn, lsl #1
    emit_al(as, 0x18000b0 | (rm << 16) | (rd << 12) | ASM_ARM_REG_R8); // strh rd, [rm, r8]
}

void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn) {
    // strb rd, [rm, rn]
    emit_al(as, 0x7c00000 | (rm << 16) | (rd << 12) | rn);
}
+
// Emit a conditional branch to `label`, resolved via the assembler's
// label_offsets table.
void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label) {
    assert(label < as->base.max_num_labels);
    mp_uint_t dest = as->base.label_offsets[label];
    mp_int_t rel = dest - as->base.code_offset;
    rel -= 8; // account for instruction prefetch, PC is 8 bytes ahead of this instruction
    rel >>= 2; // in ARM mode the branch target is 32-bit aligned, so the 2 LSB are omitted

    if (SIGNED_FIT24(rel)) {
        emit(as, cond | 0xa000000 | (rel & 0xffffff));
    } else {
        // NOTE(review): an out-of-range branch only prints a diagnostic and
        // emits no instruction, silently producing broken code.
        printf("asm_arm_bcc: branch does not fit in 24 bits\n");
    }
}

// Unconditional branch to `label`.
void asm_arm_b_label(asm_arm_t *as, uint label) {
    asm_arm_bcc_label(as, ASM_ARM_CC_AL, label);
}

// Indirect call to entry `fun_id` of a function table addressed by r7 (see
// the ldr below; reg_temp is currently unused).
void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp) {
    // The table offset should fit into the ldr instruction
    assert(fun_id < (0x1000 / 4));
    // In ARM state PC reads 8 bytes ahead, so lr ends up addressing the
    // instruction after the following ldr -- the correct return address.
    emit_al(as, asm_arm_op_mov_reg(ASM_ARM_REG_LR, ASM_ARM_REG_PC)); // mov lr, pc
    emit_al(as, 0x597f000 | (fun_id << 2)); // ldr pc, [r7, #fun_id*4]
}

// Branch-and-exchange to the address in reg_src.
void asm_arm_bx_reg(asm_arm_t *as, uint reg_src) {
    // bx reg_src
    emit_al(as, 0x012fff10 | reg_src);
}
+
+#endif // MICROPY_EMIT_ARM
diff --git a/circuitpython/py/asmarm.h b/circuitpython/py/asmarm.h
new file mode 100644
index 0000000..cab4de7
--- /dev/null
+++ b/circuitpython/py/asmarm.h
@@ -0,0 +1,216 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Fabian Vogt
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMARM_H
+#define MICROPY_INCLUDED_PY_ASMARM_H
+
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+#define ASM_ARM_REG_R0 (0)
+#define ASM_ARM_REG_R1 (1)
+#define ASM_ARM_REG_R2 (2)
+#define ASM_ARM_REG_R3 (3)
+#define ASM_ARM_REG_R4 (4)
+#define ASM_ARM_REG_R5 (5)
+#define ASM_ARM_REG_R6 (6)
+#define ASM_ARM_REG_R7 (7)
+#define ASM_ARM_REG_R8 (8)
+#define ASM_ARM_REG_R9 (9)
+#define ASM_ARM_REG_R10 (10)
+#define ASM_ARM_REG_R11 (11)
+#define ASM_ARM_REG_R12 (12)
+#define ASM_ARM_REG_R13 (13)
+#define ASM_ARM_REG_R14 (14)
+#define ASM_ARM_REG_R15 (15)
+#define ASM_ARM_REG_SP (ASM_ARM_REG_R13)
+#define ASM_ARM_REG_LR (ASM_ARM_REG_R14)
+#define ASM_ARM_REG_PC (ASM_ARM_REG_R15)
+
+#define ASM_ARM_CC_EQ (0x0 << 28)
+#define ASM_ARM_CC_NE (0x1 << 28)
+#define ASM_ARM_CC_CS (0x2 << 28)
+#define ASM_ARM_CC_CC (0x3 << 28)
+#define ASM_ARM_CC_MI (0x4 << 28)
+#define ASM_ARM_CC_PL (0x5 << 28)
+#define ASM_ARM_CC_VS (0x6 << 28)
+#define ASM_ARM_CC_VC (0x7 << 28)
+#define ASM_ARM_CC_HI (0x8 << 28)
+#define ASM_ARM_CC_LS (0x9 << 28)
+#define ASM_ARM_CC_GE (0xa << 28)
+#define ASM_ARM_CC_LT (0xb << 28)
+#define ASM_ARM_CC_GT (0xc << 28)
+#define ASM_ARM_CC_LE (0xd << 28)
+#define ASM_ARM_CC_AL (0xe << 28)
+
+typedef struct _asm_arm_t {
+ mp_asm_base_t base; // common assembler state: pass, code buffer, label offsets
+ uint push_reglist; // registers pushed by the prologue (presumably set by asm_arm_entry, popped by asm_arm_exit — cf. asm_thumb_t)
+ uint stack_adjust; // stack space reserved for locals, undone on exit
+} asm_arm_t;
+
+static inline void asm_arm_end_pass(asm_arm_t *as) { // no per-pass finalization needed by the ARM emitter
+ (void)as;
+}
+
+void asm_arm_entry(asm_arm_t *as, int num_locals);
+void asm_arm_exit(asm_arm_t *as);
+
+void asm_arm_bkpt(asm_arm_t *as);
+
+// mov
+void asm_arm_mov_reg_reg(asm_arm_t *as, uint reg_dest, uint reg_src);
+size_t asm_arm_mov_reg_i32(asm_arm_t *as, uint rd, int imm);
+void asm_arm_mov_reg_i32_optimised(asm_arm_t *as, uint rd, int imm);
+void asm_arm_mov_local_reg(asm_arm_t *as, int local_num, uint rd);
+void asm_arm_mov_reg_local(asm_arm_t *as, uint rd, int local_num);
+void asm_arm_setcc_reg(asm_arm_t *as, uint rd, uint cond);
+
+// compare
+void asm_arm_cmp_reg_i8(asm_arm_t *as, uint rd, int imm);
+void asm_arm_cmp_reg_reg(asm_arm_t *as, uint rd, uint rn);
+
+// arithmetic
+void asm_arm_add_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_sub_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_mul_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_and_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_eor_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
+void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
+void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label);
+void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
+void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs);
+void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
+
+// memory
+void asm_arm_ldr_reg_reg(asm_arm_t *as, uint rd, uint rn, uint byte_offset);
+void asm_arm_ldrh_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_ldrb_reg_reg(asm_arm_t *as, uint rd, uint rn);
+void asm_arm_str_reg_reg(asm_arm_t *as, uint rd, uint rm, uint byte_offset);
+void asm_arm_strh_reg_reg(asm_arm_t *as, uint rd, uint rm);
+void asm_arm_strb_reg_reg(asm_arm_t *as, uint rd, uint rm);
+// store to array
+void asm_arm_str_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_strh_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+void asm_arm_strb_reg_reg_reg(asm_arm_t *as, uint rd, uint rm, uint rn);
+
+// stack
+void asm_arm_push(asm_arm_t *as, uint reglist);
+void asm_arm_pop(asm_arm_t *as, uint reglist);
+
+// control flow
+void asm_arm_bcc_label(asm_arm_t *as, int cond, uint label);
+void asm_arm_b_label(asm_arm_t *as, uint label);
+void asm_arm_bl_ind(asm_arm_t *as, uint fun_id, uint reg_temp);
+void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
+
+// Holds a pointer to mp_fun_table
+#define ASM_ARM_REG_FUN_TABLE ASM_ARM_REG_R7
+
+#if defined(GENERIC_ASM_API) && GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_ARM_REG_R0
+#define REG_ARG_1 ASM_ARM_REG_R0
+#define REG_ARG_2 ASM_ARM_REG_R1
+#define REG_ARG_3 ASM_ARM_REG_R2
+#define REG_ARG_4 ASM_ARM_REG_R3
+
+#define REG_TEMP0 ASM_ARM_REG_R0
+#define REG_TEMP1 ASM_ARM_REG_R1
+#define REG_TEMP2 ASM_ARM_REG_R2
+
+#define REG_LOCAL_1 ASM_ARM_REG_R4
+#define REG_LOCAL_2 ASM_ARM_REG_R5
+#define REG_LOCAL_3 ASM_ARM_REG_R6
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_ARM_REG_FUN_TABLE
+
+#define ASM_T asm_arm_t
+#define ASM_END_PASS asm_arm_end_pass
+#define ASM_ENTRY asm_arm_entry
+#define ASM_EXIT asm_arm_exit
+
+#define ASM_JUMP asm_arm_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+ do { \
+ asm_arm_cmp_reg_i8(as, reg, 0); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+ do { \
+ asm_arm_cmp_reg_i8(as, reg, 0); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_NE, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_arm_cmp_reg_reg(as, reg1, reg2); \
+ asm_arm_bcc_label(as, ASM_ARM_CC_EQ, label); \
+ } while (0)
+#define ASM_JUMP_REG(as, reg) asm_arm_bx_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_arm_bl_ind(as, idx, ASM_ARM_REG_R3)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_arm_mov_local_reg((as), (local_num), (reg_src))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_arm_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm))
+#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_arm_mov_reg_i32((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_arm_mov_reg_local((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_arm_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_arm_mov_reg_local_addr((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_arm_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_arm_lsr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_arm_and_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_arm_add_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_arm_sub_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_arm_mul_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_arm_ldrb_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_arm_ldrh_reg_reg((as), (reg_dest), (reg_base))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_arm_ldr_reg_reg((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_arm_str_reg_reg((as), (reg_dest), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_value, reg_base) asm_arm_strb_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE16_REG_REG(as, reg_value, reg_base) asm_arm_strh_reg_reg((as), (reg_value), (reg_base))
+#define ASM_STORE32_REG_REG(as, reg_value, reg_base) asm_arm_str_reg_reg((as), (reg_value), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMARM_H
diff --git a/circuitpython/py/asmbase.c b/circuitpython/py/asmbase.c
new file mode 100644
index 0000000..ff1dd80
--- /dev/null
+++ b/circuitpython/py/asmbase.c
@@ -0,0 +1,102 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+#if MICROPY_EMIT_MACHINE_CODE
+
+void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels) { // allocate the label-offset table; the code buffer is allocated later, at the emit pass
+ as->max_num_labels = max_num_labels;
+ as->label_offsets = m_new(size_t, max_num_labels);
+}
+
+void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code) { // release the label table and, if free_code, the executable buffer
+ if (free_code) {
+ MP_PLAT_FREE_EXEC(as->code_base, as->code_size); // executable RAM is platform specific, free via the platform hook
+ }
+ m_del(size_t, as->label_offsets, as->max_num_labels);
+}
+
+void mp_asm_base_start_pass(mp_asm_base_t *as, int pass) { // begin a compute or emit pass; the emit pass allocates the code buffer
+ if (pass < MP_ASM_PASS_EMIT) {
+ // Reset labels so we can detect backwards jumps (and verify unique assignment)
+ memset(as->label_offsets, -1, as->max_num_labels * sizeof(size_t)); // (size_t)-1 marks "unassigned"
+ } else {
+ // allocating executable RAM is platform specific
+ MP_PLAT_ALLOC_EXEC(as->code_offset, (void **)&as->code_base, &as->code_size); // sized by the bytes counted in the previous pass
+ assert(as->code_base != NULL);
+ }
+ as->pass = pass;
+ as->code_offset = 0; // restart writing from the beginning
+}
+
+// all functions must go through this one to emit bytes
+// if as->pass < MP_ASM_PASS_EMIT, then this function just counts the number
+// of bytes needed and returns NULL, and callers should not store any data
+uint8_t *mp_asm_base_get_cur_to_write_bytes(mp_asm_base_t *as, size_t num_bytes_to_write) {
+ uint8_t *c = NULL;
+ if (as->pass == MP_ASM_PASS_EMIT) {
+ assert(as->code_offset + num_bytes_to_write <= as->code_size); // buffer was sized by the compute pass
+ c = as->code_base + as->code_offset;
+ }
+ as->code_offset += num_bytes_to_write; // always advance, so earlier passes measure the total size
+ return c;
+}
+
+void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label) { // bind label to the current code offset
+ assert(label < as->max_num_labels);
+ if (as->pass < MP_ASM_PASS_EMIT) {
+ // assign label offset
+ assert(as->label_offsets[label] == (size_t)-1); // each label may be assigned only once per pass
+ as->label_offsets[label] = as->code_offset;
+ } else {
+ // ensure label offset has not changed from PASS_COMPUTE to PASS_EMIT
+ assert(as->label_offsets[label] == as->code_offset);
+ }
+}
+
+// align must be a multiple of 2
+void mp_asm_base_align(mp_asm_base_t *as, unsigned int align) { // round code_offset up; NOTE(review): the mask trick requires align to be a power of 2
+ as->code_offset = (as->code_offset + align - 1) & (~(align - 1));
+}
+
+// this function assumes a little endian machine
+void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val) { // emit val as bytesize raw bytes, least-significant first
+ uint8_t *c = mp_asm_base_get_cur_to_write_bytes(as, bytesize);
+ if (c != NULL) { // NULL during the compute pass; bytes are still counted
+ for (unsigned int i = 0; i < bytesize; i++) {
+ *c++ = val;
+ val >>= 8;
+ }
+ }
+}
+
+#endif // MICROPY_EMIT_MACHINE_CODE
diff --git a/circuitpython/py/asmbase.h b/circuitpython/py/asmbase.h
new file mode 100644
index 0000000..f2932d6
--- /dev/null
+++ b/circuitpython/py/asmbase.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMBASE_H
+#define MICROPY_INCLUDED_PY_ASMBASE_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#define MP_ASM_PASS_COMPUTE (1)
+#define MP_ASM_PASS_EMIT (2)
+
+typedef struct _mp_asm_base_t {
+ int pass; // current pass: MP_ASM_PASS_COMPUTE or MP_ASM_PASS_EMIT
+ size_t code_offset; // current write position within the code
+ size_t code_size; // total code size, measured by the compute pass
+ uint8_t *code_base; // executable buffer; allocated at the start of the emit pass
+
+ size_t max_num_labels; // capacity of label_offsets
+ size_t *label_offsets; // byte offset of each label; (size_t)-1 means unassigned
+} mp_asm_base_t;
+
+void mp_asm_base_init(mp_asm_base_t *as, size_t max_num_labels);
+void mp_asm_base_deinit(mp_asm_base_t *as, bool free_code);
+void mp_asm_base_start_pass(mp_asm_base_t *as, int pass);
+uint8_t *mp_asm_base_get_cur_to_write_bytes(mp_asm_base_t *as, size_t num_bytes_to_write);
+void mp_asm_base_label_assign(mp_asm_base_t *as, size_t label);
+void mp_asm_base_align(mp_asm_base_t *as, unsigned int align);
+void mp_asm_base_data(mp_asm_base_t *as, unsigned int bytesize, uintptr_t val);
+
+static inline size_t mp_asm_base_get_code_pos(mp_asm_base_t *as) { // current write offset into the code
+ return as->code_offset;
+}
+
+static inline size_t mp_asm_base_get_code_size(mp_asm_base_t *as) { // total generated code size in bytes
+ return as->code_size;
+}
+
+static inline void *mp_asm_base_get_code(mp_asm_base_t *as) { // pointer to the executable code, committing it on platforms that require it
+ #if defined(MP_PLAT_COMMIT_EXEC)
+ return MP_PLAT_COMMIT_EXEC(as->code_base, as->code_size, NULL);
+ #else
+ return as->code_base;
+ #endif
+}
+
+#endif // MICROPY_INCLUDED_PY_ASMBASE_H
diff --git a/circuitpython/py/asmthumb.c b/circuitpython/py/asmthumb.c
new file mode 100644
index 0000000..547e5be
--- /dev/null
+++ b/circuitpython/py/asmthumb.c
@@ -0,0 +1,573 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
+
+#include "py/mpstate.h"
+#include "py/persistentcode.h"
+#include "py/asmthumb.h"
+
+#define UNSIGNED_FIT5(x) ((uint32_t)(x) < 32)
+#define UNSIGNED_FIT7(x) ((uint32_t)(x) < 128)
+#define UNSIGNED_FIT8(x) (((x) & 0xffffff00) == 0)
+#define UNSIGNED_FIT16(x) (((x) & 0xffff0000) == 0)
+#define SIGNED_FIT8(x) (((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)
+#define SIGNED_FIT9(x) (((x) & 0xffffff00) == 0) || (((x) & 0xffffff00) == 0xffffff00)
+#define SIGNED_FIT12(x) (((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800)
+#define SIGNED_FIT23(x) (((x) & 0xffc00000) == 0) || (((x) & 0xffc00000) == 0xffc00000)
+
+#if MICROPY_EMIT_THUMB_ARMV7M
+// Note: these actually take an imm12 but the high-bit is not encoded here
+#define OP_ADD_W_RRI_HI(reg_src) (0xf200 | (reg_src))
+#define OP_ADD_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
+#define OP_SUB_W_RRI_HI(reg_src) (0xf2a0 | (reg_src))
+#define OP_SUB_W_RRI_LO(reg_dest, imm11) ((imm11 << 4 & 0x7000) | reg_dest << 8 | (imm11 & 0xff))
+
+#define OP_LDR_W_HI(reg_base) (0xf8d0 | (reg_base))
+#define OP_LDR_W_LO(reg_dest, imm12) ((reg_dest) << 12 | (imm12))
+#endif
+
+static inline byte *asm_thumb_get_cur_to_write_bytes(asm_thumb_t *as, int n) { // reserve n output bytes; NULL unless in the emit pass
+ return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
+}
+
+/*
+STATIC void asm_thumb_write_byte_1(asm_thumb_t *as, byte b1) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 1);
+ c[0] = b1;
+}
+*/
+
+/*
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
+STATIC void asm_thumb_write_word32(asm_thumb_t *as, int w32) {
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+ c[0] = IMM32_L0(w32);
+ c[1] = IMM32_L1(w32);
+ c[2] = IMM32_L2(w32);
+ c[3] = IMM32_L3(w32);
+}
+*/
+
+// rlolist is a bit map indicating desired lo-registers
+#define OP_PUSH_RLIST(rlolist) (0xb400 | (rlolist))
+#define OP_PUSH_RLIST_LR(rlolist) (0xb400 | 0x0100 | (rlolist))
+#define OP_POP_RLIST(rlolist) (0xbc00 | (rlolist))
+#define OP_POP_RLIST_PC(rlolist) (0xbc00 | 0x0100 | (rlolist))
+
+// The number of words must fit in 7 unsigned bits
+#define OP_ADD_SP(num_words) (0xb000 | (num_words))
+#define OP_SUB_SP(num_words) (0xb080 | (num_words))
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through num_locals-1
+// - SP points to first local
+//
+// | SP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+
+void asm_thumb_entry(asm_thumb_t *as, int num_locals) { // function prologue: optional ARM->Thumb shim, push regs, reserve locals
+ assert(num_locals >= 0);
+
+ // If this Thumb machine code is run from ARM state then add a prelude
+ // to switch to Thumb state for the duration of the function.
+ #if MICROPY_DYNAMIC_COMPILER || MICROPY_EMIT_ARM || (defined(__arm__) && !defined(__thumb2__) && !defined(__thumb__))
+ #if MICROPY_DYNAMIC_COMPILER
+ if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_ARMV6)
+ #endif
+ {
+ asm_thumb_op32(as, 0x4010, 0xe92d); // push {r4, lr}
+ asm_thumb_op32(as, 0xe009, 0xe28f); // add lr, pc, 8 + 1
+ asm_thumb_op32(as, 0xff3e, 0xe12f); // blx lr
+ asm_thumb_op32(as, 0x4010, 0xe8bd); // pop {r4, lr}
+ asm_thumb_op32(as, 0xff1e, 0xe12f); // bx lr
+ }
+ #endif
+
+ // work out what to push and how many extra spaces to reserve on stack
+ // so that we have enough for all locals and it's aligned an 8-byte boundary
+ // we push extra regs (r1, r2, r3) to help do the stack adjustment
+ // we probably should just always subtract from sp, since this would be more efficient
+ // for push rlist, lowest numbered register at the lowest address
+ uint reglist;
+ uint stack_adjust;
+ // don't pop r0 because it's used for return value
+ switch (num_locals) {
+ case 0:
+ reglist = 0xf2; // push {r1, r4-r7} (+lr): 6 regs, keeps sp 8-byte aligned
+ stack_adjust = 0;
+ break;
+
+ case 1:
+ reglist = 0xf2; // the pushed r1 slot doubles as storage for the single local
+ stack_adjust = 0;
+ break;
+
+ case 2:
+ reglist = 0xfe; // push {r1-r7} (+lr): 8 regs; the r1-r3 slots hold the locals
+ stack_adjust = 0;
+ break;
+
+ case 3:
+ reglist = 0xfe;
+ stack_adjust = 0;
+ break;
+
+ default:
+ reglist = 0xfe;
+ stack_adjust = ((num_locals - 3) + 1) & (~1); // round up to an even word count to preserve 8-byte alignment
+ break;
+ }
+ asm_thumb_op16(as, OP_PUSH_RLIST_LR(reglist));
+ if (stack_adjust > 0) {
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ if (UNSIGNED_FIT7(stack_adjust)) {
+ asm_thumb_op16(as, OP_SUB_SP(stack_adjust));
+ } else {
+ asm_thumb_op32(as, OP_SUB_W_RRI_HI(ASM_THUMB_REG_SP), OP_SUB_W_RRI_LO(ASM_THUMB_REG_SP, stack_adjust * 4));
+ }
+ #else
+ int adj = stack_adjust;
+ // we don't expect the stack_adjust to be massive
+ while (!UNSIGNED_FIT7(adj)) {
+ asm_thumb_op16(as, OP_SUB_SP(127));
+ adj -= 127;
+ }
+ asm_thumb_op16(as, OP_SUB_SP(adj));
+ #endif
+ }
+ as->push_reglist = reglist; // remembered so asm_thumb_exit pops the matching registers
+ as->stack_adjust = stack_adjust;
+}
+
+void asm_thumb_exit(asm_thumb_t *as) { // function epilogue: undo the stack adjust, then pop saved regs returning via pc
+ if (as->stack_adjust > 0) {
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ if (UNSIGNED_FIT7(as->stack_adjust)) {
+ asm_thumb_op16(as, OP_ADD_SP(as->stack_adjust));
+ } else {
+ asm_thumb_op32(as, OP_ADD_W_RRI_HI(ASM_THUMB_REG_SP), OP_ADD_W_RRI_LO(ASM_THUMB_REG_SP, as->stack_adjust * 4));
+ }
+ #else
+ int adj = as->stack_adjust;
+ // we don't expect the stack_adjust to be massive
+ while (!UNSIGNED_FIT7(adj)) {
+ asm_thumb_op16(as, OP_ADD_SP(127));
+ adj -= 127;
+ }
+ asm_thumb_op16(as, OP_ADD_SP(adj));
+ #endif
+ }
+ asm_thumb_op16(as, OP_POP_RLIST_PC(as->push_reglist)); // pop the regs pushed by asm_thumb_entry, plus pc (returns)
+}
+
+STATIC mp_uint_t get_label_dest(asm_thumb_t *as, uint label) { // label's code offset; (mp_uint_t)-1 if not yet assigned
+ assert(label < as->base.max_num_labels);
+ return as->base.label_offsets[label];
+}
+
+void asm_thumb_op16(asm_thumb_t *as, uint op) { // emit one 16-bit Thumb opcode
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 2);
+ if (c != NULL) { // NULL during the sizing pass
+ // little endian
+ c[0] = op;
+ c[1] = op >> 8;
+ }
+}
+
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2) { // emit a 32-bit opcode as two 16-bit halves, op1 first
+ byte *c = asm_thumb_get_cur_to_write_bytes(as, 4);
+ if (c != NULL) { // NULL during the sizing pass
+ // little endian, op1 then op2
+ c[0] = op1;
+ c[1] = op1 >> 8;
+ c[2] = op2;
+ c[3] = op2 >> 8;
+ }
+}
+
+#define OP_FORMAT_4(op, rlo_dest, rlo_src) ((op) | ((rlo_src) << 3) | (rlo_dest))
+
+void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) { // format-4 ALU op; both operands must be lo registers
+ assert(rlo_dest < ASM_THUMB_REG_R8);
+ assert(rlo_src < ASM_THUMB_REG_R8);
+ asm_thumb_op16(as, OP_FORMAT_4(op, rlo_dest, rlo_src));
+}
+
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src) { // mov reg_dest, reg_src; handles lo and hi registers
+ uint op_lo;
+ if (reg_src < 8) {
+ op_lo = reg_src << 3;
+ } else {
+ op_lo = 0x40 | ((reg_src - 8) << 3); // 0x40 = H2 flag for a hi source register
+ }
+ if (reg_dest < 8) {
+ op_lo |= reg_dest;
+ } else {
+ op_lo |= 0x80 | (reg_dest - 8); // 0x80 = H1 flag for a hi destination register
+ }
+ // mov reg_dest, reg_src
+ asm_thumb_op16(as, 0x4600 | op_lo);
+}
+
+#if MICROPY_EMIT_THUMB_ARMV7M
+
+// if loading lo half with movw, the i16 value will be zero extended into the r32 register!
+size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src) { // mov_op is ASM_THUMB_OP_MOVW or MOVT
+ assert(reg_dest < ASM_THUMB_REG_R15);
+ size_t loc = mp_asm_base_get_code_pos(&as->base); // returned so the instruction can be located/patched later
+ // mov[wt] reg_dest, #i16_src
+ asm_thumb_op32(as, mov_op | ((i16_src >> 1) & 0x0400) | ((i16_src >> 12) & 0xf), ((i16_src << 4) & 0x7000) | (reg_dest << 8) | (i16_src & 0xff));
+ return loc;
+}
+
+#else
+
+void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src) { // ARMV6M: build a 16-bit immediate with mov/lsl/add (3 ops)
+ asm_thumb_mov_rlo_i8(as, rlo_dest, (i16_src >> 8) & 0xff); // high byte first
+ asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, 8);
+ asm_thumb_add_rlo_i8(as, rlo_dest, i16_src & 0xff); // then add the low byte
+}
+
+#endif
+
+#define OP_B_N(byte_offset) (0xe000 | (((byte_offset) >> 1) & 0x07ff))
+
+bool asm_thumb_b_n_label(asm_thumb_t *as, uint label) { // narrow (16-bit) unconditional branch to label
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+ asm_thumb_op16(as, OP_B_N(rel));
+ return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT12(rel); // true iff the offset fits (always true before the emit pass)
+}
+
+#define OP_BCC_N(cond, byte_offset) (0xd000 | ((cond) << 8) | (((byte_offset) >> 1) & 0x00ff))
+
+// all these bit arithmetics need coverage testing!
+#define OP_BCC_W_HI(cond, byte_offset) (0xf000 | ((cond) << 6) | (((byte_offset) >> 10) & 0x0400) | (((byte_offset) >> 14) & 0x003f))
+#define OP_BCC_W_LO(byte_offset) (0x8000 | ((byte_offset) & 0x2000) | (((byte_offset) >> 1) & 0x0fff))
+
+bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide) { // conditional branch, narrow (16-bit) or wide (32-bit)
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+ if (!wide) {
+ asm_thumb_op16(as, OP_BCC_N(cond, rel));
+ return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT9(rel); // true iff the offset fits (always true before the emit pass)
+ } else {
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
+ return true;
+ #else
+ // this method should not be called for ARMV6M
+ return false;
+ #endif
+ }
+}
+
+#define OP_BL_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
+#define OP_BL_LO(byte_offset) (0xf800 | (((byte_offset) >> 1) & 0x07ff))
+
+bool asm_thumb_bl_label(asm_thumb_t *as, uint label) { // branch-and-link to label (32-bit BL pair)
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+ asm_thumb_op32(as, OP_BL_HI(rel), OP_BL_LO(rel));
+ return as->base.pass != MP_ASM_PASS_EMIT || SIGNED_FIT23(rel); // true iff the offset fits (always true before the emit pass)
+}
+
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32) { // load a full 32-bit constant; returns offset of the sequence
+ // movw, movt does it in 8 bytes
+ // ldr [pc, #], dw does it in 6 bytes, but we might not reach to end of code for dw
+
+ size_t loc = mp_asm_base_get_code_pos(&as->base);
+
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32); // low half
+ asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVT, reg_dest, i32 >> 16); // high half
+ #else
+ // should only be called with lo reg for ARMV6M
+ assert(reg_dest < ASM_THUMB_REG_R8);
+
+ // sanity check that generated code is aligned
+ assert(!as->base.code_base || !(3u & (uintptr_t)as->base.code_base));
+
+ // basically:
+ // (nop)
+ // ldr reg_dest, _data
+ // b 1f
+ // _data: .word i32
+ // 1:
+ if (as->base.code_offset & 2u) {
+ asm_thumb_op16(as, ASM_THUMB_OP_NOP); // pad so the inline literal is 4-byte aligned for the pc-relative ldr
+ }
+ asm_thumb_ldr_rlo_pcrel_i8(as, reg_dest, 0);
+ asm_thumb_op16(as, OP_B_N(2)); // branch over the inline literal word
+ asm_thumb_op16(as, i32 & 0xffff);
+ asm_thumb_op16(as, i32 >> 16);
+ #endif
+
+ return loc;
+}
+
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32) { // load a 32-bit constant using the shortest encoding available
+ if (reg_dest < 8 && UNSIGNED_FIT8(i32)) {
+ asm_thumb_mov_rlo_i8(as, reg_dest, i32); // single 16-bit mov
+ } else {
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ if (UNSIGNED_FIT16(i32)) {
+ asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, reg_dest, i32);
+ } else {
+ asm_thumb_mov_reg_i32(as, reg_dest, i32); // movw + movt pair
+ }
+ #else
+ uint rlo_dest = reg_dest;
+ assert(rlo_dest < ASM_THUMB_REG_R8); // hi regs should never reach here for ARMV6M
+
+ bool negate = i32 < 0 && ((i32 + i32) & 0xffffffffu); // don't negate 0x80000000
+ if (negate) {
+ i32 = -i32;
+ }
+
+ uint clz = __builtin_clz(i32);
+ uint ctz = i32 ? __builtin_ctz(i32) : 0;
+ assert(clz + ctz <= 32);
+ if (clz + ctz >= 24) { // at most 8 significant bits: mov the byte, then shift it into place
+ asm_thumb_mov_rlo_i8(as, rlo_dest, (i32 >> ctz) & 0xff);
+ asm_thumb_lsl_rlo_rlo_i5(as, rlo_dest, rlo_dest, ctz);
+ } else if (UNSIGNED_FIT16(i32)) {
+ asm_thumb_mov_rlo_i16(as, rlo_dest, i32);
+ } else {
+ if (negate) {
+ // no point in negating if we're storing in 32 bit anyway
+ negate = false;
+ i32 = -i32;
+ }
+ asm_thumb_mov_reg_i32(as, rlo_dest, i32);
+ }
+ if (negate) {
+ asm_thumb_neg_rlo_rlo(as, rlo_dest, rlo_dest); // restore the sign
+ }
+ #endif
+ }
+}
+
+#define OP_STR_TO_SP_OFFSET(rlo_dest, word_offset) (0x9000 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+#define OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset) (0x9800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+
+static void asm_thumb_mov_local_check(asm_thumb_t *as, int word_offset) { // ensure the local's SP-relative offset fits the 8-bit field
+ if (as->base.pass >= MP_ASM_PASS_EMIT) {
+ assert(word_offset >= 0);
+ if (!UNSIGNED_FIT8(word_offset)) {
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("too many locals for native method"));
+ }
+ }
+}
+
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num, uint rlo_src) { // str rlo_src, [sp, #local_num*4]: store into a local slot
+ assert(rlo_src < ASM_THUMB_REG_R8);
+ int word_offset = local_num;
+ asm_thumb_mov_local_check(as, word_offset);
+ asm_thumb_op16(as, OP_STR_TO_SP_OFFSET(rlo_src, word_offset));
+}
+
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num) { // ldr rlo_dest, [sp, #local_num*4]: load from a local slot
+ assert(rlo_dest < ASM_THUMB_REG_R8);
+ int word_offset = local_num;
+ asm_thumb_mov_local_check(as, word_offset);
+ asm_thumb_op16(as, OP_LDR_FROM_SP_OFFSET(rlo_dest, word_offset));
+}
+
+#define OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset) (0xa800 | ((rlo_dest) << 8) | ((word_offset) & 0x00ff))
+
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num) { // rlo_dest = sp + local_num*4 (address of a local)
+ assert(rlo_dest < ASM_THUMB_REG_R8);
+ int word_offset = local_num;
+ assert(as->base.pass < MP_ASM_PASS_EMIT || word_offset >= 0);
+ asm_thumb_op16(as, OP_ADD_REG_SP_OFFSET(rlo_dest, word_offset));
+}
+
+void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label) { // rlo_dest = runtime address of label (pc-relative)
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ rel |= 1; // to stay in Thumb state when jumping to this address
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ rel -= 4 + 4; // adjust for mov_reg_i16 and then PC+4 prefetch of add_reg_reg
+ asm_thumb_mov_reg_i16(as, ASM_THUMB_OP_MOVW, rlo_dest, rel); // 4 bytes
+ #else
+ rel -= 8 + 4; // adjust for four instructions and then PC+4 prefetch of add_reg_reg
+ // 6 bytes
+ asm_thumb_mov_rlo_i16(as, rlo_dest, rel)
+ // 2 bytes - not always needed, but we want to keep the size the same
+ asm_thumb_sxth_rlo_rlo(as, rlo_dest, rlo_dest); // sign-extend: rel may be negative for backwards labels
+ #endif
+ asm_thumb_add_reg_reg(as, rlo_dest, ASM_THUMB_REG_R15); // 2 bytes
+}
+
+#if MICROPY_EMIT_THUMB_ARMV7M
+static inline void asm_thumb_ldr_reg_reg_i12(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) { // wide ldr, 12-bit byte offset
+ asm_thumb_op32(as, OP_LDR_W_HI(reg_base), OP_LDR_W_LO(reg_dest, word_offset * 4));
+}
+#endif
+
+void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint word_offset) { // ldr reg_dest, [reg_base, #word_offset*4] via the shortest encoding
+ if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8 && UNSIGNED_FIT5(word_offset)) {
+ asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_base, word_offset); // single 16-bit ldr
+ } else {
+ #if MICROPY_EMIT_THUMB_ARMV7M
+ asm_thumb_ldr_reg_reg_i12(as, reg_dest, reg_base, word_offset);
+ #else
+ word_offset -= 31; // bias: the final ldr below always uses the maximum imm5 offset of 31 words
+ if (reg_dest < ASM_THUMB_REG_R8 && reg_base < ASM_THUMB_REG_R8) {
+ if (UNSIGNED_FIT8(word_offset) && (word_offset < 64 || reg_dest != reg_base)) {
+ if (word_offset < 64) {
+ if (reg_dest != reg_base) {
+ asm_thumb_mov_reg_reg(as, reg_dest, reg_base);
+ }
+ asm_thumb_add_rlo_i8(as, reg_dest, word_offset * 4); // reg_dest = base + offset*4
+ } else {
+ asm_thumb_mov_rlo_i8(as, reg_dest, word_offset);
+ asm_thumb_lsl_rlo_rlo_i5(as, reg_dest, reg_dest, 2);
+ asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base);
+ }
+ } else {
+ if (reg_dest != reg_base) {
+ asm_thumb_mov_rlo_i16(as, reg_dest, word_offset * 4);
+ asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_base); // fix: add the base register (was reg_dest, which just doubled the offset and ignored the base)
+ } else {
+ uint reg_other = reg_dest ^ 7; // some other lo register, used as scratch
+ asm_thumb_op16(as, OP_PUSH_RLIST((1 << reg_other)));
+ asm_thumb_mov_rlo_i16(as, reg_other, word_offset * 4);
+ asm_thumb_add_rlo_rlo_rlo(as, reg_dest, reg_dest, reg_other);
+ asm_thumb_op16(as, OP_POP_RLIST((1 << reg_other)));
+ }
+ }
+ } else {
+ assert(0); // should never be called for ARMV6M
+ }
+ asm_thumb_ldr_rlo_rlo_i5(as, reg_dest, reg_dest, 31); // load with the 31 words biased off above
+ #endif
+ }
+}
+
+// this could be wrong, because it should have a range of +/- 16MiB...
+#define OP_BW_HI(byte_offset) (0xf000 | (((byte_offset) >> 12) & 0x07ff))
+#define OP_BW_LO(byte_offset) (0xb800 | (((byte_offset) >> 1) & 0x07ff))
+
+// Emit an unconditional branch to `label`, choosing a narrow (16-bit) branch
+// for backward targets that fit 12 signed bits, otherwise a wide form.
+// Forward targets (dest unknown on the first pass) are assumed large so the
+// instruction size is stable across passes.
+void asm_thumb_b_label(asm_thumb_t *as, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    if (dest != (mp_uint_t)-1 && rel <= -4) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 12 bit relative jump
+        if (SIGNED_FIT12(rel)) {
+            asm_thumb_op16(as, OP_B_N(rel));
+        } else {
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        #if MICROPY_EMIT_THUMB_ARMV7M
+        asm_thumb_op32(as, OP_BW_HI(rel), OP_BW_LO(rel));
+        #else
+        if (SIGNED_FIT12(rel)) {
+            // this code path has to be the same number of instructions irrespective of rel
+            asm_thumb_op16(as, OP_B_N(rel));
+        } else {
+            // keep the size stable with a NOP placeholder
+            asm_thumb_op16(as, ASM_THUMB_OP_NOP);
+            if (dest != (mp_uint_t)-1) {
+                // we have an actual branch > 12 bits; this is not handled yet
+                mp_raise_NotImplementedError(MP_ERROR_TEXT("native method too big"));
+            }
+        }
+        #endif
+    }
+}
+
+// Emit a conditional branch (condition code `cond`) to `label`, choosing a
+// narrow 16-bit form for backward targets that fit 9 signed bits, otherwise
+// a wide form (ARMv7-M) or an inverted-condition skip over a long branch
+// (ARMv6-M).
+void asm_thumb_bcc_label(asm_thumb_t *as, int cond, uint label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    if (dest != (mp_uint_t)-1 && rel <= -4) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 9 bit relative jump
+        if (SIGNED_FIT9(rel)) {
+            asm_thumb_op16(as, OP_BCC_N(cond, rel));
+        } else {
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+        large_jump:
+        #if MICROPY_EMIT_THUMB_ARMV7M
+        asm_thumb_op32(as, OP_BCC_W_HI(cond, rel), OP_BCC_W_LO(rel));
+        #else
+        // reverse the sense of the branch to jump over a longer branch
+        asm_thumb_op16(as, OP_BCC_N(cond ^ 1, 0));
+        asm_thumb_b_label(as, label);
+        #endif
+    }
+}
+
+// Emit a narrow conditional branch to a known byte offset; the adjusted
+// offset must fit in 9 signed bits.
+void asm_thumb_bcc_rel9(asm_thumb_t *as, int cond, int rel) {
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    assert(SIGNED_FIT9(rel));
+    asm_thumb_op16(as, OP_BCC_N(cond, rel));
+}
+
+// Emit a narrow unconditional branch to a known byte offset; the adjusted
+// offset must fit in 12 signed bits.
+void asm_thumb_b_rel12(asm_thumb_t *as, int rel) {
+    rel -= 4; // account for instruction prefetch, PC is 4 bytes ahead of this instruction
+    assert(SIGNED_FIT12(rel));
+    asm_thumb_op16(as, OP_B_N(rel));
+}
+
+#define OP_BLX(reg) (0x4780 | ((reg) << 3)) // BLX Rm
+#define OP_SVC(arg) (0xdf00 | (arg))        // SVC #imm8
+
+// Call the function at index fun_id in the function table (whose pointer
+// lives in ASM_THUMB_REG_FUN_TABLE), clobbering reg_temp.
+void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp) {
+    // Load ptr to function from table, indexed by fun_id, then call it
+    asm_thumb_ldr_reg_reg_i12_optimised(as, reg_temp, ASM_THUMB_REG_FUN_TABLE, fun_id);
+    asm_thumb_op16(as, OP_BLX(reg_temp));
+}
+
+#endif // MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
diff --git a/circuitpython/py/asmthumb.h b/circuitpython/py/asmthumb.h
new file mode 100644
index 0000000..ea48f7e
--- /dev/null
+++ b/circuitpython/py/asmthumb.h
@@ -0,0 +1,427 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMTHUMB_H
+#define MICROPY_INCLUDED_PY_ASMTHUMB_H
+
+#include <assert.h>
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// ARM core register numbers, plus the usual SP/LR aliases.
+#define ASM_THUMB_REG_R0 (0)
+#define ASM_THUMB_REG_R1 (1)
+#define ASM_THUMB_REG_R2 (2)
+#define ASM_THUMB_REG_R3 (3)
+#define ASM_THUMB_REG_R4 (4)
+#define ASM_THUMB_REG_R5 (5)
+#define ASM_THUMB_REG_R6 (6)
+#define ASM_THUMB_REG_R7 (7)
+#define ASM_THUMB_REG_R8 (8)
+#define ASM_THUMB_REG_R9 (9)
+#define ASM_THUMB_REG_R10 (10)
+#define ASM_THUMB_REG_R11 (11)
+#define ASM_THUMB_REG_R12 (12)
+#define ASM_THUMB_REG_R13 (13)
+#define ASM_THUMB_REG_R14 (14)
+#define ASM_THUMB_REG_R15 (15)
+#define ASM_THUMB_REG_SP (ASM_THUMB_REG_R13)
+// BUGFIX: was (REG_R14), an identifier that is defined nowhere, so this macro
+// failed to compile if ever used.
+#define ASM_THUMB_REG_LR (ASM_THUMB_REG_R14)
+
+// Condition codes, using the standard ARM 4-bit encoding.
+#define ASM_THUMB_CC_EQ (0x0)
+#define ASM_THUMB_CC_NE (0x1)
+#define ASM_THUMB_CC_CS (0x2)
+#define ASM_THUMB_CC_CC (0x3)
+#define ASM_THUMB_CC_MI (0x4)
+#define ASM_THUMB_CC_PL (0x5)
+#define ASM_THUMB_CC_VS (0x6)
+#define ASM_THUMB_CC_VC (0x7)
+#define ASM_THUMB_CC_HI (0x8)
+#define ASM_THUMB_CC_LS (0x9)
+#define ASM_THUMB_CC_GE (0xa)
+#define ASM_THUMB_CC_LT (0xb)
+#define ASM_THUMB_CC_GT (0xc)
+#define ASM_THUMB_CC_LE (0xd)
+
+// Per-function Thumb assembler state.
+typedef struct _asm_thumb_t {
+    mp_asm_base_t base;    // common assembler state (passes, labels, code buffer)
+    uint32_t push_reglist; // register list bitmap; presumably pushed/popped by entry/exit - TODO confirm in asmthumb.c
+    uint32_t stack_adjust; // stack space reserved for locals - TODO confirm in asmthumb.c
+} asm_thumb_t;
+
+// Called at the end of each assembler pass; nothing extra to do for Thumb.
+static inline void asm_thumb_end_pass(asm_thumb_t *as) {
+    (void)as;
+}
+
+// Function prologue/epilogue emitters (implemented in asmthumb.c).
+void asm_thumb_entry(asm_thumb_t *as, int num_locals);
+void asm_thumb_exit(asm_thumb_t *as);
+
+// argument order follows ARM, in general dest is first
+// note there is a difference between movw and mov.w, and many others!
+
+// 16-bit IT / ITE opcodes (ARMv7-M if-then blocks), one per condition code.
+#define ASM_THUMB_OP_IT (0xbf00)
+#define ASM_THUMB_OP_ITE_EQ (0xbf0c)
+#define ASM_THUMB_OP_ITE_NE (0xbf14)
+#define ASM_THUMB_OP_ITE_CS (0xbf2c)
+#define ASM_THUMB_OP_ITE_CC (0xbf34)
+#define ASM_THUMB_OP_ITE_MI (0xbf4c)
+#define ASM_THUMB_OP_ITE_PL (0xbf54)
+#define ASM_THUMB_OP_ITE_VS (0xbf6c)
+#define ASM_THUMB_OP_ITE_VC (0xbf74)
+#define ASM_THUMB_OP_ITE_HI (0xbf8c)
+#define ASM_THUMB_OP_ITE_LS (0xbf94)
+#define ASM_THUMB_OP_ITE_GE (0xbfac)
+#define ASM_THUMB_OP_ITE_LT (0xbfb4)
+#define ASM_THUMB_OP_ITE_GT (0xbfcc)
+#define ASM_THUMB_OP_ITE_LE (0xbfd4)
+
+// Miscellaneous 16-bit opcodes.
+#define ASM_THUMB_OP_NOP (0xbf00)
+#define ASM_THUMB_OP_WFI (0xbf30)
+#define ASM_THUMB_OP_CPSID_I (0xb672) // cpsid i, disable irq
+#define ASM_THUMB_OP_CPSIE_I (0xb662) // cpsie i, enable irq
+
+// Emit a single 16-bit opcode, or a 32-bit opcode as two halfwords.
+void asm_thumb_op16(asm_thumb_t *as, uint op);
+void asm_thumb_op32(asm_thumb_t *as, uint op1, uint op2);
+
+// Emit an IT instruction with the given condition and mask bits.
+static inline void asm_thumb_it_cc(asm_thumb_t *as, uint cc, uint mask) {
+    asm_thumb_op16(as, ASM_THUMB_OP_IT | (cc << 4) | mask);
+}
+
+// FORMAT 1: move shifted register
+// LSL/LSR/ASR Rd, Rs, #imm5 - only low registers.
+
+#define ASM_THUMB_FORMAT_1_LSL (0x0000)
+#define ASM_THUMB_FORMAT_1_LSR (0x0800)
+#define ASM_THUMB_FORMAT_1_ASR (0x1000)
+
+#define ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset) \
+    ((op) | ((offset) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_1(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, uint offset) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_1_ENCODE(op, rlo_dest, rlo_src, offset));
+}
+
+// FORMAT 2: add/subtract
+// ADD/SUB Rd, Rs, Rn or ADD/SUB Rd, Rs, #imm3 - only low registers.
+
+#define ASM_THUMB_FORMAT_2_ADD (0x1800)
+#define ASM_THUMB_FORMAT_2_SUB (0x1a00)
+#define ASM_THUMB_FORMAT_2_REG_OPERAND (0x0000)
+#define ASM_THUMB_FORMAT_2_IMM_OPERAND (0x0400)
+
+#define ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b) \
+    ((op) | ((src_b) << 6) | ((rlo_src) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_2(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src, int src_b) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_2_ENCODE(op, rlo_dest, rlo_src, src_b));
+}
+
+// Convenience wrappers for the four format-2 variants.
+static inline void asm_thumb_add_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
+}
+static inline void asm_thumb_add_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_ADD | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
+}
+static inline void asm_thumb_sub_rlo_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, uint rlo_src_b) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_REG_OPERAND, rlo_dest, rlo_src_a, rlo_src_b);
+}
+static inline void asm_thumb_sub_rlo_rlo_i3(asm_thumb_t *as, uint rlo_dest, uint rlo_src_a, int i3_src) {
+    asm_thumb_format_2(as, ASM_THUMB_FORMAT_2_SUB | ASM_THUMB_FORMAT_2_IMM_OPERAND, rlo_dest, rlo_src_a, i3_src);
+}
+
+// FORMAT 3: move/compare/add/subtract immediate
+// These instructions all do zero extension of the i8 value
+
+#define ASM_THUMB_FORMAT_3_MOV (0x2000)
+#define ASM_THUMB_FORMAT_3_CMP (0x2800)
+#define ASM_THUMB_FORMAT_3_ADD (0x3000)
+#define ASM_THUMB_FORMAT_3_SUB (0x3800)
+#define ASM_THUMB_FORMAT_3_LDR (0x4800) // LDR Rd, [PC, #imm8*4]
+
+#define ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8) ((op) | ((rlo) << 8) | (i8))
+
+static inline void asm_thumb_format_3(asm_thumb_t *as, uint op, uint rlo, int i8) {
+    assert(rlo < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_3_ENCODE(op, rlo, i8));
+}
+
+// Convenience wrappers for the format-3 variants.
+static inline void asm_thumb_mov_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_MOV, rlo, i8);
+}
+static inline void asm_thumb_cmp_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_CMP, rlo, i8);
+}
+static inline void asm_thumb_add_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_ADD, rlo, i8);
+}
+static inline void asm_thumb_sub_rlo_i8(asm_thumb_t *as, uint rlo, int i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_SUB, rlo, i8);
+}
+static inline void asm_thumb_ldr_rlo_pcrel_i8(asm_thumb_t *as, uint rlo, uint i8) {
+    asm_thumb_format_3(as, ASM_THUMB_FORMAT_3_LDR, rlo, i8);
+}
+
+// FORMAT 4: ALU operations
+// Two-operand register ALU ops on low registers (Rd = Rd op Rs).
+
+#define ASM_THUMB_FORMAT_4_AND (0x4000)
+#define ASM_THUMB_FORMAT_4_EOR (0x4040)
+#define ASM_THUMB_FORMAT_4_LSL (0x4080)
+#define ASM_THUMB_FORMAT_4_LSR (0x40c0)
+#define ASM_THUMB_FORMAT_4_ASR (0x4100)
+#define ASM_THUMB_FORMAT_4_ADC (0x4140)
+#define ASM_THUMB_FORMAT_4_SBC (0x4180)
+#define ASM_THUMB_FORMAT_4_ROR (0x41c0)
+#define ASM_THUMB_FORMAT_4_TST (0x4200)
+#define ASM_THUMB_FORMAT_4_NEG (0x4240)
+#define ASM_THUMB_FORMAT_4_CMP (0x4280)
+#define ASM_THUMB_FORMAT_4_CMN (0x42c0)
+#define ASM_THUMB_FORMAT_4_ORR (0x4300)
+#define ASM_THUMB_FORMAT_4_MUL (0x4340)
+#define ASM_THUMB_FORMAT_4_BIC (0x4380)
+#define ASM_THUMB_FORMAT_4_MVN (0x43c0)
+
+// Implemented in asmthumb.c.
+void asm_thumb_format_4(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src);
+
+static inline void asm_thumb_cmp_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_CMP, rlo_dest, rlo_src);
+}
+static inline void asm_thumb_mvn_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_MVN, rlo_dest, rlo_src);
+}
+static inline void asm_thumb_neg_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_4(as, ASM_THUMB_FORMAT_4_NEG, rlo_dest, rlo_src);
+}
+
+// FORMAT 5: hi register operations (add, cmp, mov, bx)
+// For add/cmp/mov, at least one of the args must be a high register
+
+#define ASM_THUMB_FORMAT_5_ADD (0x4400)
+#define ASM_THUMB_FORMAT_5_BX (0x4700)
+
+// Bit 7 of the encoding holds bit 3 of r_dest (the "H1" flag).
+#define ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src) \
+    ((op) | ((r_dest) << 4 & 0x0080) | ((r_src) << 3) | ((r_dest) & 0x0007))
+
+static inline void asm_thumb_format_5(asm_thumb_t *as, uint op, uint r_dest, uint r_src) {
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_5_ENCODE(op, r_dest, r_src));
+}
+
+static inline void asm_thumb_add_reg_reg(asm_thumb_t *as, uint r_dest, uint r_src) {
+    asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_ADD, r_dest, r_src);
+}
+static inline void asm_thumb_bx_reg(asm_thumb_t *as, uint r_src) {
+    asm_thumb_format_5(as, ASM_THUMB_FORMAT_5_BX, 0, r_src);
+}
+
+// FORMAT 9: load/store with immediate offset
+// For word transfers the offset must be aligned, and >>2
+
+// FORMAT 10: load/store halfword
+// The offset must be aligned, and >>1
+// The load is zero extended into the register
+
+#define ASM_THUMB_FORMAT_9_STR (0x6000)
+#define ASM_THUMB_FORMAT_9_LDR (0x6800)
+#define ASM_THUMB_FORMAT_9_WORD_TRANSFER (0x0000)
+#define ASM_THUMB_FORMAT_9_BYTE_TRANSFER (0x1000)
+
+#define ASM_THUMB_FORMAT_10_STRH (0x8000)
+#define ASM_THUMB_FORMAT_10_LDRH (0x8800)
+
+#define ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset) \
+    ((op) | (((offset) << 6) & 0x07c0) | ((rlo_base) << 3) | (rlo_dest))
+
+static inline void asm_thumb_format_9_10(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_base, uint offset) {
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_9_10_ENCODE(op, rlo_dest, rlo_base, offset));
+}
+
+// Store word/byte/halfword from rlo_src at rlo_base + offset.
+static inline void asm_thumb_str_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint word_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_src, rlo_base, word_offset);
+}
+static inline void asm_thumb_strb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_src, rlo_base, byte_offset);
+}
+static inline void asm_thumb_strh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_src, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_STRH, rlo_src, rlo_base, byte_offset);
+}
+// Load word/byte/halfword from rlo_base + offset into rlo_dest.
+static inline void asm_thumb_ldr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint word_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER, rlo_dest, rlo_base, word_offset);
+}
+static inline void asm_thumb_ldrb_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER, rlo_dest, rlo_base, byte_offset);
+}
+static inline void asm_thumb_ldrh_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_base, uint byte_offset) {
+    asm_thumb_format_9_10(as, ASM_THUMB_FORMAT_10_LDRH, rlo_dest, rlo_base, byte_offset);
+}
+// Immediate shifts are format 1, grouped here for convenience.
+static inline void asm_thumb_lsl_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
+    asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_LSL, rlo_dest, rlo_src, shift);
+}
+static inline void asm_thumb_asr_rlo_rlo_i5(asm_thumb_t *as, uint rlo_dest, uint rlo_src, uint shift) {
+    asm_thumb_format_1(as, ASM_THUMB_FORMAT_1_ASR, rlo_dest, rlo_src, shift);
+}
+
+// FORMAT 11: sign/zero extend
+
+#define ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src) \
+    ((op) | ((rlo_src) << 3) | (rlo_dest))
+
+#define ASM_THUMB_FORMAT_11_SXTH (0xb200)
+#define ASM_THUMB_FORMAT_11_SXTB (0xb240)
+#define ASM_THUMB_FORMAT_11_UXTH (0xb280)
+#define ASM_THUMB_FORMAT_11_UXTB (0xb2c0)
+
+static inline void asm_thumb_format_11(asm_thumb_t *as, uint op, uint rlo_dest, uint rlo_src) {
+    assert(rlo_dest < ASM_THUMB_REG_R8);
+    assert(rlo_src < ASM_THUMB_REG_R8);
+    asm_thumb_op16(as, ASM_THUMB_FORMAT_11_ENCODE(op, rlo_dest, rlo_src));
+}
+
+// Sign-extend the low halfword of rlo_src into rlo_dest.
+static inline void asm_thumb_sxth_rlo_rlo(asm_thumb_t *as, uint rlo_dest, uint rlo_src) {
+    asm_thumb_format_11(as, ASM_THUMB_FORMAT_11_SXTH, rlo_dest, rlo_src);
+}
+
+// TODO convert these to above format style
+
+// 32-bit MOVW/MOVT opcodes (ARMv7-M), used to load 16-bit immediates.
+#define ASM_THUMB_OP_MOVW (0xf240)
+#define ASM_THUMB_OP_MOVT (0xf2c0)
+
+// Register-to-register move, works with high registers too.
+void asm_thumb_mov_reg_reg(asm_thumb_t *as, uint reg_dest, uint reg_src);
+
+#if MICROPY_EMIT_THUMB_ARMV7M
+// Load a 16-bit immediate via MOVW/MOVT; returns the code offset of the
+// immediate so it can be patched later.
+size_t asm_thumb_mov_reg_i16(asm_thumb_t *as, uint mov_op, uint reg_dest, int i16_src);
+#else
+// ARMv6-M fallback: build a 16-bit immediate in a low register.
+void asm_thumb_mov_rlo_i16(asm_thumb_t *as, uint rlo_dest, int i16_src);
+#endif
+
+// these return true if the destination is in range, false otherwise
+bool asm_thumb_b_n_label(asm_thumb_t *as, uint label);
+bool asm_thumb_bcc_nw_label(asm_thumb_t *as, int cond, uint label, bool wide);
+bool asm_thumb_bl_label(asm_thumb_t *as, uint label);
+
+size_t asm_thumb_mov_reg_i32(asm_thumb_t *as, uint reg_dest, mp_uint_t i32_src); // convenience
+void asm_thumb_mov_reg_i32_optimised(asm_thumb_t *as, uint reg_dest, int i32_src); // convenience
+void asm_thumb_mov_local_reg(asm_thumb_t *as, int local_num_dest, uint rlo_src); // convenience
+void asm_thumb_mov_reg_local(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_local_addr(asm_thumb_t *as, uint rlo_dest, int local_num); // convenience
+void asm_thumb_mov_reg_pcrel(asm_thumb_t *as, uint rlo_dest, uint label);
+
+void asm_thumb_ldr_reg_reg_i12_optimised(asm_thumb_t *as, uint reg_dest, uint reg_base, uint byte_offset); // convenience
+
+void asm_thumb_b_label(asm_thumb_t *as, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bcc_label(asm_thumb_t *as, int cc, uint label); // convenience: picks narrow or wide branch
+void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenience
+void asm_thumb_bcc_rel9(asm_thumb_t *as, int cc, int rel);
+void asm_thumb_b_rel12(asm_thumb_t *as, int rel);
+
+// Holds a pointer to mp_fun_table
+#define ASM_THUMB_REG_FUN_TABLE ASM_THUMB_REG_R7
+
+#if defined(GENERIC_ASM_API) && GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+// Calling convention: first four args in r0-r3 (AAPCS).
+#define REG_RET ASM_THUMB_REG_R0
+#define REG_ARG_1 ASM_THUMB_REG_R0
+#define REG_ARG_2 ASM_THUMB_REG_R1
+#define REG_ARG_3 ASM_THUMB_REG_R2
+#define REG_ARG_4 ASM_THUMB_REG_R3
+// rest of args go on stack
+
+// Caller-saved temporaries and callee-saved locals for the native emitter.
+#define REG_TEMP0 ASM_THUMB_REG_R0
+#define REG_TEMP1 ASM_THUMB_REG_R1
+#define REG_TEMP2 ASM_THUMB_REG_R2
+
+#define REG_LOCAL_1 ASM_THUMB_REG_R4
+#define REG_LOCAL_2 ASM_THUMB_REG_R5
+#define REG_LOCAL_3 ASM_THUMB_REG_R6
+#define REG_LOCAL_NUM (3)
+
+#define REG_FUN_TABLE ASM_THUMB_REG_FUN_TABLE
+
+#define ASM_T asm_thumb_t
+#define ASM_END_PASS asm_thumb_end_pass
+#define ASM_ENTRY asm_thumb_entry
+#define ASM_EXIT asm_thumb_exit
+
+// Branch helpers: compare then conditionally branch.
+#define ASM_JUMP asm_thumb_b_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+    do { \
+        asm_thumb_cmp_rlo_i8(as, reg, 0); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_NE, label); \
+    } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+    do { \
+        asm_thumb_cmp_rlo_rlo(as, reg1, reg2); \
+        asm_thumb_bcc_label(as, ASM_THUMB_CC_EQ, label); \
+    } while (0)
+#define ASM_JUMP_REG(as, reg) asm_thumb_bx_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_thumb_bl_ind(as, idx, ASM_THUMB_REG_R3)
+
+// Moves between registers, locals, and immediates.
+#define ASM_MOV_LOCAL_REG(as, local_num, reg) asm_thumb_mov_local_reg((as), (local_num), (reg))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_thumb_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#if MICROPY_EMIT_THUMB_ARMV7M
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_reg_i16((as), ASM_THUMB_OP_MOVW, (reg_dest), (imm))
+#else
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_thumb_mov_rlo_i16((as), (reg_dest), (imm))
+#endif
+#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_thumb_mov_reg_i32((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_thumb_mov_reg_local((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_thumb_mov_reg_reg((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_thumb_mov_reg_local_addr((as), (reg_dest), (local_num))
+#define ASM_MOV_REG_PCREL(as, rlo_dest, label) asm_thumb_mov_reg_pcrel((as), (rlo_dest), (label))
+
+// Two-operand ALU ops (dest op= src).
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSR, (reg_dest), (reg_shift))
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_AND, (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_thumb_add_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_thumb_sub_rlo_rlo_rlo((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_MUL, (reg_dest), (reg_src))
+
+// Loads/stores; offsets are in words for the *_OFFSET variants.
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_thumb_ldr_reg_reg_i12_optimised((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrb_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_thumb_ldrh_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_thumb_ldr_rlo_rlo_i5((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_thumb_strb_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_thumb_strh_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_thumb_str_rlo_rlo_i5((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMTHUMB_H
diff --git a/circuitpython/py/asmx64.c b/circuitpython/py/asmx64.c
new file mode 100644
index 0000000..62df5c6
--- /dev/null
+++ b/circuitpython/py/asmx64.c
@@ -0,0 +1,634 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X64
+
+#include "py/asmx64.h"
+
+/* all offsets are measured in multiples of 8 bytes */
+#define WORD_SIZE (8)
+
+// x86-64 opcode bytes; the /n and /r annotations are the ModRM reg-field
+// conventions from the Intel SDM, +rq/+rb mean the register is encoded in
+// the low 3 bits of the opcode.
+#define OPCODE_NOP (0x90)
+#define OPCODE_PUSH_R64 (0x50) /* +rq */
+#define OPCODE_PUSH_I64 (0x68)
+#define OPCODE_PUSH_M64 (0xff) /* /6 */
+#define OPCODE_POP_R64 (0x58) /* +rq */
+#define OPCODE_RET (0xc3)
+#define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
+#define OPCODE_MOV_I64_TO_R64 (0xb8) /* +rq */
+#define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
+#define OPCODE_MOV_R64_TO_RM64 (0x89) /* /r */
+#define OPCODE_MOV_RM64_TO_R64 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R64 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R64 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R64 (0x8d) /* /r */
+#define OPCODE_AND_R64_TO_RM64 (0x21) /* /r */
+#define OPCODE_OR_R64_TO_RM64 (0x09) /* /r */
+#define OPCODE_XOR_R64_TO_RM64 (0x31) /* /r */
+#define OPCODE_ADD_R64_TO_RM64 (0x01) /* /r */
+#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32 (0x83) /* /0 */
+#define OPCODE_SUB_R64_FROM_RM64 (0x29)
+#define OPCODE_SUB_I32_FROM_RM64 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM64 (0x83) /* /5 */
+// #define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
+// #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
+// #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
+#define OPCODE_SHL_RM64_CL (0xd3) /* /4 */
+#define OPCODE_SHR_RM64_CL (0xd3) /* /5 */
+#define OPCODE_SAR_RM64_CL (0xd3) /* /7 */
+// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+// #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
+#define OPCODE_CMP_R64_WITH_RM64 (0x39) /* /r */
+// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
+#define OPCODE_TEST_R64_WITH_RM64 (0x85) /* /r */
+#define OPCODE_JMP_REL8 (0xeb)
+#define OPCODE_JMP_REL32 (0xe9)
+#define OPCODE_JMP_RM64 (0xff) /* /4 */
+#define OPCODE_JCC_REL8 (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A (0x0f)
+#define OPCODE_JCC_REL32_B (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A (0x0f)
+#define OPCODE_SETCC_RM8_B (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32 (0xe8)
+#define OPCODE_CALL_RM32 (0xff) /* /2 */
+#define OPCODE_LEAVE (0xc9)
+
+// ModRM byte construction: mod (addressing mode), reg, and rm fields.
+#define MODRM_R64(x) (((x) & 0x7) << 3)
+#define MODRM_RM_DISP0 (0x00)
+#define MODRM_RM_DISP8 (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG (0xc0)
+#define MODRM_RM_R64(x) ((x) & 0x7)
+
+#define OP_SIZE_PREFIX (0x66)
+
+// REX prefix bits; the *_FROM_R64 macros extract the high bit of a 4-bit
+// register number into the corresponding REX field.
+#define REX_PREFIX (0x40)
+#define REX_W (0x08) // width
+#define REX_R (0x04) // register
+#define REX_X (0x02) // index
+#define REX_B (0x01) // base
+#define REX_W_FROM_R64(r64) ((r64) >> 0 & 0x08)
+#define REX_R_FROM_R64(r64) ((r64) >> 1 & 0x04)
+#define REX_X_FROM_R64(r64) ((r64) >> 2 & 0x02)
+#define REX_B_FROM_R64(r64) ((r64) >> 3 & 0x01)
+
+// Little-endian byte extraction for immediates.
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+#define IMM64_L4(x) (((x) >> 32) & 0xff)
+#define IMM64_L5(x) (((x) >> 40) & 0xff)
+#define IMM64_L6(x) (((x) >> 48) & 0xff)
+#define IMM64_L7(x) (((x) >> 56) & 0xff)
+
+#define UNSIGNED_FIT8(x) (((x) & 0xffffffffffffff00) == 0)
+#define UNSIGNED_FIT32(x) (((x) & 0xffffffff00000000) == 0)
+// BUGFIX: the expansion is now fully parenthesised.  The original form
+//     (...) == 0) || (... == 0xffffff80)
+// leaked an un-parenthesised || into the call site, so expressions like
+// !SIGNED_FIT8(x) or SIGNED_FIT8(x) && y would parse incorrectly.
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+
+// Reserve n bytes in the output buffer and return a pointer to write them.
+// Returns NULL on the size-counting pass, in which case the writers below
+// only advance the count and skip the store.
+static inline byte *asm_x64_get_cur_to_write_bytes(asm_x64_t *as, int n) {
+    return mp_asm_base_get_cur_to_write_bytes(&as->base, n);
+}
+
+// Append one raw byte to the instruction stream.
+STATIC void asm_x64_write_byte_1(asm_x64_t *as, byte b1) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 1);
+    if (c != NULL) {
+        c[0] = b1;
+    }
+}
+
+// Append two raw bytes to the instruction stream.
+STATIC void asm_x64_write_byte_2(asm_x64_t *as, byte b1, byte b2) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 2);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+    }
+}
+
+// Append three raw bytes to the instruction stream.
+STATIC void asm_x64_write_byte_3(asm_x64_t *as, byte b1, byte b2, byte b3) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 3);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+        c[2] = b3;
+    }
+}
+
+// Append a 32-bit word in little-endian byte order.
+STATIC void asm_x64_write_word32(asm_x64_t *as, int w32) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 4);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w32);
+        c[1] = IMM32_L1(w32);
+        c[2] = IMM32_L2(w32);
+        c[3] = IMM32_L3(w32);
+    }
+}
+
+// Append a 64-bit word in little-endian byte order.
+STATIC void asm_x64_write_word64(asm_x64_t *as, int64_t w64) {
+    byte *c = asm_x64_get_cur_to_write_bytes(as, 8);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w64);
+        c[1] = IMM32_L1(w64);
+        c[2] = IMM32_L2(w64);
+        c[3] = IMM32_L3(w64);
+        c[4] = IMM64_L4(w64);
+        c[5] = IMM64_L5(w64);
+        c[6] = IMM64_L6(w64);
+        c[7] = IMM64_L7(w64);
+    }
+}
+
+/* unused
+STATIC void asm_x64_write_word32_to(asm_x64_t *as, int offset, int w32) {
+ byte* c;
+ assert(offset + 4 <= as->code_size);
+ c = as->code_base + offset;
+ c[0] = IMM32_L0(w32);
+ c[1] = IMM32_L1(w32);
+ c[2] = IMM32_L2(w32);
+ c[3] = IMM32_L3(w32);
+}
+*/
+
+// Emit a ModRM byte (plus optional SIB byte and displacement) addressing
+// [disp_r64 + disp_offset], with r64 in the ModRM reg field.  Picks the
+// shortest displacement encoding that fits.
+STATIC void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int disp_offset) {
+    uint8_t rm_disp;
+    // RBP/R13 as base cannot use the disp0 form (mod=00, rm=101 means
+    // RIP-relative in 64-bit mode), so they fall through to disp8.
+    if (disp_offset == 0 && (disp_r64 & 7) != ASM_X64_REG_RBP) {
+        rm_disp = MODRM_RM_DISP0;
+    } else if (SIGNED_FIT8(disp_offset)) {
+        rm_disp = MODRM_RM_DISP8;
+    } else {
+        rm_disp = MODRM_RM_DISP32;
+    }
+    asm_x64_write_byte_1(as, MODRM_R64(r64) | rm_disp | MODRM_RM_R64(disp_r64));
+    if ((disp_r64 & 7) == ASM_X64_REG_RSP) {
+        // Special case for rsp and r12, they need a SIB byte
+        asm_x64_write_byte_1(as, 0x24);
+    }
+    if (rm_disp == MODRM_RM_DISP8) {
+        asm_x64_write_byte_1(as, IMM32_L0(disp_offset));
+    } else if (rm_disp == MODRM_RM_DISP32) {
+        asm_x64_write_word32(as, disp_offset);
+    }
+}
+
+// Emit REX.W + opcode + ModRM for a 64-bit register-to-register operation
+// with src in the reg field and dest in the rm field.
+STATIC void asm_x64_generic_r64_r64(asm_x64_t *as, int dest_r64, int src_r64, int op) {
+    asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), op, MODRM_R64(src_r64) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+}
+
+void asm_x64_nop(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_NOP);
+}
+
+// PUSH r64; registers r8-r15 need a REX.B prefix.
+void asm_x64_push_r64(asm_x64_t *as, int src_r64) {
+    if (src_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_PUSH_R64 | src_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_PUSH_R64 | (src_r64 & 7));
+    }
+}
+
+/*
+void asm_x64_push_i32(asm_x64_t *as, int src_i32) {
+    asm_x64_write_byte_1(as, OPCODE_PUSH_I64);
+    asm_x64_write_word32(as, src_i32); // will be sign extended to 64 bits
+}
+*/
+
+/*
+void asm_x64_push_disp(asm_x64_t *as, int src_r64, int src_offset) {
+    assert(src_r64 < 8);
+    asm_x64_write_byte_1(as, OPCODE_PUSH_M64);
+    asm_x64_write_r64_disp(as, 6, src_r64, src_offset);
+}
+*/
+
+// POP r64; registers r8-r15 need a REX.B prefix.
+void asm_x64_pop_r64(asm_x64_t *as, int dest_r64) {
+    if (dest_r64 < 8) {
+        asm_x64_write_byte_1(as, OPCODE_POP_R64 | dest_r64);
+    } else {
+        asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_POP_R64 | (dest_r64 & 7));
+    }
+}
+
+STATIC void asm_x64_ret(asm_x64_t *as) {
+    asm_x64_write_byte_1(as, OPCODE_RET);
+}
+
+// MOV dest_r64, src_r64 (64-bit register move).
+void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_MOV_R64_TO_RM64);
+}
+
+// Emit "mov byte [dest_r64 + dest_disp], src" (8-bit store).  A REX prefix
+// is emitted only when one of the registers is r8-r15.
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+ } else {
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R8_TO_RM8);
+ }
+ asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Emit "mov word [dest_r64 + dest_disp], src" (16-bit store); the 0x66
+// operand-size prefix selects 16-bit operation.
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R64_TO_RM64);
+ } else {
+ asm_x64_write_byte_3(as, OP_SIZE_PREFIX, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+ }
+ asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Emit "mov dword [dest_r64 + dest_disp], src" (32-bit store; 32 bits is
+// the default operand size without a REX.W prefix).
+void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_1(as, OPCODE_MOV_R64_TO_RM64);
+ } else {
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+ }
+ asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Emit "mov qword [dest_r64 + dest_disp], src" (64-bit store).
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp) {
+ // use REX prefix for 64 bit operation
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(src_r64) | REX_B_FROM_R64(dest_r64), OPCODE_MOV_R64_TO_RM64);
+ asm_x64_write_r64_disp(as, src_r64, dest_r64, dest_disp);
+}
+
+// Emit "movzx dest_r64, byte [src_r64 + src_disp]" (8-bit load,
+// zero-extended; two-byte opcode 0x0f prefix).
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+ } else {
+ asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM8_TO_R64);
+ }
+ asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Emit "movzx dest_r64, word [src_r64 + src_disp]" (16-bit load,
+// zero-extended).
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+ } else {
+ asm_x64_write_byte_3(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), 0x0f, OPCODE_MOVZX_RM16_TO_R64);
+ }
+ asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Emit "mov dest_r32, dword [src_r64 + src_disp]"; a 32-bit mov
+// zero-extends into the full 64-bit destination register.
+void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+ if (src_r64 < 8 && dest_r64 < 8) {
+ asm_x64_write_byte_1(as, OPCODE_MOV_RM64_TO_R64);
+ } else {
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
+ }
+ asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Emit "mov dest_r64, qword [src_r64 + src_disp]" (64-bit load).
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+ // use REX prefix for 64 bit operation
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64), OPCODE_MOV_RM64_TO_R64);
+ asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+// Emit "lea dest_r64, [src_r64 + src_disp]".  Only handles the low eight
+// registers (no REX.R/REX.B bits are emitted).
+STATIC void asm_x64_lea_disp_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64) {
+ // use REX prefix for 64 bit operation
+ assert(src_r64 < 8);
+ assert(dest_r64 < 8);
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_W, OPCODE_LEA_MEM_TO_R64);
+ asm_x64_write_r64_disp(as, dest_r64, src_r64, src_disp);
+}
+
+/*
+void asm_x64_mov_i8_to_r8(asm_x64_t *as, int src_i8, int dest_r64) {
+ assert(dest_r64 < 8);
+ asm_x64_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r64, src_i8);
+}
+*/
+
+// Emit "mov dest_r64, imm32" (B8+rd form; the 32-bit immediate is
+// zero-extended to 64 bits by the CPU).  Returns the code offset at which
+// the 32-bit immediate is written, e.g. so it can be fixed up later.
+size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64) {
+ // cpu defaults to i32 to r64, with zero extension
+ if (dest_r64 < 8) {
+ asm_x64_write_byte_1(as, OPCODE_MOV_I64_TO_R64 | dest_r64);
+ } else {
+ asm_x64_write_byte_2(as, REX_PREFIX | REX_B, OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7));
+ }
+ size_t loc = mp_asm_base_get_code_pos(&as->base);
+ asm_x64_write_word32(as, src_i32);
+ return loc;
+}
+
+// Emit the full 10-byte "mov dest_r64, imm64" (REX.W + B8+rd).
+void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+ // cpu defaults to i32 to r64
+ // to mov i64 to r64 need to use REX prefix
+ asm_x64_write_byte_2(as,
+ REX_PREFIX | REX_W | (dest_r64 < 8 ? 0 : REX_B),
+ OPCODE_MOV_I64_TO_R64 | (dest_r64 & 7));
+ asm_x64_write_word64(as, src_i64);
+}
+
+// Emit the shortest mov-immediate: the 5-byte zero-extended 32-bit form
+// when the value fits unsigned in 32 bits, else the 10-byte 64-bit form.
+void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64) {
+ // TODO use movzx, movsx if possible
+ if (UNSIGNED_FIT32(src_i64)) {
+ // 5 bytes
+ asm_x64_mov_i32_to_r64(as, src_i64 & 0xffffffff, dest_r64);
+ } else {
+ // 10 bytes
+ asm_x64_mov_i64_to_r64(as, src_i64, dest_r64);
+ }
+}
+
+// Emit "and dest_r64, src_r64" (64-bit).
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_AND_R64_TO_RM64);
+}
+
+// Emit "or dest_r64, src_r64" (64-bit).
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_OR_R64_TO_RM64);
+}
+
+// Emit "xor dest_r64, src_r64" (64-bit).
+void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_XOR_R64_TO_RM64);
+}
+
+// Shift-by-CL instructions put an opcode extension in the ModR/M reg field
+// (the "src" argument here): /4 = shl, /5 = shr, /7 = sar.  The shift
+// count is taken from CL.
+void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, 4, OPCODE_SHL_RM64_CL);
+}
+
+// Emit "shr dest_r64, cl" (logical shift right).
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, 5, OPCODE_SHR_RM64_CL);
+}
+
+// Emit "sar dest_r64, cl" (arithmetic shift right).
+void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, 7, OPCODE_SAR_RM64_CL);
+}
+
+// Emit "add dest_r64, src_r64" (64-bit).
+void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_ADD_R64_TO_RM64);
+}
+
+// Emit "sub dest_r64, src_r64" (64-bit).
+void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ asm_x64_generic_r64_r64(as, dest_r64, src_r64, OPCODE_SUB_R64_FROM_RM64);
+}
+
+// Emit "imul dest_r64, src_r64" (signed multiply, two-byte opcode).
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64) {
+ // imul reg64, reg/mem64 -- 0x0f 0xaf /r
+ asm_x64_write_byte_1(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64) | REX_B_FROM_R64(src_r64));
+ asm_x64_write_byte_3(as, 0x0f, 0xaf, MODRM_R64(dest_r64) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
+/*
+void asm_x64_sub_i32_from_r32(asm_x64_t *as, int src_i32, int dest_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ // defaults to 32 bit operation
+ asm_x64_write_byte_2(as, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+ asm_x64_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ // defaults to 32 bit operation
+ asm_x64_write_byte_2(as, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r32));
+ asm_x64_write_word32(as, src_i32);
+ }
+}
+*/
+
+// Emit "sub dest_r64, src_i32", using the short sign-extended 8-bit
+// immediate form when the value fits.  Only handles low registers.
+STATIC void asm_x64_sub_r64_i32(asm_x64_t *as, int dest_r64, int src_i32) {
+ assert(dest_r64 < 8);
+ if (SIGNED_FIT8(src_i32)) {
+ // use REX prefix for 64 bit operation
+ asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I8_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+ asm_x64_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ // use REX prefix for 64 bit operation
+ asm_x64_write_byte_3(as, REX_PREFIX | REX_W, OPCODE_SUB_I32_FROM_RM64, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(dest_r64));
+ asm_x64_write_word32(as, src_i32);
+ }
+}
+
+/*
+void asm_x64_shl_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_shr_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R64(5) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+
+void asm_x64_sar_r32_by_imm(asm_x64_t *as, int r32, int imm) {
+ asm_x64_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(r32));
+ asm_x64_write_byte_1(as, imm);
+}
+*/
+
+// Emit "cmp src_r64_a, src_r64_b" (64-bit compare, sets flags only; note
+// that src_r64_a goes in the reg field of the ModR/M byte).
+void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+ asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_CMP_R64_WITH_RM64);
+}
+
+/*
+void asm_x64_cmp_i32_with_r32(asm_x64_t *as, int src_i32, int src_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ asm_x64_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+ asm_x64_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ asm_x64_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R64(7) | MODRM_RM_REG | MODRM_RM_R64(src_r32));
+ asm_x64_write_word32(as, src_i32);
+ }
+}
+*/
+
+// Emit "test a, b" on 8-bit registers (no REX emitted, so low registers only).
+void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+ assert(src_r64_a < 8);
+ assert(src_r64_b < 8);
+ asm_x64_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R64(src_r64_a) | MODRM_RM_REG | MODRM_RM_R64(src_r64_b));
+}
+
+// Emit "test a, b" on full 64-bit registers.
+void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b) {
+ asm_x64_generic_r64_r64(as, src_r64_b, src_r64_a, OPCODE_TEST_R64_WITH_RM64);
+}
+
+// Emit "setcc dest_r8": set the 8-bit register to 0/1 according to the
+// condition code jcc_type.  Low registers only (no REX).
+void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8) {
+ assert(dest_r8 < 8);
+ asm_x64_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R64(0) | MODRM_RM_REG | MODRM_RM_R64(dest_r8));
+}
+
+// Emit an indirect "jmp src_r64" (ff /4).  Low registers only.
+void asm_x64_jmp_reg(asm_x64_t *as, int src_r64) {
+ assert(src_r64 < 8);
+ asm_x64_write_byte_2(as, OPCODE_JMP_RM64, MODRM_R64(4) | MODRM_RM_REG | MODRM_RM_R64(src_r64));
+}
+
+// Return the code offset recorded for `label`; callers treat a value of
+// (mp_uint_t)-1 as "label not bound yet" (i.e. a forward reference on the
+// current pass).
+STATIC mp_uint_t get_label_dest(asm_x64_t *as, mp_uint_t label) {
+ assert(label < as->base.max_num_labels);
+ return as->base.label_offsets[label];
+}
+
+// Emit an unconditional jump to `label`.  A backwards jump (target known)
+// uses the 2-byte rel8 form when the offset fits; otherwise, and for all
+// forwards jumps (dest == -1 on this pass), the 5-byte rel32 form is used
+// so the instruction size is the same on every pass.
+void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label) {
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ if (dest != (mp_uint_t)-1 && rel < 0) {
+ // is a backwards jump, so we know the size of the jump on the first pass
+ // calculate rel assuming 8 bit relative jump
+ rel -= 2;
+ if (SIGNED_FIT8(rel)) {
+ asm_x64_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+ } else {
+ // does not fit in 8 bits: undo the size adjustment and use rel32
+ rel += 2;
+ goto large_jump;
+ }
+ } else {
+ // is a forwards jump, so need to assume it's large
+ large_jump:
+ // rel is measured from the end of the 5-byte jmp instruction
+ rel -= 5;
+ asm_x64_write_byte_1(as, OPCODE_JMP_REL32);
+ asm_x64_write_word32(as, rel);
+ }
+}
+
+// Emit a conditional jump (condition code jcc_type) to `label`.  Same
+// short/long selection as asm_x64_jmp_label, except the long form is the
+// 6-byte two-opcode-byte rel32 encoding.
+void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label) {
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - as->base.code_offset;
+ if (dest != (mp_uint_t)-1 && rel < 0) {
+ // is a backwards jump, so we know the size of the jump on the first pass
+ // calculate rel assuming 8 bit relative jump
+ rel -= 2;
+ if (SIGNED_FIT8(rel)) {
+ asm_x64_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+ } else {
+ // does not fit in 8 bits: undo the size adjustment and use rel32
+ rel += 2;
+ goto large_jump;
+ }
+ } else {
+ // is a forwards jump, so need to assume it's large
+ large_jump:
+ // rel is measured from the end of the 6-byte jcc instruction
+ rel -= 6;
+ asm_x64_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+ asm_x64_write_word32(as, rel);
+ }
+}
+
+// Emit the function prologue: save the callee-save registers used by the
+// emitter (RBP, RBX, R12, R13) and reserve `num_locals` machine words of
+// stack for locals.  The count is stored so asm_x64_exit can undo it.
+void asm_x64_entry(asm_x64_t *as, int num_locals) {
+ assert(num_locals >= 0);
+ asm_x64_push_r64(as, ASM_X64_REG_RBP);
+ asm_x64_push_r64(as, ASM_X64_REG_RBX);
+ asm_x64_push_r64(as, ASM_X64_REG_R12);
+ asm_x64_push_r64(as, ASM_X64_REG_R13);
+ num_locals |= 1; // make it odd so stack is aligned on 16 byte boundary
+ asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, num_locals * WORD_SIZE);
+ as->num_locals = num_locals;
+}
+
+// Emit the function epilogue: release the locals (subtracting a negative
+// amount adds to RSP), restore the callee-save registers in reverse order
+// of the prologue, and return.
+void asm_x64_exit(asm_x64_t *as) {
+ asm_x64_sub_r64_i32(as, ASM_X64_REG_RSP, -as->num_locals * WORD_SIZE);
+ asm_x64_pop_r64(as, ASM_X64_REG_R13);
+ asm_x64_pop_r64(as, ASM_X64_REG_R12);
+ asm_x64_pop_r64(as, ASM_X64_REG_RBX);
+ asm_x64_pop_r64(as, ASM_X64_REG_RBP);
+ asm_x64_ret(as);
+}
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through as->num_locals-1
+// - RSP points to the first local
+//
+// | RSP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+//
+// Byte offset of local `local_num` from RSP (see the layout diagram above).
+STATIC int asm_x64_local_offset_from_rsp(asm_x64_t *as, int local_num) {
+ (void)as;
+ // Stack is full descending, RSP points to local0
+ return local_num * WORD_SIZE;
+}
+
+// Load stack local `src_local_num` into dest_r64.
+void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64) {
+ asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, src_local_num), dest_r64);
+}
+
+// Store src_r64 into stack local `dest_local_num`.
+void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num) {
+ asm_x64_mov_r64_to_mem64(as, src_r64, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, dest_local_num));
+}
+
+// Load the address of stack local `local_num` into dest_r64.
+void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64) {
+ int offset = asm_x64_local_offset_from_rsp(as, local_num);
+ if (offset == 0) {
+ // local 0 lives at RSP itself, so a plain register move suffices
+ asm_x64_mov_r64_r64(as, dest_r64, ASM_X64_REG_RSP);
+ } else {
+ asm_x64_lea_disp_to_r64(as, ASM_X64_REG_RSP, offset, dest_r64);
+ }
+}
+
+// Load the address of `label` into dest_r64 with a RIP-relative lea
+// (mod=00, r/m=101 selects RIP+disp32).  The instruction is 7 bytes, so
+// rel is computed from the end of the instruction (code_offset + 7).
+void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label) {
+ mp_uint_t dest = get_label_dest(as, label);
+ mp_int_t rel = dest - (as->base.code_offset + 7);
+ asm_x64_write_byte_3(as, REX_PREFIX | REX_W | REX_R_FROM_R64(dest_r64), OPCODE_LEA_MEM_TO_R64, MODRM_R64(dest_r64) | MODRM_RM_R64(5));
+ asm_x64_write_word32(as, rel);
+}
+
+/*
+void asm_x64_push_local(asm_x64_t *as, int local_num) {
+ asm_x64_push_disp(as, ASM_X64_REG_RSP, asm_x64_local_offset_from_rsp(as, local_num));
+}
+
+void asm_x64_push_local_addr(asm_x64_t *as, int local_num, int temp_r64) {
+ asm_x64_mov_r64_r64(as, temp_r64, ASM_X64_REG_RSP);
+ asm_x64_add_i32_to_r32(as, asm_x64_local_offset_from_rsp(as, local_num), temp_r64);
+ asm_x64_push_r64(as, temp_r64);
+}
+*/
+
+/*
+ can't use these because code might be relocated when resized
+
+void asm_x64_call(asm_x64_t *as, void* func) {
+ asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+ asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+ asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+ asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+
+void asm_x64_call_i1(asm_x64_t *as, void* func, int i1) {
+ asm_x64_sub_i32_from_r32(as, 8, ASM_X64_REG_RSP);
+ asm_x64_sub_i32_from_r32(as, 12, ASM_X64_REG_RSP);
+ asm_x64_push_i32(as, i1);
+ asm_x64_write_byte_1(as, OPCODE_CALL_REL32);
+ asm_x64_write_word32(as, func - (void*)(as->code_cur + 4));
+ asm_x64_add_i32_to_r32(as, 16, ASM_X64_REG_RSP);
+ asm_x64_mov_r64_r64(as, ASM_X64_REG_RSP, ASM_X64_REG_RBP);
+}
+*/
+
+// Emit an indirect call through entry `fun_id` of the function table:
+// load the 64-bit pointer at [REG_FUN_TABLE + fun_id * WORD_SIZE] into
+// temp_r64, then "call temp_r64" (ff /2).  Clobbers temp_r64 (low
+// registers only).
+void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r64) {
+ assert(temp_r64 < 8);
+ asm_x64_mov_mem64_to_r64(as, ASM_X64_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r64);
+ asm_x64_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R64(2) | MODRM_RM_REG | MODRM_RM_R64(temp_r64));
+}
+
+#endif // MICROPY_EMIT_X64
diff --git a/circuitpython/py/asmx64.h b/circuitpython/py/asmx64.h
new file mode 100644
index 0000000..ec9a088
--- /dev/null
+++ b/circuitpython/py/asmx64.h
@@ -0,0 +1,220 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMX64_H
+#define MICROPY_INCLUDED_PY_ASMX64_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// AMD64 calling convention is:
+// - args pass in: RDI, RSI, RDX, RCX, R08, R09
+// - return value in RAX
+// - stack must be aligned on a 16-byte boundary before all calls
+// - RAX, RCX, RDX, RSI, RDI, R08, R09, R10, R11 are caller-save
+// - RBX, RBP, R12, R13, R14, R15 are callee-save
+
+// In the functions below, argument order follows x86 docs and generally
+// the destination is the first argument.
+// NOTE: this is a change from the old convention used in this file and
+// some functions still use the old (reverse) convention.
+
+#define ASM_X64_REG_RAX (0)
+#define ASM_X64_REG_RCX (1)
+#define ASM_X64_REG_RDX (2)
+#define ASM_X64_REG_RBX (3)
+#define ASM_X64_REG_RSP (4)
+#define ASM_X64_REG_RBP (5)
+#define ASM_X64_REG_RSI (6)
+#define ASM_X64_REG_RDI (7)
+#define ASM_X64_REG_R08 (8)
+#define ASM_X64_REG_R09 (9)
+#define ASM_X64_REG_R10 (10)
+#define ASM_X64_REG_R11 (11)
+#define ASM_X64_REG_R12 (12)
+#define ASM_X64_REG_R13 (13)
+#define ASM_X64_REG_R14 (14)
+#define ASM_X64_REG_R15 (15)
+
+// condition codes, used for jcc and setcc (despite their j-name!)
+#define ASM_X64_CC_JB (0x2) // below, unsigned
+#define ASM_X64_CC_JAE (0x3) // above or equal, unsigned
+#define ASM_X64_CC_JZ (0x4)
+#define ASM_X64_CC_JE (0x4)
+#define ASM_X64_CC_JNZ (0x5)
+#define ASM_X64_CC_JNE (0x5)
+#define ASM_X64_CC_JBE (0x6) // below or equal, unsigned
+#define ASM_X64_CC_JA (0x7) // above, unsigned
+#define ASM_X64_CC_JL (0xc) // less, signed
+#define ASM_X64_CC_JGE (0xd) // greater or equal, signed
+#define ASM_X64_CC_JLE (0xe) // less or equal, signed
+#define ASM_X64_CC_JG (0xf) // greater, signed
+
+// State of the x64 assembler: the generic assembler base plus the number
+// of stack-local words reserved by asm_x64_entry (read back by
+// asm_x64_exit when emitting the epilogue).
+typedef struct _asm_x64_t {
+ mp_asm_base_t base;
+ int num_locals;
+} asm_x64_t;
+
+// Hook called at the end of each assembler pass; nothing to do for x64.
+static inline void asm_x64_end_pass(asm_x64_t *as) {
+ (void)as;
+}
+
+void asm_x64_nop(asm_x64_t *as);
+void asm_x64_push_r64(asm_x64_t *as, int src_r64);
+void asm_x64_pop_r64(asm_x64_t *as, int dest_r64);
+void asm_x64_mov_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+size_t asm_x64_mov_i32_to_r64(asm_x64_t *as, int src_i32, int dest_r64);
+void asm_x64_mov_i64_to_r64(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_i64_to_r64_optimised(asm_x64_t *as, int64_t src_i64, int dest_r64);
+void asm_x64_mov_r8_to_mem8(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r16_to_mem16(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r32_to_mem32(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_r64_to_mem64(asm_x64_t *as, int src_r64, int dest_r64, int dest_disp);
+void asm_x64_mov_mem8_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem16_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem32_to_r64zx(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_mov_mem64_to_r64(asm_x64_t *as, int src_r64, int src_disp, int dest_r64);
+void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_mul_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
+void asm_x64_cmp_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_test_r8_with_r8(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_test_r64_with_r64(asm_x64_t *as, int src_r64_a, int src_r64_b);
+void asm_x64_setcc_r8(asm_x64_t *as, int jcc_type, int dest_r8);
+void asm_x64_jmp_reg(asm_x64_t *as, int src_r64);
+void asm_x64_jmp_label(asm_x64_t *as, mp_uint_t label);
+void asm_x64_jcc_label(asm_x64_t *as, int jcc_type, mp_uint_t label);
+void asm_x64_entry(asm_x64_t *as, int num_locals);
+void asm_x64_exit(asm_x64_t *as);
+void asm_x64_mov_local_to_r64(asm_x64_t *as, int src_local_num, int dest_r64);
+void asm_x64_mov_r64_to_local(asm_x64_t *as, int src_r64, int dest_local_num);
+void asm_x64_mov_local_addr_to_r64(asm_x64_t *as, int local_num, int dest_r64);
+void asm_x64_mov_reg_pcrel(asm_x64_t *as, int dest_r64, mp_uint_t label);
+void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);
+
+// Holds a pointer to mp_fun_table
+#define ASM_X64_REG_FUN_TABLE ASM_X64_REG_RBP
+
+#if defined(GENERIC_ASM_API) && GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (8)
+
+#define REG_RET ASM_X64_REG_RAX
+#define REG_ARG_1 ASM_X64_REG_RDI
+#define REG_ARG_2 ASM_X64_REG_RSI
+#define REG_ARG_3 ASM_X64_REG_RDX
+#define REG_ARG_4 ASM_X64_REG_RCX
+#define REG_ARG_5 ASM_X64_REG_R08
+
+// caller-save
+#define REG_TEMP0 ASM_X64_REG_RAX
+#define REG_TEMP1 ASM_X64_REG_RDI
+#define REG_TEMP2 ASM_X64_REG_RSI
+
+// callee-save
+#define REG_LOCAL_1 ASM_X64_REG_RBX
+#define REG_LOCAL_2 ASM_X64_REG_R12
+#define REG_LOCAL_3 ASM_X64_REG_R13
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_X64_REG_FUN_TABLE
+
+#define ASM_T asm_x64_t
+#define ASM_END_PASS asm_x64_end_pass
+#define ASM_ENTRY asm_x64_entry
+#define ASM_EXIT asm_x64_exit
+
+#define ASM_JUMP asm_x64_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+ do { \
+ if (bool_test) { \
+ asm_x64_test_r8_with_r8((as), (reg), (reg)); \
+ } else { \
+ asm_x64_test_r64_with_r64((as), (reg), (reg)); \
+ } \
+ asm_x64_jcc_label(as, ASM_X64_CC_JZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+ do { \
+ if (bool_test) { \
+ asm_x64_test_r8_with_r8((as), (reg), (reg)); \
+ } else { \
+ asm_x64_test_r64_with_r64((as), (reg), (reg)); \
+ } \
+ asm_x64_jcc_label(as, ASM_X64_CC_JNZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_x64_cmp_r64_with_r64(as, reg1, reg2); \
+ asm_x64_jcc_label(as, ASM_X64_CC_JE, label); \
+ } while (0)
+#define ASM_JUMP_REG(as, reg) asm_x64_jmp_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_x64_call_ind(as, idx, ASM_X64_REG_RAX)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x64_mov_r64_to_local((as), (reg_src), (local_num))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x64_mov_i64_to_r64_optimised((as), (imm), (reg_dest))
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_x64_mov_i32_to_r64((as), (imm), (reg_dest))
+#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_x64_mov_i32_to_r64((as), (imm), (reg_dest))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x64_mov_local_to_r64((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x64_mov_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x64_mov_local_addr_to_r64((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x64_shr_r64_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x64_and_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x64_add_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x64_sub_r64_r64((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x64_mul_r64_r64((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem64_to_r64((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x64_mov_mem64_to_r64((as), (reg_base), 8 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem8_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem16_to_r64zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x64_mov_mem32_to_r64zx((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x64_mov_r64_to_mem64((as), (reg_src), (reg_base), 8 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x64_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x64_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x64_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMX64_H
diff --git a/circuitpython/py/asmx86.c b/circuitpython/py/asmx86.c
new file mode 100644
index 0000000..b44da76
--- /dev/null
+++ b/circuitpython/py/asmx86.c
@@ -0,0 +1,535 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_X86
+
+#include "py/asmx86.h"
+
+/* all offsets are measured in multiples of 4 bytes */
+#define WORD_SIZE (4)
+
+#define OPCODE_NOP (0x90)
+#define OPCODE_PUSH_R32 (0x50)
+// #define OPCODE_PUSH_I32 (0x68)
+// #define OPCODE_PUSH_M32 (0xff) /* /6 */
+#define OPCODE_POP_R32 (0x58)
+#define OPCODE_RET (0xc3)
+// #define OPCODE_MOV_I8_TO_R8 (0xb0) /* +rb */
+#define OPCODE_MOV_I32_TO_R32 (0xb8)
+// #define OPCODE_MOV_I32_TO_RM32 (0xc7)
+#define OPCODE_MOV_R8_TO_RM8 (0x88) /* /r */
+#define OPCODE_MOV_R32_TO_RM32 (0x89) /* /r */
+#define OPCODE_MOV_RM32_TO_R32 (0x8b) /* /r */
+#define OPCODE_MOVZX_RM8_TO_R32 (0xb6) /* 0x0f 0xb6/r */
+#define OPCODE_MOVZX_RM16_TO_R32 (0xb7) /* 0x0f 0xb7/r */
+#define OPCODE_LEA_MEM_TO_R32 (0x8d) /* /r */
+#define OPCODE_AND_R32_TO_RM32 (0x21) /* /r */
+#define OPCODE_OR_R32_TO_RM32 (0x09) /* /r */
+#define OPCODE_XOR_R32_TO_RM32 (0x31) /* /r */
+#define OPCODE_ADD_R32_TO_RM32 (0x01)
+#define OPCODE_ADD_I32_TO_RM32 (0x81) /* /0 */
+#define OPCODE_ADD_I8_TO_RM32 (0x83) /* /0 */
+#define OPCODE_SUB_R32_FROM_RM32 (0x29)
+#define OPCODE_SUB_I32_FROM_RM32 (0x81) /* /5 */
+#define OPCODE_SUB_I8_FROM_RM32 (0x83) /* /5 */
+// #define OPCODE_SHL_RM32_BY_I8 (0xc1) /* /4 */
+// #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
+// #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
+#define OPCODE_SHL_RM32_CL (0xd3) /* /4 */
+#define OPCODE_SHR_RM32_CL (0xd3) /* /5 */
+#define OPCODE_SAR_RM32_CL (0xd3) /* /7 */
+// #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
+// #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
+#define OPCODE_CMP_R32_WITH_RM32 (0x39)
+// #define OPCODE_CMP_RM32_WITH_R32 (0x3b)
+#define OPCODE_TEST_R8_WITH_RM8 (0x84) /* /r */
+#define OPCODE_TEST_R32_WITH_RM32 (0x85) /* /r */
+#define OPCODE_JMP_REL8 (0xeb)
+#define OPCODE_JMP_REL32 (0xe9)
+#define OPCODE_JMP_RM32 (0xff) /* /4 */
+#define OPCODE_JCC_REL8 (0x70) /* | jcc type */
+#define OPCODE_JCC_REL32_A (0x0f)
+#define OPCODE_JCC_REL32_B (0x80) /* | jcc type */
+#define OPCODE_SETCC_RM8_A (0x0f)
+#define OPCODE_SETCC_RM8_B (0x90) /* | jcc type, /0 */
+#define OPCODE_CALL_REL32 (0xe8)
+#define OPCODE_CALL_RM32 (0xff) /* /2 */
+#define OPCODE_LEAVE (0xc9)
+
+#define MODRM_R32(x) ((x) << 3)
+#define MODRM_RM_DISP0 (0x00)
+#define MODRM_RM_DISP8 (0x40)
+#define MODRM_RM_DISP32 (0x80)
+#define MODRM_RM_REG (0xc0)
+#define MODRM_RM_R32(x) (x)
+
+#define OP_SIZE_PREFIX (0x66)
+
+#define IMM32_L0(x) ((x) & 0xff)
+#define IMM32_L1(x) (((x) >> 8) & 0xff)
+#define IMM32_L2(x) (((x) >> 16) & 0xff)
+#define IMM32_L3(x) (((x) >> 24) & 0xff)
+
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80)) // fully parenthesized so e.g. !SIGNED_FIT8(x) parses as intended (matches asmxtensa.c)
+
+// Emit a single byte of machine code; on sizing passes the buffer pointer
+// is NULL and only the code-size counter advances.
+STATIC void asm_x86_write_byte_1(asm_x86_t *as, byte b1) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 1);
+    if (c != NULL) {
+        c[0] = b1;
+    }
+}
+
+// Emit two consecutive bytes of machine code (NULL buffer = sizing pass).
+STATIC void asm_x86_write_byte_2(asm_x86_t *as, byte b1, byte b2) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+    }
+}
+
+// Emit three consecutive bytes of machine code (NULL buffer = sizing pass).
+STATIC void asm_x86_write_byte_3(asm_x86_t *as, byte b1, byte b2, byte b3) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
+    if (c != NULL) {
+        c[0] = b1;
+        c[1] = b2;
+        c[2] = b3;
+    }
+}
+
+// Emit a 32-bit immediate/displacement in little-endian byte order.
+STATIC void asm_x86_write_word32(asm_x86_t *as, int w32) {
+    byte *c = mp_asm_base_get_cur_to_write_bytes(&as->base, 4);
+    if (c != NULL) {
+        c[0] = IMM32_L0(w32);
+        c[1] = IMM32_L1(w32);
+        c[2] = IMM32_L2(w32);
+        c[3] = IMM32_L3(w32);
+    }
+}
+
+// Emit the ModRM byte (plus SIB and displacement, as needed) for a
+// [disp_r32 + disp_offset] memory operand with register field r32.
+// Picks the shortest displacement encoding: none, 8-bit, or 32-bit.
+STATIC void asm_x86_write_r32_disp(asm_x86_t *as, int r32, int disp_r32, int disp_offset) {
+    uint8_t rm_disp;
+    // EBP cannot use the disp0 form: mod=00,rm=101 means "disp32, no base",
+    // so a zero displacement must still be encoded as disp8.
+    if (disp_offset == 0 && disp_r32 != ASM_X86_REG_EBP) {
+        rm_disp = MODRM_RM_DISP0;
+    } else if (SIGNED_FIT8(disp_offset)) {
+        rm_disp = MODRM_RM_DISP8;
+    } else {
+        rm_disp = MODRM_RM_DISP32;
+    }
+    asm_x86_write_byte_1(as, MODRM_R32(r32) | rm_disp | MODRM_RM_R32(disp_r32));
+    if (disp_r32 == ASM_X86_REG_ESP) {
+        // Special case for esp, it needs a SIB byte
+        // (rm=100 selects SIB; 0x24 = base ESP, no index, scale 1)
+        asm_x86_write_byte_1(as, 0x24);
+    }
+    if (rm_disp == MODRM_RM_DISP8) {
+        asm_x86_write_byte_1(as, IMM32_L0(disp_offset));
+    } else if (rm_disp == MODRM_RM_DISP32) {
+        asm_x86_write_word32(as, disp_offset);
+    }
+}
+
+// Emit a generic two-byte "op r/m32, r32" instruction in register-direct
+// form (mod=11): opcode byte then ModRM with src in reg and dest in rm.
+STATIC void asm_x86_generic_r32_r32(asm_x86_t *as, int dest_r32, int src_r32, int op) {
+    asm_x86_write_byte_2(as, op, MODRM_R32(src_r32) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+}
+
+#if 0
+STATIC void asm_x86_nop(asm_x86_t *as) {
+ asm_x86_write_byte_1(as, OPCODE_NOP);
+}
+#endif
+
+STATIC void asm_x86_push_r32(asm_x86_t *as, int src_r32) {
+ asm_x86_write_byte_1(as, OPCODE_PUSH_R32 | src_r32);
+}
+
+#if 0
+void asm_x86_push_i32(asm_x86_t *as, int src_i32) {
+ asm_x86_write_byte_1(as, OPCODE_PUSH_I32);
+ asm_x86_write_word32(as, src_i32);
+}
+
+void asm_x86_push_disp(asm_x86_t *as, int src_r32, int src_offset) {
+ asm_x86_write_byte_1(as, OPCODE_PUSH_M32);
+ asm_x86_write_r32_disp(as, 6, src_r32, src_offset);
+}
+#endif
+
+STATIC void asm_x86_pop_r32(asm_x86_t *as, int dest_r32) {
+ asm_x86_write_byte_1(as, OPCODE_POP_R32 | dest_r32);
+}
+
+STATIC void asm_x86_ret(asm_x86_t *as) {
+ asm_x86_write_byte_1(as, OPCODE_RET);
+}
+
+void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_MOV_R32_TO_RM32);
+}
+
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+ asm_x86_write_byte_1(as, OPCODE_MOV_R8_TO_RM8);
+ asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+ asm_x86_write_byte_2(as, OP_SIZE_PREFIX, OPCODE_MOV_R32_TO_RM32);
+ asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp) {
+ asm_x86_write_byte_1(as, OPCODE_MOV_R32_TO_RM32);
+ asm_x86_write_r32_disp(as, src_r32, dest_r32, dest_disp);
+}
+
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+ asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM8_TO_R32);
+ asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+ asm_x86_write_byte_2(as, 0x0f, OPCODE_MOVZX_RM16_TO_R32);
+ asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+ asm_x86_write_byte_1(as, OPCODE_MOV_RM32_TO_R32);
+ asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+STATIC void asm_x86_lea_disp_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32) {
+ asm_x86_write_byte_1(as, OPCODE_LEA_MEM_TO_R32);
+ asm_x86_write_r32_disp(as, dest_r32, src_r32, src_disp);
+}
+
+#if 0
+void asm_x86_mov_i8_to_r8(asm_x86_t *as, int src_i8, int dest_r32) {
+ asm_x86_write_byte_2(as, OPCODE_MOV_I8_TO_R8 | dest_r32, src_i8);
+}
+#endif
+
+// Load a 32-bit immediate into a register (mov r32, imm32).
+// Returns the code offset of the 4-byte immediate so the caller can
+// patch the constant in a later pass.
+size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32) {
+    asm_x86_write_byte_1(as, OPCODE_MOV_I32_TO_R32 | dest_r32);
+    size_t loc = mp_asm_base_get_code_pos(&as->base);
+    asm_x86_write_word32(as, src_i32);
+    return loc;
+}
+
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_AND_R32_TO_RM32);
+}
+
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_OR_R32_TO_RM32);
+}
+
+void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_XOR_R32_TO_RM32);
+}
+
+void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, 4, OPCODE_SHL_RM32_CL);
+}
+
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, 5, OPCODE_SHR_RM32_CL);
+}
+
+void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, 7, OPCODE_SAR_RM32_CL);
+}
+
+void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_ADD_R32_TO_RM32);
+}
+
+// add dest_r32, imm -- uses the short sign-extended imm8 form (0x83 /0)
+// when the constant fits in 8 bits, else the full imm32 form (0x81 /0).
+STATIC void asm_x86_add_i32_to_r32(asm_x86_t *as, int src_i32, int dest_r32) {
+    if (SIGNED_FIT8(src_i32)) {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I8_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ asm_x86_generic_r32_r32(as, dest_r32, src_r32, OPCODE_SUB_R32_FROM_RM32);
+}
+
+// sub dest_r32, imm -- short sign-extended imm8 form (0x83 /5) when the
+// constant fits in 8 bits, else the full imm32 form (0x81 /5).
+STATIC void asm_x86_sub_r32_i32(asm_x86_t *as, int dest_r32, int src_i32) {
+    if (SIGNED_FIT8(src_i32)) {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I8_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_byte_1(as, src_i32 & 0xff);
+    } else {
+        // defaults to 32 bit operation
+        asm_x86_write_byte_2(as, OPCODE_SUB_I32_FROM_RM32, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+        asm_x86_write_word32(as, src_i32);
+    }
+}
+
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32) {
+ // imul reg32, reg/mem32 -- 0x0f 0xaf /r
+ asm_x86_write_byte_3(as, 0x0f, 0xaf, MODRM_R32(dest_r32) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
+#if 0
+/* shifts not tested */
+void asm_x86_shl_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SHL_RM32_BY_I8, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_shr_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SHR_RM32_BY_I8, MODRM_R32(5) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+
+void asm_x86_sar_r32_by_imm(asm_x86_t *as, int r32, int imm) {
+ asm_x86_write_byte_2(as, OPCODE_SAR_RM32_BY_I8, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(r32));
+ asm_x86_write_byte_1(as, imm);
+}
+#endif
+
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+ asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_CMP_R32_WITH_RM32);
+}
+
+#if 0
+void asm_x86_cmp_i32_with_r32(asm_x86_t *as, int src_i32, int src_r32) {
+ if (SIGNED_FIT8(src_i32)) {
+ asm_x86_write_byte_2(as, OPCODE_CMP_I8_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+ asm_x86_write_byte_1(as, src_i32 & 0xff);
+ } else {
+ asm_x86_write_byte_2(as, OPCODE_CMP_I32_WITH_RM32, MODRM_R32(7) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+ asm_x86_write_word32(as, src_i32);
+ }
+}
+#endif
+
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+ asm_x86_write_byte_2(as, OPCODE_TEST_R8_WITH_RM8, MODRM_R32(src_r32_a) | MODRM_RM_REG | MODRM_RM_R32(src_r32_b));
+}
+
+void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b) {
+ asm_x86_generic_r32_r32(as, src_r32_b, src_r32_a, OPCODE_TEST_R32_WITH_RM32);
+}
+
+// setcc dest_r8 -- set the low byte of the register to 0/1 from the
+// condition code (0x0f 0x90+cc /0, register-direct form).
+// NOTE(review): only EAX..EBX have a low-byte encoding here; presumably
+// callers never pass ESP..EDI -- confirm at call sites.
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8) {
+    asm_x86_write_byte_3(as, OPCODE_SETCC_RM8_A, OPCODE_SETCC_RM8_B | jcc_type, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r8));
+}
+
+void asm_x86_jmp_reg(asm_x86_t *as, int src_r32) {
+ asm_x86_write_byte_2(as, OPCODE_JMP_RM32, MODRM_R32(4) | MODRM_RM_REG | MODRM_RM_R32(src_r32));
+}
+
+// Return the code offset bound to a label, or (mp_uint_t)-1 if the label
+// has not been reached yet on this pass (i.e. a forward reference).
+STATIC mp_uint_t get_label_dest(asm_x86_t *as, mp_uint_t label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Emit an unconditional jump to a label.  Backward jumps (dest already
+// known this pass) use the 2-byte rel8 form when the offset fits;
+// forward jumps must assume the 5-byte rel32 form since the final
+// distance is unknown until a later pass.
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2; // account for the 2-byte jmp rel8 instruction itself
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JMP_REL8, rel & 0xff);
+        } else {
+            rel += 2; // undo rel8 adjustment before re-adjusting for rel32
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 5; // account for the 5-byte jmp rel32 instruction itself
+        asm_x86_write_byte_1(as, OPCODE_JMP_REL32);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Emit a conditional jump (jcc_type = ASM_X86_CC_*) to a label.  Same
+// sizing strategy as asm_x86_jmp_label: 2-byte rel8 for fitting backward
+// jumps, otherwise the 6-byte 0x0f 0x80+cc rel32 form.
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label) {
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    if (dest != (mp_uint_t)-1 && rel < 0) {
+        // is a backwards jump, so we know the size of the jump on the first pass
+        // calculate rel assuming 8 bit relative jump
+        rel -= 2; // account for the 2-byte jcc rel8 instruction itself
+        if (SIGNED_FIT8(rel)) {
+            asm_x86_write_byte_2(as, OPCODE_JCC_REL8 | jcc_type, rel & 0xff);
+        } else {
+            rel += 2; // undo rel8 adjustment before re-adjusting for rel32
+            goto large_jump;
+        }
+    } else {
+        // is a forwards jump, so need to assume it's large
+    large_jump:
+        rel -= 6; // account for the 6-byte jcc rel32 instruction itself
+        asm_x86_write_byte_2(as, OPCODE_JCC_REL32_A, OPCODE_JCC_REL32_B | jcc_type);
+        asm_x86_write_word32(as, rel);
+    }
+}
+
+// Function prologue: save the callee-saved registers (EBP, EBX, ESI, EDI)
+// and reserve stack space for num_locals machine words.
+void asm_x86_entry(asm_x86_t *as, int num_locals) {
+    assert(num_locals >= 0);
+    asm_x86_push_r32(as, ASM_X86_REG_EBP);
+    asm_x86_push_r32(as, ASM_X86_REG_EBX);
+    asm_x86_push_r32(as, ASM_X86_REG_ESI);
+    asm_x86_push_r32(as, ASM_X86_REG_EDI);
+    // Round num_locals up to 3 mod 4: together with the 4 saved registers
+    // and the return address (5 words) the frame is a multiple of 4 words,
+    // keeping ESP on a 16-byte boundary (assumes the caller aligned it).
+    num_locals |= 3;
+    asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, num_locals * WORD_SIZE);
+    as->num_locals = num_locals;
+}
+
+// Function epilogue: release the locals (sub of a negative = add),
+// restore callee-saved registers in reverse order of the prologue, ret.
+void asm_x86_exit(asm_x86_t *as) {
+    asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, -as->num_locals * WORD_SIZE);
+    asm_x86_pop_r32(as, ASM_X86_REG_EDI);
+    asm_x86_pop_r32(as, ASM_X86_REG_ESI);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBX);
+    asm_x86_pop_r32(as, ASM_X86_REG_EBP);
+    asm_x86_ret(as);
+}
+
+// Byte offset from ESP of incoming argument arg_num (0-based).
+STATIC int asm_x86_arg_offset_from_esp(asm_x86_t *as, size_t arg_num) {
+    // Above esp are: locals, 4 saved registers, return eip, arguments
+    return (as->num_locals + 4 + 1 + arg_num) * WORD_SIZE;
+}
+
+#if 0
+void asm_x86_push_arg(asm_x86_t *as, int src_arg_num) {
+ asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num));
+}
+#endif
+
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32) {
+ asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, src_arg_num), dest_r32);
+}
+
+#if 0
+void asm_x86_mov_r32_to_arg(asm_x86_t *as, int src_r32, int dest_arg_num) {
+ asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_arg_offset_from_esp(as, dest_arg_num));
+}
+#endif
+
+// locals:
+// - stored on the stack in ascending order
+// - numbered 0 through as->num_locals-1
+// - ESP points to the first local
+//
+// | ESP
+// v
+// l0 l1 l2 ... l(n-1)
+// ^ ^
+// | low address | high address in RAM
+//
+STATIC int asm_x86_local_offset_from_esp(asm_x86_t *as, int local_num) {
+ (void)as;
+ // Stack is full descending, ESP points to local0
+ return local_num * WORD_SIZE;
+}
+
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32) {
+ asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, src_local_num), dest_r32);
+}
+
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num) {
+ asm_x86_mov_r32_to_mem32(as, src_r32, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, dest_local_num));
+}
+
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32) {
+ int offset = asm_x86_local_offset_from_esp(as, local_num);
+ if (offset == 0) {
+ asm_x86_mov_r32_r32(as, dest_r32, ASM_X86_REG_ESP);
+ } else {
+ asm_x86_lea_disp_to_r32(as, ASM_X86_REG_ESP, offset, dest_r32);
+ }
+}
+
+// Load the absolute runtime address of a label into dest_r32 without
+// relocations: "call +0" pushes the address of the next instruction
+// (the pop), pop retrieves it, and a rel32 add turns it into the label's
+// address.  rel is taken relative to code_offset at the pop, which is
+// exactly the address the call pushed.
+void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r32, mp_uint_t label) {
+    asm_x86_write_byte_1(as, OPCODE_CALL_REL32);
+    asm_x86_write_word32(as, 0);
+    mp_uint_t dest = get_label_dest(as, label);
+    mp_int_t rel = dest - as->base.code_offset;
+    asm_x86_pop_r32(as, dest_r32);
+    // PC rel is usually a forward reference, so need to assume it's large
+    asm_x86_write_byte_2(as, OPCODE_ADD_I32_TO_RM32, MODRM_R32(0) | MODRM_RM_REG | MODRM_RM_R32(dest_r32));
+    asm_x86_write_word32(as, rel);
+}
+
+#if 0
+void asm_x86_push_local(asm_x86_t *as, int local_num) {
+ asm_x86_push_disp(as, ASM_X86_REG_ESP, asm_x86_local_offset_from_esp(as, local_num));
+}
+
+void asm_x86_push_local_addr(asm_x86_t *as, int local_num, int temp_r32) {
+ asm_x86_mov_r32_r32(as, temp_r32, ASM_X86_REG_ESP);
+ asm_x86_add_i32_to_r32(as, asm_x86_local_offset_from_esp(as, local_num), temp_r32);
+ asm_x86_push_r32(as, temp_r32);
+}
+#endif
+
+// Call function fun_id (an index into the table pointed to by
+// ASM_X86_REG_FUN_TABLE) with up to 4 arguments currently held in
+// ASM_X86_REG_ARG_1..4, following the cdecl convention: args pushed in
+// reverse order, caller cleans up.  temp_r32 is clobbered with the
+// function pointer; the return value is left in EAX by the callee.
+void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32) {
+    assert(n_args <= 4);
+
+    // Align stack on 16-byte boundary during the call
+    // (pad so args + padding is a multiple of 4 words)
+    unsigned int align = ((n_args + 3) & ~3) - n_args;
+    if (align) {
+        asm_x86_sub_r32_i32(as, ASM_X86_REG_ESP, align * WORD_SIZE);
+    }
+
+    // Push args last-to-first so ARG_1 ends up on top of the stack
+    if (n_args > 3) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_4);
+    }
+    if (n_args > 2) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_3);
+    }
+    if (n_args > 1) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_2);
+    }
+    if (n_args > 0) {
+        asm_x86_push_r32(as, ASM_X86_REG_ARG_1);
+    }
+
+    // Load the pointer to the function and make the call
+    asm_x86_mov_mem32_to_r32(as, ASM_X86_REG_FUN_TABLE, fun_id * WORD_SIZE, temp_r32);
+    asm_x86_write_byte_2(as, OPCODE_CALL_RM32, MODRM_R32(2) | MODRM_RM_REG | MODRM_RM_R32(temp_r32));
+
+    // the caller must clean up the stack
+    // (n_args == 0 implies align == 0, so nothing to pop in that case)
+    if (n_args > 0) {
+        asm_x86_add_i32_to_r32(as, (n_args + align) * WORD_SIZE, ASM_X86_REG_ESP);
+    }
+}
+
+#endif // MICROPY_EMIT_X86
diff --git a/circuitpython/py/asmx86.h b/circuitpython/py/asmx86.h
new file mode 100644
index 0000000..a8fb4e3
--- /dev/null
+++ b/circuitpython/py/asmx86.h
@@ -0,0 +1,215 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMX86_H
+#define MICROPY_INCLUDED_PY_ASMX86_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// x86 cdecl calling convention is:
+// - args passed on the stack in reverse order
+// - return value in EAX
+// - caller cleans up the stack after a call
+// - stack must be aligned to 16-byte boundary before all calls
+// - EAX, ECX, EDX are caller-save
+// - EBX, ESI, EDI, EBP, ESP, EIP are callee-save
+
+// In the functions below, argument order follows x86 docs and generally
+// the destination is the first argument.
+// NOTE: this is a change from the old convention used in this file and
+// some functions still use the old (reverse) convention.
+
+#define ASM_X86_REG_EAX (0)
+#define ASM_X86_REG_ECX (1)
+#define ASM_X86_REG_EDX (2)
+#define ASM_X86_REG_EBX (3)
+#define ASM_X86_REG_ESP (4)
+#define ASM_X86_REG_EBP (5)
+#define ASM_X86_REG_ESI (6)
+#define ASM_X86_REG_EDI (7)
+
+// x86 passes values on the stack, but the emitter is register based, so we need
+// to define registers that can temporarily hold the function arguments. They
+// need to be defined here so that asm_x86_call_ind can push them onto the stack
+// before the call.
+#define ASM_X86_REG_ARG_1 ASM_X86_REG_EAX
+#define ASM_X86_REG_ARG_2 ASM_X86_REG_ECX
+#define ASM_X86_REG_ARG_3 ASM_X86_REG_EDX
+#define ASM_X86_REG_ARG_4 ASM_X86_REG_EBX
+
+// condition codes, used for jcc and setcc (despite their j-name!)
+#define ASM_X86_CC_JB (0x2) // below, unsigned
+#define ASM_X86_CC_JAE (0x3) // above or equal, unsigned
+#define ASM_X86_CC_JZ (0x4)
+#define ASM_X86_CC_JE (0x4)
+#define ASM_X86_CC_JNZ (0x5)
+#define ASM_X86_CC_JNE (0x5)
+#define ASM_X86_CC_JBE (0x6) // below or equal, unsigned
+#define ASM_X86_CC_JA (0x7) // above, unsigned
+#define ASM_X86_CC_JL (0xc) // less, signed
+#define ASM_X86_CC_JGE (0xd) // greater or equal, signed
+#define ASM_X86_CC_JLE (0xe) // less or equal, signed
+#define ASM_X86_CC_JG (0xf) // greater, signed
+
+typedef struct _asm_x86_t {
+ mp_asm_base_t base;
+ int num_locals;
+} asm_x86_t;
+
+static inline void asm_x86_end_pass(asm_x86_t *as) {
+ (void)as;
+}
+
+void asm_x86_mov_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+size_t asm_x86_mov_i32_to_r32(asm_x86_t *as, int32_t src_i32, int dest_r32);
+void asm_x86_mov_r8_to_mem8(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r16_to_mem16(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_r32_to_mem32(asm_x86_t *as, int src_r32, int dest_r32, int dest_disp);
+void asm_x86_mov_mem8_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem16_to_r32zx(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_mov_mem32_to_r32(asm_x86_t *as, int src_r32, int src_disp, int dest_r32);
+void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_mul_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
+void asm_x86_cmp_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_test_r8_with_r8(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_test_r32_with_r32(asm_x86_t *as, int src_r32_a, int src_r32_b);
+void asm_x86_setcc_r8(asm_x86_t *as, mp_uint_t jcc_type, int dest_r8);
+void asm_x86_jmp_reg(asm_x86_t *as, int src_r32);
+void asm_x86_jmp_label(asm_x86_t *as, mp_uint_t label);
+void asm_x86_jcc_label(asm_x86_t *as, mp_uint_t jcc_type, mp_uint_t label);
+void asm_x86_entry(asm_x86_t *as, int num_locals);
+void asm_x86_exit(asm_x86_t *as);
+void asm_x86_mov_arg_to_r32(asm_x86_t *as, int src_arg_num, int dest_r32);
+void asm_x86_mov_local_to_r32(asm_x86_t *as, int src_local_num, int dest_r32);
+void asm_x86_mov_r32_to_local(asm_x86_t *as, int src_r32, int dest_local_num);
+void asm_x86_mov_local_addr_to_r32(asm_x86_t *as, int local_num, int dest_r32);
+void asm_x86_mov_reg_pcrel(asm_x86_t *as, int dest_r32, mp_uint_t label);
+void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r32);
+
+// Holds a pointer to mp_fun_table
+#define ASM_X86_REG_FUN_TABLE ASM_X86_REG_EBP
+
+#if defined(GENERIC_ASM_API) && GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#define REG_RET ASM_X86_REG_EAX
+#define REG_ARG_1 ASM_X86_REG_ARG_1
+#define REG_ARG_2 ASM_X86_REG_ARG_2
+#define REG_ARG_3 ASM_X86_REG_ARG_3
+#define REG_ARG_4 ASM_X86_REG_ARG_4
+
+// caller-save, so can be used as temporaries
+#define REG_TEMP0 ASM_X86_REG_EAX
+#define REG_TEMP1 ASM_X86_REG_ECX
+#define REG_TEMP2 ASM_X86_REG_EDX
+
+// callee-save, so can be used as locals
+#define REG_LOCAL_1 ASM_X86_REG_EBX
+#define REG_LOCAL_2 ASM_X86_REG_ESI
+#define REG_LOCAL_3 ASM_X86_REG_EDI
+#define REG_LOCAL_NUM (3)
+
+// Holds a pointer to mp_fun_table
+#define REG_FUN_TABLE ASM_X86_REG_FUN_TABLE
+
+#define ASM_T asm_x86_t
+#define ASM_END_PASS asm_x86_end_pass
+#define ASM_ENTRY asm_x86_entry
+#define ASM_EXIT asm_x86_exit
+
+#define ASM_JUMP asm_x86_jmp_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+ do { \
+ if (bool_test) { \
+ asm_x86_test_r8_with_r8(as, reg, reg); \
+ } else { \
+ asm_x86_test_r32_with_r32(as, reg, reg); \
+ } \
+ asm_x86_jcc_label(as, ASM_X86_CC_JZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+ do { \
+ if (bool_test) { \
+ asm_x86_test_r8_with_r8(as, reg, reg); \
+ } else { \
+ asm_x86_test_r32_with_r32(as, reg, reg); \
+ } \
+ asm_x86_jcc_label(as, ASM_X86_CC_JNZ, label); \
+ } while (0)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ do { \
+ asm_x86_cmp_r32_with_r32(as, reg1, reg2); \
+ asm_x86_jcc_label(as, ASM_X86_CC_JE, label); \
+ } while (0)
+#define ASM_JUMP_REG(as, reg) asm_x86_jmp_reg((as), (reg))
+#define ASM_CALL_IND(as, idx) asm_x86_call_ind(as, idx, mp_f_n_args[idx], ASM_X86_REG_EAX)
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_x86_mov_r32_to_local((as), (reg_src), (local_num))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest))
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest))
+#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_x86_mov_i32_to_r32((as), (imm), (reg_dest))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_x86_mov_local_to_r32((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_x86_mov_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_x86_mov_local_addr_to_r32((as), (local_num), (reg_dest))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x86_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x86_shr_r32_cl((as), (reg))
+#define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_x86_and_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_x86_add_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_x86_sub_r32_r32((as), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_x86_mul_r32_r32((as), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_x86_mov_mem32_to_r32((as), (reg_base), 4 * (word_offset), (reg_dest))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem8_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem16_to_r32zx((as), (reg_base), 0, (reg_dest))
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_x86_mov_mem32_to_r32((as), (reg_base), 0, (reg_dest))
+
+#define ASM_STORE_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+#define ASM_STORE_REG_REG_OFFSET(as, reg_src, reg_base, word_offset) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 4 * (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_x86_mov_r8_to_mem8((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_x86_mov_r16_to_mem16((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_x86_mov_r32_to_mem32((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMX86_H
diff --git a/circuitpython/py/asmxtensa.c b/circuitpython/py/asmxtensa.c
new file mode 100644
index 0000000..fa53e83
--- /dev/null
+++ b/circuitpython/py/asmxtensa.c
@@ -0,0 +1,253 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+
+// wrapper around everything in this file
+#if MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_XTENSAWIN
+
+#include "py/asmxtensa.h"
+
+#define WORD_SIZE (4)
+#define SIGNED_FIT8(x) ((((x) & 0xffffff80) == 0) || (((x) & 0xffffff80) == 0xffffff80))
+#define SIGNED_FIT12(x) ((((x) & 0xfffff800) == 0) || (((x) & 0xfffff800) == 0xfffff800))
+
+// Called at the end of each assembler pass: remember how many constants were
+// emitted so the *next* pass can reserve the constant table up front, and
+// reset the running index for that next pass.
+void asm_xtensa_end_pass(asm_xtensa_t *as) {
+    as->num_const = as->cur_const;
+    as->cur_const = 0;
+
+    #if 0
+    // make a hex dump of the machine code
+    if (as->base.pass == MP_ASM_PASS_EMIT) {
+        uint8_t *d = as->base.code_base;
+        printf("XTENSA ASM:");
+        for (int i = 0; i < ((as->base.code_size + 15) & ~15); ++i) {
+            if (i % 16 == 0) {
+                printf("\n%08x:", (uint32_t)&d[i]);
+            }
+            if (i % 2 == 0) {
+                printf(" ");
+            }
+            printf("%02x", d[i]);
+        }
+        printf("\n");
+    }
+    #endif
+}
+
+// Emit the non-windowed function prologue.  Code layout produced here:
+// a J over the constant table, one alignment byte, then num_const 32-bit
+// constants (filled in by asm_xtensa_mov_reg_i32 and loaded with L32R).
+void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) {
+    // jump over the constants
+    asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
+    as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+
+    // adjust the stack-pointer to store a0, a12, a13, a14, a15 and locals, 16-byte aligned
+    as->stack_adjust = (((ASM_XTENSA_NUM_REGS_SAVED + num_locals) * WORD_SIZE) + 15) & ~15;
+    if (SIGNED_FIT8(-as->stack_adjust)) {
+        // adjustment fits in ADDI's signed 8-bit immediate
+        asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust);
+    } else {
+        // too large for an immediate: materialise it in scratch a9 and subtract
+        asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust);
+        asm_xtensa_op_sub(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9);
+    }
+
+    // save return address (a0) and callee-save registers (a12, a13, a14, a15)
+    asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+    for (int i = 1; i < ASM_XTENSA_NUM_REGS_SAVED; ++i) {
+        asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i);
+    }
+}
+
+// Emit the non-windowed function epilogue: mirror of asm_xtensa_entry.
+void asm_xtensa_exit(asm_xtensa_t *as) {
+    // restore callee-save registers (a12-a15), then the return address (a0)
+    for (int i = ASM_XTENSA_NUM_REGS_SAVED - 1; i >= 1; --i) {
+        asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A11 + i, ASM_XTENSA_REG_A1, i);
+    }
+    asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+
+    // restore stack-pointer and return
+    if (SIGNED_FIT8(as->stack_adjust)) {
+        asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust);
+    } else {
+        // adjustment too large for ADDI: go via scratch a9 (a0 must stay intact here)
+        asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust);
+        asm_xtensa_op_add_n(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9);
+    }
+
+    asm_xtensa_op_ret_n(as);
+}
+
+// Emit the windowed-ABI function prologue: same constant-table preamble as
+// asm_xtensa_entry, but the frame is allocated with the ENTRY instruction.
+void asm_xtensa_entry_win(asm_xtensa_t *as, int num_locals) {
+    // jump over the constants
+    asm_xtensa_op_j(as, as->num_const * WORD_SIZE + 4 - 4);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, 1); // padding/alignment byte
+    as->const_table = (uint32_t *)mp_asm_base_get_cur_to_write_bytes(&as->base, as->num_const * 4);
+
+    // frame: saved regs + locals, 16-byte aligned, plus 32 extra bytes
+    // (NOTE(review): presumably the register-window spill area — confirm vs Xtensa ABI)
+    as->stack_adjust = 32 + ((((ASM_XTENSA_NUM_REGS_SAVED_WIN + num_locals) * WORD_SIZE) + 15) & ~15);
+    asm_xtensa_op_entry(as, ASM_XTENSA_REG_A1, as->stack_adjust);
+    asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+}
+
+// Emit the windowed-ABI epilogue: reload a0 saved by asm_xtensa_entry_win,
+// then RETW.N (the frame itself is undone by the window rotation).
+void asm_xtensa_exit_win(asm_xtensa_t *as) {
+    asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0);
+    asm_xtensa_op_retw_n(as);
+}
+
+// Return the byte offset (within the code buffer) recorded for `label`;
+// may be an undefined value on early passes, before the label is bound.
+STATIC uint32_t get_label_dest(asm_xtensa_t *as, uint label) {
+    assert(label < as->base.max_num_labels);
+    return as->base.label_offsets[label];
+}
+
+// Append a 16-bit (narrow) instruction to the code buffer, least-significant
+// byte first.  On counting passes the write pointer is NULL and only the
+// size is accounted for.
+void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op) {
+    uint8_t *dest = mp_asm_base_get_cur_to_write_bytes(&as->base, 2);
+    if (dest == NULL) {
+        return;
+    }
+    dest[0] = (uint8_t)op;
+    dest[1] = (uint8_t)(op >> 8);
+}
+
+// Append a 24-bit instruction to the code buffer in little-endian byte
+// order (NULL write pointer means a counting pass: just reserve space).
+void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op) {
+    uint8_t *dest = mp_asm_base_get_cur_to_write_bytes(&as->base, 3);
+    if (dest != NULL) {
+        for (size_t i = 0; i < 3; ++i) {
+            dest[i] = (uint8_t)(op >> (8 * i));
+        }
+    }
+}
+
+// Emit an unconditional jump to `label`; the J offset is relative to the
+// address of the instruction plus 4.
+void asm_xtensa_j_label(asm_xtensa_t *as, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    // we assume rel, as a signed int, fits in 18-bits
+    asm_xtensa_op_j(as, rel);
+}
+
+// Emit a branch-if-zero/non-zero (cond is ASM_XTENSA_CCZ_*) on `reg` to
+// `label`.  The 12-bit range is only checked on the final (emit) pass,
+// when label offsets are known to be correct.
+void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT12(rel)) {
+        printf("ERROR: xtensa bccz out of range\n");
+    }
+    asm_xtensa_op_bccz(as, cond, reg, rel);
+}
+
+// Emit a two-register conditional branch (cond is ASM_XTENSA_CC_*) to
+// `label`.  The 8-bit range is only checked on the final (emit) pass.
+void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label) {
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset - 4;
+    if (as->base.pass == MP_ASM_PASS_EMIT && !SIGNED_FIT8(rel)) {
+        printf("ERROR: xtensa bcc out of range\n");
+    }
+    asm_xtensa_op_bcc(as, cond, reg1, reg2, rel);
+}
+
+// Materialise a boolean: reg_dest = (reg_src1 <cond> reg_src2) ? 1 : 0.
+// Works by preloading 1, then branching over the "movi 0" (rel offset 1
+// skips exactly that one 2-byte instruction) when the condition holds.
+// convenience function; reg_dest must be different from reg_src[12]
+void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2) {
+    asm_xtensa_op_movi_n(as, reg_dest, 1);
+    asm_xtensa_op_bcc(as, cond, reg_src1, reg_src2, 1);
+    asm_xtensa_op_movi_n(as, reg_dest, 0);
+}
+
+// Load the full 32-bit value `i32` into reg_dest via an L32R from this
+// function's constant table.  Returns the byte offset of the table slot
+// used (presumably so callers can patch the constant later — see the
+// ASM_MOV_REG_IMM_FIX_* macros in asmxtensa.h).
+size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
+    // load the constant
+    uint32_t const_table_offset = (uint8_t *)as->const_table - as->base.code_base;
+    size_t loc = const_table_offset + as->cur_const * WORD_SIZE;
+    asm_xtensa_op_l32r(as, reg_dest, as->base.code_offset, loc);
+    // store the constant in the table
+    if (as->const_table != NULL) {
+        as->const_table[as->cur_const] = i32;
+    }
+    ++as->cur_const;
+    return loc;
+}
+
+// Load `i32` into reg_dest, preferring a single MOVI when the value fits
+// the signed 12-bit immediate; otherwise fall back to the constant table.
+void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32) {
+    if (!SIGNED_FIT12(i32)) {
+        asm_xtensa_mov_reg_i32(as, reg_dest, i32);
+    } else {
+        asm_xtensa_op_movi(as, reg_dest, i32);
+    }
+}
+
+// Store reg_src into stack local `local_num` (S32I word offset from a1).
+void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src) {
+    asm_xtensa_op_s32i(as, reg_src, ASM_XTENSA_REG_A1, local_num);
+}
+
+// Load stack local `local_num` into reg_dest (L32I word offset from a1).
+void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num) {
+    asm_xtensa_op_l32i(as, reg_dest, ASM_XTENSA_REG_A1, local_num);
+}
+
+// Compute the address of stack local `local_num` into reg_dest,
+// i.e. reg_dest = a1 + byte offset of the local.
+void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num) {
+    uint byte_off = local_num * WORD_SIZE;
+    if (SIGNED_FIT8(byte_off)) {
+        // offset fits ADDI's signed 8-bit immediate
+        asm_xtensa_op_addi(as, reg_dest, ASM_XTENSA_REG_A1, byte_off);
+        return;
+    }
+    // large offset: materialise it first, then add the stack pointer
+    asm_xtensa_op_movi(as, reg_dest, byte_off);
+    asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A1);
+}
+
+// Load the absolute address of `label` into reg_dest by adding a known
+// PC (obtained via CALL0) to a PC-relative offset.  NOTE: clobbers a0,
+// since CALL0 writes its return address there.
+void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label) {
+    // Get relative offset from PC
+    uint32_t dest = get_label_dest(as, label);
+    int32_t rel = dest - as->base.code_offset;
+    rel -= 3 + 3; // account for 3 bytes of movi instruction, 3 bytes call0 adjustment
+    asm_xtensa_op_movi(as, reg_dest, rel); // imm has 12-bit range
+
+    // Use call0 to get PC+3 into a0
+    // call0 destination must be aligned on 4 bytes:
+    // - code_offset&3=0: off=0, pad=1
+    // - code_offset&3=1: off=0, pad=0
+    // - code_offset&3=2: off=1, pad=3
+    // - code_offset&3=3: off=1, pad=2
+    uint32_t off = as->base.code_offset >> 1 & 1;
+    uint32_t pad = (5 - as->base.code_offset) & 3;
+    asm_xtensa_op_call0(as, off);
+    mp_asm_base_get_cur_to_write_bytes(&as->base, pad); // alignment padding per table above
+
+    // Add PC to relative offset
+    asm_xtensa_op_add_n(as, reg_dest, reg_dest, ASM_XTENSA_REG_A0);
+}
+
+// Indirect call through the function table (non-windowed ABI): load
+// fun_table[idx] into a0 and CALLX0 through it.  The narrow L32I.N can
+// only encode word offsets 0..15, so larger indices use the wide L32I.
+void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx) {
+    if (idx >= 16) {
+        asm_xtensa_op_l32i(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_FUN_TABLE, idx);
+    } else {
+        asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_FUN_TABLE, idx);
+    }
+    asm_xtensa_op_callx0(as, ASM_XTENSA_REG_A0);
+}
+
+// Indirect call through the function table (windowed ABI): load
+// fun_table[idx] into a8 and CALLX8 through it.  As with the non-windowed
+// variant, indices above 15 don't fit the narrow L32I.N encoding.
+void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx) {
+    if (idx >= 16) {
+        asm_xtensa_op_l32i(as, ASM_XTENSA_REG_A8, ASM_XTENSA_REG_FUN_TABLE_WIN, idx);
+    } else {
+        asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A8, ASM_XTENSA_REG_FUN_TABLE_WIN, idx);
+    }
+    asm_xtensa_op_callx8(as, ASM_XTENSA_REG_A8);
+}
+
+#endif // MICROPY_EMIT_XTENSA || MICROPY_EMIT_INLINE_XTENSA || MICROPY_EMIT_XTENSAWIN
diff --git a/circuitpython/py/asmxtensa.h b/circuitpython/py/asmxtensa.h
new file mode 100644
index 0000000..529bfa5
--- /dev/null
+++ b/circuitpython/py/asmxtensa.h
@@ -0,0 +1,408 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_ASMXTENSA_H
+#define MICROPY_INCLUDED_PY_ASMXTENSA_H
+
+#include "py/misc.h"
+#include "py/asmbase.h"
+
+// calling conventions:
+// up to 6 args in a2-a7
+// return value in a2
+// PC stored in a0
+// stack pointer is a1, stack full descending, is aligned to 16 bytes
+// callee save: a1, a12, a13, a14, a15
+// caller save: a3
+
+// With windowed registers, size 8:
+// - a0: return PC
+// - a1: stack pointer, full descending, aligned to 16 bytes
+// - a2-a7: incoming args, and essentially callee save
+// - a2: return value
+// - a8-a15: caller save temporaries
+// - a10-a15: input args to called function
+// - a10: return value of called function
+// note: a0-a7 are saved automatically via window shift of called function
+
+#define ASM_XTENSA_REG_A0 (0)
+#define ASM_XTENSA_REG_A1 (1)
+#define ASM_XTENSA_REG_A2 (2)
+#define ASM_XTENSA_REG_A3 (3)
+#define ASM_XTENSA_REG_A4 (4)
+#define ASM_XTENSA_REG_A5 (5)
+#define ASM_XTENSA_REG_A6 (6)
+#define ASM_XTENSA_REG_A7 (7)
+#define ASM_XTENSA_REG_A8 (8)
+#define ASM_XTENSA_REG_A9 (9)
+#define ASM_XTENSA_REG_A10 (10)
+#define ASM_XTENSA_REG_A11 (11)
+#define ASM_XTENSA_REG_A12 (12)
+#define ASM_XTENSA_REG_A13 (13)
+#define ASM_XTENSA_REG_A14 (14)
+#define ASM_XTENSA_REG_A15 (15)
+
+// for bccz
+#define ASM_XTENSA_CCZ_EQ (0)
+#define ASM_XTENSA_CCZ_NE (1)
+
+// for bcc and setcc
+#define ASM_XTENSA_CC_NONE (0)
+#define ASM_XTENSA_CC_EQ (1)
+#define ASM_XTENSA_CC_LT (2)
+#define ASM_XTENSA_CC_LTU (3)
+#define ASM_XTENSA_CC_ALL (4)
+#define ASM_XTENSA_CC_BC (5)
+#define ASM_XTENSA_CC_ANY (8)
+#define ASM_XTENSA_CC_NE (9)
+#define ASM_XTENSA_CC_GE (10)
+#define ASM_XTENSA_CC_GEU (11)
+#define ASM_XTENSA_CC_NALL (12)
+#define ASM_XTENSA_CC_BS (13)
+
+// macros for encoding instructions (little endian versions)
+#define ASM_XTENSA_ENCODE_RRR(op0, op1, op2, r, s, t) \
+ ((((uint32_t)op2) << 20) | (((uint32_t)op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRI4(op0, op1, r, s, t, imm4) \
+ (((imm4) << 20) | ((op1) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRI8(op0, r, s, t, imm8) \
+ ((((uint32_t)imm8) << 16) | ((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RI16(op0, t, imm16) \
+ (((imm16) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RSR(op0, op1, op2, rs, t) \
+ (((op2) << 20) | ((op1) << 16) | ((rs) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_CALL(op0, n, offset) \
+ (((offset) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_CALLX(op0, op1, op2, r, s, m, n) \
+ ((((uint32_t)op2) << 20) | (((uint32_t)op1) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_BRI8(op0, r, s, m, n, imm8) \
+ (((imm8) << 16) | ((r) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_BRI12(op0, s, m, n, imm12) \
+ (((imm12) << 12) | ((s) << 8) | ((m) << 6) | ((n) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RRRN(op0, r, s, t) \
+ (((r) << 12) | ((s) << 8) | ((t) << 4) | (op0))
+#define ASM_XTENSA_ENCODE_RI7(op0, s, imm7) \
+ ((((imm7) & 0xf) << 12) | ((s) << 8) | ((imm7) & 0x70) | (op0))
+
+// Number of registers saved on the stack upon entry to function
+#define ASM_XTENSA_NUM_REGS_SAVED (5)
+#define ASM_XTENSA_NUM_REGS_SAVED_WIN (1)
+
+// Per-function state of the Xtensa assembler.
+typedef struct _asm_xtensa_t {
+    mp_asm_base_t base;    // common assembler state (pass, code buffer, labels)
+    uint32_t cur_const;    // index of the next constant-table slot to use in this pass
+    uint32_t num_const;    // number of constants counted in the previous pass
+    uint32_t *const_table; // pointer into the code buffer where the constants live
+    uint32_t stack_adjust; // byte size of the stack frame set up by the prologue
+} asm_xtensa_t;
+
+void asm_xtensa_end_pass(asm_xtensa_t *as);
+
+void asm_xtensa_entry(asm_xtensa_t *as, int num_locals);
+void asm_xtensa_exit(asm_xtensa_t *as);
+
+void asm_xtensa_entry_win(asm_xtensa_t *as, int num_locals);
+void asm_xtensa_exit_win(asm_xtensa_t *as);
+
+void asm_xtensa_op16(asm_xtensa_t *as, uint16_t op);
+void asm_xtensa_op24(asm_xtensa_t *as, uint32_t op);
+
+// raw instructions
+
+// ENTRY: allocate a windowed-ABI stack frame; num_bytes is encoded in units of 8
+static inline void asm_xtensa_op_entry(asm_xtensa_t *as, uint reg_src, int32_t num_bytes) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, 0, 3, (num_bytes / 8) & 0xfff));
+}
+
+static inline void asm_xtensa_op_add_n(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(10, reg_dest, reg_src_a, reg_src_b));
+}
+
+static inline void asm_xtensa_op_addi(asm_xtensa_t *as, uint reg_dest, uint reg_src, int imm8) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 12, reg_src, reg_dest, imm8 & 0xff));
+}
+
+static inline void asm_xtensa_op_and(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 1, reg_dest, reg_src_a, reg_src_b));
+}
+
+// Bcc: branch if (reg_src1 <cond> reg_src2); cond is ASM_XTENSA_CC_*, rel8 a signed 8-bit offset
+static inline void asm_xtensa_op_bcc(asm_xtensa_t *as, uint cond, uint reg_src1, uint reg_src2, int32_t rel8) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(7, cond, reg_src1, reg_src2, rel8 & 0xff));
+}
+
+// BEQZ/BNEZ: branch if reg_src is zero/non-zero; cond is ASM_XTENSA_CCZ_*, rel12 a signed 12-bit offset
+static inline void asm_xtensa_op_bccz(asm_xtensa_t *as, uint cond, uint reg_src, int32_t rel12) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_BRI12(6, reg_src, cond, 1, rel12 & 0xfff));
+}
+
+// CALL0: PC-relative call, return address goes to a0; rel18 is word-scaled
+// from the word-aligned PC (see mov_reg_pcrel for the alignment handling)
+static inline void asm_xtensa_op_call0(asm_xtensa_t *as, int32_t rel18) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(5, 0, rel18 & 0x3ffff));
+}
+
+static inline void asm_xtensa_op_callx0(asm_xtensa_t *as, uint reg) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 0));
+}
+
+static inline void asm_xtensa_op_callx8(asm_xtensa_t *as, uint reg) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 3, 2));
+}
+
+// J: unconditional jump; target = address of this instruction + 4 + rel18
+// (callers compute rel accordingly, see asm_xtensa_j_label)
+static inline void asm_xtensa_op_j(asm_xtensa_t *as, int32_t rel18) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALL(6, 0, rel18 & 0x3ffff));
+}
+
+static inline void asm_xtensa_op_jx(asm_xtensa_t *as, uint reg) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_CALLX(0, 0, 0, 0, reg, 2, 2));
+}
+
+static inline void asm_xtensa_op_l8ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint byte_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 0, reg_base, reg_dest, byte_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_l16ui(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint half_word_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 1, reg_base, reg_dest, half_word_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_l32i(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 2, reg_base, reg_dest, word_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_l32i_n(asm_xtensa_t *as, uint reg_dest, uint reg_base, uint word_offset) {
+ asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(8, word_offset & 0xf, reg_base, reg_dest));
+}
+
+// L32R: load the 32-bit word at code-buffer byte offset dest_off into reg_dest;
+// the immediate is the word distance from the instruction's word-rounded offset op_off
+static inline void asm_xtensa_op_l32r(asm_xtensa_t *as, uint reg_dest, uint32_t op_off, uint32_t dest_off) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RI16(1, reg_dest, ((dest_off - ((op_off + 3) & ~3)) >> 2) & 0xffff));
+}
+
+static inline void asm_xtensa_op_mov_n(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+ asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 0, reg_src, reg_dest));
+}
+
+// MOVI: load a signed 12-bit immediate into reg_dest (split 4+8 in the encoding)
+static inline void asm_xtensa_op_movi(asm_xtensa_t *as, uint reg_dest, int32_t imm12) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 10, (imm12 >> 8) & 0xf, reg_dest, imm12 & 0xff));
+}
+
+// MOVI.N: narrow load-immediate; only used here with tiny values (0, 1)
+static inline void asm_xtensa_op_movi_n(asm_xtensa_t *as, uint reg_dest, int imm4) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RI7(12, reg_dest, imm4));
+}
+
+static inline void asm_xtensa_op_mull(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 2, 8, reg_dest, reg_src_a, reg_src_b));
+}
+
+static inline void asm_xtensa_op_or(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 2, reg_dest, reg_src_a, reg_src_b));
+}
+
+// RET.N: narrow return for the non-windowed ABI (jumps to the address in a0)
+static inline void asm_xtensa_op_ret_n(asm_xtensa_t *as) {
+    asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 15, 0, 0));
+}
+
+static inline void asm_xtensa_op_retw_n(asm_xtensa_t *as) {
+ asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(13, 15, 0, 1));
+}
+
+static inline void asm_xtensa_op_s8i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint byte_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 4, reg_base, reg_src, byte_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_s16i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint half_word_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 5, reg_base, reg_src, half_word_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_s32i(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRI8(2, 6, reg_base, reg_src, word_offset & 0xff));
+}
+
+static inline void asm_xtensa_op_s32i_n(asm_xtensa_t *as, uint reg_src, uint reg_base, uint word_offset) {
+ asm_xtensa_op16(as, ASM_XTENSA_ENCODE_RRRN(9, word_offset & 0xf, reg_base, reg_src));
+}
+
+// SLL: shift reg_src left by the amount previously set with SSL (see ASM_LSL_REG_REG)
+static inline void asm_xtensa_op_sll(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, reg_dest, reg_src, 0));
+}
+
+static inline void asm_xtensa_op_srl(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 9, reg_dest, 0, reg_src));
+}
+
+static inline void asm_xtensa_op_sra(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, reg_dest, 0, reg_src));
+}
+
+// SSL: set the shift-amount register from reg_src for a following SLL
+static inline void asm_xtensa_op_ssl(asm_xtensa_t *as, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 1, reg_src, 0));
+}
+
+// SSR: set the shift-amount register from reg_src for a following SRL/SRA
+static inline void asm_xtensa_op_ssr(asm_xtensa_t *as, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 4, 0, reg_src, 0));
+}
+
+static inline void asm_xtensa_op_sub(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 12, reg_dest, reg_src_a, reg_src_b));
+}
+
+static inline void asm_xtensa_op_xor(asm_xtensa_t *as, uint reg_dest, uint reg_src_a, uint reg_src_b) {
+ asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 0, 3, reg_dest, reg_src_a, reg_src_b));
+}
+
+// convenience functions
+void asm_xtensa_j_label(asm_xtensa_t *as, uint label);
+void asm_xtensa_bccz_reg_label(asm_xtensa_t *as, uint cond, uint reg, uint label);
+void asm_xtensa_bcc_reg_reg_label(asm_xtensa_t *as, uint cond, uint reg1, uint reg2, uint label);
+void asm_xtensa_setcc_reg_reg_reg(asm_xtensa_t *as, uint cond, uint reg_dest, uint reg_src1, uint reg_src2);
+size_t asm_xtensa_mov_reg_i32(asm_xtensa_t *as, uint reg_dest, uint32_t i32);
+void asm_xtensa_mov_reg_i32_optimised(asm_xtensa_t *as, uint reg_dest, uint32_t i32);
+void asm_xtensa_mov_local_reg(asm_xtensa_t *as, int local_num, uint reg_src);
+void asm_xtensa_mov_reg_local(asm_xtensa_t *as, uint reg_dest, int local_num);
+void asm_xtensa_mov_reg_local_addr(asm_xtensa_t *as, uint reg_dest, int local_num);
+void asm_xtensa_mov_reg_pcrel(asm_xtensa_t *as, uint reg_dest, uint label);
+void asm_xtensa_call_ind(asm_xtensa_t *as, uint idx);
+void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
+
+// Holds a pointer to mp_fun_table
+#define ASM_XTENSA_REG_FUN_TABLE ASM_XTENSA_REG_A15
+#define ASM_XTENSA_REG_FUN_TABLE_WIN ASM_XTENSA_REG_A7
+
+#if defined(GENERIC_ASM_API) && GENERIC_ASM_API
+
+// The following macros provide a (mostly) arch-independent API to
+// generate native code, and are used by the native emitter.
+
+#define ASM_WORD_SIZE (4)
+
+#if !GENERIC_ASM_API_WIN
+// Configuration for non-windowed calls
+
+#define REG_RET ASM_XTENSA_REG_A2
+#define REG_ARG_1 ASM_XTENSA_REG_A2
+#define REG_ARG_2 ASM_XTENSA_REG_A3
+#define REG_ARG_3 ASM_XTENSA_REG_A4
+#define REG_ARG_4 ASM_XTENSA_REG_A5
+#define REG_ARG_5 ASM_XTENSA_REG_A6
+
+#define REG_TEMP0 ASM_XTENSA_REG_A2
+#define REG_TEMP1 ASM_XTENSA_REG_A3
+#define REG_TEMP2 ASM_XTENSA_REG_A4
+
+#define REG_LOCAL_1 ASM_XTENSA_REG_A12
+#define REG_LOCAL_2 ASM_XTENSA_REG_A13
+#define REG_LOCAL_3 ASM_XTENSA_REG_A14
+#define REG_LOCAL_NUM (3)
+
+#define ASM_NUM_REGS_SAVED ASM_XTENSA_NUM_REGS_SAVED
+#define REG_FUN_TABLE ASM_XTENSA_REG_FUN_TABLE
+
+#define ASM_ENTRY(as, nlocal) asm_xtensa_entry((as), (nlocal))
+#define ASM_EXIT(as) asm_xtensa_exit((as))
+#define ASM_CALL_IND(as, idx) asm_xtensa_call_ind((as), (idx))
+
+#else
+// Configuration for windowed calls with window size 8
+
+#define REG_PARENT_RET ASM_XTENSA_REG_A2
+#define REG_PARENT_ARG_1 ASM_XTENSA_REG_A2
+#define REG_PARENT_ARG_2 ASM_XTENSA_REG_A3
+#define REG_PARENT_ARG_3 ASM_XTENSA_REG_A4
+#define REG_PARENT_ARG_4 ASM_XTENSA_REG_A5
+#define REG_RET ASM_XTENSA_REG_A10
+#define REG_ARG_1 ASM_XTENSA_REG_A10
+#define REG_ARG_2 ASM_XTENSA_REG_A11
+#define REG_ARG_3 ASM_XTENSA_REG_A12
+#define REG_ARG_4 ASM_XTENSA_REG_A13
+
+#define REG_TEMP0 ASM_XTENSA_REG_A10
+#define REG_TEMP1 ASM_XTENSA_REG_A11
+#define REG_TEMP2 ASM_XTENSA_REG_A12
+
+#define REG_LOCAL_1 ASM_XTENSA_REG_A4
+#define REG_LOCAL_2 ASM_XTENSA_REG_A5
+#define REG_LOCAL_3 ASM_XTENSA_REG_A6
+#define REG_LOCAL_NUM (3)
+
+#define ASM_NUM_REGS_SAVED ASM_XTENSA_NUM_REGS_SAVED_WIN
+#define REG_FUN_TABLE ASM_XTENSA_REG_FUN_TABLE_WIN
+
+#define ASM_ENTRY(as, nlocal) asm_xtensa_entry_win((as), (nlocal))
+#define ASM_EXIT(as) asm_xtensa_exit_win((as))
+#define ASM_CALL_IND(as, idx) asm_xtensa_call_ind_win((as), (idx))
+
+#endif
+
+#define ASM_T asm_xtensa_t
+#define ASM_END_PASS asm_xtensa_end_pass
+
+#define ASM_JUMP asm_xtensa_j_label
+#define ASM_JUMP_IF_REG_ZERO(as, reg, label, bool_test) \
+ asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_EQ, reg, label)
+#define ASM_JUMP_IF_REG_NONZERO(as, reg, label, bool_test) \
+ asm_xtensa_bccz_reg_label(as, ASM_XTENSA_CCZ_NE, reg, label)
+#define ASM_JUMP_IF_REG_EQ(as, reg1, reg2, label) \
+ asm_xtensa_bcc_reg_reg_label(as, ASM_XTENSA_CC_EQ, reg1, reg2, label)
+#define ASM_JUMP_REG(as, reg) asm_xtensa_op_jx((as), (reg))
+
+#define ASM_MOV_LOCAL_REG(as, local_num, reg_src) asm_xtensa_mov_local_reg((as), ASM_NUM_REGS_SAVED + (local_num), (reg_src))
+#define ASM_MOV_REG_IMM(as, reg_dest, imm) asm_xtensa_mov_reg_i32_optimised((as), (reg_dest), (imm))
+#define ASM_MOV_REG_IMM_FIX_U16(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm))
+#define ASM_MOV_REG_IMM_FIX_WORD(as, reg_dest, imm) asm_xtensa_mov_reg_i32((as), (reg_dest), (imm))
+#define ASM_MOV_REG_LOCAL(as, reg_dest, local_num) asm_xtensa_mov_reg_local((as), (reg_dest), ASM_NUM_REGS_SAVED + (local_num))
+#define ASM_MOV_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mov_n((as), (reg_dest), (reg_src))
+#define ASM_MOV_REG_LOCAL_ADDR(as, reg_dest, local_num) asm_xtensa_mov_reg_local_addr((as), (reg_dest), ASM_NUM_REGS_SAVED + (local_num))
+#define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_xtensa_mov_reg_pcrel((as), (reg_dest), (label))
+
+#define ASM_LSL_REG_REG(as, reg_dest, reg_shift) \
+ do { \
+ asm_xtensa_op_ssl((as), (reg_shift)); \
+ asm_xtensa_op_sll((as), (reg_dest), (reg_dest)); \
+ } while (0)
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) \
+ do { \
+ asm_xtensa_op_ssr((as), (reg_shift)); \
+ asm_xtensa_op_srl((as), (reg_dest), (reg_dest)); \
+ } while (0)
+#define ASM_ASR_REG_REG(as, reg_dest, reg_shift) \
+ do { \
+ asm_xtensa_op_ssr((as), (reg_shift)); \
+ asm_xtensa_op_sra((as), (reg_dest), (reg_dest)); \
+ } while (0)
+#define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_or((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_xor((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_AND_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_and((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_ADD_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_add_n((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_SUB_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_sub((as), (reg_dest), (reg_dest), (reg_src))
+#define ASM_MUL_REG_REG(as, reg_dest, reg_src) asm_xtensa_op_mull((as), (reg_dest), (reg_dest), (reg_src))
+
+#define ASM_LOAD_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_LOAD8_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l8ui((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD16_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l16ui((as), (reg_dest), (reg_base), 0)
+#define ASM_LOAD32_REG_REG(as, reg_dest, reg_base) asm_xtensa_op_l32i_n((as), (reg_dest), (reg_base), 0)
+
+#define ASM_STORE_REG_REG_OFFSET(as, reg_dest, reg_base, word_offset) asm_xtensa_op_s32i_n((as), (reg_dest), (reg_base), (word_offset))
+#define ASM_STORE8_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s8i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE16_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s16i((as), (reg_src), (reg_base), 0)
+#define ASM_STORE32_REG_REG(as, reg_src, reg_base) asm_xtensa_op_s32i_n((as), (reg_src), (reg_base), 0)
+
+#endif // GENERIC_ASM_API
+
+#endif // MICROPY_INCLUDED_PY_ASMXTENSA_H
diff --git a/circuitpython/py/bc.c b/circuitpython/py/bc.c
new file mode 100644
index 0000000..33b94c4
--- /dev/null
+++ b/circuitpython/py/bc.c
@@ -0,0 +1,339 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/bc0.h"
+#include "py/bc.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if !MICROPY_PERSISTENT_CODE
+
// Decode a variable-length unsigned integer: big-endian groups of 7 bits,
// with the top bit of each byte set while more bytes follow.  Advances *ptr
// past the encoded value and returns the decoded result.
mp_uint_t mp_decode_uint(const byte **ptr) {
    const byte *src = *ptr;
    mp_uint_t result = 0;
    for (;;) {
        byte b = *src++;
        result = (result << 7) | (b & 0x7f);
        if ((b & 0x80) == 0) {
            break;
        }
    }
    *ptr = src;
    return result;
}
+
// Decode a var-uint without advancing any caller-visible pointer.
// Exists to reduce stack usage at the caller: if the caller used a local
// pointer and mp_decode_uint(&local), the compiler would have to dedicate a
// stack slot to that pointer (its address escapes); this wrapper keeps the
// escaping pointer confined to a tiny leaf function instead.
mp_uint_t mp_decode_uint_value(const byte *ptr) {
    const byte *cursor = ptr;
    return mp_decode_uint(&cursor);
}
+
// Skip over an encoded var-uint without decoding it, returning a pointer to
// the first byte after the value (continuation bytes have the top bit set).
// Helper for callers that only need to step past the field.
const byte *mp_decode_uint_skip(const byte *ptr) {
    byte b;
    do {
        b = *ptr++;
    } while ((b & 0x80) != 0);
    return ptr;
}
+
+#endif
+
// Raise a TypeError for a positional-argument count mismatch on function `f`
// (`expected` vs `given`).  Never returns.  Message detail scales with the
// MICROPY_ERROR_REPORTING level: TERSE uses the shared generic mismatch
// message, NORMAL reports the counts, DETAILED also names the function.
STATIC NORETURN void fun_pos_args_mismatch(mp_obj_fun_bc_t *f, size_t expected, size_t given) {
    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
    // generic message, used also for other argument issues
    (void)f;
    (void)expected;
    (void)given;
    mp_arg_error_terse_mismatch();
    #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
    (void)f;
    mp_raise_TypeError_varg(
        MP_ERROR_TEXT("function takes %d positional arguments but %d were given"), expected, given);
    #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
    mp_raise_TypeError_varg(
        MP_ERROR_TEXT("%q() takes %d positional arguments but %d were given"),
        mp_obj_fun_get_name(MP_OBJ_FROM_PTR(f)), expected, given);
    #endif
}
+
#if DEBUG_PRINT
// Debug helper: print the array address and the raw object pointers of the
// `sz` entries in `a` on one line.  Compiled out (arguments swallowed by the
// macro below) when DEBUG_PRINT is 0.
STATIC void dump_args(const mp_obj_t *a, size_t sz) {
    DEBUG_printf("%p: ", a);
    for (size_t i = 0; i < sz; i++) {
        DEBUG_printf("%p ", a[i]);
    }
    DEBUG_printf("\n");
}
#else
#define dump_args(...) (void)0
#endif
+
// On entry code_state should be allocated somewhere (stack/heap) and
// contain the following valid entries:
// - code_state->fun_bc should contain a pointer to the function object
// - code_state->ip should contain the offset in bytes from the pointer
// code_state->fun_bc->bytecode to the entry n_state (0 for bytecode, non-zero for native)
//
// On exit, all arguments (positional, *args, keyword, defaults, closure
// cells) are bound into code_state->state and code_state->ip points at the
// first opcode after the prelude.  Locals are stored from the top of the
// state array down: local i lives at state[n_state - 1 - i].  `args` holds
// n_args positional values followed by n_kw (key, value) pairs.  Raises
// TypeError on any argument mismatch.
void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    // This function is pretty complicated. It's main aim is to be efficient in speed and RAM
    // usage for the common case of positional only args.

    // get the function object that we want to set up (could be bytecode or native code)
    mp_obj_fun_bc_t *self = code_state->fun_bc;

    // ip comes in as an offset into bytecode, so turn it into a true pointer
    code_state->ip = self->bytecode + (size_t)code_state->ip;

    #if MICROPY_STACKLESS
    code_state->prev = NULL;
    #endif

    #if MICROPY_PY_SYS_SETTRACE
    code_state->prev_state = NULL;
    code_state->frame = NULL;
    #endif

    // Get cached n_state (rather than decode it again)
    size_t n_state = code_state->n_state;

    // Decode prelude
    size_t n_state_unused, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args;
    MP_BC_PRELUDE_SIG_DECODE_INTO(code_state->ip, n_state_unused, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args);
    (void)n_state_unused;
    (void)n_exc_stack_unused;

    // empty value stack: sp points one slot below state[0]
    code_state->sp = &code_state->state[0] - 1;
    code_state->exc_sp_idx = 0;

    // zero out the local stack to begin with
    memset(code_state->state, 0, n_state * sizeof(*code_state->state));

    // keyword (key, value) pairs follow the positional args in the args array
    const mp_obj_t *kwargs = args + n_args;

    // var_pos_kw_args points to the stack where the var-args tuple, and var-kw dict, should go (if they are needed)
    mp_obj_t *var_pos_kw_args = &code_state->state[n_state - 1 - n_pos_args - n_kwonly_args];

    // check positional arguments

    if (n_args > n_pos_args) {
        // given more than enough arguments
        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) == 0) {
            fun_pos_args_mismatch(self, n_pos_args, n_args);
        }
        // put extra arguments in varargs tuple
        *var_pos_kw_args-- = mp_obj_new_tuple(n_args - n_pos_args, args + n_pos_args);
        n_args = n_pos_args;
    } else {
        if ((scope_flags & MP_SCOPE_FLAG_VARARGS) != 0) {
            DEBUG_printf("passing empty tuple as *args\n");
            *var_pos_kw_args-- = mp_const_empty_tuple;
        }
        // Apply processing and check below only if we don't have kwargs,
        // otherwise, kw handling code below has own extensive checks.
        if (n_kw == 0 && (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) == 0) {
            if (n_args >= (size_t)(n_pos_args - n_def_pos_args)) {
                // given enough arguments, but may need to use some default arguments
                // (defaults for the last n_def_pos_args positionals live at the
                // start of self->extra_args)
                for (size_t i = n_args; i < n_pos_args; i++) {
                    code_state->state[n_state - 1 - i] = self->extra_args[i - (n_pos_args - n_def_pos_args)];
                }
            } else {
                fun_pos_args_mismatch(self, n_pos_args - n_def_pos_args, n_args);
            }
        }
    }

    // copy positional args into state
    for (size_t i = 0; i < n_args; i++) {
        code_state->state[n_state - 1 - i] = args[i];
    }

    // check keyword arguments

    if (n_kw != 0 || (scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
        DEBUG_printf("Initial args: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        mp_obj_t dict = MP_OBJ_NULL;
        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
            dict = mp_obj_new_dict(n_kw); // TODO: better go conservative with 0?
            *var_pos_kw_args = dict;
        }

        // get pointer to arg_names array (the first entries of the const table
        // are the qstr objects naming the positional and keyword-only args)
        const mp_obj_t *arg_names = (const mp_obj_t *)self->const_table;

        for (size_t i = 0; i < n_kw; i++) {
            // the keys in kwargs are expected to be qstr objects
            mp_obj_t wanted_arg_name = kwargs[2 * i];
            if (MP_UNLIKELY(!mp_obj_is_qstr(wanted_arg_name))) {
                #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
                mp_raise_TypeError(MP_ERROR_TEXT("unexpected keyword argument"));
                #else
                mp_raise_TypeError(MP_ERROR_TEXT("keywords must be strings"));
                #endif
            }
            for (size_t j = 0; j < n_pos_args + n_kwonly_args; j++) {
                if (wanted_arg_name == arg_names[j]) {
                    // slot already filled by a positional arg or duplicate kwarg
                    if (code_state->state[n_state - 1 - j] != MP_OBJ_NULL) {
                        mp_raise_TypeError_varg(
                            MP_ERROR_TEXT("function got multiple values for argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
                    }
                    code_state->state[n_state - 1 - j] = kwargs[2 * i + 1];
                    goto continue2;
                }
            }
            // Didn't find name match with positional args
            if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) == 0) {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                mp_raise_TypeError(MP_ERROR_TEXT("unexpected keyword argument"));
                #else
                mp_raise_TypeError_varg(
                    MP_ERROR_TEXT("unexpected keyword argument '%q'"), MP_OBJ_QSTR_VALUE(wanted_arg_name));
                #endif
            }
            mp_obj_dict_store(dict, kwargs[2 * i], kwargs[2 * i + 1]);
        continue2:;
        }

        DEBUG_printf("Args with kws flattened: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        // fill in defaults for positional args
        mp_obj_t *d = &code_state->state[n_state - n_pos_args];
        mp_obj_t *s = &self->extra_args[n_def_pos_args - 1];
        for (size_t i = n_def_pos_args; i > 0; i--, d++, s--) {
            if (*d == MP_OBJ_NULL) {
                *d = *s;
            }
        }

        DEBUG_printf("Args after filling default positional: ");
        dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);

        // Check that all mandatory positional args are specified
        // (d continues from the loop above, scanning up to the top of state)
        while (d < &code_state->state[n_state]) {
            if (*d++ == MP_OBJ_NULL) {
                mp_raise_TypeError_varg(
                    MP_ERROR_TEXT("function missing required positional argument #%d"), &code_state->state[n_state] - d);
            }
        }

        // Check that all mandatory keyword args are specified
        // Fill in default kw args if we have them
        for (size_t i = 0; i < n_kwonly_args; i++) {
            if (code_state->state[n_state - 1 - n_pos_args - i] == MP_OBJ_NULL) {
                mp_map_elem_t *elem = NULL;
                if ((scope_flags & MP_SCOPE_FLAG_DEFKWARGS) != 0) {
                    // extra_args[n_def_pos_args] holds the dict of kw-only defaults
                    elem = mp_map_lookup(&((mp_obj_dict_t *)MP_OBJ_TO_PTR(self->extra_args[n_def_pos_args]))->map, arg_names[n_pos_args + i], MP_MAP_LOOKUP);
                }
                if (elem != NULL) {
                    code_state->state[n_state - 1 - n_pos_args - i] = elem->value;
                } else {
                    mp_raise_TypeError_varg(
                        MP_ERROR_TEXT("function missing required keyword argument '%q'"),
                        MP_OBJ_QSTR_VALUE(arg_names[n_pos_args + i]));
                }
            }
        }

    } else {
        // no keyword arguments given
        if (n_kwonly_args != 0) {
            mp_raise_TypeError(MP_ERROR_TEXT("function missing keyword-only argument"));
        }
        if ((scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
            *var_pos_kw_args = mp_obj_new_dict(0);
        }
    }

    // read the size part of the prelude
    const byte *ip = code_state->ip;
    MP_BC_PRELUDE_SIZE_DECODE(ip);

    // jump over code info (source file and line-number mapping)
    ip += n_info;

    // bytecode prelude: initialise closed over variables
    for (; n_cell; --n_cell) {
        size_t local_num = *ip++;
        code_state->state[n_state - 1 - local_num] =
            mp_obj_new_cell(code_state->state[n_state - 1 - local_num]);
    }

    #if !MICROPY_PERSISTENT_CODE
    // so bytecode is aligned
    ip = MP_ALIGN(ip, sizeof(mp_uint_t));
    #endif

    // now that we skipped over the prelude, set the ip for the VM
    code_state->ip = ip;

    DEBUG_printf("Calling: n_pos_args=%d, n_kwonly_args=%d\n", n_pos_args, n_kwonly_args);
    dump_args(code_state->state + n_state - n_pos_args - n_kwonly_args, n_pos_args + n_kwonly_args);
    dump_args(code_state->state, n_state);
}
+
+#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
+
+// The following table encodes the number of bytes that a specific opcode
+// takes up. Some opcodes have an extra byte, defined by MP_BC_MASK_EXTRA_BYTE.
// Classify the opcode at `ip` and report its encoded size.
// Returns the MP_BC_FORMAT_xxx of the opcode and stores its total length in
// bytes into *opcode_size.  For MP_BC_FORMAT_VAR_UINT opcodes the trailing
// var-uint is only counted when count_var_uint is true (otherwise the
// reported size covers just the opcode and any extra byte).
uint mp_opcode_format(const byte *ip, size_t *opcode_size, bool count_var_uint) {
    const byte *start = ip;
    uint fmt = MP_BC_FORMAT(*ip);

    if (fmt == MP_BC_FORMAT_QSTR) {
        // opcode byte plus 2-byte qstr index
        ip += 3;
    } else {
        // opcodes matching the mask carry one extra trailing byte
        bool has_extra_byte = (*ip & MP_BC_MASK_EXTRA_BYTE) == 0;
        ip += 1;
        if (fmt == MP_BC_FORMAT_VAR_UINT) {
            if (count_var_uint) {
                // skip over the var-uint (continuation bytes have top bit set)
                while ((*ip++ & 0x80) != 0) {
                }
            }
        } else if (fmt == MP_BC_FORMAT_OFFSET) {
            // 2-byte jump offset
            ip += 2;
        }
        if (has_extra_byte) {
            ip += 1;
        }
    }

    *opcode_size = (size_t)(ip - start);
    return fmt;
}
+
+#endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
diff --git a/circuitpython/py/bc.h b/circuitpython/py/bc.h
new file mode 100644
index 0000000..eeccc34
--- /dev/null
+++ b/circuitpython/py/bc.h
@@ -0,0 +1,281 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BC_H
+#define MICROPY_INCLUDED_PY_BC_H
+
+#include "py/runtime.h"
+#include "py/objfun.h"
+
+// bytecode layout:
+//
+// func signature : var uint
+// contains six values interleaved bit-wise as: xSSSSEAA [xFSSKAED repeated]
+// x = extension another byte follows
+// S = n_state - 1 number of entries in Python value stack
+// E = n_exc_stack number of entries in exception stack
+// F = scope_flags four bits of flags, MP_SCOPE_FLAG_xxx
+// A = n_pos_args number of arguments this function takes
+// K = n_kwonly_args number of keyword-only arguments this function takes
+// D = n_def_pos_args number of default positional arguments
+//
+// prelude size : var uint
+// contains two values interleaved bit-wise as: xIIIIIIC repeated
+// x = extension another byte follows
+// I = n_info number of bytes in source info section
+// C = n_cells number of bytes/cells in closure section
+//
+// source info section:
+// simple_name : var qstr
+// source_file : var qstr
+// <line number info>
+//
+// closure section:
+// local_num0 : byte
+// ... : byte
+// local_numN : byte N = n_cells-1
+//
+// <word alignment padding> only needed if bytecode contains pointers
+//
+// <bytecode>
+//
+//
+// constant table layout:
+//
+// argname0 : obj (qstr)
+// ... : obj (qstr)
+// argnameN : obj (qstr) N = num_pos_args + num_kwonly_args
+// const0 : obj
+// constN : obj
+
// Emit the prelude signature for `scope` as a var-length byte sequence via
// out_byte(out_env, b).  S (n_state) and E (n_exc_stack) are caller-supplied
// lvalues and are consumed; F/A/K/D are read from `scope`.  The first byte
// packs xSSSSEAA, each continuation byte packs xFSSKAED, and the x bit is
// set while more bytes follow (see the layout comment at the top of this file).
#define MP_BC_PRELUDE_SIG_ENCODE(S, E, scope, out_byte, out_env) \
    do { \
        /*// Get values to store in prelude */ \
        size_t F = scope->scope_flags & MP_SCOPE_FLAG_ALL_SIG; \
        size_t A = scope->num_pos_args; \
        size_t K = scope->num_kwonly_args; \
        size_t D = scope->num_def_pos_args; \
        \
        /* Adjust S to shrink range, to compress better */ \
        S -= 1; \
        \
        /* Encode prelude */ \
        /* xSSSSEAA */ \
        uint8_t z = (S & 0xf) << 3 | (E & 1) << 2 | (A & 3); \
        S >>= 4; \
        E >>= 1; \
        A >>= 2; \
        while (S | E | F | A | K | D) { \
            out_byte(out_env, 0x80 | z); \
            /* xFSSKAED */ \
            z = (F & 1) << 6 | (S & 3) << 4 | (K & 1) << 3 \
                | (A & 1) << 2 | (E & 1) << 1 | (D & 1); \
            S >>= 2; \
            E >>= 1; \
            F >>= 1; \
            A >>= 1; \
            K >>= 1; \
            D >>= 1; \
        } \
        out_byte(out_env, z); \
    } while (0)

// Inverse of MP_BC_PRELUDE_SIG_ENCODE: advance `ip` past the signature and
// store the six decoded values into the S/E/F/A/K/D lvalues (S is re-biased
// by +1 to undo the encoder's -1 compression adjustment).
#define MP_BC_PRELUDE_SIG_DECODE_INTO(ip, S, E, F, A, K, D) \
    do { \
        uint8_t z = *(ip)++; \
        /* xSSSSEAA */ \
        S = (z >> 3) & 0xf; \
        E = (z >> 2) & 0x1; \
        F = 0; \
        A = z & 0x3; \
        K = 0; \
        D = 0; \
        for (unsigned n = 0; z & 0x80; ++n) { \
            z = *(ip)++; \
            /* xFSSKAED */ \
            S |= (z & 0x30) << (2 * n); \
            E |= (z & 0x02) << n; \
            F |= ((z & 0x40) >> 6) << n; \
            A |= (z & 0x4) << n; \
            K |= ((z & 0x08) >> 3) << n; \
            D |= (z & 0x1) << n; \
        } \
        S += 1; \
        (void)E; \
        (void)F; \
        (void)A; \
        (void)K; \
        (void)D; \
    } while (0)

// Convenience wrapper: declares the six conventionally-named locals and
// decodes the signature at `ip` into them.
#define MP_BC_PRELUDE_SIG_DECODE(ip) \
    size_t n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args; \
    MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_pos_args); \
    (void)n_state; (void)n_exc_stack; (void)scope_flags; \
    (void)n_pos_args; (void)n_kwonly_args; (void)n_def_pos_args

// Emit the prelude size fields: I (n_info) and C (n_cells) interleaved as
// xIIIIIIC per byte, x set while more bytes follow.  I and C are consumed.
#define MP_BC_PRELUDE_SIZE_ENCODE(I, C, out_byte, out_env) \
    do { \
        /* Encode bit-wise as: xIIIIIIC */ \
        uint8_t z = 0; \
        do { \
            z = (I & 0x3f) << 1 | (C & 1); \
            C >>= 1; \
            I >>= 6; \
            if (C | I) { \
                z |= 0x80; \
            } \
            out_byte(out_env, z); \
        } while (C | I); \
    } while (0)

// Inverse of MP_BC_PRELUDE_SIZE_ENCODE: advance `ip` past the size fields,
// storing the decoded counts into the I and C lvalues.
#define MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, I, C) \
    do { \
        uint8_t z; \
        C = 0; \
        I = 0; \
        for (unsigned n = 0;; ++n) { \
            z = *(ip)++; \
            /* xIIIIIIC */ \
            C |= (z & 1) << n; \
            I |= ((z & 0x7e) >> 1) << (6 * n); \
            if (!(z & 0x80)) { \
                break; \
            } \
        } \
    } while (0)

// Convenience wrapper: declares n_info/n_cell and decodes the size fields
// at `ip` into them.
#define MP_BC_PRELUDE_SIZE_DECODE(ip) \
    size_t n_info, n_cell; \
    MP_BC_PRELUDE_SIZE_DECODE_INTO(ip, n_info, n_cell); \
    (void)n_info; (void)n_cell
+
// Sentinel value for mp_code_state_t.exc_sp_idx
#define MP_CODE_STATE_EXC_SP_IDX_SENTINEL ((uint16_t)-1)

// To convert mp_code_state_t.exc_sp_idx to/from a pointer to mp_exc_stack_t.
// The stored index is biased by one so that 0 can mean "exception stack empty".
#define MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp) ((exc_sp) + 1 - (exc_stack))
#define MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, exc_sp_idx) ((exc_stack) + (exc_sp_idx) - 1)

// Fully unpacked form of a bytecode prelude (signature, sizes, names and
// section pointers — see the layout comment at the top of this file).
typedef struct _mp_bytecode_prelude_t {
    uint n_state;
    uint n_exc_stack;
    uint scope_flags;
    uint n_pos_args;
    uint n_kwonly_args;
    uint n_def_pos_args;
    qstr qstr_block_name;
    qstr qstr_source_file;
    const byte *line_info;
    const byte *opcodes;
} mp_bytecode_prelude_t;

// Exception stack entry
typedef struct _mp_exc_stack_t {
    const byte *handler;
    // bit 0 is currently unused
    // bit 1 is whether the opcode was SETUP_WITH or SETUP_FINALLY
    // NOTE(review): the bit comments appear to describe tag bits carried in
    // val_sp via the MP_TAGPTR_* macros below — confirm against the VM.
    mp_obj_t *val_sp;
    // Saved exception
    mp_obj_base_t *prev_exc;
} mp_exc_stack_t;

typedef struct _mp_code_state_t {
    // The fun_bc entry points to the underlying function object that is being executed.
    // It is needed to access the start of bytecode and the const_table.
    // It is also needed to prevent the GC from reclaiming the bytecode during execution,
    // because the ip pointer below will always point to the interior of the bytecode.
    mp_obj_fun_bc_t *fun_bc;
    const byte *ip; // current execution position within fun_bc->bytecode
    mp_obj_t *sp; // value-stack top; state[0] - 1 when the stack is empty
    uint16_t n_state; // cached number of state slots (locals + value stack)
    uint16_t exc_sp_idx; // 1-based exception-stack index; 0 means empty
    mp_obj_dict_t *old_globals;
    #if MICROPY_STACKLESS
    struct _mp_code_state_t *prev;
    #endif
    #if MICROPY_PY_SYS_SETTRACE
    struct _mp_code_state_t *prev_state;
    struct _mp_obj_frame_t *frame;
    #endif
    // Variable-length
    mp_obj_t state[0];
    // Variable-length, never accessed by name, only as (void*)(state + n_state)
    // mp_exc_stack_t exc_state[0];
} mp_code_state_t;

// Var-uint helpers implemented in bc.c (decode, decode-by-value, skip).
mp_uint_t mp_decode_uint(const byte **ptr);
mp_uint_t mp_decode_uint_value(const byte *ptr);
const byte *mp_decode_uint_skip(const byte *ptr);

mp_vm_return_kind_t mp_execute_bytecode(mp_code_state_t *code_state, volatile mp_obj_t inject_exc);
mp_code_state_t *mp_obj_fun_bc_prepare_codestate(mp_obj_t func, size_t n_args, size_t n_kw, const mp_obj_t *args);
void mp_setup_code_state(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args);
void mp_bytecode_print(const mp_print_t *print, const void *descr, const byte *code, mp_uint_t len, const mp_uint_t *const_table);
void mp_bytecode_print2(const mp_print_t *print, const byte *code, size_t len, const mp_uint_t *const_table);
const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip);
#define mp_bytecode_print_inst(print, code, const_table) mp_bytecode_print2(print, code, 1, const_table)

// Helper macros to access pointer with least significant bits holding flags
#define MP_TAGPTR_PTR(x) ((void *)((uintptr_t)(x) & ~((uintptr_t)3)))
#define MP_TAGPTR_TAG0(x) ((uintptr_t)(x) & 1)
#define MP_TAGPTR_TAG1(x) ((uintptr_t)(x) & 2)
#define MP_TAGPTR_MAKE(ptr, tag) ((void *)((uintptr_t)(ptr) | (tag)))

#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE

// Return the MP_BC_FORMAT_xxx of the opcode at ip and store its total size in
// bytes into *opcode_size (the trailing var-uint of a VAR_UINT opcode is only
// counted when count_var_uint is true).  Implemented in bc.c.
uint mp_opcode_format(const byte *ip, size_t *opcode_size, bool count_var_uint);

#endif
+
// Map a bytecode offset to its source line number using the compressed
// line-info table `line_info` (a zero-terminated sequence of (bytecode-delta,
// line-delta) entries).  Lines start at 1; if bc_offset runs past the table,
// the last line reached is returned.
static inline size_t mp_bytecode_get_source_line(const byte *line_info, size_t bc_offset) {
    size_t line = 1;
    for (;;) {
        size_t hdr = *line_info;
        if (hdr == 0) {
            // end of mapping table
            break;
        }
        size_t bc_delta;
        size_t line_delta;
        if (hdr & 0x80) {
            // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
            bc_delta = hdr & 0xf;
            line_delta = ((hdr << 4) & 0x700) | line_info[1];
            line_info += 2;
        } else {
            // 0b0LLBBBBB encoding
            bc_delta = hdr & 0x1f;
            line_delta = hdr >> 5;
            line_info += 1;
        }
        if (bc_offset < bc_delta) {
            // found source line corresponding to bytecode offset
            break;
        }
        bc_offset -= bc_delta;
        line += line_delta;
    }
    return line;
}
+
+#endif // MICROPY_INCLUDED_PY_BC_H
diff --git a/circuitpython/py/bc0.h b/circuitpython/py/bc0.h
new file mode 100644
index 0000000..500dee5
--- /dev/null
+++ b/circuitpython/py/bc0.h
@@ -0,0 +1,150 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BC0_H
+#define MICROPY_INCLUDED_PY_BC0_H
+
// MicroPython bytecode opcodes, grouped based on the format of the opcode

#define MP_BC_MASK_FORMAT (0xf0)
// An opcode carries one extra trailing byte iff
// (op & MP_BC_MASK_EXTRA_BYTE) == 0 (see mp_opcode_format in py/bc.c).
#define MP_BC_MASK_EXTRA_BYTE (0x9e)

// The four argument encodings an opcode can use (sizes per mp_opcode_format):
// BYTE: opcode only; QSTR: opcode + 2-byte qstr index;
// VAR_UINT: opcode + var-length uint; OFFSET: opcode + 2-byte offset.
#define MP_BC_FORMAT_BYTE (0)
#define MP_BC_FORMAT_QSTR (1)
#define MP_BC_FORMAT_VAR_UINT (2)
#define MP_BC_FORMAT_OFFSET (3)

// Nibbles in magic number are: BB BB BB BB BB BO VV QU
// (indexed by the opcode's top nibble, two format bits each, LSB first)
#define MP_BC_FORMAT(op) ((0x000003a4 >> (2 * ((op) >> 4))) & 3)

// Load, Store, Delete, Import, Make, Build, Unpack, Call, Jump, Exception, For, sTack, Return, Yield, Op
#define MP_BC_BASE_RESERVED (0x00) // ----------------
#define MP_BC_BASE_QSTR_O (0x10) // LLLLLLSSSDDII---
#define MP_BC_BASE_VINT_E (0x20) // MMLLLLSSDDBBBBBB
#define MP_BC_BASE_VINT_O (0x30) // UUMMCCCC--------
#define MP_BC_BASE_JUMP_E (0x40) // J-JJJJJEEEEF----
#define MP_BC_BASE_BYTE_O (0x50) // LLLLSSDTTTTTEEFF
#define MP_BC_BASE_BYTE_E (0x60) // --BREEEYYI------
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI (0x70) // LLLLLLLLLLLLLLLL
// (0x80) // LLLLLLLLLLLLLLLL
// (0x90) // LLLLLLLLLLLLLLLL
// (0xa0) // LLLLLLLLLLLLLLLL
#define MP_BC_LOAD_FAST_MULTI (0xb0) // LLLLLLLLLLLLLLLL
#define MP_BC_STORE_FAST_MULTI (0xc0) // SSSSSSSSSSSSSSSS
#define MP_BC_UNARY_OP_MULTI (0xd0) // OOOOOOO
#define MP_BC_BINARY_OP_MULTI (0xd7) // OOOOOOOOO
// (0xe0) // OOOOOOOOOOOOOOOO
// (0xf0) // OOOOOOOOOO------

// Number of opcodes in each *_MULTI range above (argument encoded in the
// opcode value itself rather than in trailing bytes).
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM (64)
#define MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS (16)
#define MP_BC_LOAD_FAST_MULTI_NUM (16)
#define MP_BC_STORE_FAST_MULTI_NUM (16)
#define MP_BC_UNARY_OP_MULTI_NUM (MP_UNARY_OP_NUM_BYTECODE)
#define MP_BC_BINARY_OP_MULTI_NUM (MP_BINARY_OP_NUM_BYTECODE)

#define MP_BC_LOAD_CONST_FALSE (MP_BC_BASE_BYTE_O + 0x00)
#define MP_BC_LOAD_CONST_NONE (MP_BC_BASE_BYTE_O + 0x01)
#define MP_BC_LOAD_CONST_TRUE (MP_BC_BASE_BYTE_O + 0x02)
#define MP_BC_LOAD_CONST_SMALL_INT (MP_BC_BASE_VINT_E + 0x02) // signed var-int
#define MP_BC_LOAD_CONST_STRING (MP_BC_BASE_QSTR_O + 0x00) // qstr
#define MP_BC_LOAD_CONST_OBJ (MP_BC_BASE_VINT_E + 0x03) // ptr
#define MP_BC_LOAD_NULL (MP_BC_BASE_BYTE_O + 0x03)

#define MP_BC_LOAD_FAST_N (MP_BC_BASE_VINT_E + 0x04) // uint
#define MP_BC_LOAD_DEREF (MP_BC_BASE_VINT_E + 0x05) // uint
#define MP_BC_LOAD_NAME (MP_BC_BASE_QSTR_O + 0x01) // qstr
#define MP_BC_LOAD_GLOBAL (MP_BC_BASE_QSTR_O + 0x02) // qstr
#define MP_BC_LOAD_ATTR (MP_BC_BASE_QSTR_O + 0x03) // qstr
#define MP_BC_LOAD_METHOD (MP_BC_BASE_QSTR_O + 0x04) // qstr
#define MP_BC_LOAD_SUPER_METHOD (MP_BC_BASE_QSTR_O + 0x05) // qstr
#define MP_BC_LOAD_BUILD_CLASS (MP_BC_BASE_BYTE_O + 0x04)
#define MP_BC_LOAD_SUBSCR (MP_BC_BASE_BYTE_O + 0x05)

#define MP_BC_STORE_FAST_N (MP_BC_BASE_VINT_E + 0x06) // uint
#define MP_BC_STORE_DEREF (MP_BC_BASE_VINT_E + 0x07) // uint
#define MP_BC_STORE_NAME (MP_BC_BASE_QSTR_O + 0x06) // qstr
#define MP_BC_STORE_GLOBAL (MP_BC_BASE_QSTR_O + 0x07) // qstr
#define MP_BC_STORE_ATTR (MP_BC_BASE_QSTR_O + 0x08) // qstr
#define MP_BC_STORE_SUBSCR (MP_BC_BASE_BYTE_O + 0x06)

#define MP_BC_DELETE_FAST (MP_BC_BASE_VINT_E + 0x08) // uint
#define MP_BC_DELETE_DEREF (MP_BC_BASE_VINT_E + 0x09) // uint
#define MP_BC_DELETE_NAME (MP_BC_BASE_QSTR_O + 0x09) // qstr
#define MP_BC_DELETE_GLOBAL (MP_BC_BASE_QSTR_O + 0x0a) // qstr

#define MP_BC_DUP_TOP (MP_BC_BASE_BYTE_O + 0x07)
#define MP_BC_DUP_TOP_TWO (MP_BC_BASE_BYTE_O + 0x08)
#define MP_BC_POP_TOP (MP_BC_BASE_BYTE_O + 0x09)
#define MP_BC_ROT_TWO (MP_BC_BASE_BYTE_O + 0x0a)
#define MP_BC_ROT_THREE (MP_BC_BASE_BYTE_O + 0x0b)

#define MP_BC_JUMP (MP_BC_BASE_JUMP_E + 0x02) // rel byte code offset, 16-bit signed, in excess
#define MP_BC_POP_JUMP_IF_TRUE (MP_BC_BASE_JUMP_E + 0x03) // rel byte code offset, 16-bit signed, in excess
#define MP_BC_POP_JUMP_IF_FALSE (MP_BC_BASE_JUMP_E + 0x04) // rel byte code offset, 16-bit signed, in excess
#define MP_BC_JUMP_IF_TRUE_OR_POP (MP_BC_BASE_JUMP_E + 0x05) // rel byte code offset, 16-bit signed, in excess
#define MP_BC_JUMP_IF_FALSE_OR_POP (MP_BC_BASE_JUMP_E + 0x06) // rel byte code offset, 16-bit signed, in excess
#define MP_BC_UNWIND_JUMP (MP_BC_BASE_JUMP_E + 0x00) // rel byte code offset, 16-bit signed, in excess; then a byte
#define MP_BC_SETUP_WITH (MP_BC_BASE_JUMP_E + 0x07) // rel byte code offset, 16-bit unsigned
#define MP_BC_SETUP_EXCEPT (MP_BC_BASE_JUMP_E + 0x08) // rel byte code offset, 16-bit unsigned
#define MP_BC_SETUP_FINALLY (MP_BC_BASE_JUMP_E + 0x09) // rel byte code offset, 16-bit unsigned
#define MP_BC_POP_EXCEPT_JUMP (MP_BC_BASE_JUMP_E + 0x0a) // rel byte code offset, 16-bit unsigned
#define MP_BC_FOR_ITER (MP_BC_BASE_JUMP_E + 0x0b) // rel byte code offset, 16-bit unsigned
#define MP_BC_WITH_CLEANUP (MP_BC_BASE_BYTE_O + 0x0c)
#define MP_BC_END_FINALLY (MP_BC_BASE_BYTE_O + 0x0d)
#define MP_BC_GET_ITER (MP_BC_BASE_BYTE_O + 0x0e)
#define MP_BC_GET_ITER_STACK (MP_BC_BASE_BYTE_O + 0x0f)

#define MP_BC_BUILD_TUPLE (MP_BC_BASE_VINT_E + 0x0a) // uint
#define MP_BC_BUILD_LIST (MP_BC_BASE_VINT_E + 0x0b) // uint
#define MP_BC_BUILD_MAP (MP_BC_BASE_VINT_E + 0x0c) // uint
#define MP_BC_STORE_MAP (MP_BC_BASE_BYTE_E + 0x02)
#define MP_BC_BUILD_SET (MP_BC_BASE_VINT_E + 0x0d) // uint
#define MP_BC_BUILD_SLICE (MP_BC_BASE_VINT_E + 0x0e) // uint
#define MP_BC_STORE_COMP (MP_BC_BASE_VINT_E + 0x0f) // uint
#define MP_BC_UNPACK_SEQUENCE (MP_BC_BASE_VINT_O + 0x00) // uint
#define MP_BC_UNPACK_EX (MP_BC_BASE_VINT_O + 0x01) // uint

#define MP_BC_RETURN_VALUE (MP_BC_BASE_BYTE_E + 0x03)
#define MP_BC_RAISE_LAST (MP_BC_BASE_BYTE_E + 0x04)
#define MP_BC_RAISE_OBJ (MP_BC_BASE_BYTE_E + 0x05)
#define MP_BC_RAISE_FROM (MP_BC_BASE_BYTE_E + 0x06)
#define MP_BC_YIELD_VALUE (MP_BC_BASE_BYTE_E + 0x07)
#define MP_BC_YIELD_FROM (MP_BC_BASE_BYTE_E + 0x08)

#define MP_BC_MAKE_FUNCTION (MP_BC_BASE_VINT_O + 0x02) // uint
#define MP_BC_MAKE_FUNCTION_DEFARGS (MP_BC_BASE_VINT_O + 0x03) // uint
#define MP_BC_MAKE_CLOSURE (MP_BC_BASE_VINT_E + 0x00) // uint; extra byte
#define MP_BC_MAKE_CLOSURE_DEFARGS (MP_BC_BASE_VINT_E + 0x01) // uint; extra byte
#define MP_BC_CALL_FUNCTION (MP_BC_BASE_VINT_O + 0x04) // uint
#define MP_BC_CALL_FUNCTION_VAR_KW (MP_BC_BASE_VINT_O + 0x05) // uint
#define MP_BC_CALL_METHOD (MP_BC_BASE_VINT_O + 0x06) // uint
#define MP_BC_CALL_METHOD_VAR_KW (MP_BC_BASE_VINT_O + 0x07) // uint

#define MP_BC_IMPORT_NAME (MP_BC_BASE_QSTR_O + 0x0b) // qstr
#define MP_BC_IMPORT_FROM (MP_BC_BASE_QSTR_O + 0x0c) // qstr
#define MP_BC_IMPORT_STAR (MP_BC_BASE_BYTE_E + 0x09)
+#endif // MICROPY_INCLUDED_PY_BC0_H
diff --git a/circuitpython/py/binary.c b/circuitpython/py/binary.c
new file mode 100644
index 0000000..06f0157
--- /dev/null
+++ b/circuitpython/py/binary.c
@@ -0,0 +1,466 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Paul Sokolovsky
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/binary.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+// Helpers to work with binary-encoded data
+
+#ifndef alignof
+#define alignof(type) offsetof(struct { char c; type t; }, t)
+#endif
+
+// Return the size in bytes of one element with struct/array typecode
+// `val_type`, interpreted under format prefix `struct_type`:
+//   '<'/'>'  standard (fixed) sizes, alignment always 1
+//   '@'      native ABI sizes and alignments of this build
+// If `palign` is non-NULL it receives the required alignment.
+// Raises ValueError when the typecode is not recognised (size stays 0).
+size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign) {
+    size_t size = 0;
+    int align = 1;
+    switch (struct_type) {
+        case '<':
+        case '>':
+            // Standard sizes: independent of the host ABI.
+            switch (val_type) {
+                case 'b':
+                case 'B':
+                case 'x':
+                    size = 1;
+                    break;
+                case 'h':
+                case 'H':
+                    size = 2;
+                    break;
+                case 'i':
+                case 'I':
+                    size = 4;
+                    break;
+                case 'l':
+                case 'L':
+                    size = 4;
+                    break;
+                case 'q':
+                case 'Q':
+                    size = 8;
+                    break;
+                #if MICROPY_NONSTANDARD_TYPECODES
+                // Non-CPython extensions: pointer-sized entries.
+                case 'P':
+                case 'O':
+                case 'S':
+                    size = sizeof(void *);
+                    break;
+                #endif
+                case 'f':
+                    size = sizeof(float);
+                    break;
+                case 'd':
+                    size = sizeof(double);
+                    break;
+            }
+            break;
+        case '@': {
+            // TODO:
+            // The simplest heuristic for alignment is to align by value
+            // size, but that doesn't work for "bigger than int" types,
+            // for example, long long may very well have long alignment
+            // So, we introduce separate alignment handling, but having
+            // formal support for that is different from actually supporting
+            // particular (or any) ABI.
+            switch (val_type) {
+                case BYTEARRAY_TYPECODE:
+                case 'b':
+                case 'B':
+                case 'x':
+                    align = size = 1;
+                    break;
+                case 'h':
+                case 'H':
+                    align = alignof(short);
+                    size = sizeof(short);
+                    break;
+                case 'i':
+                case 'I':
+                    align = alignof(int);
+                    size = sizeof(int);
+                    break;
+                case 'l':
+                case 'L':
+                    align = alignof(long);
+                    size = sizeof(long);
+                    break;
+                case 'q':
+                case 'Q':
+                    align = alignof(long long);
+                    size = sizeof(long long);
+                    break;
+                #if MICROPY_NONSTANDARD_TYPECODES
+                case 'P':
+                case 'O':
+                case 'S':
+                    align = alignof(void *);
+                    size = sizeof(void *);
+                    break;
+                #endif
+                case 'f':
+                    align = alignof(float);
+                    size = sizeof(float);
+                    break;
+                case 'd':
+                    align = alignof(double);
+                    size = sizeof(double);
+                    break;
+            }
+        }
+    }
+
+    // size == 0 means no case above matched the typecode.
+    if (size == 0) {
+        mp_raise_ValueError(MP_ERROR_TEXT("bad typecode"));
+    }
+
+    if (palign != NULL) {
+        *palign = align;
+    }
+    return size;
+}
+
+// Read element `index` from the C array at `p` (element type given by the
+// array typecode) and box it as a Python object.  Small typecodes (b/B/h/H)
+// always fit a small int and fall through to the bottom return; wider types
+// return immediately via the appropriate int/float constructor.
+mp_obj_t mp_binary_get_val_array(char typecode, void *p, size_t index) {
+    mp_int_t val = 0;
+    switch (typecode) {
+        case 'b':
+            val = ((signed char *)p)[index];
+            break;
+        case BYTEARRAY_TYPECODE:
+        case 'B':
+            val = ((unsigned char *)p)[index];
+            break;
+        case 'h':
+            val = ((short *)p)[index];
+            break;
+        case 'H':
+            val = ((unsigned short *)p)[index];
+            break;
+        case 'i':
+            return mp_obj_new_int(((int *)p)[index]);
+        case 'I':
+            return mp_obj_new_int_from_uint(((unsigned int *)p)[index]);
+        case 'l':
+            return mp_obj_new_int(((long *)p)[index]);
+        case 'L':
+            return mp_obj_new_int_from_uint(((unsigned long *)p)[index]);
+        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+        // 64-bit elements need big-int support to be representable.
+        case 'q':
+            return mp_obj_new_int_from_ll(((long long *)p)[index]);
+        case 'Q':
+            return mp_obj_new_int_from_ull(((unsigned long long *)p)[index]);
+        #endif
+        #if MICROPY_PY_BUILTINS_FLOAT
+        case 'f':
+            return mp_obj_new_float_from_f(((float *)p)[index]);
+        case 'd':
+            return mp_obj_new_float_from_d(((double *)p)[index]);
+        #endif
+        #if MICROPY_NONSTANDARD_TYPECODES
+        // Extension to CPython: array of objects
+        case 'O':
+            return ((mp_obj_t *)p)[index];
+        // Extension to CPython: array of pointers
+        case 'P':
+            return mp_obj_new_int((mp_int_t)(uintptr_t)((void **)p)[index]);
+        #endif
+    }
+    // b/B/h/H (and unknown typecodes, as 0) arrive here as a small int.
+    return MP_OBJ_NEW_SMALL_INT(val);
+}
+
+// The long long type is guaranteed to hold at least 64 bits, and size is at
+// most 8 (for q and Q), so we will always be able to parse the given data
+// and fit it into a long long.
+// Decode `size` bytes at `src` into an integer of the given byte order,
+// sign-extending when `is_signed` is set.  long long holds at least 64
+// bits and size is at most 8, so the result always fits.
+long long mp_binary_get_int(size_t size, bool is_signed, bool big_endian, const byte *src) {
+    int step = 1;
+    if (!big_endian) {
+        // Little-endian input: start at the most-significant byte (the
+        // last one in memory) and walk backwards.
+        src += size - 1;
+        step = -1;
+    }
+
+    // Seed the accumulator with all-ones when the top bit of the
+    // most-significant byte indicates a negative value, so results
+    // narrower than 64 bits come out sign-extended.
+    unsigned long long val = (is_signed && (*src & 0x80)) ? -1 : 0;
+    for (size_t i = 0; i < size; i++, src += step) {
+        val = (val << 8) | *src;
+    }
+
+    return val;
+}
+
+// Lowercase typecodes ('b','h',...) are the signed ones; they sort after 'Z'.
+#define is_signed(typecode) (typecode > 'Z')
+// Read one value of type `val_type` from the struct-packed buffer at *ptr
+// (p_base is the buffer start, used for native '@' alignment), advance
+// *ptr past it, and box the value as a Python object.
+mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte *p_base, byte **ptr) {
+    byte *p = *ptr;
+    size_t align;
+
+    size_t size = mp_binary_get_size(struct_type, val_type, &align);
+    if (struct_type == '@') {
+        // Align p relative to p_base
+        p = p_base + (uintptr_t)MP_ALIGN(p - p_base, align);
+        // Native order reduces to whichever standard order this build uses.
+        #if MP_ENDIANNESS_LITTLE
+        struct_type = '<';
+        #else
+        struct_type = '>';
+        #endif
+    }
+    *ptr = p + size;
+
+    long long val = mp_binary_get_int(size, is_signed(val_type), (struct_type == '>'), p);
+
+    // NOTE: the branches below interleave runtime if/else with #if guards;
+    // the 'O' test is written as a runtime && so the chain stays balanced.
+    if (MICROPY_NONSTANDARD_TYPECODES && (val_type == 'O')) {
+        // 'O': the stored word is itself an object reference.
+        return (mp_obj_t)(mp_uint_t)val;
+    #if MICROPY_NONSTANDARD_TYPECODES
+    } else if (val_type == 'S') {
+        // 'S': the stored word is a pointer to a NUL-terminated C string.
+        const char *s_val = (const char *)(uintptr_t)(mp_uint_t)val;
+        return mp_obj_new_str(s_val, strlen(s_val));
+    #endif
+    #if MICROPY_PY_BUILTINS_FLOAT
+    } else if (val_type == 'f') {
+        // Reinterpret the raw 32 bits as a float via a union.
+        union { uint32_t i;
+                float f;
+        } fpu = {val};
+        return mp_obj_new_float_from_f(fpu.f);
+    } else if (val_type == 'd') {
+        // Reinterpret the raw 64 bits as a double via a union.
+        union { uint64_t i;
+                double f;
+        } fpu = {val};
+        return mp_obj_new_float_from_d(fpu.f);
+    #endif
+    } else if (is_signed(val_type)) {
+        // Prefer the cheap small-int representation when the value fits.
+        if ((long long)MP_SMALL_INT_MIN <= val && val <= (long long)MP_SMALL_INT_MAX) {
+            return mp_obj_new_int((mp_int_t)val);
+        } else {
+            return mp_obj_new_int_from_ll(val);
+        }
+    } else {
+        if ((unsigned long long)val <= (unsigned long long)MP_SMALL_INT_MAX) {
+            return mp_obj_new_int_from_uint((mp_uint_t)val);
+        } else {
+            return mp_obj_new_int_from_ull(val);
+        }
+    }
+}
+
+// Store the least-significant `val_sz` bytes of `val` at `dest` in the
+// requested byte order.  When the host byte order already matches this is
+// a plain memcpy; otherwise bytes are copied one at a time in reverse.
+void mp_binary_set_int(size_t val_sz, bool big_endian, byte *dest, mp_uint_t val) {
+    if (MP_ENDIANNESS_LITTLE && !big_endian) {
+        // Host is little-endian and so is the target: low bytes are first.
+        memcpy(dest, &val, val_sz);
+        return;
+    }
+    if (MP_ENDIANNESS_BIG && big_endian) {
+        // Host is big-endian and so is the target: only copy the
+        // least-significant val_sz bytes, which sit at the end.
+        memcpy(dest, (byte *)&val + sizeof(mp_uint_t) - val_sz, val_sz);
+        return;
+    }
+    // Byte orders differ: walk the source backwards, starting just past
+    // the least-significant byte of val in host order.
+    const byte *src = (const byte *)&val
+        + (MP_ENDIANNESS_LITTLE ? val_sz : sizeof(mp_uint_t));
+    for (; val_sz > 0; val_sz--) {
+        *dest++ = *--src;
+    }
+}
+
+// Convert the Python object `val_in` to type `val_type` and store it into
+// the struct-packed buffer at *ptr (p_base is the buffer start, used for
+// native '@' alignment), then advance *ptr past the stored element.
+// Raises OverflowError (via the *_overflow_check helpers) when the value
+// does not fit in `size` bytes.
+void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p_base, byte **ptr) {
+    byte *p = *ptr;
+    size_t align;
+
+    size_t size = mp_binary_get_size(struct_type, val_type, &align);
+    if (struct_type == '@') {
+        // Align p relative to p_base
+        p = p_base + (uintptr_t)MP_ALIGN(p - p_base, align);
+        // Native order reduces to whichever standard order this build uses.
+        if (MP_ENDIANNESS_LITTLE) {
+            struct_type = '<';
+        } else {
+            struct_type = '>';
+        }
+    }
+    *ptr = p + size;
+
+    // val collects the raw machine word that the final mp_binary_set_int
+    // call will write; some branches return early instead.
+    mp_uint_t val;
+    switch (val_type) {
+        #if MICROPY_NONSTANDARD_TYPECODES
+        case 'O':
+            // 'O': store the object reference itself as a word.
+            val = (mp_uint_t)val_in;
+            break;
+        #endif
+        #if MICROPY_PY_BUILTINS_FLOAT
+        case 'f': {
+            // Reinterpret the float's 32 bits as an integer via a union.
+            union { uint32_t i;
+                    float f;
+            } fp_sp;
+            fp_sp.f = mp_obj_get_float_to_f(val_in);
+            val = fp_sp.i;
+            break;
+        }
+        case 'd': {
+            union { uint64_t i64;
+                    uint32_t i32[2];
+                    double f;
+            } fp_dp;
+            fp_dp.f = mp_obj_get_float_to_d(val_in);
+            if (MP_BYTES_PER_OBJ_WORD == 8) {
+                val = fp_dp.i64;
+            } else {
+                // 32-bit word size: write the first half of the double here
+                // and leave the second half in val for the common store below.
+                int be = struct_type == '>';
+                mp_binary_set_int(sizeof(uint32_t), be, p, fp_dp.i32[MP_ENDIANNESS_BIG ^ be]);
+                p += sizeof(uint32_t);
+                val = fp_dp.i32[MP_ENDIANNESS_LITTLE ^ be];
+            }
+            break;
+        }
+        #endif
+        default: {
+            bool signed_type = is_signed(val_type);
+            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+            if (mp_obj_is_type(val_in, &mp_type_int)) {
+                // It's a longint.
+                mp_obj_int_buffer_overflow_check(val_in, size, signed_type);
+                mp_obj_int_to_bytes_impl(val_in, struct_type == '>', size, p);
+                return;
+            }
+            #endif
+            {
+                val = mp_obj_get_int(val_in);
+                // Small int checking is separate, to be fast.
+                mp_small_int_buffer_overflow_check(val, size, signed_type);
+                // zero/sign extend if needed
+                if (MP_BYTES_PER_OBJ_WORD < 8 && size > sizeof(val)) {
+                    int c = (is_signed(val_type) && (mp_int_t)val < 0) ? 0xff : 0x00;
+                    memset(p, c, size);
+                    if (struct_type == '>') {
+                        // Big-endian: the significant bytes go at the end.
+                        p += size - sizeof(val);
+                    }
+                }
+                break;
+            }
+        }
+    }
+
+    mp_binary_set_int(MIN((size_t)size, sizeof(val)), struct_type == '>', p, val);
+}
+
+// Store the Python object `val_in` into element `index` of the C array at
+// `p`, converting it to the element type named by the array typecode.
+// Integer values are range-checked against the element size first.
+void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_in) {
+    switch (typecode) {
+        #if MICROPY_PY_BUILTINS_FLOAT
+        case 'f':
+            ((float *)p)[index] = mp_obj_get_float_to_f(val_in);
+            break;
+        case 'd':
+            ((double *)p)[index] = mp_obj_get_float_to_d(val_in);
+            break;
+        #endif
+        #if MICROPY_NONSTANDARD_TYPECODES
+        // Extension to CPython: array of objects
+        case 'O':
+            ((mp_obj_t *)p)[index] = val_in;
+            break;
+        #endif
+        default: {
+            // All integer typecodes: use native size/signedness rules.
+            size_t size = mp_binary_get_size('@', typecode, NULL);
+            bool signed_type = is_signed(typecode);
+
+            #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+            if (mp_obj_is_type(val_in, &mp_type_int)) {
+                // It's a long int.
+                mp_obj_int_buffer_overflow_check(val_in, size, signed_type);
+                mp_obj_int_to_bytes_impl(val_in, MP_ENDIANNESS_BIG,
+                    size, (uint8_t *)p + index * size);
+                return;
+            }
+            #endif
+            mp_int_t val = mp_obj_get_int(val_in);
+            // Small int checking is separate, to be fast.
+            mp_small_int_buffer_overflow_check(val, size, signed_type);
+            mp_binary_set_val_array_from_int(typecode, p, index, val);
+        }
+    }
+}
+
+// Store the machine integer `val` into element `index` of the C array at
+// `p`, truncating/converting it to the element type named by the array
+// typecode.  Unknown typecodes are silently ignored (callers validate
+// the typecode via mp_binary_get_size beforehand).
+void mp_binary_set_val_array_from_int(char typecode, void *p, size_t index, mp_int_t val) {
+    switch (typecode) {
+        case 'b':
+            ((signed char *)p)[index] = val;
+            break;
+        case BYTEARRAY_TYPECODE:
+        case 'B':
+            ((unsigned char *)p)[index] = val;
+            break;
+        case 'h':
+            ((short *)p)[index] = val;
+            break;
+        case 'H':
+            ((unsigned short *)p)[index] = val;
+            break;
+        case 'i':
+            ((int *)p)[index] = val;
+            break;
+        case 'I':
+            ((unsigned int *)p)[index] = val;
+            break;
+        case 'l':
+            ((long *)p)[index] = val;
+            break;
+        case 'L':
+            ((unsigned long *)p)[index] = val;
+            break;
+        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+        case 'q':
+            ((long long *)p)[index] = val;
+            break;
+        case 'Q':
+            ((unsigned long long *)p)[index] = val;
+            break;
+        #endif
+        #if MICROPY_PY_BUILTINS_FLOAT
+        case 'f':
+            ((float *)p)[index] = (float)val;
+            break;
+        case 'd':
+            ((double *)p)[index] = (double)val;
+            break;
+        #endif
+        #if MICROPY_NONSTANDARD_TYPECODES
+        // Extension to CPython: array of pointers
+        case 'P':
+            ((void **)p)[index] = (void *)(uintptr_t)val;
+            break;
+        #endif
+    }
+}
diff --git a/circuitpython/py/binary.h b/circuitpython/py/binary.h
new file mode 100644
index 0000000..9fade81
--- /dev/null
+++ b/circuitpython/py/binary.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Paul Sokolovsky
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BINARY_H
+#define MICROPY_INCLUDED_PY_BINARY_H
+
+#include "py/obj.h"
+
+// Use special typecode to differentiate repr() of bytearray vs array.array('B')
+// (underlyingly they're same). Can't use 0 here because that's used to detect
+// type-specification errors due to end-of-string.
+#define BYTEARRAY_TYPECODE 1
+
+size_t mp_binary_get_size(char struct_type, char val_type, size_t *palign);
+mp_obj_t mp_binary_get_val_array(char typecode, void *p, size_t index);
+void mp_binary_set_val_array(char typecode, void *p, size_t index, mp_obj_t val_in);
+void mp_binary_set_val_array_from_int(char typecode, void *p, size_t index, mp_int_t val);
+mp_obj_t mp_binary_get_val(char struct_type, char val_type, byte *p_base, byte **ptr);
+void mp_binary_set_val(char struct_type, char val_type, mp_obj_t val_in, byte *p_base, byte **ptr);
+long long mp_binary_get_int(size_t size, bool is_signed, bool big_endian, const byte *src);
+void mp_binary_set_int(size_t val_sz, bool big_endian, byte *dest, mp_uint_t val);
+
+#endif // MICROPY_INCLUDED_PY_BINARY_H
diff --git a/circuitpython/py/builtin.h b/circuitpython/py/builtin.h
new file mode 100644
index 0000000..4fa4b08
--- /dev/null
+++ b/circuitpython/py/builtin.h
@@ -0,0 +1,127 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_BUILTIN_H
+#define MICROPY_INCLUDED_PY_BUILTIN_H
+
+#include "py/obj.h"
+
+mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args);
+mp_obj_t mp_builtin_open(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args);
+
+MP_DECLARE_CONST_FUN_OBJ_VAR(mp_builtin___build_class___obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin___import___obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_abs_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_all_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_any_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_bin_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_callable_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_compile_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_chr_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_delattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_dir_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_divmod_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_eval_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_exec_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_execfile_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_getattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_3(mp_builtin_setattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_builtin_globals_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_hash_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_help_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_hex_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_id_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_isinstance_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_builtin_issubclass_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_iter_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_len_obj);
+MP_DECLARE_CONST_FUN_OBJ_0(mp_builtin_locals_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_max_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_min_obj);
+#if MICROPY_PY_BUILTINS_NEXT2
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj);
+#else
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_next_obj);
+#endif
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_oct_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_ord_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_print_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_builtin_repr_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj);
+// Defined by a port, but declared here for simplicity
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_input_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(mp_builtin_open_obj);
+
+MP_DECLARE_CONST_FUN_OBJ_2(mp_namedtuple_obj);
+
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_contains_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_getitem_obj);
+MP_DECLARE_CONST_FUN_OBJ_3(mp_op_setitem_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_op_delitem_obj);
+
+extern const mp_obj_module_t mp_module___main__;
+extern const mp_obj_module_t mp_module_builtins;
+extern const mp_obj_module_t mp_module_array;
+extern const mp_obj_module_t mp_module_collections;
+extern const mp_obj_module_t mp_module_io;
+extern const mp_obj_module_t mp_module_math;
+extern const mp_obj_module_t mp_module_cmath;
+extern const mp_obj_module_t mp_module_micropython;
+extern const mp_obj_module_t mp_module_ustruct;
+extern const mp_obj_module_t mp_module_sys;
+extern const mp_obj_module_t mp_module_gc;
+extern const mp_obj_module_t mp_module_thread;
+
+extern const mp_obj_dict_t mp_module_builtins_globals;
+
+// extmod modules
+extern const mp_obj_module_t mp_module_uasyncio;
+extern const mp_obj_module_t mp_module_uerrno;
+extern const mp_obj_module_t mp_module_uctypes;
+extern const mp_obj_module_t mp_module_uzlib;
+extern const mp_obj_module_t mp_module_ujson;
+extern const mp_obj_module_t mp_module_ure;
+extern const mp_obj_module_t mp_module_uheapq;
+extern const mp_obj_module_t mp_module_uhashlib;
+extern const mp_obj_module_t mp_module_ucryptolib;
+extern const mp_obj_module_t mp_module_ubinascii;
+extern const mp_obj_module_t mp_module_urandom;
+extern const mp_obj_module_t mp_module_uselect;
+extern const mp_obj_module_t mp_module_utimeq;
+extern const mp_obj_module_t mp_module_machine;
+extern const mp_obj_module_t mp_module_framebuf;
+extern const mp_obj_module_t mp_module_btree;
+extern const mp_obj_module_t mp_module_ubluetooth;
+extern const mp_obj_module_t mp_module_uplatform;
+
+extern const char MICROPY_PY_BUILTINS_HELP_TEXT[];
+
+#endif // MICROPY_INCLUDED_PY_BUILTIN_H
diff --git a/circuitpython/py/builtinevex.c b/circuitpython/py/builtinevex.c
new file mode 100644
index 0000000..a96a3a5
--- /dev/null
+++ b/circuitpython/py/builtinevex.c
@@ -0,0 +1,176 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/objfun.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_COMPILE
+
+// Object returned by compile(): a thin wrapper around the function object
+// produced by the compiler, executed later by eval()/exec().
+typedef struct _mp_obj_code_t {
+    mp_obj_base_t base;
+    mp_obj_t module_fun; // compiled module as a callable taking no args
+} mp_obj_code_t;
+
+// Type of compile()'s result; carries only a name, no methods.
+STATIC const mp_obj_type_t mp_type_code = {
+    { &mp_type_type },
+    .name = MP_QSTR_code,
+};
+
+// Run a compiled code object under the given globals/locals dicts,
+// restoring the previous context afterwards whether the code returns
+// normally or raises (the nlr_push/nlr_jump pair re-raises after restore).
+STATIC mp_obj_t code_execute(mp_obj_code_t *self, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
+    // save context and set new context
+    mp_obj_dict_t *old_globals = mp_globals_get();
+    mp_obj_dict_t *old_locals = mp_locals_get();
+    mp_globals_set(globals);
+    mp_locals_set(locals);
+
+    // a bit of a hack: fun_bc will re-set globals, so need to make sure it's
+    // the correct one
+    if (mp_obj_is_type(self->module_fun, &mp_type_fun_bc)) {
+        mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(self->module_fun);
+        fun_bc->globals = globals;
+    }
+
+    // execute code
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_obj_t ret = mp_call_function_0(self->module_fun);
+        nlr_pop();
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+        return ret;
+    } else {
+        // exception; restore context and re-raise same exception
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+        nlr_jump(nlr.ret_val);
+    }
+}
+
+// Python: compile(source, filename, mode, ...) -> code object.
+// The optional flags/dont_inherit/optimize arguments (up to 6 total) are
+// accepted but ignored.  Raises ValueError for an unknown mode string.
+STATIC mp_obj_t mp_builtin_compile(size_t n_args, const mp_obj_t *args) {
+    (void)n_args;
+
+    // source text to compile
+    size_t str_len;
+    const char *str = mp_obj_str_get_data(args[0], &str_len);
+
+    // filename, used only for error reporting
+    qstr filename = mp_obj_str_get_qstr(args[1]);
+
+    // build a lexer over the in-memory source
+    mp_lexer_t *lex = mp_lexer_new_from_str_len(filename, str, str_len, 0);
+
+    // map the compile mode string onto a parser input kind
+    qstr mode = mp_obj_str_get_qstr(args[2]);
+    mp_parse_input_kind_t parse_input_kind;
+    if (mode == MP_QSTR_single) {
+        parse_input_kind = MP_PARSE_SINGLE_INPUT;
+    } else if (mode == MP_QSTR_exec) {
+        parse_input_kind = MP_PARSE_FILE_INPUT;
+    } else if (mode == MP_QSTR_eval) {
+        parse_input_kind = MP_PARSE_EVAL_INPUT;
+    } else {
+        mp_raise_ValueError(MP_ERROR_TEXT("bad compile mode"));
+    }
+
+    // wrap the compiled module function in a code object
+    mp_obj_code_t *code = m_new_obj(mp_obj_code_t);
+    code->base.type = &mp_type_code;
+    code->module_fun = mp_parse_compile_execute(lex, parse_input_kind, NULL, NULL);
+    return MP_OBJ_FROM_PTR(code);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_compile_obj, 3, 6, mp_builtin_compile);
+
+#endif // MICROPY_PY_BUILTINS_COMPILE
+
+#if MICROPY_PY_BUILTINS_EVAL_EXEC
+
+// Common implementation of eval()/exec()/execfile().
+// args[0] is the source (string/buffer, or a code object from compile());
+// optional args[1]/args[2] are the globals and locals dicts.
+// NOTE: MP_PARSE_SINGLE_INPUT is repurposed as a sentinel meaning
+// "args[0] is a filename" (execfile); it is never passed to the parser.
+STATIC mp_obj_t eval_exec_helper(size_t n_args, const mp_obj_t *args, mp_parse_input_kind_t parse_input_kind) {
+    // work out the context
+    mp_obj_dict_t *globals = mp_globals_get();
+    mp_obj_dict_t *locals = mp_locals_get();
+    for (size_t i = 1; i < 3 && i < n_args; ++i) {
+        if (args[i] != mp_const_none) {
+            if (!mp_obj_is_type(args[i], &mp_type_dict)) {
+                mp_raise_TypeError(NULL);
+            }
+            locals = MP_OBJ_TO_PTR(args[i]);
+            if (i == 1) {
+                // A globals dict with no explicit locals serves as both.
+                globals = locals;
+            }
+        }
+    }
+
+    #if MICROPY_PY_BUILTINS_COMPILE
+    // Pre-compiled code object: run it directly, no parsing needed.
+    if (mp_obj_is_type(args[0], &mp_type_code)) {
+        return code_execute(MP_OBJ_TO_PTR(args[0]), globals, locals);
+    }
+    #endif
+
+    // Extract the source code.
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+
+    // create the lexer
+    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
+    mp_lexer_t *lex;
+    if (MICROPY_PY_BUILTINS_EXECFILE && parse_input_kind == MP_PARSE_SINGLE_INPUT) {
+        lex = mp_lexer_new_from_file(bufinfo.buf);
+        parse_input_kind = MP_PARSE_FILE_INPUT;
+    } else {
+        lex = mp_lexer_new_from_str_len(MP_QSTR__lt_string_gt_, bufinfo.buf, bufinfo.len, 0);
+    }
+
+    return mp_parse_compile_execute(lex, parse_input_kind, globals, locals);
+}
+
+// Python: eval(source[, globals[, locals]]) — parse as an expression.
+STATIC mp_obj_t mp_builtin_eval(size_t n_args, const mp_obj_t *args) {
+    return eval_exec_helper(n_args, args, MP_PARSE_EVAL_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_eval_obj, 1, 3, mp_builtin_eval);
+
+// Python: exec(source[, globals[, locals]]) — parse as a module body.
+STATIC mp_obj_t mp_builtin_exec(size_t n_args, const mp_obj_t *args) {
+    return eval_exec_helper(n_args, args, MP_PARSE_FILE_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_exec_obj, 1, 3, mp_builtin_exec);
+
+#endif // MICROPY_PY_BUILTINS_EVAL_EXEC
+
+#if MICROPY_PY_BUILTINS_EXECFILE
+// Python: execfile(filename[, globals[, locals]]) — run a file by name.
+STATIC mp_obj_t mp_builtin_execfile(size_t n_args, const mp_obj_t *args) {
+    // MP_PARSE_SINGLE_INPUT is used to indicate a file input
+    return eval_exec_helper(n_args, args, MP_PARSE_SINGLE_INPUT);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_execfile_obj, 1, 3, mp_builtin_execfile);
+#endif
diff --git a/circuitpython/py/builtinhelp.c b/circuitpython/py/builtinhelp.c
new file mode 100644
index 0000000..7411c57
--- /dev/null
+++ b/circuitpython/py/builtinhelp.c
@@ -0,0 +1,182 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "genhdr/mpversion.h"
+#include "py/builtin.h"
+#include "py/mpconfig.h"
+#include "py/objmodule.h"
+
+#if MICROPY_PY_BUILTINS_HELP
+
+// Default help() banner; ports can override it by defining
+// MICROPY_PY_BUILTINS_HELP_TEXT to point at their own text.
+const char mp_help_default_text[] =
+    "Welcome to MicroPython!\n"
+    "\n"
+    "For online docs please visit http://docs.micropython.org/\n"
+    "\n"
+    "Control commands:\n"
+    "  CTRL-A        -- on a blank line, enter raw REPL mode\n"
+    "  CTRL-B        -- on a blank line, enter normal REPL mode\n"
+    "  CTRL-C        -- interrupt a running program\n"
+    "  CTRL-D        -- on a blank line, exit or do a soft reset\n"
+    "  CTRL-E        -- on a blank line, enter paste mode\n"
+    "\n"
+    "For further help on a specific object, type help(obj)\n"
+;
+
+// Print one "  name -- value" line for an attribute of the object being
+// help()ed.
+STATIC void mp_help_print_info_about_object(mp_obj_t name_o, mp_obj_t value) {
+    mp_print_str(MP_PYTHON_PRINTER, "  ");
+    mp_obj_print(name_o, PRINT_STR);
+    mp_print_str(MP_PYTHON_PRINTER, " -- ");
+    mp_obj_print(value, PRINT_STR);
+    mp_print_str(MP_PYTHON_PRINTER, "\n");
+}
+
+#if MICROPY_PY_BUILTINS_HELP_MODULES
+// Append the key of every occupied slot in `map` to the Python list.
+STATIC void mp_help_add_from_map(mp_obj_t list, const mp_map_t *map) {
+    size_t slot = 0;
+    while (slot < map->alloc) {
+        if (mp_map_slot_is_filled(map, slot)) {
+            mp_obj_list_append(list, map->table[slot].key);
+        }
+        slot++;
+    }
+}
+
+#if MICROPY_MODULE_FROZEN
+// Append frozen-module names to the list.  `name` is a sequence of
+// NUL-terminated strings, terminated by an empty string.
+STATIC void mp_help_add_from_names(mp_obj_t list, const char *name) {
+    while (*name) {
+        size_t len = strlen(name);
+        // name should end in '.py' and we strip it off
+        mp_obj_list_append(list, mp_obj_new_str(name, len - 3));
+        name += len + 1;
+    }
+}
+#endif
+
+// These externs were originally declared inside mp_help_print_modules(),
+// but they triggered -Wnested-externs, so they were moved outside.
+#if MICROPY_MODULE_FROZEN
+extern const char mp_frozen_names[];
+#endif
+
+// Implement help("modules"): collect built-in (and frozen) module names,
+// sort them, and print them in NUM_COLUMNS columns, column-first.
+STATIC void mp_help_print_modules(void) {
+    mp_obj_t list = mp_obj_new_list(0, NULL);
+
+    mp_help_add_from_map(list, &mp_builtin_module_map);
+
+    #if MICROPY_MODULE_FROZEN
+    mp_help_add_from_names(list, mp_frozen_names);
+    #endif
+
+    // sort the list so it's printed in alphabetical order
+    mp_obj_list_sort(1, &list, (mp_map_t *)&mp_const_empty_map);
+
+    // print the list of modules in a column-first order
+    #define NUM_COLUMNS (4)
+    #define COLUMN_WIDTH (18)
+    size_t len;
+    mp_obj_t *items;
+    mp_obj_list_get(list, &len, &items);
+    unsigned int num_rows = (len + NUM_COLUMNS - 1) / NUM_COLUMNS;
+    for (unsigned int i = 0; i < num_rows; ++i) {
+        // Column-first: entry for row i, column c is items[i + c*num_rows].
+        unsigned int j = i;
+        for (;;) {
+            int l = mp_print_str(MP_PYTHON_PRINTER, mp_obj_str_get_str(items[j]));
+            j += num_rows;
+            if (j >= len) {
+                break;
+            }
+            // Pad to the next COLUMN_WIDTH boundary (at least one space,
+            // even when the name overran its column).
+            int gap = COLUMN_WIDTH - l;
+            while (gap < 1) {
+                gap += COLUMN_WIDTH;
+            }
+            while (gap--) {
+                mp_print_str(MP_PYTHON_PRINTER, " ");
+            }
+        }
+        mp_print_str(MP_PYTHON_PRINTER, "\n");
+    }
+
+    #if MICROPY_ENABLE_EXTERNAL_IMPORT
+    // let the user know there may be other modules available from the filesystem
+    serial_write_compressed(translate("Plus any modules on the filesystem\n"));
+    #endif
+}
+#endif
+
+// Print help for a specific object: its repr, its type, and — for modules
+// and classes — one line per entry in its globals/locals dict.
+STATIC void mp_help_print_obj(const mp_obj_t obj) {
+    #if MICROPY_PY_BUILTINS_HELP_MODULES
+    // help("modules") is special-cased to list available modules.
+    if (obj == MP_OBJ_NEW_QSTR(MP_QSTR_modules)) {
+        mp_help_print_modules();
+        return;
+    }
+    #endif
+
+    const mp_obj_type_t *type = mp_obj_get_type(obj);
+
+    // try to print something sensible about the given object
+    mp_cprintf(MP_PYTHON_PRINTER, translate("object "));
+    mp_obj_print(obj, PRINT_STR);
+
+    mp_cprintf(MP_PYTHON_PRINTER, translate(" is of type %q\n"), type->name);
+
+    // Pick the attribute map to enumerate, if the object has one.
+    mp_map_t *map = NULL;
+    if (type == &mp_type_module) {
+        map = &mp_obj_module_get_globals(obj)->map;
+    } else {
+        if (type == &mp_type_type) {
+            // help(SomeClass): describe the class itself, not type.
+            type = MP_OBJ_TO_PTR(obj);
+        }
+        if (type->locals_dict != NULL) {
+            map = &type->locals_dict->map;
+        }
+    }
+    if (map != NULL) {
+        for (uint i = 0; i < map->alloc; i++) {
+            if (map->table[i].key != MP_OBJ_NULL) {
+                mp_help_print_info_about_object(map->table[i].key, map->table[i].value);
+            }
+        }
+    }
+}
+
+// Python: help([obj]).  With no argument, print the CircuitPython welcome
+// banner; with one argument, describe that object.
+STATIC mp_obj_t mp_builtin_help(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        // print a general help message. Translate only works on single strings on one line.
+        mp_cprintf(MP_PYTHON_PRINTER,
+            translate("Welcome to Adafruit CircuitPython %s!\n\nVisit circuitpython.org for more information.\n\nTo list built-in modules type `help(\"modules\")`.\n"),
+            MICROPY_GIT_TAG);
+    } else {
+        // try to print something sensible about the given object
+        mp_help_print_obj(args[0]);
+    }
+
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_help_obj, 0, 1, mp_builtin_help);
+
+#endif // MICROPY_PY_BUILTINS_HELP
diff --git a/circuitpython/py/builtinimport.c b/circuitpython/py/builtinimport.c
new file mode 100644
index 0000000..10539d0
--- /dev/null
+++ b/circuitpython/py/builtinimport.c
@@ -0,0 +1,607 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ * Copyright (c) 2021 Jim Mussared
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/compile.h"
+#include "py/gc_long_lived.h"
+#include "py/gc.h"
+#include "py/objmodule.h"
+#include "py/persistentcode.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/frozenmod.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_ENABLE_EXTERNAL_IMPORT
+
+// Must be a string of one byte.
+#define PATH_SEP_CHAR "/"
+
+// Virtual sys.path entry that maps to the frozen modules.
+#define MP_FROZEN_PATH_PREFIX ".frozen/"
+
+// Return true if the given module object is a package, i.e. it exposes a
+// __path__ attribute (the standard Python marker for packages).
+bool mp_obj_is_package(mp_obj_t module) {
+    mp_obj_t dest[2];
+    mp_load_method_maybe(module, MP_QSTR___path__, dest);
+    // dest[0] is non-NULL iff the attribute lookup succeeded.
+    return dest[0] != MP_OBJ_NULL;
+}
+
+// Wrapper for mp_import_stat (which is provided by the port, and typically
+// uses mp_vfs_import_stat) to also search frozen modules. Given an exact
+// path to a file or directory (e.g. "foo/bar", foo/bar.py" or "foo/bar.mpy"),
+// will return whether the path is a file, directory, or doesn't exist.
+STATIC mp_import_stat_t stat_path_or_frozen(const char *path) {
+    #if MICROPY_MODULE_FROZEN
+    // Only try and load as a frozen module if it starts with .frozen/.
+    const int frozen_path_prefix_len = strlen(MP_FROZEN_PATH_PREFIX);
+    if (strncmp(path, MP_FROZEN_PATH_PREFIX, frozen_path_prefix_len) == 0) {
+        // NULL out-parameters: only the stat result is needed here, not the
+        // frozen module's type/data.
+        return mp_find_frozen_module(path + frozen_path_prefix_len, NULL, NULL);
+    }
+    #endif
+    // Fall back to the port-provided filesystem stat.
+    return mp_import_stat(path);
+}
+
+// Given a path to a .py file, try and find this path as either a .py or .mpy
+// in either the filesystem or frozen modules.
+STATIC mp_import_stat_t stat_file_py_or_mpy(vstr_t *path) {
+    // First try the path exactly as given (expected to end in ".py").
+    mp_import_stat_t stat = stat_path_or_frozen(vstr_null_terminated_str(path));
+    if (stat == MP_IMPORT_STAT_FILE) {
+        return stat;
+    }
+
+    #if MICROPY_PERSISTENT_CODE_LOAD
+    // Didn't find .py -- try the .mpy instead by inserting an 'm' into the '.py'.
+    // Note: this mutates *path; on the .mpy-found (and not-found) paths the
+    // ".mpy" spelling is left in the buffer for the caller.
+    vstr_ins_byte(path, path->len - 2, 'm');
+    stat = stat_path_or_frozen(vstr_null_terminated_str(path));
+    if (stat == MP_IMPORT_STAT_FILE) {
+        return stat;
+    }
+    #endif
+
+    return MP_IMPORT_STAT_NO_EXIST;
+}
+
+// Given an import path (e.g. "foo/bar"), try and find "foo/bar" (a directory)
+// or "foo/bar.(m)py" in either the filesystem or frozen modules.
+STATIC mp_import_stat_t stat_dir_or_file(vstr_t *path) {
+    mp_import_stat_t stat = stat_path_or_frozen(vstr_null_terminated_str(path));
+    DEBUG_printf("stat %s: %d\n", vstr_str(path), stat);
+    if (stat == MP_IMPORT_STAT_DIR) {
+        return stat;
+    }
+
+    // not a directory, add .py and try as a file
+    // (mutates *path; the found spelling — ".py" or ".mpy" — remains in it)
+    vstr_add_str(path, ".py");
+    return stat_file_py_or_mpy(path);
+}
+
+// Given a top-level module, try and find it in each of the sys.path entries
+// via stat_dir_or_file.
+STATIC mp_import_stat_t stat_top_level_dir_or_file(qstr mod_name, vstr_t *dest) {
+    DEBUG_printf("stat_top_level_dir_or_file: '%s'\n", qstr_str(mod_name));
+    #if MICROPY_PY_SYS
+    size_t path_num;
+    mp_obj_t *path_items;
+    mp_obj_list_get(mp_sys_path, &path_num, &path_items);
+
+    if (path_num > 0) {
+        // go through each path looking for a directory or file
+        for (size_t i = 0; i < path_num; i++) {
+            // Build "<sys.path entry>/<mod_name>" into dest for this candidate.
+            vstr_reset(dest);
+            size_t p_len;
+            const char *p = mp_obj_str_get_data(path_items[i], &p_len);
+            if (p_len > 0) {
+                vstr_add_strn(dest, p, p_len);
+                vstr_add_char(dest, PATH_SEP_CHAR[0]);
+            }
+            vstr_add_str(dest, qstr_str(mod_name));
+            mp_import_stat_t stat = stat_dir_or_file(dest);
+            if (stat != MP_IMPORT_STAT_NO_EXIST) {
+                // Found: dest now holds the path that matched.
+                return stat;
+            }
+        }
+
+        // could not find a directory or file
+        return MP_IMPORT_STAT_NO_EXIST;
+    }
+    #endif
+
+    // mp_sys_path is empty (or not enabled), so just stat the given path
+    // directly.
+    vstr_add_str(dest, qstr_str(mod_name));
+    return stat_dir_or_file(dest);
+}
+
+#if MICROPY_MODULE_FROZEN_STR || MICROPY_ENABLE_COMPILER
+// Compile and execute Python source (from the given lexer) as the body of
+// module_obj, setting __file__ first when enabled.
+STATIC void do_load_from_lexer(mp_obj_t module_obj, mp_lexer_t *lex) {
+    #if MICROPY_PY___FILE__
+    qstr source_name = lex->source_name;
+    mp_store_attr(module_obj, MP_QSTR___file__, MP_OBJ_NEW_QSTR(source_name));
+    #endif
+
+    // parse, compile and execute the module in its context
+    mp_obj_dict_t *mod_globals = mp_obj_module_get_globals(module_obj);
+    mp_parse_compile_execute(lex, MP_PARSE_FILE_INPUT, mod_globals, mod_globals);
+    // Swap in a long-lived copy of the finished globals (see py/gc_long_lived.h).
+    mp_obj_module_set_globals(module_obj, make_dict_long_lived(mod_globals, 10));
+}
+#endif
+
+#if (MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD) || MICROPY_MODULE_FROZEN_MPY
+// Execute pre-compiled bytecode (frozen or loaded .mpy) as the body of
+// module_obj, with that module's globals installed for the duration.
+STATIC void do_execute_raw_code(mp_obj_t module_obj, mp_raw_code_t *raw_code, const char *source_name) {
+    (void)source_name;
+    #if MICROPY_PY___FILE__
+    mp_store_attr(module_obj, MP_QSTR___file__, MP_OBJ_NEW_QSTR(qstr_from_str(source_name)));
+    #endif
+
+    // execute the module in its context
+    mp_obj_dict_t *mod_globals = mp_obj_module_get_globals(module_obj);
+
+    // save context
+    // (volatile: must keep their values across the nlr_push setjmp-style jump)
+    mp_obj_dict_t *volatile old_globals = mp_globals_get();
+    mp_obj_dict_t *volatile old_locals = mp_locals_get();
+
+    // set new context
+    mp_globals_set(mod_globals);
+    mp_locals_set(mod_globals);
+
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_obj_t module_fun = mp_make_function_from_raw_code(raw_code, MP_OBJ_NULL, MP_OBJ_NULL);
+        mp_call_function_0(module_fun);
+
+        // finish nlr block, restore context
+        nlr_pop();
+        mp_obj_module_set_globals(module_obj,
+            make_dict_long_lived(mp_obj_module_get_globals(module_obj), 10));
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+    } else {
+        // exception; restore context and re-raise same exception
+        mp_globals_set(old_globals);
+        mp_locals_set(old_locals);
+        nlr_jump(nlr.ret_val);
+    }
+}
+#endif
+
+// Load and execute the module file named by *file into module_obj, dispatching
+// on where/how it is stored: frozen (str or mpy), compiled .mpy on the
+// filesystem, or .py source compiled on the fly.
+STATIC void do_load(mp_obj_t module_obj, vstr_t *file) {
+    #if MICROPY_MODULE_FROZEN || MICROPY_ENABLE_COMPILER || (MICROPY_PERSISTENT_CODE_LOAD && MICROPY_HAS_FILE_READER)
+    const char *file_str = vstr_null_terminated_str(file);
+    #endif
+
+    // If we support frozen modules (either as str or mpy) then try to find the
+    // requested filename in the list of frozen module filenames.
+    #if MICROPY_MODULE_FROZEN
+    void *modref;
+    int frozen_type;
+    const int frozen_path_prefix_len = strlen(MP_FROZEN_PATH_PREFIX);
+    if (strncmp(file_str, MP_FROZEN_PATH_PREFIX, frozen_path_prefix_len) == 0) {
+        mp_find_frozen_module(file_str + frozen_path_prefix_len, &frozen_type, &modref);
+
+        // If we support frozen str modules and the compiler is enabled, and we
+        // found the filename in the list of frozen files, then load and execute it.
+        #if MICROPY_MODULE_FROZEN_STR
+        if (frozen_type == MP_FROZEN_STR) {
+            do_load_from_lexer(module_obj, modref);
+            return;
+        }
+        #endif
+
+        // If we support frozen mpy modules and we found a corresponding file (and
+        // its data) in the list of frozen files, execute it.
+        #if MICROPY_MODULE_FROZEN_MPY
+        if (frozen_type == MP_FROZEN_MPY) {
+            do_execute_raw_code(module_obj, modref, file_str + frozen_path_prefix_len);
+            return;
+        }
+        #endif
+    }
+    #endif // MICROPY_MODULE_FROZEN
+
+    // If we support loading .mpy files then check if the file extension is of
+    // the correct format and, if so, load and execute the file.
+    #if MICROPY_HAS_FILE_READER && MICROPY_PERSISTENT_CODE_LOAD
+    // "<name>.mpy" vs "<name>.py": the 'm' sits 3 characters from the end.
+    if (file_str[file->len - 3] == 'm') {
+        mp_raw_code_t *raw_code = mp_raw_code_load_file(file_str);
+        do_execute_raw_code(module_obj, raw_code, file_str);
+        return;
+    }
+    #endif
+
+    // If we can compile scripts then load the file and compile and execute it.
+    #if MICROPY_ENABLE_COMPILER
+    {
+        mp_lexer_t *lex = mp_lexer_new_from_file(file_str);
+        do_load_from_lexer(module_obj, lex);
+        return;
+    }
+    #else
+    // If we get here then the file was not frozen and we can't compile scripts.
+    mp_raise_ImportError(MP_ERROR_TEXT("script compilation not supported"));
+    #endif
+}
+
+// Convert a relative (to the current module) import, going up "level" levels,
+// into an absolute import.
+STATIC void evaluate_relative_import(mp_int_t level, const char **module_name, size_t *module_name_len) {
+    // What we want to do here is to take the name of the current module,
+    // remove <level> trailing components, and concatenate the passed-in
+    // module name.
+    // For example, level=3, module_name="foo.bar", __name__="a.b.c.d" --> "a.foo.bar"
+    // "Relative imports use a module's __name__ attribute to determine that
+    // module's position in the package hierarchy."
+    // http://legacy.python.org/dev/peps/pep-0328/#relative-imports-and-name
+
+    mp_obj_t current_module_name_obj = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___name__));
+    assert(current_module_name_obj != MP_OBJ_NULL);
+
+    #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT && MICROPY_CPYTHON_COMPAT
+    if (MP_OBJ_QSTR_VALUE(current_module_name_obj) == MP_QSTR___main__) {
+        // This is a module loaded by -m command-line switch (e.g. unix port),
+        // and so its __name__ has been set to "__main__". Get its real name
+        // that we stored during import in the __main__ attribute.
+        current_module_name_obj = mp_obj_dict_get(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+    }
+    #endif
+
+    // If we have a __path__ in the globals dict, then we're a package.
+    bool is_pkg = mp_map_lookup(&mp_globals_get()->map, MP_OBJ_NEW_QSTR(MP_QSTR___path__), MP_MAP_LOOKUP);
+
+    #if DEBUG_PRINT
+    DEBUG_printf("Current module/package: ");
+    mp_obj_print_helper(MICROPY_DEBUG_PRINTER, current_module_name_obj, PRINT_REPR);
+    DEBUG_printf(", is_package: %d", is_pkg);
+    DEBUG_printf("\n");
+    #endif
+
+    size_t current_module_name_len;
+    const char *current_module_name = mp_obj_str_get_data(current_module_name_obj, &current_module_name_len);
+
+    const char *p = current_module_name + current_module_name_len;
+    if (is_pkg) {
+        // If we're evaluating relative to a package, then take off one fewer
+        // level (i.e. the relative search starts inside the package, rather
+        // than as a sibling of the package).
+        --level;
+    }
+
+    // Walk back 'level' dots (or run out of path).
+    while (level && p > current_module_name) {
+        if (*--p == '.') {
+            --level;
+        }
+    }
+
+    // We must have some component left over to import from.
+    if (p == current_module_name) {
+        mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("can't perform relative import"));
+    }
+
+    // New length is len("<chopped path>.<module_name>"). Note: might be one byte
+    // more than we need if module_name is empty (for the extra . we will
+    // append).
+    // NOTE(review): declared uint but computed from and stored into size_t
+    // values below — consider declaring as size_t.
+    uint new_module_name_len = (size_t)(p - current_module_name) + 1 + *module_name_len;
+    char *new_mod = mp_local_alloc(new_module_name_len);
+    memcpy(new_mod, current_module_name, p - current_module_name);
+
+    // Only append ".<module_name>" if there was one.
+    if (*module_name_len != 0) {
+        new_mod[p - current_module_name] = '.';
+        memcpy(new_mod + (p - current_module_name) + 1, *module_name, *module_name_len);
+    } else {
+        --new_module_name_len;
+    }
+
+    // Copy into a QSTR (qstr storage outlives the local buffer freed next).
+    qstr new_mod_q = qstr_from_strn(new_mod, new_module_name_len);
+    mp_local_free(new_mod);
+
+    DEBUG_printf("Resolved base name for relative import: '%s'\n", qstr_str(new_mod_q));
+    *module_name = qstr_str(new_mod_q);
+    *module_name_len = new_module_name_len;
+}
+
+// Load a module at the specified absolute path, possibly as a submodule of the given outer module.
+// full_mod_name: The full absolute path to this module (e.g. "foo.bar.baz").
+// level_mod_name: The final component of the path (e.g. "baz").
+// outer_module_obj: The parent module (we need to store this module as an
+// attribute on it) (or MP_OBJ_NULL for top-level).
+// path: The filesystem path where we found the parent module
+// (or empty for a top level module).
+// override_main: Whether to set the __name__ to "__main__" (and use __main__
+// for the actual path).
+STATIC mp_obj_t process_import_at_level(qstr full_mod_name, qstr level_mod_name, mp_obj_t outer_module_obj, vstr_t *path, bool override_main) {
+    mp_import_stat_t stat = MP_IMPORT_STAT_NO_EXIST;
+
+    // Exact-match of built-in (or already-loaded) takes priority.
+    mp_obj_t module_obj = mp_module_get_loaded_or_builtin(full_mod_name);
+
+    // Even if we find the module, go through the motions of searching for it
+    // because we may actually be in the process of importing a sub-module.
+    // So we need to (re-)find the correct path to be finding the sub-module
+    // on the next iteration of process_import_at_level.
+
+    if (outer_module_obj == MP_OBJ_NULL) {
+        DEBUG_printf("Searching for top-level module\n");
+
+        // First module in the dotted-name; search for a directory or file
+        // relative to all the locations in sys.path.
+        stat = stat_top_level_dir_or_file(full_mod_name, path);
+
+        // If the module "foo" doesn't exist on the filesystem, and it's not a
+        // builtin, try and find "ufoo" as a built-in. (This feature was
+        // formerly known as "weak links").
+        #if MICROPY_MODULE_WEAK_LINKS
+        if (stat == MP_IMPORT_STAT_NO_EXIST && module_obj == MP_OBJ_NULL) {
+            // Reuses path's buffer as scratch for the "u<name>" spelling.
+            // NOTE(review): strcpy assumes the fixed path buffer (sized
+            // MICROPY_ALLOC_PATH_MAX by the caller) can hold the name — confirm.
+            char *umodule_buf = vstr_str(path);
+            umodule_buf[0] = 'u';
+            strcpy(umodule_buf + 1, qstr_str(level_mod_name));
+            qstr umodule_name = qstr_from_str(umodule_buf);
+            module_obj = mp_module_get_builtin(umodule_name);
+        }
+        #endif
+    } else {
+        DEBUG_printf("Searching for sub-module\n");
+
+        // Add the current part of the module name to the path.
+        vstr_add_char(path, PATH_SEP_CHAR[0]);
+        vstr_add_str(path, qstr_str(level_mod_name));
+
+        // Because it's not top level, we already know which path the parent was found in.
+        stat = stat_dir_or_file(path);
+    }
+    DEBUG_printf("Current path: %.*s\n", (int)vstr_len(path), vstr_str(path));
+
+    if (module_obj == MP_OBJ_NULL) {
+        // Not a built-in and not already-loaded.
+
+        if (stat == MP_IMPORT_STAT_NO_EXIST) {
+            // And the file wasn't found -- fail.
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
+            #else
+            mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), full_mod_name);
+            #endif
+        }
+
+        // Not a built-in but found on the filesystem, try and load it.
+
+        DEBUG_printf("Found path: %.*s\n", (int)vstr_len(path), vstr_str(path));
+
+        // Prepare for loading from the filesystem. Create a new shell module.
+        module_obj = mp_obj_new_module(full_mod_name);
+
+        #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+        // If this module is being loaded via -m on unix, then
+        // override __name__ to "__main__". Do this only for *modules*
+        // however - packages never have their names replaced, instead
+        // they're -m'ed using a special __main__ submodule in them. (This all
+        // apparently is done to not touch the package name itself, which is
+        // important for future imports).
+        if (override_main && stat != MP_IMPORT_STAT_DIR) {
+            mp_obj_module_t *o = MP_OBJ_TO_PTR(module_obj);
+            mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+            #if MICROPY_CPYTHON_COMPAT
+            // Store module as "__main__" in the dictionary of loaded modules (returned by sys.modules).
+            mp_obj_dict_store(MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_loaded_modules_dict)), MP_OBJ_NEW_QSTR(MP_QSTR___main__), module_obj);
+            // Store real name in "__main__" attribute. Need this for
+            // resolving relative imports later. "__main__ was chosen
+            // semi-randomly, to reuse existing qstr's.
+            mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___main__), MP_OBJ_NEW_QSTR(full_mod_name));
+            #endif
+        }
+        #endif // MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+
+        if (stat == MP_IMPORT_STAT_DIR) {
+            // Directory -- execute "path/__init__.py".
+            DEBUG_printf("%.*s is dir\n", (int)vstr_len(path), vstr_str(path));
+            // Store the __path__ attribute onto this module.
+            // https://docs.python.org/3/reference/import.html
+            // "Specifically, any module that contains a __path__ attribute is considered a package."
+            mp_store_attr(module_obj, MP_QSTR___path__, mp_obj_new_str(vstr_str(path), vstr_len(path)));
+            size_t orig_path_len = path->len;
+            vstr_add_str(path, PATH_SEP_CHAR "__init__.py");
+            if (stat_file_py_or_mpy(path) == MP_IMPORT_STAT_FILE) {
+                do_load(module_obj, path);
+            } else {
+                // No-op. Nothing to load.
+                // mp_warning("%s is imported as namespace package", vstr_str(&path));
+            }
+            // Remove /__init__.py suffix.
+            path->len = orig_path_len;
+        } else { // MP_IMPORT_STAT_FILE
+            // File -- execute "path.(m)py".
+            do_load(module_obj, path);
+            // Note: This should be the last component in the import path. If
+            // there are remaining components then it's an ImportError
+            // because the current path(the module that was just loaded) is
+            // not a package. This will be caught on the next iteration
+            // because the file will not exist.
+        }
+
+        // Loading a module thrashes the heap significantly so we explicitly clean up
+        // afterwards.
+        gc_collect();
+    }
+
+    // VERIFY_PTR filters out parents that aren't heap objects (i.e. built-ins),
+    // which can't take attribute stores.
+    if (outer_module_obj != MP_OBJ_NULL && VERIFY_PTR(MP_OBJ_TO_PTR(outer_module_obj))) {
+        // If it's a sub-module (not a built-in one), then make it available on
+        // the parent module.
+        mp_store_attr(outer_module_obj, level_mod_name, module_obj);
+    }
+
+    return module_obj;
+}
+
+// __import__(name, [globals, locals, fromlist, level]): full implementation of
+// the import machinery when external imports are enabled.  Walks the dotted
+// name one component at a time, importing each level via
+// process_import_at_level.
+mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
+    #if DEBUG_PRINT
+    DEBUG_printf("__import__:\n");
+    for (size_t i = 0; i < n_args; i++) {
+        DEBUG_printf("  ");
+        mp_obj_print_helper(MICROPY_DEBUG_PRINTER, args[i], PRINT_REPR);
+        DEBUG_printf("\n");
+    }
+    #endif
+
+    // This is the import path, with any leading dots stripped.
+    // "import foo.bar" --> module_name="foo.bar"
+    // "from foo.bar import baz" --> module_name="foo.bar"
+    // "from . import foo" --> module_name=""
+    // "from ...foo.bar import baz" --> module_name="foo.bar"
+    mp_obj_t module_name_obj = args[0];
+
+    // These are the imported names.
+    // i.e. "from foo.bar import baz, zap" --> fromtuple=("baz", "zap",)
+    // Note: There's a special case on the Unix port, where this is set to mp_const_false which means that it's __main__.
+    mp_obj_t fromtuple = mp_const_none;
+
+    // Level is the number of leading dots in a relative import.
+    // i.e. "from . import foo" --> level=1
+    // i.e. "from ...foo.bar import baz" --> level=3
+    mp_int_t level = 0;
+
+    if (n_args >= 4) {
+        fromtuple = args[3];
+        if (n_args >= 5) {
+            level = MP_OBJ_SMALL_INT_VALUE(args[4]);
+            if (level < 0) {
+                mp_raise_ValueError(NULL);
+            }
+        }
+    }
+
+    size_t module_name_len;
+    const char *module_name = mp_obj_str_get_data(module_name_obj, &module_name_len);
+
+    if (level != 0) {
+        // Turn "foo.bar" into "<current module minus 3 components>.foo.bar".
+        evaluate_relative_import(level, &module_name, &module_name_len);
+    }
+
+    if (module_name_len == 0) {
+        mp_raise_ValueError(NULL);
+    }
+
+    DEBUG_printf("Starting module search for '%s'\n", module_name);
+
+    // Fixed-size buffer holding the filesystem path of the module currently
+    // being processed; grown one component per loop iteration below.
+    VSTR_FIXED(path, MICROPY_ALLOC_PATH_MAX)
+    mp_obj_t top_module_obj = MP_OBJ_NULL;
+    mp_obj_t outer_module_obj = MP_OBJ_NULL;
+
+    // Search for the end of each component.
+    size_t current_component_start = 0;
+    for (size_t i = 1; i <= module_name_len; i++) {
+        if (i == module_name_len || module_name[i] == '.') {
+            // The module name up to this depth (e.g. foo.bar.baz).
+            qstr full_mod_name = qstr_from_strn(module_name, i);
+            // The current level name (e.g. baz).
+            qstr level_mod_name = qstr_from_strn(module_name + current_component_start, i - current_component_start);
+
+            DEBUG_printf("Processing module: '%s' at level '%s'\n", qstr_str(full_mod_name), qstr_str(level_mod_name));
+            DEBUG_printf("Previous path: =%.*s=\n", (int)vstr_len(&path), vstr_str(&path));
+
+            #if MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+            // On unix, if this is being loaded via -m (magic mp_const_false),
+            // then handle that if it's the final component.
+            bool override_main = (i == module_name_len && fromtuple == mp_const_false);
+            #else
+            bool override_main = false;
+            #endif
+
+            // Import this module.
+            mp_obj_t module_obj = process_import_at_level(full_mod_name, level_mod_name, outer_module_obj, &path, override_main);
+
+            // Set this as the parent module, and remember the top-level module if it's the first.
+            outer_module_obj = module_obj;
+            if (top_module_obj == MP_OBJ_NULL) {
+                top_module_obj = module_obj;
+            }
+
+            current_component_start = i + 1;
+        }
+    }
+
+    if (fromtuple != mp_const_none) {
+        // If fromtuple is not empty, return leaf module
+        return outer_module_obj;
+    } else {
+        // Otherwise, we need to return top-level package
+        return top_module_obj;
+    }
+}
+
+#else // MICROPY_ENABLE_EXTERNAL_IMPORT
+
+// Minimal __import__ used when external (filesystem) imports are disabled:
+// only already-loaded and built-in modules can be resolved.  Raises
+// NotImplementedError for relative imports and ImportError when not found.
+mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
+    // Check that it's not a relative import
+    if (n_args >= 5 && MP_OBJ_SMALL_INT_VALUE(args[4]) != 0) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("relative import"));
+    }
+
+    // Check if module already exists, and return it if it does
+    qstr module_name_qstr = mp_obj_str_get_qstr(args[0]);
+    mp_obj_t module_obj = mp_module_get_loaded_or_builtin(module_name_qstr);
+    if (module_obj != MP_OBJ_NULL) {
+        return module_obj;
+    }
+
+    #if MICROPY_MODULE_WEAK_LINKS
+    // Check if there is a weak link ("u<name>") to this module.
+    // Bug fix: args[0] is an mp_obj_t, not a C string, and must not be passed
+    // to strcpy() directly; use the qstr's C string instead.
+    char umodule_buf[MICROPY_ALLOC_PATH_MAX];
+    umodule_buf[0] = 'u';
+    strcpy(umodule_buf + 1, qstr_str(module_name_qstr));
+    qstr umodule_name_qstr = qstr_from_str(umodule_buf);
+    module_obj = mp_module_get_loaded_or_builtin(umodule_name_qstr);
+    if (module_obj != MP_OBJ_NULL) {
+        return module_obj;
+    }
+    #endif
+
+    // Couldn't find the module, so fail
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    mp_raise_msg(&mp_type_ImportError, MP_ERROR_TEXT("module not found"));
+    #else
+    mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("no module named '%q'"), module_name_qstr);
+    #endif
+}
+
+#endif // MICROPY_ENABLE_EXTERNAL_IMPORT
+
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin___import___obj, 1, 5, mp_builtin___import__);
diff --git a/circuitpython/py/circuitpy_defns.mk b/circuitpython/py/circuitpy_defns.mk
new file mode 100644
index 0000000..0ae3341
--- /dev/null
+++ b/circuitpython/py/circuitpy_defns.mk
@@ -0,0 +1,759 @@
+# This file is part of the MicroPython project, http://micropython.org/
+#
+# The MIT License (MIT)
+#
+# SPDX-FileCopyrightText: Copyright (c) 2019 Dan Halbert for Adafruit Industries
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Common Makefile definitions that can be shared across CircuitPython ports.
+
+###
+# Common compile warnings.
+
+# Baseline warning and code-generation flags shared by every CircuitPython
+# port; ports append their own CPU/board-specific flags on top of these.
+BASE_CFLAGS = \
+	-fsingle-precision-constant \
+	-fno-strict-aliasing \
+	-Wdouble-promotion \
+	-Wimplicit-fallthrough=2 \
+	-Wno-endif-labels \
+	-Wstrict-prototypes \
+	-Werror-implicit-function-declaration \
+	-Wfloat-equal \
+	-Wundef \
+	-Wshadow \
+	-Wwrite-strings \
+	-Wsign-compare \
+	-Wmissing-format-attribute \
+	-Wno-deprecated-declarations \
+	-Wnested-externs \
+	-Wunreachable-code \
+	-Wcast-align \
+	-D__$(CHIP_VARIANT)__ \
+	-ffunction-sections \
+	-fdata-sections \
+	-DCIRCUITPY_SOFTWARE_SAFE_MODE=0x0ADABEEF \
+	-DCIRCUITPY_CANARY_WORD=0xADAF00 \
+	-DCIRCUITPY_SAFE_RESTART_WORD=0xDEADBEEF \
+	-DCIRCUITPY_BOARD_ID="\"$(BOARD)\"" \
+	--param max-inline-insns-single=500
+
+# Use these flags to debug build times and header includes.
+# -ftime-report
+# -H
+
+
+# Set a global CIRCUITPY_DEBUG flag.
+# Don't just call it "DEBUG": too many libraries use plain DEBUG.
+ifneq ($(DEBUG),)
+CFLAGS += -DCIRCUITPY_DEBUG=$(DEBUG)
+else
+CFLAGS += -DCIRCUITPY_DEBUG=0
+endif
+
+###
+# Handle frozen modules.
+# (Frozen modules are baked into the firmware image at build time, either as
+# Python source strings or as pre-compiled .mpy bytecode.)
+
+ifneq ($(FROZEN_DIR),)
+# To use frozen source modules, put your .py files in a subdirectory (eg scripts/)
+# and then invoke make with FROZEN_DIR=scripts (be sure to build from scratch).
+CFLAGS += -DMICROPY_MODULE_FROZEN_STR
+endif
+
+# To use frozen bytecode, put your .py files in a subdirectory (eg frozen/) and
+# then invoke make with FROZEN_MPY_DIR=frozen or FROZEN_MPY_DIRS="dir1 dir2"
+# (be sure to build from scratch).
+
+ifneq ($(FROZEN_MPY_DIRS),)
+CFLAGS += -DMICROPY_QSTR_EXTRA_POOL=mp_qstr_frozen_const_pool
+CFLAGS += -DMICROPY_MODULE_FROZEN_MPY
+endif
+
+###
+# Select which builtin modules to compile and include.
+
+# One stanza per module: when the board config sets CIRCUITPY_<NAME>=1, that
+# module's source pattern(s) are added to SRC_PATTERNS, which later filters
+# the SRC_*_ALL lists down to the files actually compiled in.
+ifeq ($(CIRCUITPY_AESIO),1)
+SRC_PATTERNS += aesio/%
+endif
+ifeq ($(CIRCUITPY_ALARM),1)
+SRC_PATTERNS += alarm/%
+endif
+ifeq ($(CIRCUITPY_ANALOGIO),1)
+SRC_PATTERNS += analogio/%
+endif
+ifeq ($(CIRCUITPY_ATEXIT),1)
+SRC_PATTERNS += atexit/%
+endif
+ifeq ($(CIRCUITPY_AUDIOBUSIO),1)
+SRC_PATTERNS += audiobusio/%
+endif
+ifeq ($(CIRCUITPY_AUDIOIO),1)
+SRC_PATTERNS += audioio/%
+endif
+ifeq ($(CIRCUITPY_AUDIOPWMIO),1)
+SRC_PATTERNS += audiopwmio/%
+endif
+ifeq ($(CIRCUITPY_AUDIOCORE),1)
+SRC_PATTERNS += audiocore/%
+endif
+ifeq ($(CIRCUITPY_AUDIOMIXER),1)
+SRC_PATTERNS += audiomixer/%
+endif
+ifeq ($(CIRCUITPY_AUDIOMP3),1)
+SRC_PATTERNS += audiomp3/%
+endif
+ifeq ($(CIRCUITPY_BITBANGIO),1)
+SRC_PATTERNS += bitbangio/%
+endif
+# Some builds need bitbang SPI for the dotstar but don't make bitbangio available so include it separately.
+ifeq ($(CIRCUITPY_BITBANG_APA102),1)
+SRC_PATTERNS += bitbangio/SPI%
+endif
+ifeq ($(CIRCUITPY_BITMAPTOOLS),1)
+SRC_PATTERNS += bitmaptools/%
+endif
+ifeq ($(CIRCUITPY_BITOPS),1)
+SRC_PATTERNS += bitops/%
+endif
+ifeq ($(CIRCUITPY_BLEIO),1)
+SRC_PATTERNS += _bleio/%
+endif
+ifeq ($(CIRCUITPY_BOARD),1)
+SRC_PATTERNS += board/%
+endif
+ifeq ($(CIRCUITPY_BUSDEVICE),1)
+SRC_PATTERNS += adafruit_bus_device/%
+endif
+ifeq ($(CIRCUITPY_BUSIO),1)
+SRC_PATTERNS += busio/%
+endif
+ifeq ($(CIRCUITPY_CAMERA),1)
+SRC_PATTERNS += camera/%
+endif
+ifeq ($(CIRCUITPY_CANIO),1)
+SRC_PATTERNS += canio/%
+endif
+ifeq ($(CIRCUITPY_COUNTIO),1)
+SRC_PATTERNS += countio/%
+endif
+ifeq ($(CIRCUITPY_DIGITALIO),1)
+SRC_PATTERNS += digitalio/%
+endif
+ifeq ($(CIRCUITPY_DISPLAYIO),1)
+SRC_PATTERNS += displayio/%
+endif
+ifeq ($(CIRCUITPY_PARALLELDISPLAY),1)
+SRC_PATTERNS += paralleldisplay/%
+endif
+ifeq ($(CIRCUITPY_VECTORIO),1)
+SRC_PATTERNS += vectorio/%
+endif
+ifeq ($(CIRCUITPY_FLOPPYIO),1)
+SRC_PATTERNS += floppyio/%
+endif
+ifeq ($(CIRCUITPY_FRAMEBUFFERIO),1)
+SRC_PATTERNS += framebufferio/%
+endif
+ifeq ($(CIRCUITPY__EVE),1)
+SRC_PATTERNS += _eve/%
+endif
+ifeq ($(CIRCUITPY_FREQUENCYIO),1)
+SRC_PATTERNS += frequencyio/%
+endif
+
+ifeq ($(CIRCUITPY_FUTURE),1)
+SRC_PATTERNS += __future__/%
+endif
+
+ifeq ($(CIRCUITPY_GAMEPADSHIFT),1)
+SRC_PATTERNS += gamepadshift/%
+endif
+ifeq ($(CIRCUITPY_GETPASS),1)
+SRC_PATTERNS += getpass/%
+endif
+ifeq ($(CIRCUITPY_GIFIO),1)
+SRC_PATTERNS += gifio/%
+endif
+ifeq ($(CIRCUITPY_GNSS),1)
+SRC_PATTERNS += gnss/%
+endif
+ifeq ($(CIRCUITPY_I2CPERIPHERAL),1)
+SRC_PATTERNS += i2cperipheral/%
+endif
+ifeq ($(CIRCUITPY_IMAGECAPTURE),1)
+SRC_PATTERNS += imagecapture/%
+endif
+ifeq ($(CIRCUITPY_IPADDRESS),1)
+SRC_PATTERNS += ipaddress/%
+endif
+ifeq ($(CIRCUITPY_IS31FL3741),1)
+SRC_PATTERNS += is31fl3741/%
+endif
+ifeq ($(CIRCUITPY_KEYPAD),1)
+SRC_PATTERNS += keypad/%
+endif
+ifeq ($(CIRCUITPY_MATH),1)
+SRC_PATTERNS += math/%
+endif
+ifeq ($(CIRCUITPY_MEMORYMONITOR),1)
+SRC_PATTERNS += memorymonitor/%
+endif
+ifeq ($(CIRCUITPY_MICROCONTROLLER),1)
+SRC_PATTERNS += microcontroller/%
+endif
+ifeq ($(CIRCUITPY_MDNS),1)
+SRC_PATTERNS += mdns/%
+endif
+ifeq ($(CIRCUITPY_NEOPIXEL_WRITE),1)
+SRC_PATTERNS += neopixel_write/%
+endif
+ifeq ($(CIRCUITPY_NVM),1)
+SRC_PATTERNS += nvm/%
+endif
+ifeq ($(CIRCUITPY_ONEWIREIO),1)
+SRC_PATTERNS += onewireio/%
+endif
+ifeq ($(CIRCUITPY_OS),1)
+SRC_PATTERNS += os/%
+endif
+ifeq ($(CIRCUITPY_DUALBANK),1)
+SRC_PATTERNS += dualbank/%
+endif
+ifeq ($(CIRCUITPY_PIXELBUF),1)
+SRC_PATTERNS += adafruit_pixelbuf/%
+endif
+ifeq ($(CIRCUITPY_QRIO),1)
+SRC_PATTERNS += qrio/%
+endif
+ifeq ($(CIRCUITPY_RAINBOWIO),1)
+SRC_PATTERNS += rainbowio/%
+endif
+ifeq ($(CIRCUITPY_RGBMATRIX),1)
+SRC_PATTERNS += rgbmatrix/%
+endif
+ifeq ($(CIRCUITPY_PS2IO),1)
+SRC_PATTERNS += ps2io/%
+endif
+ifeq ($(CIRCUITPY_PULSEIO),1)
+SRC_PATTERNS += pulseio/%
+endif
+ifeq ($(CIRCUITPY_PWMIO),1)
+SRC_PATTERNS += pwmio/%
+endif
+ifeq ($(CIRCUITPY_RANDOM),1)
+SRC_PATTERNS += random/%
+endif
+ifeq ($(CIRCUITPY_RP2PIO),1)
+SRC_PATTERNS += rp2pio/%
+endif
+ifeq ($(CIRCUITPY_ROTARYIO),1)
+SRC_PATTERNS += rotaryio/%
+endif
+ifeq ($(CIRCUITPY_RTC),1)
+SRC_PATTERNS += rtc/%
+endif
+ifeq ($(CIRCUITPY_SAMD),1)
+SRC_PATTERNS += samd/%
+endif
+ifeq ($(CIRCUITPY_SDCARDIO),1)
+SRC_PATTERNS += sdcardio/%
+endif
+ifeq ($(CIRCUITPY_SDIOIO),1)
+SRC_PATTERNS += sdioio/%
+endif
+ifeq ($(CIRCUITPY_SHARPDISPLAY),1)
+SRC_PATTERNS += sharpdisplay/%
+endif
+ifeq ($(CIRCUITPY_SOCKETPOOL),1)
+SRC_PATTERNS += socketpool/%
+endif
+ifeq ($(CIRCUITPY_SSL),1)
+SRC_PATTERNS += ssl/%
+endif
+ifeq ($(CIRCUITPY_STAGE),1)
+SRC_PATTERNS += _stage/%
+endif
+ifeq ($(CIRCUITPY_STORAGE),1)
+SRC_PATTERNS += storage/%
+endif
+ifeq ($(CIRCUITPY_STRUCT),1)
+SRC_PATTERNS += struct/%
+endif
+ifeq ($(CIRCUITPY_SUPERVISOR),1)
+SRC_PATTERNS += supervisor/%
+endif
+ifeq ($(CIRCUITPY_SYNTHIO),1)
+SRC_PATTERNS += synthio/%
+endif
+ifeq ($(CIRCUITPY_TERMINALIO),1)
+SRC_PATTERNS += terminalio/% fontio/%
+endif
+ifeq ($(CIRCUITPY_TIME),1)
+SRC_PATTERNS += time/%
+endif
+ifeq ($(CIRCUITPY_TOUCHIO),1)
+SRC_PATTERNS += touchio/%
+endif
+ifeq ($(CIRCUITPY_TRACEBACK),1)
+SRC_PATTERNS += traceback/%
+endif
+ifeq ($(CIRCUITPY_UHEAP),1)
+SRC_PATTERNS += uheap/%
+endif
+ifeq ($(CIRCUITPY_USB_CDC),1)
+SRC_PATTERNS += usb_cdc/%
+endif
+ifeq ($(CIRCUITPY_USB_HID),1)
+SRC_PATTERNS += usb_hid/%
+endif
+ifeq ($(CIRCUITPY_USB_HOST),1)
+SRC_PATTERNS += usb_host/% usb/%
+endif
+ifeq ($(CIRCUITPY_USB_MIDI),1)
+SRC_PATTERNS += usb_midi/%
+endif
+ifeq ($(CIRCUITPY_USB_VENDOR),1)
+SRC_PATTERNS += usb_vendor/%
+endif
+ifeq ($(CIRCUITPY_USTACK),1)
+SRC_PATTERNS += ustack/%
+endif
+ifeq ($(CIRCUITPY_ZLIB),1)
+SRC_PATTERNS += zlib/%
+endif
+ifeq ($(CIRCUITPY_VIDEOCORE),1)
+SRC_PATTERNS += videocore/%
+endif
+ifeq ($(CIRCUITPY_WATCHDOG),1)
+SRC_PATTERNS += watchdog/%
+endif
+ifeq ($(CIRCUITPY_WIFI),1)
+SRC_PATTERNS += wifi/%
+endif
+ifeq ($(CIRCUITPY_PEW),1)
+SRC_PATTERNS += _pew/%
+endif
+ifeq ($(CIRCUITPY_MSGPACK),1)
+SRC_PATTERNS += msgpack/%
+endif
+
+# All possible sources are listed here, and are filtered by SRC_PATTERNS in SRC_COMMON_HAL
+SRC_COMMON_HAL_ALL = \
+ _bleio/Adapter.c \
+ _bleio/Attribute.c \
+ _bleio/Characteristic.c \
+ _bleio/CharacteristicBuffer.c \
+ _bleio/Connection.c \
+ _bleio/Descriptor.c \
+ _bleio/PacketBuffer.c \
+ _bleio/Service.c \
+ _bleio/UUID.c \
+ _bleio/__init__.c \
+ _pew/PewPew.c \
+ _pew/__init__.c \
+ alarm/SleepMemory.c \
+ alarm/__init__.c \
+ alarm/pin/PinAlarm.c \
+ alarm/time/TimeAlarm.c \
+ alarm/touch/TouchAlarm.c \
+ analogio/AnalogIn.c \
+ analogio/AnalogOut.c \
+ analogio/__init__.c \
+ audiobusio/I2SOut.c \
+ audiobusio/PDMIn.c \
+ audiobusio/__init__.c \
+ audioio/AudioOut.c \
+ audioio/__init__.c \
+ audiopwmio/PWMAudioOut.c \
+ audiopwmio/__init__.c \
+ board/__init__.c \
+ busio/I2C.c \
+ busio/SPI.c \
+ busio/UART.c \
+ busio/__init__.c \
+ camera/__init__.c \
+ camera/Camera.c \
+ canio/CAN.c \
+ canio/Listener.c \
+ canio/__init__.c \
+ countio/Counter.c \
+ countio/__init__.c \
+ digitalio/DigitalInOut.c \
+ digitalio/__init__.c \
+ dualbank/__init__.c \
+ frequencyio/FrequencyIn.c \
+ frequencyio/__init__.c \
+ imagecapture/ParallelImageCapture.c \
+ imagecapture/__init__.c \
+ gnss/__init__.c \
+ gnss/GNSS.c \
+ gnss/PositionFix.c \
+ gnss/SatelliteSystem.c \
+ i2cperipheral/I2CPeripheral.c \
+ i2cperipheral/__init__.c \
+ microcontroller/Pin.c \
+ microcontroller/Processor.c \
+ microcontroller/__init__.c \
+ mdns/__init__.c \
+ mdns/Server.c \
+ mdns/RemoteService.c \
+ neopixel_write/__init__.c \
+ nvm/ByteArray.c \
+ nvm/__init__.c \
+ os/__init__.c \
+ paralleldisplay/ParallelBus.c \
+ ps2io/Ps2.c \
+ ps2io/__init__.c \
+ pulseio/PulseIn.c \
+ pulseio/PulseOut.c \
+ pulseio/__init__.c \
+ pwmio/PWMOut.c \
+ pwmio/__init__.c \
+ rgbmatrix/RGBMatrix.c \
+ rgbmatrix/__init__.c \
+ rotaryio/IncrementalEncoder.c \
+ rotaryio/__init__.c \
+ rtc/RTC.c \
+ rtc/__init__.c \
+ sdioio/SDCard.c \
+ sdioio/__init__.c \
+ socketpool/__init__.c \
+ socketpool/SocketPool.c \
+ socketpool/Socket.c \
+ ssl/__init__.c \
+ ssl/SSLContext.c \
+ ssl/SSLSocket.c \
+ supervisor/Runtime.c \
+ supervisor/__init__.c \
+ usb_host/__init__.c \
+ usb_host/Port.c \
+ watchdog/WatchDogMode.c \
+ watchdog/WatchDogTimer.c \
+ watchdog/__init__.c \
+ wifi/Monitor.c \
+ wifi/Network.c \
+ wifi/Radio.c \
+ wifi/ScannedNetworks.c \
+ wifi/__init__.c \
+
+ifeq ($(CIRCUITPY_BLEIO_HCI),1)
+# Helper code for _bleio HCI.
+SRC_C += \
+ common-hal/_bleio/att.c \
+ common-hal/_bleio/hci.c \
+
+endif
+
+
+SRC_COMMON_HAL = $(filter $(SRC_PATTERNS), $(SRC_COMMON_HAL_ALL))
+
+# These don't have corresponding files in each port but are still located in
+# shared-bindings to make it clear what the contents of the modules are.
+# All possible sources are listed here, and are filtered by SRC_PATTERNS.
+SRC_BINDINGS_ENUMS = \
+$(filter $(SRC_PATTERNS), \
+ _bleio/Address.c \
+ _bleio/Attribute.c \
+ _bleio/ScanEntry.c \
+ _eve/__init__.c \
+ __future__/__init__.c \
+ camera/ImageFormat.c \
+ canio/Match.c \
+ countio/Edge.c \
+ digitalio/Direction.c \
+ digitalio/DriveMode.c \
+ digitalio/Pull.c \
+ displayio/Colorspace.c \
+ fontio/Glyph.c \
+ imagecapture/ParallelImageCapture.c \
+ math/__init__.c \
+ microcontroller/ResetReason.c \
+ microcontroller/RunMode.c \
+ msgpack/__init__.c \
+ msgpack/ExtType.c \
+ paralleldisplay/__init__.c \
+ paralleldisplay/ParallelBus.c \
+ qrio/PixelPolicy.c \
+ qrio/QRInfo.c \
+ supervisor/RunReason.c \
+ wifi/AuthMode.c \
+ wifi/Packet.c \
+)
+
+SRC_BINDINGS_ENUMS += \
+ util.c
+
+SRC_SHARED_MODULE_ALL = \
+ _bleio/Address.c \
+ _bleio/Attribute.c \
+ _bleio/ScanEntry.c \
+ _bleio/ScanResults.c \
+ _eve/__init__.c \
+ adafruit_pixelbuf/PixelBuf.c \
+ adafruit_pixelbuf/__init__.c \
+ _stage/Layer.c \
+ _stage/Text.c \
+ _stage/__init__.c \
+ aesio/__init__.c \
+ aesio/aes.c \
+ atexit/__init__.c \
+ audiocore/RawSample.c \
+ audiocore/WaveFile.c \
+ audiocore/__init__.c \
+ audioio/__init__.c \
+ audiomixer/Mixer.c \
+ audiomixer/MixerVoice.c \
+ audiomixer/__init__.c \
+ audiomp3/MP3Decoder.c \
+ audiomp3/__init__.c \
+ audiopwmio/__init__.c \
+ bitbangio/I2C.c \
+ bitbangio/SPI.c \
+ bitbangio/__init__.c \
+ bitmaptools/__init__.c \
+ bitops/__init__.c \
+ board/__init__.c \
+ adafruit_bus_device/__init__.c \
+ adafruit_bus_device/i2c_device/I2CDevice.c \
+ adafruit_bus_device/spi_device/SPIDevice.c \
+ canio/Match.c \
+ canio/Message.c \
+ canio/RemoteTransmissionRequest.c \
+ displayio/Bitmap.c \
+ displayio/ColorConverter.c \
+ displayio/Display.c \
+ displayio/EPaperDisplay.c \
+ displayio/FourWire.c \
+ displayio/Group.c \
+ displayio/I2CDisplay.c \
+ displayio/OnDiskBitmap.c \
+ displayio/Palette.c \
+ displayio/Shape.c \
+ displayio/TileGrid.c \
+ displayio/area.c \
+ displayio/__init__.c \
+ floppyio/__init__.c \
+ fontio/BuiltinFont.c \
+ fontio/__init__.c \
+ framebufferio/FramebufferDisplay.c \
+ framebufferio/__init__.c \
+ gamepadshift/GamePadShift.c \
+ gamepadshift/__init__.c \
+ getpass/__init__.c \
+ gifio/__init__.c \
+ gifio/GifWriter.c \
+ imagecapture/ParallelImageCapture.c \
+ ipaddress/IPv4Address.c \
+ ipaddress/__init__.c \
+ is31fl3741/IS31FL3741.c \
+ is31fl3741/FrameBuffer.c \
+ is31fl3741/__init__.c \
+ keypad/__init__.c \
+ keypad/Event.c \
+ keypad/EventQueue.c \
+ keypad/KeyMatrix.c \
+ keypad/ShiftRegisterKeys.c \
+ keypad/Keys.c \
+ memorymonitor/__init__.c \
+ memorymonitor/AllocationAlarm.c \
+ memorymonitor/AllocationSize.c \
+ network/__init__.c \
+ msgpack/__init__.c \
+ onewireio/__init__.c \
+ onewireio/OneWire.c \
+ os/__init__.c \
+ paralleldisplay/ParallelBus.c \
+ qrio/__init__.c \
+ qrio/QRDecoder.c \
+ rainbowio/__init__.c \
+ random/__init__.c \
+ rgbmatrix/RGBMatrix.c \
+ rgbmatrix/__init__.c \
+ rotaryio/IncrementalEncoder.c \
+ sdcardio/SDCard.c \
+ sdcardio/__init__.c \
+ sharpdisplay/SharpMemoryFramebuffer.c \
+ sharpdisplay/__init__.c \
+ socket/__init__.c \
+ storage/__init__.c \
+ struct/__init__.c \
+ synthio/MidiTrack.c \
+ synthio/__init__.c \
+ terminalio/Terminal.c \
+ terminalio/__init__.c \
+ time/__init__.c \
+ traceback/__init__.c \
+ uheap/__init__.c \
+ usb/__init__.c \
+ usb/core/__init__.c \
+ usb/core/Device.c \
+ ustack/__init__.c \
+ zlib/__init__.c \
+ vectorio/Circle.c \
+ vectorio/Polygon.c \
+ vectorio/Rectangle.c \
+ vectorio/VectorShape.c \
+ vectorio/__init__.c \
+
+# All possible sources are listed here, and are filtered by SRC_PATTERNS.
+SRC_SHARED_MODULE = $(filter $(SRC_PATTERNS), $(SRC_SHARED_MODULE_ALL))
+
+# Use the native touchio if requested. This flag is set conditionally in, say, mpconfigport.h.
+# The presence of common-hal/touchio/* does not imply it's available for all chips in a port,
+# so there is an explicit flag. For example, SAMD21 touchio is native, but SAMD51 is not.
+ifeq ($(CIRCUITPY_TOUCHIO_USE_NATIVE),1)
+SRC_COMMON_HAL_ALL += \
+ touchio/TouchIn.c \
+ touchio/__init__.c
+else
+SRC_SHARED_MODULE_ALL += \
+ touchio/TouchIn.c \
+ touchio/__init__.c
+endif
+
+# If supporting _bleio via HCI, make devices/ble_hci/common-hal/_bleio be includable,
+# and use C source files in devices/ble_hci/common-hal.
+ifeq ($(CIRCUITPY_BLEIO_HCI),1)
+INC += -I$(TOP)/devices/ble_hci
+DEVICES_MODULES += $(TOP)/devices/ble_hci
+endif
+
+ifeq ($(CIRCUITPY_AUDIOMP3),1)
+SRC_MOD += $(addprefix lib/mp3/src/, \
+ bitstream.c \
+ buffers.c \
+ dct32.c \
+ dequant.c \
+ dqchan.c \
+ huffman.c \
+ hufftabs.c \
+ imdct.c \
+ mp3dec.c \
+ mp3tabs.c \
+ polyphase.c \
+ scalfact.c \
+ stproc.c \
+ subband.c \
+ trigtabs.c \
+)
+$(BUILD)/lib/mp3/src/buffers.o: CFLAGS += -include "py/misc.h" -D'MPDEC_ALLOCATOR(x)=m_malloc(x,0)' -D'MPDEC_FREE(x)=m_free(x)'
+endif
+ifeq ($(CIRCUITPY_RGBMATRIX),1)
+SRC_MOD += $(addprefix lib/protomatter/src/, \
+ core.c \
+)
+$(BUILD)/lib/protomatter/src/core.o: CFLAGS += -include "shared-module/rgbmatrix/allocator.h" -DCIRCUITPY -Wno-missing-braces -Wno-missing-prototypes
+endif
+
+ifeq ($(CIRCUITPY_ZLIB),1)
+SRC_MOD += $(addprefix lib/uzlib/, \
+ tinflate.c \
+ tinfzlib.c \
+ tinfgzip.c \
+ adler32.c \
+ crc32.c \
+)
+$(BUILD)/lib/uzlib/tinflate.o: CFLAGS += -Wno-missing-braces -Wno-missing-prototypes
+endif
+
+# All possible sources are listed here, and are filtered by SRC_PATTERNS.
+SRC_SHARED_MODULE_INTERNAL = \
+$(filter $(SRC_PATTERNS), \
+ displayio/display_core.c \
+ usb/utf16le.c \
+)
+
+SRC_COMMON_HAL_INTERNAL = \
+$(filter $(SRC_PATTERNS), \
+ _bleio/ \
+)
+
+ifeq ($(INTERNAL_LIBM),1)
+SRC_LIBM = \
+$(addprefix lib/,\
+ libm/math.c \
+ libm/roundf.c \
+ libm/fmodf.c \
+ libm/nearbyintf.c \
+ libm/ef_sqrt.c \
+ libm/kf_rem_pio2.c \
+ libm/kf_sin.c \
+ libm/kf_cos.c \
+ libm/kf_tan.c \
+ libm/ef_rem_pio2.c \
+ libm/sf_sin.c \
+ libm/sf_cos.c \
+ libm/sf_tan.c \
+ libm/sf_frexp.c \
+ libm/sf_modf.c \
+ libm/sf_ldexp.c \
+ libm/asinfacosf.c \
+ libm/atanf.c \
+ libm/atan2f.c \
+ )
+ifeq ($(CIRCUITPY_ULAB),1)
+SRC_LIBM += \
+$(addprefix lib/,\
+ libm/acoshf.c \
+ libm/asinhf.c \
+ libm/atanhf.c \
+ libm/erf_lgamma.c \
+ libm/log1pf.c \
+ libm/sf_erf.c \
+ libm/wf_lgamma.c \
+ libm/wf_tgamma.c \
+ )
+endif
+$(patsubst %.c,$(BUILD)/%.o,$(SRC_LIBM)): CFLAGS += -Wno-missing-prototypes
+endif
+
+SRC_CIRCUITPY_COMMON = \
+ shared/libc/string0.c \
+ shared/readline/readline.c \
+ lib/oofatfs/ff.c \
+ lib/oofatfs/ffunicode.c \
+ shared/timeutils/timeutils.c \
+ shared/runtime/buffer_helper.c \
+ shared/runtime/context_manager_helpers.c \
+ shared/runtime/interrupt_char.c \
+ shared/runtime/pyexec.c \
+ shared/runtime/stdout_helpers.c \
+ shared/runtime/sys_stdio_mphal.c
+
+ifeq ($(CIRCUITPY_QRIO),1)
+SRC_CIRCUITPY_COMMON += lib/quirc/lib/decode.c lib/quirc/lib/identify.c lib/quirc/lib/quirc.c lib/quirc/lib/version_db.c
+$(BUILD)/lib/quirc/lib/%.o: CFLAGS += -Wno-shadow -Wno-sign-compare -include shared-module/qrio/quirc_alloc.h
+endif
+
+ifdef LD_TEMPLATE_FILE
+# Generate a linker script (.ld file) from a template, for those builds that use it.
+GENERATED_LD_FILE = $(BUILD)/$(notdir $(patsubst %.template.ld,%.ld,$(LD_TEMPLATE_FILE)))
+#
+# ld_defines.pp is generated from ld_defines.c. See py/mkrules.mk.
+# Run gen_ld_files.py over ALL *.template.ld files, not just LD_TEMPLATE_FILE,
+# because it may include other template files.
+$(GENERATED_LD_FILE): $(BUILD)/ld_defines.pp boards/*.template.ld
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(TOP)/tools/gen_ld_files.py --defines $< --out_dir $(BUILD) boards/*.template.ld
+endif
+
+.PHONY: check-release-needs-clean-build
+
+check-release-needs-clean-build:
+ @echo "RELEASE_NEEDS_CLEAN_BUILD = $(RELEASE_NEEDS_CLEAN_BUILD)"
diff --git a/circuitpython/py/circuitpy_mpconfig.h b/circuitpython/py/circuitpy_mpconfig.h
new file mode 100644
index 0000000..1c44305
--- /dev/null
+++ b/circuitpython/py/circuitpy_mpconfig.h
@@ -0,0 +1,578 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019 Dan Halbert for Adafruit Industries
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// This file contains settings that are common across CircuitPython ports, to make
+// sure that the same feature set and settings are used, such as in atmel-samd
+// and nrf.
+
+#ifndef __INCLUDED_MPCONFIG_CIRCUITPY_H
+#define __INCLUDED_MPCONFIG_CIRCUITPY_H
+
+#include <stdint.h>
+#include <stdatomic.h>
+
+// This is CircuitPython.
+#define CIRCUITPY 1
+
+// REPR_C encodes qstrs, 31-bit ints, and 30-bit floats in a single 32-bit word.
+#ifndef MICROPY_OBJ_REPR
+#define MICROPY_OBJ_REPR (MICROPY_OBJ_REPR_C)
+#endif
+
+// options to control how MicroPython is built
+// TODO(tannewt): Reduce this number if we want the REPL to function under 512
+// free bytes.
+// #define MICROPY_ALLOC_PARSE_RULE_INIT (64)
+
+// These critical-section macros are used only a few places in MicroPython, but
+// we need to provide actual implementations.
+extern void common_hal_mcu_disable_interrupts(void);
+extern void common_hal_mcu_enable_interrupts(void);
+#define MICROPY_BEGIN_ATOMIC_SECTION() (common_hal_mcu_disable_interrupts(), 0)
+#define MICROPY_END_ATOMIC_SECTION(state) ((void)state, common_hal_mcu_enable_interrupts())
+
+// Sorted alphabetically for easy finding.
+//
+// default is 128; consider raising to reduce fragmentation.
+#define MICROPY_ALLOC_PARSE_CHUNK_INIT (16)
+// default is 512.
+#define MICROPY_ALLOC_PATH_MAX (256)
+#define MICROPY_CAN_OVERRIDE_BUILTINS (1)
+#define MICROPY_COMP_CONST (1)
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (1)
+#define MICROPY_COMP_MODULE_CONST (1)
+#define MICROPY_COMP_TRIPLE_TUPLE_ASSIGN (0)
+#define MICROPY_DEBUG_PRINTERS (0)
+#define MICROPY_EMIT_INLINE_THUMB (CIRCUITPY_ENABLE_MPY_NATIVE)
+#define MICROPY_EMIT_THUMB (CIRCUITPY_ENABLE_MPY_NATIVE)
+#define MICROPY_EMIT_X64 (0)
+#define MICROPY_ENABLE_DOC_STRING (0)
+#define MICROPY_ENABLE_FINALISER (1)
+#define MICROPY_ENABLE_GC (1)
+#define MICROPY_ENABLE_SOURCE_LINE (1)
+#define MICROPY_EPOCH_IS_1970 (1)
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_NORMAL)
+#define MICROPY_FLOAT_HIGH_QUALITY_HASH (0)
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_FLOAT)
+#define MICROPY_GC_ALLOC_THRESHOLD (0)
+#define MICROPY_HELPER_LEXER_UNIX (0)
+#define MICROPY_HELPER_REPL (1)
+#define MICROPY_KBD_EXCEPTION (1)
+#define MICROPY_MEM_STATS (0)
+#define MICROPY_MODULE_BUILTIN_INIT (1)
+#define MICROPY_NONSTANDARD_TYPECODES (0)
+#define MICROPY_OPT_COMPUTED_GOTO (1)
+#define MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE (CIRCUITPY_COMPUTED_GOTO_SAVE_SPACE)
+#define MICROPY_OPT_LOAD_ATTR_FAST_PATH (CIRCUITPY_OPT_LOAD_ATTR_FAST_PATH)
+#define MICROPY_OPT_MAP_LOOKUP_CACHE (CIRCUITPY_OPT_MAP_LOOKUP_CACHE)
+#define MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE (CIRCUITPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE)
+#define MICROPY_PERSISTENT_CODE_LOAD (1)
+
+#define MICROPY_PY_ARRAY (1)
+#define MICROPY_PY_ARRAY_SLICE_ASSIGN (1)
+#define MICROPY_PY_ATTRTUPLE (1)
+
+#define MICROPY_PY_BUILTINS_BYTEARRAY (1)
+#define MICROPY_PY_BUILTINS_ENUMERATE (1)
+#define MICROPY_PY_BUILTINS_FILTER (1)
+#define MICROPY_PY_BUILTINS_HELP (1)
+#define MICROPY_PY_BUILTINS_HELP_MODULES (1)
+#define MICROPY_PY_BUILTINS_INPUT (1)
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (1)
+#define MICROPY_PY_BUILTINS_MIN_MAX (1)
+#define MICROPY_PY_BUILTINS_PROPERTY (1)
+#define MICROPY_PY_BUILTINS_REVERSED (1)
+#define MICROPY_PY_BUILTINS_ROUND_INT (1)
+#define MICROPY_PY_BUILTINS_SET (1)
+#define MICROPY_PY_BUILTINS_SLICE (1)
+#define MICROPY_PY_BUILTINS_SLICE_ATTRS (1)
+#define MICROPY_PY_BUILTINS_SLICE_INDICES (1)
+#define MICROPY_PY_BUILTINS_STR_UNICODE (1)
+
+#define MICROPY_PY_CMATH (0)
+#define MICROPY_PY_COLLECTIONS (1)
+#define MICROPY_PY_DESCRIPTORS (1)
+#define MICROPY_PY_IO_FILEIO (1)
+#define MICROPY_PY_GC (1)
+// Supplanted by shared-bindings/math
+#define MICROPY_PY_MATH (0)
+#define MICROPY_PY_MICROPYTHON_MEM_INFO (0)
+// Supplanted by shared-bindings/struct
+#define MICROPY_PY_STRUCT (0)
+#define MICROPY_PY_SYS (1)
+#define MICROPY_PY_SYS_MAXSIZE (1)
+#define MICROPY_PY_SYS_STDFILES (1)
+// Supplanted by shared-bindings/random
+#define MICROPY_PY_URANDOM (0)
+#define MICROPY_PY_URANDOM_EXTRA_FUNCS (0)
+#define MICROPY_PY___FILE__ (1)
+
+#define MICROPY_QSTR_BYTES_IN_HASH (1)
+#define MICROPY_REPL_AUTO_INDENT (1)
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#define MICROPY_ENABLE_PYSTACK (1)
+#define MICROPY_STACK_CHECK (1)
+#define MICROPY_STREAMS_NON_BLOCK (1)
+#ifndef MICROPY_USE_INTERNAL_PRINTF
+#define MICROPY_USE_INTERNAL_PRINTF (1)
+#endif
+
+// fatfs configuration used in ffconf.h
+//
+// 1 = SFN/ANSI 437=LFN/U.S.(OEM)
+#define MICROPY_FATFS_ENABLE_LFN (1)
+// Code page is ignored because unicode is enabled.
+// Don't use parens on the value below because it gets combined with a prefix in
+// the preprocessor.
+#define MICROPY_FATFS_LFN_CODE_PAGE 437
+#define MICROPY_FATFS_USE_LABEL (1)
+#define MICROPY_FATFS_RPATH (2)
+#define MICROPY_FATFS_MULTI_PARTITION (1)
+#define MICROPY_FATFS_LFN_UNICODE 2 // UTF-8
+
+// Only enable this if you really need it. It allocates a byte cache of this size.
+// #define MICROPY_FATFS_MAX_SS (4096)
+
+#define FILESYSTEM_BLOCK_SIZE (512)
+
+#define MICROPY_VFS (1)
+#define MICROPY_VFS_FAT (MICROPY_VFS)
+#define MICROPY_READER_VFS (MICROPY_VFS)
+
+// type definitions for the specific machine
+
+#define BYTES_PER_WORD (4)
+
+#define MICROPY_MAKE_POINTER_CALLABLE(p) ((void *)((mp_uint_t)(p) | 1))
+
+// Track stack usage. Expose results via ustack module.
+#define MICROPY_MAX_STACK_USAGE (0)
+
+#define UINT_FMT "%u"
+#define INT_FMT "%d"
+#ifdef __LP64__
+typedef long mp_int_t; // must be pointer size
+typedef unsigned long mp_uint_t; // must be pointer size
+#else
+// These are definitions for machines where sizeof(int) == sizeof(void*),
+// regardless of actual size.
+typedef int mp_int_t; // must be pointer size
+typedef unsigned int mp_uint_t; // must be pointer size
+#endif
+#if __GNUC__ >= 10 // on recent gcc versions we can check that this is so
+_Static_assert(sizeof(mp_int_t) == sizeof(void *));
+_Static_assert(sizeof(mp_uint_t) == sizeof(void *));
+#endif
+typedef long mp_off_t;
+
+#define MP_PLAT_PRINT_STRN(str, len) mp_hal_stdout_tx_strn_cooked(str, len)
+
+#define mp_type_fileio mp_type_vfs_fat_fileio
+#define mp_type_textio mp_type_vfs_fat_textio
+
+#define mp_import_stat mp_vfs_import_stat
+#define mp_builtin_open_obj mp_vfs_open_obj
+
+
+// extra built in names to add to the global namespace
+#define MICROPY_PORT_BUILTINS \
+ { MP_OBJ_NEW_QSTR(MP_QSTR_help), (mp_obj_t)&mp_builtin_help_obj }, \
+ { MP_OBJ_NEW_QSTR(MP_QSTR_input), (mp_obj_t)&mp_builtin_input_obj }, \
+ { MP_OBJ_NEW_QSTR(MP_QSTR_open), (mp_obj_t)&mp_builtin_open_obj },
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+// board-specific definitions, which control and may override definitions below.
+#include "mpconfigboard.h"
+
+// Turning off FULL_BUILD removes some functionality to reduce flash size on tiny SAMD21s
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (CIRCUITPY_FULL_BUILD)
+#ifndef MICROPY_CPYTHON_COMPAT
+#define MICROPY_CPYTHON_COMPAT (CIRCUITPY_FULL_BUILD)
+#endif
+#define MICROPY_PY_BUILTINS_POW3 (CIRCUITPY_BUILTINS_POW3)
+#define MICROPY_PY_FSTRINGS (1)
+#define MICROPY_MODULE_WEAK_LINKS (0)
+#define MICROPY_PY_ALL_SPECIAL_METHODS (CIRCUITPY_FULL_BUILD)
+#ifndef MICROPY_PY_BUILTINS_COMPLEX
+#define MICROPY_PY_BUILTINS_COMPLEX (CIRCUITPY_FULL_BUILD)
+#endif
+#define MICROPY_PY_BUILTINS_FROZENSET (CIRCUITPY_FULL_BUILD)
+#define MICROPY_PY_BUILTINS_STR_CENTER (CIRCUITPY_FULL_BUILD)
+#define MICROPY_PY_BUILTINS_STR_PARTITION (CIRCUITPY_FULL_BUILD)
+#define MICROPY_PY_BUILTINS_STR_SPLITLINES (CIRCUITPY_FULL_BUILD)
+#ifndef MICROPY_PY_COLLECTIONS_ORDEREDDICT
+#define MICROPY_PY_COLLECTIONS_ORDEREDDICT (CIRCUITPY_FULL_BUILD)
+#endif
+#define MICROPY_PY_URE_MATCH_GROUPS (CIRCUITPY_RE)
+#define MICROPY_PY_URE_MATCH_SPAN_START_END (CIRCUITPY_RE)
+#define MICROPY_PY_URE_SUB (CIRCUITPY_RE)
+
+#define CIRCUITPY_MICROPYTHON_ADVANCED (CIRCUITPY_FULL_BUILD)
+
+#ifndef MICROPY_FATFS_EXFAT
+#define MICROPY_FATFS_EXFAT (CIRCUITPY_FULL_BUILD)
+#endif
+
+// LONGINT_IMPL_xxx are defined in the Makefile.
+//
+#ifdef LONGINT_IMPL_NONE
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_NONE)
+#endif
+
+#ifdef LONGINT_IMPL_MPZ
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_MPZ)
+#define MP_SSIZE_MAX (0x7fffffff)
+#endif
+
+#ifdef LONGINT_IMPL_LONGLONG
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_LONGLONG)
+#define MP_SSIZE_MAX (0x7fffffff)
+#endif
+
+#ifndef MICROPY_PY_REVERSE_SPECIAL_METHODS
+#define MICROPY_PY_REVERSE_SPECIAL_METHODS (CIRCUITPY_ULAB || CIRCUITPY_FULL_BUILD)
+#endif
+
+#if INTERNAL_FLASH_FILESYSTEM == 0 && QSPI_FLASH_FILESYSTEM == 0 && SPI_FLASH_FILESYSTEM == 0 && !DISABLE_FILESYSTEM
+#error No *_FLASH_FILESYSTEM set!
+#endif
+
+// Default board buses.
+
+#ifndef CIRCUITPY_BOARD_I2C
+#if defined(DEFAULT_I2C_BUS_SCL) && defined(DEFAULT_I2C_BUS_SDA)
+#define CIRCUITPY_BOARD_I2C (1)
+#define CIRCUITPY_BOARD_I2C_PIN {{.scl = DEFAULT_I2C_BUS_SCL, .sda = DEFAULT_I2C_BUS_SDA}}
+#else
+#define CIRCUITPY_BOARD_I2C (0)
+#endif
+#endif
+
+#ifndef CIRCUITPY_BOARD_SPI
+#if defined(DEFAULT_SPI_BUS_SCK) && defined(DEFAULT_SPI_BUS_MOSI) && defined(DEFAULT_SPI_BUS_MISO)
+#define CIRCUITPY_BOARD_SPI (1)
+#define CIRCUITPY_BOARD_SPI_PIN {{.clock = DEFAULT_SPI_BUS_SCK, .mosi = DEFAULT_SPI_BUS_MOSI, .miso = DEFAULT_SPI_BUS_MISO}}
+#else
+#define CIRCUITPY_BOARD_SPI (0)
+#endif
+#endif
+
+#ifndef CIRCUITPY_BOARD_UART
+#if defined(DEFAULT_UART_BUS_TX) && defined(DEFAULT_UART_BUS_RX)
+#define CIRCUITPY_BOARD_UART (1)
+#define CIRCUITPY_BOARD_UART_PIN {{.tx = DEFAULT_UART_BUS_TX, .rx = DEFAULT_UART_BUS_RX}}
+#define BOARD_UART_ROOT_POINTER mp_obj_t board_uart_bus;
+#else
+#define CIRCUITPY_BOARD_UART (0)
+#define BOARD_UART_ROOT_POINTER
+#endif
+#else
+#define BOARD_UART_ROOT_POINTER mp_obj_t board_uart_bus;
+#endif
+
+// These CIRCUITPY_xxx values should all be defined in the *.mk files as being on or off.
+// So if any are not defined in *.mk, they'll throw an error here.
+
+#if CIRCUITPY_DISPLAYIO
+#ifndef CIRCUITPY_DISPLAY_LIMIT
+#define CIRCUITPY_DISPLAY_LIMIT (1)
+#endif
+
+// Framebuffer area size in bytes. Rounded down to power of four for alignment.
+#ifndef CIRCUITPY_DISPLAY_AREA_BUFFER_SIZE
+#define CIRCUITPY_DISPLAY_AREA_BUFFER_SIZE (128)
+#endif
+
+#else
+#define CIRCUITPY_DISPLAY_LIMIT (0)
+#define CIRCUITPY_DISPLAY_AREA_BUFFER_SIZE (0)
+#endif
+
+#if CIRCUITPY_GAMEPADSHIFT
+// Scan gamepad every 32ms
+#define CIRCUITPY_GAMEPAD_TICKS 0x1f
+#define GAMEPAD_ROOT_POINTERS mp_obj_t gamepad_singleton;
+#else
+#define GAMEPAD_ROOT_POINTERS
+#endif
+
+#if CIRCUITPY_KEYPAD
+#define KEYPAD_ROOT_POINTERS mp_obj_t keypad_scanners_linked_list;
+#else
+#define KEYPAD_ROOT_POINTERS
+#endif
+
+#if CIRCUITPY_MEMORYMONITOR
+#define MEMORYMONITOR_ROOT_POINTERS mp_obj_t active_allocationsizes; \
+ mp_obj_t active_allocationalarms;
+#else
+#define MEMORYMONITOR_ROOT_POINTERS
+#endif
+
+// This is not a top-level module; it's microcontroller.nvm.
+#if CIRCUITPY_NVM
+extern const struct _mp_obj_module_t nvm_module;
+#endif
+
+// Following modules are implemented in either extmod or py directory.
+
+#define MICROPY_PY_UBINASCII CIRCUITPY_BINASCII
+
+#define MICROPY_PY_UERRNO CIRCUITPY_ERRNO
+// Uses about 80 bytes.
+#define MICROPY_PY_UERRNO_ERRORCODE CIRCUITPY_ERRNO
+
+#define MICROPY_PY_URE CIRCUITPY_RE
+
+#if CIRCUITPY_JSON
+#define MICROPY_PY_UJSON (1)
+#define MICROPY_PY_IO (1)
+#else
+#ifndef MICROPY_PY_IO
+// We don't need MICROPY_PY_IO unless someone else wants it.
+#define MICROPY_PY_IO (0)
+#endif
+#endif
+
+#ifndef ULAB_SUPPORTS_COMPLEX
+#define ULAB_SUPPORTS_COMPLEX (0)
+#endif
+
+#if CIRCUITPY_ULAB
+// ulab requires reverse special methods
+#if defined(MICROPY_PY_REVERSE_SPECIAL_METHODS) && !MICROPY_PY_REVERSE_SPECIAL_METHODS
+#error "ulab requires MICROPY_PY_REVERSE_SPECIAL_METHODS"
+#endif
+#endif
+
+#if CIRCUITPY_WIFI
+#define WIFI_MONITOR_ROOT_POINTERS mp_obj_t wifi_monitor_singleton;
+#else
+#define WIFI_MONITOR_ROOT_POINTERS
+#endif
+
+// Define certain native modules with weak links so they can be replaced with Python
+// implementations. This list may grow over time.
+
+#define MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS
+
+// Native modules that are weak links can be accessed directly
+// by prepending their name with an underscore. This list should correspond to
+// MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS, assuming you want the native modules
+// to be accessible when overridden.
+
+#define MICROPY_PORT_BUILTIN_MODULE_ALT_NAMES
+
+// This is an inclusive list that should correspond to the CIRCUITPY_XXX list above,
+// including dependencies.
+// Some of these definitions will be blank depending on what is turned on and off.
+// Some are omitted because they're in MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS above.
+
+#define MICROPY_PORT_BUILTIN_MODULES_STRONG_LINKS
+
+// If weak links are enabled, just include strong links in the main list of modules,
+// and also include the underscore alternate names.
+#if MICROPY_MODULE_WEAK_LINKS
+#define MICROPY_PORT_BUILTIN_MODULES \
+ MICROPY_PORT_BUILTIN_MODULES_STRONG_LINKS \
+ MICROPY_PORT_BUILTIN_MODULE_ALT_NAMES
+#else
+// If weak links are disabled, include both strong and potentially weak links
+#define MICROPY_PORT_BUILTIN_MODULES \
+ MICROPY_PORT_BUILTIN_MODULES_STRONG_LINKS \
+ MICROPY_PORT_BUILTIN_MODULE_WEAK_LINKS
+#endif
+
+// We need to provide a declaration/definition of alloca()
+#include <alloca.h>
+
+#define MP_STATE_PORT MP_STATE_VM
+
+#include "supervisor/flash_root_pointers.h"
+
+// From supervisor/memory.c
+struct _supervisor_allocation_node;
+
+#define CIRCUITPY_COMMON_ROOT_POINTERS \
+ FLASH_ROOT_POINTERS \
+ KEYPAD_ROOT_POINTERS \
+ GAMEPAD_ROOT_POINTERS \
+ BOARD_UART_ROOT_POINTER \
+ WIFI_MONITOR_ROOT_POINTERS \
+ MEMORYMONITOR_ROOT_POINTERS \
+ vstr_t *repl_line; \
+ mp_obj_t pew_singleton; \
+ mp_obj_t rtc_time_source; \
+ const char *readline_hist[8]; \
+ struct _supervisor_allocation_node *first_embedded_allocation; \
+
+void supervisor_run_background_tasks_if_tick(void);
+#define RUN_BACKGROUND_TASKS (supervisor_run_background_tasks_if_tick())
+
+#define MICROPY_VM_HOOK_LOOP RUN_BACKGROUND_TASKS;
+#define MICROPY_VM_HOOK_RETURN RUN_BACKGROUND_TASKS;
+
+// CIRCUITPY_AUTORELOAD_DELAY_MS = 0 will completely disable autoreload.
+#ifndef CIRCUITPY_AUTORELOAD_DELAY_MS
+#define CIRCUITPY_AUTORELOAD_DELAY_MS 750
+#endif
+
+#ifndef CIRCUITPY_FILESYSTEM_FLUSH_INTERVAL_MS
+#define CIRCUITPY_FILESYSTEM_FLUSH_INTERVAL_MS 1000
+#endif
+
+#ifndef CIRCUITPY_PYSTACK_SIZE
+#define CIRCUITPY_PYSTACK_SIZE 1536
+#endif
+
+// Wait this long before sleeping immediately after startup, to see if we are connected via USB or BLE.
+#ifndef CIRCUITPY_WORKFLOW_CONNECTION_SLEEP_DELAY
+#define CIRCUITPY_WORKFLOW_CONNECTION_SLEEP_DELAY 5
+#endif
+
+#ifndef CIRCUITPY_PROCESSOR_COUNT
+#define CIRCUITPY_PROCESSOR_COUNT (1)
+#endif
+
+#ifndef CIRCUITPY_STATUS_LED_POWER_INVERTED
+#define CIRCUITPY_STATUS_LED_POWER_INVERTED (0)
+#endif
+
+#define CIRCUITPY_BOOT_OUTPUT_FILE "/boot_out.txt"
+
+#ifndef CIRCUITPY_BOOT_COUNTER
+#define CIRCUITPY_BOOT_COUNTER 0
+#endif
+
+#if !defined(CIRCUITPY_INTERNAL_NVM_SIZE) && CIRCUITPY_BOOT_COUNTER != 0
+#error "boot counter requires CIRCUITPY_NVM enabled"
+#endif
+
+#define CIRCUITPY_VERBOSE_BLE 0
+
+// This trades ~1k flash space (1) for that much in RAM plus the cost to compute
+// the values once on init (0). Only turn it off, when you really need the flash
+// space and are willing to trade the RAM.
+#ifndef CIRCUITPY_PRECOMPUTE_QSTR_ATTR
+#define CIRCUITPY_PRECOMPUTE_QSTR_ATTR (1)
+#endif
+
+// Display the Blinka logo in the REPL on displayio displays.
+#ifndef CIRCUITPY_REPL_LOGO
+#define CIRCUITPY_REPL_LOGO (1)
+#endif
+
+// USB settings
+
+// Debug level for TinyUSB. Only outputs over debug UART so it doesn't cause
+// additional USB logging.
+#ifndef CIRCUITPY_DEBUG_TINYUSB
+#define CIRCUITPY_DEBUG_TINYUSB 0
+#endif
+
+#ifndef CIRCUITPY_USB_DEVICE_INSTANCE
+#define CIRCUITPY_USB_DEVICE_INSTANCE 0
+#endif
+
+#ifndef CIRCUITPY_USB_HOST_INSTANCE
+#define CIRCUITPY_USB_HOST_INSTANCE -1
+#endif
+
+// If the port requires certain USB endpoint numbers, define these in mpconfigport.h.
+
+#ifndef USB_CDC_EP_NUM_NOTIFICATION
+#define USB_CDC_EP_NUM_NOTIFICATION (0)
+#endif
+
+#ifndef USB_CDC_EP_NUM_DATA_OUT
+#define USB_CDC_EP_NUM_DATA_OUT (0)
+#endif
+
+#ifndef USB_CDC_EP_NUM_DATA_IN
+#define USB_CDC_EP_NUM_DATA_IN (0)
+#endif
+
+#ifndef USB_CDC2_EP_NUM_NOTIFICATION
+#define USB_CDC2_EP_NUM_NOTIFICATION (0)
+#endif
+
+#ifndef USB_CDC2_EP_NUM_DATA_OUT
+#define USB_CDC2_EP_NUM_DATA_OUT (0)
+#endif
+
+#ifndef USB_CDC2_EP_NUM_DATA_IN
+#define USB_CDC2_EP_NUM_DATA_IN (0)
+#endif
+
+#ifndef USB_MSC_EP_NUM_OUT
+#define USB_MSC_EP_NUM_OUT (0)
+#endif
+
+#ifndef USB_MSC_EP_NUM_IN
+#define USB_MSC_EP_NUM_IN (0)
+#endif
+
+#ifndef USB_HID_EP_NUM_OUT
+#define USB_HID_EP_NUM_OUT (0)
+#endif
+
+#ifndef USB_HID_EP_NUM_IN
+#define USB_HID_EP_NUM_IN (0)
+#endif
+
+// The most complicated device currently known of is the head and eye tracker, which requires 5
+// report ids.
+// https://usb.org/sites/default/files/hutrr74_-_usage_page_for_head_and_eye_trackers_0.pdf
+// The default descriptors only use 1, so that is the minimum.
+#ifndef CIRCUITPY_USB_HID_MAX_REPORT_IDS_PER_DESCRIPTOR
+#define CIRCUITPY_USB_HID_MAX_REPORT_IDS_PER_DESCRIPTOR (6)
+#elif CIRCUITPY_USB_HID_MAX_REPORT_IDS_PER_DESCRIPTOR < 1
+#error "CIRCUITPY_USB_HID_MAX_REPORT_IDS_PER_DESCRIPTOR must be at least 1"
+#endif
+
+#ifndef USB_MIDI_EP_NUM_OUT
+#define USB_MIDI_EP_NUM_OUT (0)
+#endif
+
+#ifndef USB_MIDI_EP_NUM_IN
+#define USB_MIDI_EP_NUM_IN (0)
+#endif
+
+#ifndef MICROPY_WRAP_MP_MAP_LOOKUP
+#define MICROPY_WRAP_MP_MAP_LOOKUP PLACE_IN_ITCM
+#endif
+
+#ifndef MICROPY_WRAP_MP_BINARY_OP
+#define MICROPY_WRAP_MP_BINARY_OP PLACE_IN_ITCM
+#endif
+
+#ifndef MICROPY_WRAP_MP_EXECUTE_BYTECODE
+#define MICROPY_WRAP_MP_EXECUTE_BYTECODE PLACE_IN_ITCM
+#endif
+
+#define MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE (CIRCUITPY_OPTIMIZE_PROPERTY_FLASH_SIZE)
+
+#endif // __INCLUDED_MPCONFIG_CIRCUITPY_H
diff --git a/circuitpython/py/circuitpy_mpconfig.mk b/circuitpython/py/circuitpy_mpconfig.mk
new file mode 100644
index 0000000..1899541
--- /dev/null
+++ b/circuitpython/py/circuitpy_mpconfig.mk
@@ -0,0 +1,531 @@
+#
+# This file is part of the MicroPython project, http://micropython.org/
+#
+# The MIT License (MIT)
+#
+# SPDX-FileCopyrightText: Copyright (c) 2019 Dan Halbert for Adafruit Industries
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Boards default to all modules enabled (with exceptions)
+# Manually disable by overriding in #mpconfigboard.mk
+
+# Smaller builds can be forced for resource constrained chips (typically SAMD21s
+# without external flash) by setting CIRCUITPY_FULL_BUILD=0. Avoid using this
+# for merely incomplete ports, as it changes settings in other files.
+CIRCUITPY_FULL_BUILD ?= 1
+CFLAGS += -DCIRCUITPY_FULL_BUILD=$(CIRCUITPY_FULL_BUILD)
+
+# Reduce the size of in-flash properties. Requires support in the .ld linker
+# file, so not enabled by default.
+CIRCUITPY_OPTIMIZE_PROPERTY_FLASH_SIZE ?= 0
+CFLAGS += -DCIRCUITPY_OPTIMIZE_PROPERTY_FLASH_SIZE=$(CIRCUITPY_OPTIMIZE_PROPERTY_FLASH_SIZE)
+
+# async/await language keyword support
+MICROPY_PY_ASYNC_AWAIT ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DMICROPY_PY_ASYNC_AWAIT=$(MICROPY_PY_ASYNC_AWAIT)
+
+# uasyncio
+# By default, include uasyncio if async/await are available.
+MICROPY_PY_UASYNCIO ?= $(MICROPY_PY_ASYNC_AWAIT)
+CFLAGS += -DMICROPY_PY_UASYNCIO=$(MICROPY_PY_UASYNCIO)
+
+# uasyncio normally needs select
+MICROPY_PY_USELECT ?= $(MICROPY_PY_UASYNCIO)
+CFLAGS += -DMICROPY_PY_USELECT=$(MICROPY_PY_USELECT)
+
+# enable select.select if select is enabled.
+MICROPY_PY_USELECT_SELECT ?= $(MICROPY_PY_USELECT)
+CFLAGS += -DMICROPY_PY_USELECT_SELECT=$(MICROPY_PY_USELECT_SELECT)
+
+CIRCUITPY_AESIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_AESIO=$(CIRCUITPY_AESIO)
+
+# TODO: CIRCUITPY_ALARM will gradually be added to
+# as many ports as possible
+# so make this 1 or CIRCUITPY_FULL_BUILD eventually
+CIRCUITPY_ALARM ?= 0
+CFLAGS += -DCIRCUITPY_ALARM=$(CIRCUITPY_ALARM)
+
+CIRCUITPY_ANALOGIO ?= 1
+CFLAGS += -DCIRCUITPY_ANALOGIO=$(CIRCUITPY_ANALOGIO)
+
+CIRCUITPY_ATEXIT ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_ATEXIT=$(CIRCUITPY_ATEXIT)
+
+CIRCUITPY_AUDIOBUSIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_AUDIOBUSIO=$(CIRCUITPY_AUDIOBUSIO)
+
+# Some boards have PDMIn but do not implement I2SOut.
+CIRCUITPY_AUDIOBUSIO_I2SOUT ?= $(CIRCUITPY_AUDIOBUSIO)
+CFLAGS += -DCIRCUITPY_AUDIOBUSIO_I2SOUT=$(CIRCUITPY_AUDIOBUSIO_I2SOUT)
+
+# Likewise, some boards have I2SOut but do not implement PDMIn.
+CIRCUITPY_AUDIOBUSIO_PDMIN ?= $(CIRCUITPY_AUDIOBUSIO)
+CFLAGS += -DCIRCUITPY_AUDIOBUSIO_PDMIN=$(CIRCUITPY_AUDIOBUSIO_PDMIN)
+
+CIRCUITPY_AUDIOIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_AUDIOIO=$(CIRCUITPY_AUDIOIO)
+
+CIRCUITPY_AUDIOPWMIO ?= 0
+CFLAGS += -DCIRCUITPY_AUDIOPWMIO=$(CIRCUITPY_AUDIOPWMIO)
+
+ifndef CIRCUITPY_AUDIOCORE
+ifeq ($(CIRCUITPY_AUDIOPWMIO),1)
+CIRCUITPY_AUDIOCORE = $(CIRCUITPY_AUDIOPWMIO)
+else
+CIRCUITPY_AUDIOCORE = $(CIRCUITPY_AUDIOIO)
+endif
+endif
+CFLAGS += -DCIRCUITPY_AUDIOCORE=$(CIRCUITPY_AUDIOCORE)
+
+CIRCUITPY_AUDIOMIXER ?= $(CIRCUITPY_AUDIOIO)
+CFLAGS += -DCIRCUITPY_AUDIOMIXER=$(CIRCUITPY_AUDIOMIXER)
+
+ifndef CIRCUITPY_AUDIOMP3
+ifeq ($(CIRCUITPY_FULL_BUILD),1)
+CIRCUITPY_AUDIOMP3 = $(CIRCUITPY_AUDIOCORE)
+else
+CIRCUITPY_AUDIOMP3 = 0
+endif
+endif
+CFLAGS += -DCIRCUITPY_AUDIOMP3=$(CIRCUITPY_AUDIOMP3)
+
+CIRCUITPY_BINASCII ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_BINASCII=$(CIRCUITPY_BINASCII)
+
+CIRCUITPY_BITBANG_APA102 ?= 0
+CFLAGS += -DCIRCUITPY_BITBANG_APA102=$(CIRCUITPY_BITBANG_APA102)
+
+CIRCUITPY_BITBANGIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_BITBANGIO=$(CIRCUITPY_BITBANGIO)
+
+CIRCUITPY_BITOPS ?= 0
+CFLAGS += -DCIRCUITPY_BITOPS=$(CIRCUITPY_BITOPS)
+
+# _bleio can be supported on most any board via HCI
+CIRCUITPY_BLEIO_HCI ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_BLEIO_HCI=$(CIRCUITPY_BLEIO_HCI)
+
+# Explicitly enabled for boards that support _bleio.
+CIRCUITPY_BLEIO ?= $(CIRCUITPY_BLEIO_HCI)
+CFLAGS += -DCIRCUITPY_BLEIO=$(CIRCUITPY_BLEIO)
+
+CIRCUITPY_BLE_FILE_SERVICE ?= 0
+CFLAGS += -DCIRCUITPY_BLE_FILE_SERVICE=$(CIRCUITPY_BLE_FILE_SERVICE)
+
+CIRCUITPY_BOARD ?= 1
+CFLAGS += -DCIRCUITPY_BOARD=$(CIRCUITPY_BOARD)
+
+CIRCUITPY_BUSDEVICE ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_BUSDEVICE=$(CIRCUITPY_BUSDEVICE)
+
+CIRCUITPY_BUILTINS_POW3 ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_BUILTINS_POW3=$(CIRCUITPY_BUILTINS_POW3)
+
+CIRCUITPY_BUSIO ?= 1
+CFLAGS += -DCIRCUITPY_BUSIO=$(CIRCUITPY_BUSIO)
+
+# These two flags pretend to implement their class but raise a ValueError due to
+# unsupported pins. This should be used sparingly on boards that don't break out
+# generic IO but need parts of busio.
+CIRCUITPY_BUSIO_SPI ?= 1
+CFLAGS += -DCIRCUITPY_BUSIO_SPI=$(CIRCUITPY_BUSIO_SPI)
+
+CIRCUITPY_BUSIO_UART ?= 1
+CFLAGS += -DCIRCUITPY_BUSIO_UART=$(CIRCUITPY_BUSIO_UART)
+
+CIRCUITPY_CAMERA ?= 0
+CFLAGS += -DCIRCUITPY_CAMERA=$(CIRCUITPY_CAMERA)
+
+CIRCUITPY_CANIO ?= 0
+CFLAGS += -DCIRCUITPY_CANIO=$(CIRCUITPY_CANIO)
+
+CIRCUITPY_DIGITALIO ?= 1
+CFLAGS += -DCIRCUITPY_DIGITALIO=$(CIRCUITPY_DIGITALIO)
+
+CIRCUITPY_COMPUTED_GOTO_SAVE_SPACE ?= 0
+CFLAGS += -DCIRCUITPY_COMPUTED_GOTO_SAVE_SPACE=$(CIRCUITPY_COMPUTED_GOTO_SAVE_SPACE)
+
+CIRCUITPY_OPT_LOAD_ATTR_FAST_PATH ?= 1
+CFLAGS += -DCIRCUITPY_OPT_LOAD_ATTR_FAST_PATH=$(CIRCUITPY_OPT_LOAD_ATTR_FAST_PATH)
+
+CIRCUITPY_OPT_MAP_LOOKUP_CACHE ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_OPT_MAP_LOOKUP_CACHE=$(CIRCUITPY_OPT_MAP_LOOKUP_CACHE)
+
+CIRCUITPY_CONSOLE_UART ?= 0
+CFLAGS += -DCIRCUITPY_CONSOLE_UART=$(CIRCUITPY_CONSOLE_UART)
+
+CIRCUITPY_COUNTIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_COUNTIO=$(CIRCUITPY_COUNTIO)
+
+CIRCUITPY_DISPLAYIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_DISPLAYIO=$(CIRCUITPY_DISPLAYIO)
+
+ifeq ($(CIRCUITPY_DISPLAYIO),1)
+CIRCUITPY_PARALLELDISPLAY ?= $(CIRCUITPY_FULL_BUILD)
+else
+CIRCUITPY_PARALLELDISPLAY = 0
+endif
+CFLAGS += -DCIRCUITPY_PARALLELDISPLAY=$(CIRCUITPY_PARALLELDISPLAY)
+
+# bitmaptools and framebufferio rely on displayio
+ifeq ($(CIRCUITPY_DISPLAYIO),1)
+CIRCUITPY_BITMAPTOOLS ?= $(CIRCUITPY_FULL_BUILD)
+CIRCUITPY_FRAMEBUFFERIO ?= $(CIRCUITPY_FULL_BUILD)
+CIRCUITPY_VECTORIO ?= 1
+else
+CIRCUITPY_BITMAPTOOLS ?= 0
+CIRCUITPY_FRAMEBUFFERIO ?= 0
+CIRCUITPY_VECTORIO ?= 0
+endif
+CFLAGS += -DCIRCUITPY_BITMAPTOOLS=$(CIRCUITPY_BITMAPTOOLS)
+CFLAGS += -DCIRCUITPY_FRAMEBUFFERIO=$(CIRCUITPY_FRAMEBUFFERIO)
+CFLAGS += -DCIRCUITPY_VECTORIO=$(CIRCUITPY_VECTORIO)
+
+CIRCUITPY_DUALBANK ?= 0
+CFLAGS += -DCIRCUITPY_DUALBANK=$(CIRCUITPY_DUALBANK)
+
+# Enable the micropython.native decorator (experimental)
+CIRCUITPY_ENABLE_MPY_NATIVE ?= 0
+CFLAGS += -DCIRCUITPY_ENABLE_MPY_NATIVE=$(CIRCUITPY_ENABLE_MPY_NATIVE)
+
+CIRCUITPY_ERRNO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_ERRNO=$(CIRCUITPY_ERRNO)
+
+# CIRCUITPY_ESPIDF is handled in the espressif tree.
+# Only for ESP32S chips.
+# Assume not a ESP build.
+CIRCUITPY_ESPIDF ?= 0
+CFLAGS += -DCIRCUITPY_ESPIDF=$(CIRCUITPY_ESPIDF)
+
+CIRCUITPY__EVE ?= 0
+CFLAGS += -DCIRCUITPY__EVE=$(CIRCUITPY__EVE)
+
+CIRCUITPY_FLOPPYIO ?= 0
+CFLAGS += -DCIRCUITPY_FLOPPYIO=$(CIRCUITPY_FLOPPYIO)
+
+CIRCUITPY_FREQUENCYIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_FREQUENCYIO=$(CIRCUITPY_FREQUENCYIO)
+
+CIRCUITPY_FUTURE ?= 1
+CFLAGS += -DCIRCUITPY_FUTURE=$(CIRCUITPY_FUTURE)
+
+CIRCUITPY_GAMEPADSHIFT ?= 0
+CFLAGS += -DCIRCUITPY_GAMEPADSHIFT=$(CIRCUITPY_GAMEPADSHIFT)
+
+CIRCUITPY_GETPASS ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_GETPASS=$(CIRCUITPY_GETPASS)
+
+ifeq ($(CIRCUITPY_DISPLAYIO),1)
+CIRCUITPY_GIFIO ?= $(CIRCUITPY_FULL_BUILD)
+else
+CIRCUITPY_GIFIO ?= 0
+endif
+CFLAGS += -DCIRCUITPY_GIFIO=$(CIRCUITPY_GIFIO)
+
+CIRCUITPY_GNSS ?= 0
+CFLAGS += -DCIRCUITPY_GNSS=$(CIRCUITPY_GNSS)
+
+CIRCUITPY_I2CPERIPHERAL ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_I2CPERIPHERAL=$(CIRCUITPY_I2CPERIPHERAL)
+
+CIRCUITPY_IMAGECAPTURE ?= 0
+CFLAGS += -DCIRCUITPY_IMAGECAPTURE=$(CIRCUITPY_IMAGECAPTURE)
+
+CIRCUITPY_IPADDRESS ?= $(CIRCUITPY_WIFI)
+CFLAGS += -DCIRCUITPY_IPADDRESS=$(CIRCUITPY_IPADDRESS)
+
+CIRCUITPY_IS31FL3741 ?= 0
+CFLAGS += -DCIRCUITPY_IS31FL3741=$(CIRCUITPY_IS31FL3741)
+
+CIRCUITPY_JSON ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_JSON=$(CIRCUITPY_JSON)
+
+CIRCUITPY_KEYPAD ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_KEYPAD=$(CIRCUITPY_KEYPAD)
+
+CIRCUITPY_MATH ?= 1
+CFLAGS += -DCIRCUITPY_MATH=$(CIRCUITPY_MATH)
+
+CIRCUITPY_MEMORYMONITOR ?= 0
+CFLAGS += -DCIRCUITPY_MEMORYMONITOR=$(CIRCUITPY_MEMORYMONITOR)
+
+CIRCUITPY_MICROCONTROLLER ?= 1
+CFLAGS += -DCIRCUITPY_MICROCONTROLLER=$(CIRCUITPY_MICROCONTROLLER)
+
+CIRCUITPY_MDNS ?= $(CIRCUITPY_WIFI)
+CFLAGS += -DCIRCUITPY_MDNS=$(CIRCUITPY_MDNS)
+
+CIRCUITPY_MSGPACK ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_MSGPACK=$(CIRCUITPY_MSGPACK)
+
+CIRCUITPY_NEOPIXEL_WRITE ?= 1
+CFLAGS += -DCIRCUITPY_NEOPIXEL_WRITE=$(CIRCUITPY_NEOPIXEL_WRITE)
+
+CIRCUITPY_NVM ?= 1
+CFLAGS += -DCIRCUITPY_NVM=$(CIRCUITPY_NVM)
+
+CIRCUITPY_ONEWIREIO ?= $(CIRCUITPY_BUSIO)
+CFLAGS += -DCIRCUITPY_ONEWIREIO=$(CIRCUITPY_ONEWIREIO)
+
+CIRCUITPY_OS ?= 1
+CFLAGS += -DCIRCUITPY_OS=$(CIRCUITPY_OS)
+
+CIRCUITPY_PEW ?= 0
+CFLAGS += -DCIRCUITPY_PEW=$(CIRCUITPY_PEW)
+
+CIRCUITPY_PIXELBUF ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_PIXELBUF=$(CIRCUITPY_PIXELBUF)
+
+# Only for SAMD boards for the moment
+CIRCUITPY_PS2IO ?= 0
+CFLAGS += -DCIRCUITPY_PS2IO=$(CIRCUITPY_PS2IO)
+
+CIRCUITPY_PULSEIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_PULSEIO=$(CIRCUITPY_PULSEIO)
+
+CIRCUITPY_PWMIO ?= 1
+CFLAGS += -DCIRCUITPY_PWMIO=$(CIRCUITPY_PWMIO)
+
+CIRCUITPY_QRIO ?= $(CIRCUITPY_IMAGECAPTURE)
+CFLAGS += -DCIRCUITPY_QRIO=$(CIRCUITPY_QRIO)
+
+CIRCUITPY_RAINBOWIO ?= 1
+CFLAGS += -DCIRCUITPY_RAINBOWIO=$(CIRCUITPY_RAINBOWIO)
+
+CIRCUITPY_RANDOM ?= 1
+CFLAGS += -DCIRCUITPY_RANDOM=$(CIRCUITPY_RANDOM)
+
+CIRCUITPY_RE ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_RE=$(CIRCUITPY_RE)
+
+# Should busio.I2C() check for pullups?
+# Some boards in combination with certain peripherals may not want this.
+CIRCUITPY_REQUIRE_I2C_PULLUPS ?= 1
+CFLAGS += -DCIRCUITPY_REQUIRE_I2C_PULLUPS=$(CIRCUITPY_REQUIRE_I2C_PULLUPS)
+
+# CIRCUITPY_RP2PIO is handled in the raspberrypi tree.
+# Only for rp2 chips.
+# Assume not a rp2 build.
+CIRCUITPY_RP2PIO ?= 0
+CFLAGS += -DCIRCUITPY_RP2PIO=$(CIRCUITPY_RP2PIO)
+
+CIRCUITPY_RGBMATRIX ?= 0
+CFLAGS += -DCIRCUITPY_RGBMATRIX=$(CIRCUITPY_RGBMATRIX)
+
+CIRCUITPY_ROTARYIO ?= 1
+CFLAGS += -DCIRCUITPY_ROTARYIO=$(CIRCUITPY_ROTARYIO)
+
+CIRCUITPY_ROTARYIO_SOFTENCODER ?= 0
+CFLAGS += -DCIRCUITPY_ROTARYIO_SOFTENCODER=$(CIRCUITPY_ROTARYIO_SOFTENCODER)
+
+CIRCUITPY_RTC ?= 1
+CFLAGS += -DCIRCUITPY_RTC=$(CIRCUITPY_RTC)
+
+# CIRCUITPY_SAMD is handled in the atmel-samd tree.
+# Only for SAMD chips.
+# Assume not a SAMD build.
+CIRCUITPY_SAMD ?= 0
+CFLAGS += -DCIRCUITPY_SAMD=$(CIRCUITPY_SAMD)
+
+CIRCUITPY_SDCARDIO ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_SDCARDIO=$(CIRCUITPY_SDCARDIO)
+
+CIRCUITPY_SDIOIO ?= 0
+CFLAGS += -DCIRCUITPY_SDIOIO=$(CIRCUITPY_SDIOIO)
+
+CIRCUITPY_SERIAL_BLE ?= 0
+CFLAGS += -DCIRCUITPY_SERIAL_BLE=$(CIRCUITPY_SERIAL_BLE)
+
+CIRCUITPY_SETTABLE_PROCESSOR_FREQUENCY?= 0
+CFLAGS += -DCIRCUITPY_SETTABLE_PROCESSOR_FREQUENCY=$(CIRCUITPY_SETTABLE_PROCESSOR_FREQUENCY)
+
+CIRCUITPY_SHARPDISPLAY ?= $(CIRCUITPY_FRAMEBUFFERIO)
+CFLAGS += -DCIRCUITPY_SHARPDISPLAY=$(CIRCUITPY_SHARPDISPLAY)
+
+CIRCUITPY_SOCKETPOOL ?= $(CIRCUITPY_WIFI)
+CFLAGS += -DCIRCUITPY_SOCKETPOOL=$(CIRCUITPY_SOCKETPOOL)
+
+CIRCUITPY_SSL ?= $(CIRCUITPY_WIFI)
+CFLAGS += -DCIRCUITPY_SSL=$(CIRCUITPY_SSL)
+
+# Currently always off.
+CIRCUITPY_STAGE ?= 0
+CFLAGS += -DCIRCUITPY_STAGE=$(CIRCUITPY_STAGE)
+
+CIRCUITPY_STORAGE ?= 1
+CFLAGS += -DCIRCUITPY_STORAGE=$(CIRCUITPY_STORAGE)
+
+CIRCUITPY_STRUCT ?= 1
+CFLAGS += -DCIRCUITPY_STRUCT=$(CIRCUITPY_STRUCT)
+
+CIRCUITPY_SUPERVISOR ?= 1
+CFLAGS += -DCIRCUITPY_SUPERVISOR=$(CIRCUITPY_SUPERVISOR)
+
+CIRCUITPY_SYNTHIO ?= $(CIRCUITPY_AUDIOCORE)
+CFLAGS += -DCIRCUITPY_SYNTHIO=$(CIRCUITPY_SYNTHIO)
+
+CIRCUITPY_TERMINALIO ?= $(CIRCUITPY_DISPLAYIO)
+CFLAGS += -DCIRCUITPY_TERMINALIO=$(CIRCUITPY_TERMINALIO)
+
+CIRCUITPY_TIME ?= 1
+CFLAGS += -DCIRCUITPY_TIME=$(CIRCUITPY_TIME)
+
+# touchio might be native or generic. See circuitpy_defns.mk.
+CIRCUITPY_TOUCHIO_USE_NATIVE ?= 0
+CFLAGS += -DCIRCUITPY_TOUCHIO_USE_NATIVE=$(CIRCUITPY_TOUCHIO_USE_NATIVE)
+
+CIRCUITPY_TOUCHIO ?= 1
+CFLAGS += -DCIRCUITPY_TOUCHIO=$(CIRCUITPY_TOUCHIO)
+
+CIRCUITPY_TRACEBACK ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_TRACEBACK=$(CIRCUITPY_TRACEBACK)
+
+# For debugging.
+CIRCUITPY_UHEAP ?= 0
+CFLAGS += -DCIRCUITPY_UHEAP=$(CIRCUITPY_UHEAP)
+
+CIRCUITPY_USB ?= 1
+CFLAGS += -DCIRCUITPY_USB=$(CIRCUITPY_USB)
+
+# Compute these values once, so the shell command is not reinvoked many times.
+USB_NUM_ENDPOINT_PAIRS_5_OR_GREATER := $(shell expr $(USB_NUM_ENDPOINT_PAIRS) '>=' 5)
+USB_NUM_ENDPOINT_PAIRS_8_OR_GREATER := $(shell expr $(USB_NUM_ENDPOINT_PAIRS) '>=' 8)
+
+# Some chips may not support the same number of IN or OUT endpoints as pairs.
+# For instance, the ESP32-S2 only supports 5 IN endpoints at once, even though
+# it has 7 endpoint pairs.
+USB_NUM_IN_ENDPOINTS ?= $(USB_NUM_ENDPOINT_PAIRS)
+CFLAGS += -DUSB_NUM_IN_ENDPOINTS=$(USB_NUM_IN_ENDPOINTS)
+
+USB_NUM_OUT_ENDPOINTS ?= $(USB_NUM_ENDPOINT_PAIRS)
+CFLAGS += -DUSB_NUM_OUT_ENDPOINTS=$(USB_NUM_OUT_ENDPOINTS)
+
+CIRCUITPY_USB_CDC ?= $(CIRCUITPY_USB)
+CFLAGS += -DCIRCUITPY_USB_CDC=$(CIRCUITPY_USB_CDC)
+CIRCUITPY_USB_CDC_CONSOLE_ENABLED_DEFAULT ?= 1
+CFLAGS += -DCIRCUITPY_USB_CDC_CONSOLE_ENABLED_DEFAULT=$(CIRCUITPY_USB_CDC_CONSOLE_ENABLED_DEFAULT)
+CIRCUITPY_USB_CDC_DATA_ENABLED_DEFAULT ?= 0
+CFLAGS += -DCIRCUITPY_USB_CDC_DATA_ENABLED_DEFAULT=$(CIRCUITPY_USB_CDC_DATA_ENABLED_DEFAULT)
+
+# HID is available by default, but is not turned on if there are fewer than 5 endpoints.
+CIRCUITPY_USB_HID ?= $(CIRCUITPY_USB)
+CFLAGS += -DCIRCUITPY_USB_HID=$(CIRCUITPY_USB_HID)
+CIRCUITPY_USB_HID_ENABLED_DEFAULT ?= $(USB_NUM_ENDPOINT_PAIRS_5_OR_GREATER)
+CFLAGS += -DCIRCUITPY_USB_HID_ENABLED_DEFAULT=$(CIRCUITPY_USB_HID_ENABLED_DEFAULT)
+
+CIRCUITPY_USB_HOST ?= 0
+CFLAGS += -DCIRCUITPY_USB_HOST=$(CIRCUITPY_USB_HOST)
+
+# MIDI is available by default, but is not turned on if there are fewer than 8 endpoints.
+CIRCUITPY_USB_MIDI ?= $(CIRCUITPY_USB)
+CFLAGS += -DCIRCUITPY_USB_MIDI=$(CIRCUITPY_USB_MIDI)
+CIRCUITPY_USB_MIDI_ENABLED_DEFAULT ?= $(USB_NUM_ENDPOINT_PAIRS_8_OR_GREATER)
+CFLAGS += -DCIRCUITPY_USB_MIDI_ENABLED_DEFAULT=$(CIRCUITPY_USB_MIDI_ENABLED_DEFAULT)
+
+CIRCUITPY_USB_MSC ?= $(CIRCUITPY_USB)
+CFLAGS += -DCIRCUITPY_USB_MSC=$(CIRCUITPY_USB_MSC)
+CIRCUITPY_USB_MSC_ENABLED_DEFAULT ?= $(CIRCUITPY_USB_MSC)
+CFLAGS += -DCIRCUITPY_USB_MSC_ENABLED_DEFAULT=$(CIRCUITPY_USB_MSC_ENABLED_DEFAULT)
+
+# Defaulting this to OFF initially because it has only been tested on a
+# limited number of platforms, and the other platforms do not have this
+# setting in their mpconfigport.mk and/or mpconfigboard.mk files yet.
+CIRCUITPY_USB_VENDOR ?= 0
+CFLAGS += -DCIRCUITPY_USB_VENDOR=$(CIRCUITPY_USB_VENDOR)
+
+ifndef USB_NUM_ENDPOINT_PAIRS
+$(error "USB_NUM_ENDPOINT_PAIRS (number of USB endpoint pairs) must be defined")
+endif
+CFLAGS += -DUSB_NUM_ENDPOINT_PAIRS=$(USB_NUM_ENDPOINT_PAIRS)
+
+# For debugging.
+CIRCUITPY_USTACK ?= 0
+CFLAGS += -DCIRCUITPY_USTACK=$(CIRCUITPY_USTACK)
+
+# for decompression utilities
+CIRCUITPY_ZLIB ?= 1
+CFLAGS += -DCIRCUITPY_ZLIB=$(CIRCUITPY_ZLIB)
+
+# ulab numerics library
+CIRCUITPY_ULAB ?= $(CIRCUITPY_FULL_BUILD)
+CFLAGS += -DCIRCUITPY_ULAB=$(CIRCUITPY_ULAB)
+
+# CIRCUITPY_VIDEOCORE is handled in the broadcom tree.
+# Only for Broadcom chips.
+# Assume not a Broadcom build.
+CIRCUITPY_VIDEOCORE ?= 0
+CFLAGS += -DCIRCUITPY_VIDEOCORE=$(CIRCUITPY_VIDEOCORE)
+
+# watchdog hardware support
+CIRCUITPY_WATCHDOG ?= 0
+CFLAGS += -DCIRCUITPY_WATCHDOG=$(CIRCUITPY_WATCHDOG)
+
+CIRCUITPY_WIFI ?= 0
+CFLAGS += -DCIRCUITPY_WIFI=$(CIRCUITPY_WIFI)
+
+# tinyusb port tailored configuration
+CIRCUITPY_TUSB_MEM_ALIGN ?= 4
+CFLAGS += -DCIRCUITPY_TUSB_MEM_ALIGN=$(CIRCUITPY_TUSB_MEM_ALIGN)
+
+CIRCUITPY_TUSB_ATTR_USBRAM ?= ".bss.usbram"
+CFLAGS += -DCIRCUITPY_TUSB_ATTR_USBRAM=$(CIRCUITPY_TUSB_ATTR_USBRAM)
+
+# Define an equivalent for MICROPY_LONGINT_IMPL, to pass to $(MPY-TOOL) in py/mkrules.mk
+# $(MPY-TOOL) needs to know what kind of longint to use (if any) to freeze long integers.
+# This should correspond to the MICROPY_LONGINT_IMPL definition in mpconfigport.h.
+#
+# Also propagate longint choice from .mk to C. There's no easy string comparison
+# in cpp conditionals, so we #define separate names for each.
+
+ifeq ($(LONGINT_IMPL),NONE)
+MPY_TOOL_LONGINT_IMPL = -mlongint-impl=none
+CFLAGS += -DLONGINT_IMPL_NONE
+else ifeq ($(LONGINT_IMPL),MPZ)
+MPY_TOOL_LONGINT_IMPL = -mlongint-impl=mpz
+CFLAGS += -DLONGINT_IMPL_MPZ
+else ifeq ($(LONGINT_IMPL),LONGLONG)
+MPY_TOOL_LONGINT_IMPL = -mlongint-impl=longlong
+CFLAGS += -DLONGINT_IMPL_LONGLONG
+else
+$(error LONGINT_IMPL set to surprising value: "$(LONGINT_IMPL)")
+endif
+MPY_TOOL_FLAGS += $(MPY_TOOL_LONGINT_IMPL)
+
+###
+ifeq ($(LONGINT_IMPL),NONE)
+else ifeq ($(LONGINT_IMPL),MPZ)
+else ifeq ($(LONGINT_IMPL),LONGLONG)
+else
+$(error LONGINT_IMPL set to surprising value: "$(LONGINT_IMPL)")
+endif
+
+PREPROCESS_FROZEN_MODULES = PYTHONPATH=$(TOP)/tools/python-semver $(TOP)/tools/preprocess_frozen_modules.py
+ifneq ($(FROZEN_MPY_DIRS),)
+$(BUILD)/frozen_mpy: $(FROZEN_MPY_DIRS)
+ $(ECHO) FREEZE $(FROZEN_MPY_DIRS)
+ $(Q)$(MKDIR) -p $@
+ $(Q)$(PREPROCESS_FROZEN_MODULES) -o $@ $(FROZEN_MPY_DIRS)
+
+$(BUILD)/manifest.py: $(BUILD)/frozen_mpy | $(TOP)/py/circuitpy_mpconfig.mk mpconfigport.mk boards/$(BOARD)/mpconfigboard.mk
+ $(ECHO) MKMANIFEST $(FROZEN_MPY_DIRS)
+ (cd $(BUILD)/frozen_mpy && find * -name \*.py -exec printf 'freeze_as_mpy("frozen_mpy", "%s")\n' {} \; )> $@.tmp && mv -f $@.tmp $@
+FROZEN_MANIFEST=$(BUILD)/manifest.py
+endif
diff --git a/circuitpython/py/compile.c b/circuitpython/py/compile.c
new file mode 100644
index 0000000..e5f341a
--- /dev/null
+++ b/circuitpython/py/compile.c
@@ -0,0 +1,3611 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/scope.h"
+#include "py/emit.h"
+#include "py/compile.h"
+#include "py/runtime.h"
+#include "py/asmbase.h"
+#include "py/persistentcode.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// TODO need to mangle __attr names
+
+#define INVALID_LABEL (0xffff)
+
+typedef enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ PN_const_object, // special node for a constant, generic Python object
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+} pn_kind_t;
+
+// Whether a mp_parse_node_struct_t that has pns->kind == PN_testlist_comp
+// corresponds to a list comprehension or generator.
+#define MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns) \
+ (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2 && \
+ MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for))
+
+#define NEED_METHOD_TABLE MICROPY_EMIT_NATIVE
+
+#if NEED_METHOD_TABLE
+
+// we need a method table to do the lookup for the emitter functions
+#define EMIT(fun) (comp->emit_method_table->fun(comp->emit))
+#define EMIT_ARG(fun, ...) (comp->emit_method_table->fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (comp->emit_method_table->load_id.local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST))
+#define EMIT_LOAD_GLOBAL(qst) (comp->emit_method_table->load_id.global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL))
+
+#else
+
+// if we only have the bytecode emitter enabled then we can do a direct call to the functions
+#define EMIT(fun) (mp_emit_bc_##fun(comp->emit))
+#define EMIT_ARG(fun, ...) (mp_emit_bc_##fun(comp->emit, __VA_ARGS__))
+#define EMIT_LOAD_FAST(qst, local_num) (mp_emit_bc_load_local(comp->emit, qst, local_num, MP_EMIT_IDOP_LOCAL_FAST))
+#define EMIT_LOAD_GLOBAL(qst) (mp_emit_bc_load_global(comp->emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL))
+
+#endif
+
+#if MICROPY_EMIT_NATIVE && MICROPY_DYNAMIC_COMPILER
+
+#define NATIVE_EMITTER(f) emit_native_table[mp_dynamic_compiler.native_arch]->emit_##f
+#define NATIVE_EMITTER_TABLE emit_native_table[mp_dynamic_compiler.native_arch]
+
+STATIC const emit_method_table_t *emit_native_table[] = {
+ NULL,
+ &emit_native_x86_method_table,
+ &emit_native_x64_method_table,
+ &emit_native_arm_method_table,
+ &emit_native_thumb_method_table,
+ &emit_native_thumb_method_table,
+ &emit_native_thumb_method_table,
+ &emit_native_thumb_method_table,
+ &emit_native_thumb_method_table,
+ &emit_native_xtensa_method_table,
+ &emit_native_xtensawin_method_table,
+};
+
+#elif MICROPY_EMIT_NATIVE
+// define a macro to access external native emitter
+#if MICROPY_EMIT_X64
+#define NATIVE_EMITTER(f) emit_native_x64_##f
+#elif MICROPY_EMIT_X86
+#define NATIVE_EMITTER(f) emit_native_x86_##f
+#elif MICROPY_EMIT_THUMB
+#define NATIVE_EMITTER(f) emit_native_thumb_##f
+#elif MICROPY_EMIT_ARM
+#define NATIVE_EMITTER(f) emit_native_arm_##f
+#elif MICROPY_EMIT_XTENSA
+#define NATIVE_EMITTER(f) emit_native_xtensa_##f
+#elif MICROPY_EMIT_XTENSAWIN
+#define NATIVE_EMITTER(f) emit_native_xtensawin_##f
+#else
+#error "unknown native emitter"
+#endif
+#define NATIVE_EMITTER_TABLE &NATIVE_EMITTER(method_table)
+#endif
+
+#if MICROPY_EMIT_INLINE_ASM && MICROPY_DYNAMIC_COMPILER
+
+#define ASM_EMITTER(f) emit_asm_table[mp_dynamic_compiler.native_arch]->asm_##f
+#define ASM_EMITTER_TABLE emit_asm_table[mp_dynamic_compiler.native_arch]
+
+STATIC const emit_inline_asm_method_table_t *emit_asm_table[] = {
+ NULL,
+ NULL,
+ NULL,
+ &emit_inline_thumb_method_table,
+ &emit_inline_thumb_method_table,
+ &emit_inline_thumb_method_table,
+ &emit_inline_thumb_method_table,
+ &emit_inline_thumb_method_table,
+ &emit_inline_thumb_method_table,
+ &emit_inline_xtensa_method_table,
+ NULL,
+};
+
+#elif MICROPY_EMIT_INLINE_ASM
+// define macros for inline assembler
+#if MICROPY_EMIT_INLINE_THUMB
+#define ASM_DECORATOR_QSTR MP_QSTR_asm_thumb
+#define ASM_EMITTER(f) emit_inline_thumb_##f
+#elif MICROPY_EMIT_INLINE_XTENSA
+#define ASM_DECORATOR_QSTR MP_QSTR_asm_xtensa
+#define ASM_EMITTER(f) emit_inline_xtensa_##f
+#else
+#error "unknown asm emitter"
+#endif
+#define ASM_EMITTER_TABLE &ASM_EMITTER(method_table)
+#endif
+
+#define EMIT_INLINE_ASM(fun) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm))
+#define EMIT_INLINE_ASM_ARG(fun, ...) (comp->emit_inline_asm_method_table->fun(comp->emit_inline_asm, __VA_ARGS__))
+
+// elements in this struct are ordered to make it compact
+typedef struct _compiler_t {
+ qstr source_file;
+
+ uint8_t is_repl;
+ uint8_t pass; // holds enum type pass_kind_t
+ uint8_t have_star;
+
+ // try to keep compiler clean from nlr
+ mp_obj_t compile_error; // set to an exception object if there's an error
+ size_t compile_error_line; // set to best guess of line of error
+
+ uint next_label;
+
+ uint16_t num_dict_params;
+ uint16_t num_default_params;
+
+ uint16_t break_label; // highest bit set indicates we are breaking out of a for loop
+ uint16_t continue_label;
+ uint16_t cur_except_level; // increased for SETUP_EXCEPT, SETUP_FINALLY; decreased for POP_BLOCK, POP_EXCEPT
+ uint16_t break_continue_except_level;
+
+ scope_t *scope_head;
+ scope_t *scope_cur;
+
+ emit_t *emit; // current emitter
+ #if NEED_METHOD_TABLE
+ const emit_method_table_t *emit_method_table; // current emit method table
+ #endif
+
+ #if MICROPY_EMIT_INLINE_ASM
+ emit_inline_asm_t *emit_inline_asm; // current emitter for inline asm
+ const emit_inline_asm_method_table_t *emit_inline_asm_method_table; // current emit method table for inline asm
+ #endif
+} compiler_t;
+
+STATIC void compile_error_set_line(compiler_t *comp, mp_parse_node_t pn) {
+ // if the line of the error is unknown then try to update it from the pn
+ if (comp->compile_error_line == 0 && MP_PARSE_NODE_IS_STRUCT(pn)) {
+ comp->compile_error_line = ((mp_parse_node_struct_t *)pn)->source_line;
+ }
+}
+
+STATIC void compile_syntax_error(compiler_t *comp, mp_parse_node_t pn, const compressed_string_t *msg) {
+ // only register the error if there has been no other error
+ if (comp->compile_error == MP_OBJ_NULL) {
+ comp->compile_error = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+ compile_error_set_line(comp, pn);
+ }
+}
+
+STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra);
+STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind);
+STATIC void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map);
+STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn);
+
+STATIC uint comp_next_label(compiler_t *comp) {
+ return comp->next_label++;
+}
+
+#if MICROPY_EMIT_NATIVE
+STATIC void reserve_labels_for_native(compiler_t *comp, int n) {
+ if (comp->scope_cur->emit_options != MP_EMIT_OPT_BYTECODE) {
+ comp->next_label += n;
+ }
+}
+#else
+#define reserve_labels_for_native(comp, n)
+#endif
+
+STATIC void compile_increase_except_level(compiler_t *comp, uint label, int kind) {
+ EMIT_ARG(setup_block, label, kind);
+ comp->cur_except_level += 1;
+ if (comp->cur_except_level > comp->scope_cur->exc_stack_size) {
+ comp->scope_cur->exc_stack_size = comp->cur_except_level;
+ }
+}
+
+STATIC void compile_decrease_except_level(compiler_t *comp) {
+ assert(comp->cur_except_level > 0);
+ comp->cur_except_level -= 1;
+ EMIT(end_finally);
+ reserve_labels_for_native(comp, 1);
+}
+
+STATIC scope_t *scope_new_and_link(compiler_t *comp, scope_kind_t kind, mp_parse_node_t pn, uint emit_options) {
+ scope_t *scope = scope_new(kind, pn, comp->source_file, emit_options);
+ scope->parent = comp->scope_cur;
+ scope->next = NULL;
+ if (comp->scope_head == NULL) {
+ comp->scope_head = scope;
+ } else {
+ scope_t *s = comp->scope_head;
+ while (s->next != NULL) {
+ s = s->next;
+ }
+ s->next = scope;
+ }
+ return scope;
+}
+
+typedef void (*apply_list_fun_t)(compiler_t *comp, mp_parse_node_t pn);
+
+STATIC void apply_to_single_or_list(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_list_kind, apply_list_fun_t f) {
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, pn_list_kind)) {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ for (int i = 0; i < num_nodes; i++) {
+ f(comp, pns->nodes[i]);
+ }
+ } else if (!MP_PARSE_NODE_IS_NULL(pn)) {
+ f(comp, pn);
+ }
+}
+
+STATIC void compile_generic_all_nodes(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ for (int i = 0; i < num_nodes; i++) {
+ compile_node(comp, pns->nodes[i]);
+ if (comp->compile_error != MP_OBJ_NULL) {
+ // add line info for the error in case it didn't have a line number
+ compile_error_set_line(comp, pns->nodes[i]);
+ return;
+ }
+ }
+}
+
+STATIC void compile_load_id(compiler_t *comp, qstr qst) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ mp_emit_common_get_id_for_load(comp->scope_cur, qst);
+ } else {
+ #if NEED_METHOD_TABLE
+ mp_emit_common_id_op(comp->emit, &comp->emit_method_table->load_id, comp->scope_cur, qst);
+ #else
+ mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_load_id_ops, comp->scope_cur, qst);
+ #endif
+ }
+}
+
+STATIC void compile_store_id(compiler_t *comp, qstr qst) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+ } else {
+ #if NEED_METHOD_TABLE
+ mp_emit_common_id_op(comp->emit, &comp->emit_method_table->store_id, comp->scope_cur, qst);
+ #else
+ mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_store_id_ops, comp->scope_cur, qst);
+ #endif
+ }
+}
+
+STATIC void compile_delete_id(compiler_t *comp, qstr qst) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ mp_emit_common_get_id_for_modification(comp->scope_cur, qst);
+ } else {
+ #if NEED_METHOD_TABLE
+ mp_emit_common_id_op(comp->emit, &comp->emit_method_table->delete_id, comp->scope_cur, qst);
+ #else
+ mp_emit_common_id_op(comp->emit, &mp_emit_bc_method_table_delete_id_ops, comp->scope_cur, qst);
+ #endif
+ }
+}
+
+STATIC void compile_generic_tuple(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // a simple tuple expression
+ size_t num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ for (size_t i = 0; i < num_nodes; i++) {
+ compile_node(comp, pns->nodes[i]);
+ }
+ EMIT_ARG(build, num_nodes, MP_EMIT_BUILD_TUPLE);
+}
+
+// Compile condition expression `pn` so that control jumps to `label`
+// exactly when the condition's truth value equals `jump_if`.
+// Constant true/false, the short-circuit forms or/and/not, and
+// parenthesised (tuple) conditions are handled specially; any other
+// expression is compiled normally and followed by a pop_jump_if.
+STATIC void c_if_cond(compiler_t *comp, mp_parse_node_t pn, bool jump_if, int label) {
+ if (mp_parse_node_is_const_false(pn)) {
+ if (jump_if == false) {
+ EMIT_ARG(jump, label);
+ }
+ return;
+ } else if (mp_parse_node_is_const_true(pn)) {
+ if (jump_if == true) {
+ EMIT_ARG(jump, label);
+ }
+ return;
+ } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test) {
+ if (jump_if == false) {
+ // jump-if-false over `or`: any true operand among the first
+ // n-1 short-circuits past the jump (to label2); only the last
+ // operand being false causes the jump to label
+ and_or_logic1:;
+ uint label2 = comp_next_label(comp);
+ for (int i = 0; i < n - 1; i++) {
+ c_if_cond(comp, pns->nodes[i], !jump_if, label2);
+ }
+ c_if_cond(comp, pns->nodes[n - 1], jump_if, label);
+ EMIT_ARG(label_assign, label2);
+ } else {
+ // jump-if-true over `or`: any true operand jumps directly
+ and_or_logic2:
+ for (int i = 0; i < n; i++) {
+ c_if_cond(comp, pns->nodes[i], jump_if, label);
+ }
+ }
+ return;
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_and_test) {
+ // `and` is the dual of `or`: reuse the branches above, swapped
+ if (jump_if == false) {
+ goto and_or_logic2;
+ } else {
+ goto and_or_logic1;
+ }
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_not_test_2) {
+ // `not x`: invert the sense of the jump
+ c_if_cond(comp, pns->nodes[0], !jump_if, label);
+ return;
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_atom_paren) {
+ // cond is something in parenthesis
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // empty tuple, acts as false for the condition
+ if (jump_if == false) {
+ EMIT_ARG(jump, label);
+ }
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+ // non-empty tuple, acts as true for the condition
+ if (jump_if == true) {
+ EMIT_ARG(jump, label);
+ }
+ }
+ return;
+ }
+ }
+
+ // nothing special, fall back to default compiling for node and jump
+ compile_node(comp, pn);
+ EMIT_ARG(pop_jump_if, jump_if, label);
+}
+
+// Kinds of assignment-target handling: a plain store, or the load and
+// store halves of an augmented assignment (e.g. x[i] += 1).
+typedef enum { ASSIGN_STORE, ASSIGN_AUG_LOAD, ASSIGN_AUG_STORE } assign_kind_t;
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t kind);
+
+// Compile an assignment whose target is an atom_expr: a subscript
+// (obj[idx]) or attribute (obj.attr).  For ASSIGN_AUG_STORE the base
+// (and index) are not recompiled — the rot_two/rot_three below rotate
+// the values already on the stack from the earlier ASSIGN_AUG_LOAD.
+STATIC void c_assign_atom_expr(compiler_t *comp, mp_parse_node_struct_t *pns, assign_kind_t assign_kind) {
+ if (assign_kind != ASSIGN_AUG_STORE) {
+ compile_node(comp, pns->nodes[0]);
+ }
+
+ if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+ mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_atom_expr_trailers) {
+ // several trailers (a.b.c[i]): evaluate all but the last, which
+ // is the one actually assigned to
+ int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+ if (assign_kind != ASSIGN_AUG_STORE) {
+ for (int i = 0; i < n - 1; i++) {
+ compile_node(comp, pns1->nodes[i]);
+ }
+ }
+ assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+ pns1 = (mp_parse_node_struct_t *)pns1->nodes[n - 1];
+ }
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+ if (assign_kind == ASSIGN_AUG_STORE) {
+ EMIT(rot_three);
+ EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE);
+ } else {
+ compile_node(comp, pns1->nodes[0]);
+ if (assign_kind == ASSIGN_AUG_LOAD) {
+ // duplicate base and index so they remain for the AUG_STORE
+ EMIT(dup_top_two);
+ EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD);
+ } else {
+ EMIT_ARG(subscr, MP_EMIT_SUBSCR_STORE);
+ }
+ }
+ return;
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+ assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+ if (assign_kind == ASSIGN_AUG_LOAD) {
+ // duplicate base so it remains for the AUG_STORE
+ EMIT(dup_top);
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_LOAD);
+ } else {
+ if (assign_kind == ASSIGN_AUG_STORE) {
+ EMIT(rot_two);
+ }
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_STORE);
+ }
+ return;
+ }
+ }
+
+ compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("can't assign to expression"));
+}
+
+// Compile unpacking of the top-of-stack sequence into the `num_tail`
+// target nodes, allowing at most one starred (*x) target.
+STATIC void c_assign_tuple(compiler_t *comp, uint num_tail, mp_parse_node_t *nodes_tail) {
+ // look for star expression
+ uint have_star_index = -1; // (uint)-1 is the "no star seen yet" sentinel
+ for (uint i = 0; i < num_tail; i++) {
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes_tail[i], PN_star_expr)) {
+ if (have_star_index == (uint)-1) {
+ // unpack i items before the star and num_tail-i-1 after it
+ EMIT_ARG(unpack_ex, i, num_tail - i - 1);
+ have_star_index = i;
+ } else {
+ compile_syntax_error(comp, nodes_tail[i], MP_ERROR_TEXT("multiple *x in assignment"));
+ return;
+ }
+ }
+ }
+ if (have_star_index == (uint)-1) {
+ EMIT_ARG(unpack_sequence, num_tail);
+ }
+ for (uint i = 0; i < num_tail; i++) {
+ if (i == have_star_index) {
+ // assign to the inner node of the star expression
+ c_assign(comp, ((mp_parse_node_struct_t *)nodes_tail[i])->nodes[0], ASSIGN_STORE);
+ } else {
+ c_assign(comp, nodes_tail[i], ASSIGN_STORE);
+ }
+ }
+}
+
+// assigns top of stack to pn
+// assigns top of stack to pn
+// Dispatches on the kind of assignment target: a plain identifier,
+// an index/attribute expression, a tuple/list of targets, or a
+// parenthesised/bracketed sequence (which funnels to testlist_comp).
+STATIC void c_assign(compiler_t *comp, mp_parse_node_t pn, assign_kind_t assign_kind) {
+ assert(!MP_PARSE_NODE_IS_NULL(pn));
+ if (MP_PARSE_NODE_IS_LEAF(pn)) {
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ qstr arg = MP_PARSE_NODE_LEAF_ARG(pn);
+ switch (assign_kind) {
+ case ASSIGN_STORE:
+ case ASSIGN_AUG_STORE:
+ compile_store_id(comp, arg);
+ break;
+ case ASSIGN_AUG_LOAD:
+ default:
+ compile_load_id(comp, arg);
+ break;
+ }
+ } else {
+ // a non-id leaf (number, string, ...) cannot be a target
+ goto cannot_assign;
+ }
+ } else {
+ // pn must be a struct
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ switch (MP_PARSE_NODE_STRUCT_KIND(pns)) {
+ case PN_atom_expr_normal:
+ // lhs is an index or attribute
+ c_assign_atom_expr(comp, pns, assign_kind);
+ break;
+
+ case PN_testlist_star_expr:
+ case PN_exprlist:
+ // lhs is a tuple
+ if (assign_kind != ASSIGN_STORE) {
+ goto cannot_assign;
+ }
+ c_assign_tuple(comp, MP_PARSE_NODE_STRUCT_NUM_NODES(pns), pns->nodes);
+ break;
+
+ case PN_atom_paren:
+ // lhs is something in parenthesis
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // empty tuple
+ goto cannot_assign;
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+ if (assign_kind != ASSIGN_STORE) {
+ goto cannot_assign;
+ }
+ pns = (mp_parse_node_struct_t *)pns->nodes[0];
+ goto testlist_comp;
+ }
+ break;
+
+ case PN_atom_bracket:
+ // lhs is something in brackets
+ if (assign_kind != ASSIGN_STORE) {
+ goto cannot_assign;
+ }
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // empty list, assignment allowed
+ c_assign_tuple(comp, 0, NULL);
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+ pns = (mp_parse_node_struct_t *)pns->nodes[0];
+ goto testlist_comp;
+ } else {
+ // brackets around 1 item
+ c_assign_tuple(comp, 1, pns->nodes);
+ }
+ break;
+
+ default:
+ goto cannot_assign;
+ }
+ return;
+
+ testlist_comp:
+ // lhs is a sequence
+ // (a comprehension, e.g. [x for x in y] = z, is not a valid target)
+ if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns)) {
+ goto cannot_assign;
+ }
+ c_assign_tuple(comp, MP_PARSE_NODE_STRUCT_NUM_NODES(pns), pns->nodes);
+ return;
+ }
+ return;
+
+cannot_assign:
+ compile_syntax_error(comp, pn, MP_ERROR_TEXT("can't assign to expression"));
+}
+
+// stuff for lambda and comprehensions and generators:
+// if n_pos_defaults > 0 then there is a tuple on the stack with the positional defaults
+// if n_kw_defaults > 0 then there is a dictionary on the stack with the keyword defaults
+// if both exist, the tuple is above the dictionary (ie the first pop gets the tuple)
+// Emits make_function (no free variables) or make_closure (with the
+// closed-over variables pushed first) for this_scope.
+STATIC void close_over_variables_etc(compiler_t *comp, scope_t *this_scope, int n_pos_defaults, int n_kw_defaults) {
+ assert(n_pos_defaults >= 0);
+ assert(n_kw_defaults >= 0);
+
+ // set flags
+ if (n_kw_defaults > 0) {
+ this_scope->scope_flags |= MP_SCOPE_FLAG_DEFKWARGS;
+ }
+ this_scope->num_def_pos_args = n_pos_defaults;
+
+ #if MICROPY_EMIT_NATIVE
+ // When creating a function/closure it will take a reference to the current globals
+ comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS;
+ #endif
+
+ // make closed over variables, if any
+ // ensure they are closed over in the order defined in the outer scope (mainly to agree with CPython)
+ int nfree = 0;
+ if (comp->scope_cur->kind != SCOPE_MODULE) {
+ for (int i = 0; i < comp->scope_cur->id_info_len; i++) {
+ id_info_t *id = &comp->scope_cur->id_info[i];
+ if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+ // push each cell/free var of the current scope that
+ // this_scope also references as a free variable
+ for (int j = 0; j < this_scope->id_info_len; j++) {
+ id_info_t *id2 = &this_scope->id_info[j];
+ if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+ // in MicroPython we load closures using LOAD_FAST
+ EMIT_LOAD_FAST(id->qst, id->local_num);
+ nfree += 1;
+ }
+ }
+ }
+ }
+ }
+
+ // make the function/closure
+ if (nfree == 0) {
+ EMIT_ARG(make_function, this_scope, n_pos_defaults, n_kw_defaults);
+ } else {
+ EMIT_ARG(make_closure, this_scope, nfree, n_pos_defaults, n_kw_defaults);
+ }
+}
+
+// Compile one parameter of a funcdef/lambdef parameter list: emit the
+// code for its default value (if any) and update comp->have_star,
+// comp->num_dict_params and comp->num_default_params.  Positional
+// defaults accumulate on the stack (later built into a tuple);
+// keyword-only defaults go into a map built here.
+STATIC void compile_funcdef_lambdef_param(compiler_t *comp, mp_parse_node_t pn) {
+ // For efficiency of the code below we extract the parse-node kind here
+ int pn_kind;
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ pn_kind = -1;
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT(pn));
+ pn_kind = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn);
+ }
+
+ if (pn_kind == PN_typedargslist_star || pn_kind == PN_varargslist_star) {
+ comp->have_star = true;
+ /* don't need to distinguish bare from named star
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t*)pn;
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // bare star
+ } else {
+ // named star
+ }
+ */
+
+ } else if (pn_kind == PN_typedargslist_dbl_star || pn_kind == PN_varargslist_dbl_star) {
+ // named double star
+ // TODO do we need to do anything with this?
+
+ } else {
+ mp_parse_node_t pn_id;
+ mp_parse_node_t pn_equal;
+ if (pn_kind == -1) {
+ // this parameter is just an id
+
+ pn_id = pn;
+ pn_equal = MP_PARSE_NODE_NULL;
+
+ } else if (pn_kind == PN_typedargslist_name) {
+ // this parameter has a colon and/or equal specifier
+
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ pn_id = pns->nodes[0];
+ // pn_colon = pns->nodes[1]; // unused
+ pn_equal = pns->nodes[2];
+
+ } else {
+ assert(pn_kind == PN_varargslist_name); // should be
+ // this parameter has an equal specifier
+
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ pn_id = pns->nodes[0];
+ pn_equal = pns->nodes[1];
+ }
+
+ if (MP_PARSE_NODE_IS_NULL(pn_equal)) {
+ // this parameter does not have a default value
+
+ // check for non-default parameters given after default parameters (allowed by parser, but not syntactically valid)
+ if (!comp->have_star && comp->num_default_params != 0) {
+ compile_syntax_error(comp, pn, MP_ERROR_TEXT("non-default argument follows default argument"));
+ return;
+ }
+
+ } else {
+ // this parameter has a default value
+ // in CPython, None (and True, False?) as default parameters are loaded with LOAD_NAME; don't understandy why
+
+ if (comp->have_star) {
+ // after a bare/named star the parameter is keyword-only
+ comp->num_dict_params += 1;
+ // in MicroPython we put the default dict parameters into a dictionary using the bytecode
+ if (comp->num_dict_params == 1) {
+ // in MicroPython we put the default positional parameters into a tuple using the bytecode
+ // we need to do this here before we start building the map for the default keywords
+ if (comp->num_default_params > 0) {
+ EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE);
+ } else {
+ EMIT(load_null); // sentinel indicating empty default positional args
+ }
+ // first default dict param, so make the map
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
+ }
+
+ // compile value then key, then store it to the dict
+ compile_node(comp, pn_equal);
+ EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pn_id));
+ EMIT(store_map);
+ } else {
+ // positional default: leave the value on the stack for the tuple
+ comp->num_default_params += 1;
+ compile_node(comp, pn_equal);
+ }
+ }
+ }
+}
+
+// Compile the parameter list (including default values) of a function
+// or lambda, then emit the make_function/make_closure for `scope`.
+STATIC void compile_funcdef_lambdef(compiler_t *comp, scope_t *scope, mp_parse_node_t pn_params, pn_kind_t pn_list_kind) {
+ // When we call compile_funcdef_lambdef_param below it can compile an arbitrary
+ // expression for default arguments, which may contain a lambda. The lambda will
+ // call here in a nested way, so we must save and restore the relevant state.
+ bool orig_have_star = comp->have_star;
+ uint16_t orig_num_dict_params = comp->num_dict_params;
+ uint16_t orig_num_default_params = comp->num_default_params;
+
+ // compile default parameters
+ comp->have_star = false;
+ comp->num_dict_params = 0;
+ comp->num_default_params = 0;
+ apply_to_single_or_list(comp, pn_params, pn_list_kind, compile_funcdef_lambdef_param);
+
+ if (comp->compile_error != MP_OBJ_NULL) {
+ return;
+ }
+
+ // in MicroPython we put the default positional parameters into a tuple using the bytecode
+ // the default keywords args may have already made the tuple; if not, do it now
+ if (comp->num_default_params > 0 && comp->num_dict_params == 0) {
+ EMIT_ARG(build, comp->num_default_params, MP_EMIT_BUILD_TUPLE);
+ EMIT(load_null); // sentinel indicating empty default keyword args
+ }
+
+ // make the function
+ close_over_variables_etc(comp, scope, comp->num_default_params, comp->num_dict_params);
+
+ // restore state
+ comp->have_star = orig_have_star;
+ comp->num_dict_params = orig_num_dict_params;
+ comp->num_default_params = orig_num_default_params;
+}
+
+// leaves function object on stack
+// returns function name
+// On the scope pass a new SCOPE_FUNCTION is created and cached in
+// pns->nodes[4] so subsequent compiler passes can retrieve it.
+STATIC qstr compile_funcdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ // create a new scope for this function
+ scope_t *s = scope_new_and_link(comp, SCOPE_FUNCTION, (mp_parse_node_t)pns, emit_options);
+ // store the function scope so the compiling function can use it at each pass
+ pns->nodes[4] = (mp_parse_node_t)s;
+ }
+
+ // get the scope for this function
+ scope_t *fscope = (scope_t *)pns->nodes[4];
+
+ // compile the function definition
+ compile_funcdef_lambdef(comp, fscope, pns->nodes[1], PN_typedargslist);
+
+ // return its name (the 'f' in "def f(...):")
+ return fscope->simple_name;
+}
+
+// leaves class object on stack
+// returns class name
+// On the scope pass a new SCOPE_CLASS is created and cached in
+// pns->nodes[3]; the class is built by calling __build_class__ with
+// the class body function, its name, and any parent classes.
+STATIC qstr compile_classdef_helper(compiler_t *comp, mp_parse_node_struct_t *pns, uint emit_options) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ // create a new scope for this class
+ scope_t *s = scope_new_and_link(comp, SCOPE_CLASS, (mp_parse_node_t)pns, emit_options);
+ // store the class scope so the compiling function can use it at each pass
+ pns->nodes[3] = (mp_parse_node_t)s;
+ }
+
+ EMIT(load_build_class);
+
+ // scope for this class
+ scope_t *cscope = (scope_t *)pns->nodes[3];
+
+ // compile the class
+ close_over_variables_etc(comp, cscope, 0, 0);
+
+ // get its name
+ EMIT_ARG(load_const_str, cscope->simple_name);
+
+ // nodes[1] has parent classes, if any
+ // empty parenthesis (eg class C():) gets here as an empty PN_classdef_2 and needs special handling
+ mp_parse_node_t parents = pns->nodes[1];
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(parents, PN_classdef_2)) {
+ parents = MP_PARSE_NODE_NULL;
+ }
+ compile_trailer_paren_helper(comp, parents, false, 2);
+
+ // return its name (the 'C' in class C(...):")
+ return cscope->simple_name;
+}
+
+// returns true if it was a built-in decorator (even if the built-in had an error)
+// Recognises @micropython.bytecode/native/viper and the inline-asm
+// decorators, and sets *emit_options accordingly.
+STATIC bool compile_built_in_decorator(compiler_t *comp, size_t name_len, mp_parse_node_t *name_nodes, uint *emit_options) {
+ if (MP_PARSE_NODE_LEAF_ARG(name_nodes[0]) != MP_QSTR_micropython) {
+ return false;
+ }
+
+ // must be exactly "micropython.<attr>"
+ if (name_len != 2) {
+ compile_syntax_error(comp, name_nodes[0], MP_ERROR_TEXT("invalid micropython decorator"));
+ return true;
+ }
+
+ qstr attr = MP_PARSE_NODE_LEAF_ARG(name_nodes[1]);
+ if (attr == MP_QSTR_bytecode) {
+ *emit_options = MP_EMIT_OPT_BYTECODE;
+ // @micropython.native decorator.
+ } else if (attr == MP_QSTR_native) {
+ // Different from MicroPython: native doesn't raise SyntaxError if native support isn't
+ // compiled, it just passes through the function unmodified.
+ #if MICROPY_EMIT_NATIVE
+ *emit_options = MP_EMIT_OPT_NATIVE_PYTHON;
+ #else
+ return true;
+ #endif
+ #if MICROPY_EMIT_NATIVE
+ // @micropython.viper decorator.
+ } else if (attr == MP_QSTR_viper) {
+ *emit_options = MP_EMIT_OPT_VIPER;
+ #endif
+ #if MICROPY_EMIT_INLINE_ASM
+ #if MICROPY_DYNAMIC_COMPILER
+ // dynamic compiler: accept both asm decorator names
+ } else if (attr == MP_QSTR_asm_thumb) {
+ *emit_options = MP_EMIT_OPT_ASM;
+ } else if (attr == MP_QSTR_asm_xtensa) {
+ *emit_options = MP_EMIT_OPT_ASM;
+ #else
+ // static build: only the target architecture's asm decorator
+ } else if (attr == ASM_DECORATOR_QSTR) {
+ *emit_options = MP_EMIT_OPT_ASM;
+ #endif
+ #endif
+ } else {
+ compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid micropython decorator"));
+ }
+
+ #if MICROPY_DYNAMIC_COMPILER
+ // verify the dynamically-selected architecture supports the request
+ if (*emit_options == MP_EMIT_OPT_NATIVE_PYTHON || *emit_options == MP_EMIT_OPT_VIPER) {
+ if (emit_native_table[mp_dynamic_compiler.native_arch] == NULL) {
+ compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid architecture"));
+ }
+ } else if (*emit_options == MP_EMIT_OPT_ASM) {
+ if (emit_asm_table[mp_dynamic_compiler.native_arch] == NULL) {
+ compile_syntax_error(comp, name_nodes[1], MP_ERROR_TEXT("invalid architecture"));
+ }
+ }
+ #endif
+
+ return true;
+}
+
+// Compile a decorated funcdef/classdef: push each non-built-in
+// decorator, compile the body, then call the decorators innermost
+// first and store the final object under the body's name.
+STATIC void compile_decorated(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // get the list of decorators
+ mp_parse_node_t *nodes;
+ size_t n = mp_parse_node_extract_list(&pns->nodes[0], PN_decorators, &nodes);
+
+ // inherit emit options for this function/class definition
+ uint emit_options = comp->scope_cur->emit_options;
+
+ // compile each decorator
+ size_t num_built_in_decorators = 0;
+ for (size_t i = 0; i < n; i++) {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(nodes[i], PN_decorator)); // should be
+ mp_parse_node_struct_t *pns_decorator = (mp_parse_node_struct_t *)nodes[i];
+
+ // nodes[0] contains the decorator function, which is a dotted name
+ mp_parse_node_t *name_nodes;
+ size_t name_len = mp_parse_node_extract_list(&pns_decorator->nodes[0], PN_dotted_name, &name_nodes);
+
+ // check for built-in decorators
+ if (compile_built_in_decorator(comp, name_len, name_nodes, &emit_options)) {
+ // this was a built-in
+ num_built_in_decorators += 1;
+
+ } else {
+ // not a built-in, compile normally
+
+ // compile the decorator function
+ compile_node(comp, name_nodes[0]);
+ for (size_t j = 1; j < name_len; j++) {
+ assert(MP_PARSE_NODE_IS_ID(name_nodes[j])); // should be
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(name_nodes[j]), MP_EMIT_ATTR_LOAD);
+ }
+
+ // nodes[1] contains arguments to the decorator function, if any
+ if (!MP_PARSE_NODE_IS_NULL(pns_decorator->nodes[1])) {
+ // call the decorator function with the arguments in nodes[1]
+ compile_node(comp, pns_decorator->nodes[1]);
+ }
+ }
+ }
+
+ // compile the body (funcdef, async funcdef or classdef) and get its name
+ mp_parse_node_struct_t *pns_body = (mp_parse_node_struct_t *)pns->nodes[1];
+ qstr body_name = 0;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_funcdef) {
+ body_name = compile_funcdef_helper(comp, pns_body, emit_options);
+ #if MICROPY_PY_ASYNC_AWAIT
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_async_funcdef) {
+ // async def: compile the inner funcdef then mark its scope as
+ // a generator/async function
+ assert(MP_PARSE_NODE_IS_STRUCT(pns_body->nodes[0]));
+ mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns_body->nodes[0];
+ body_name = compile_funcdef_helper(comp, pns0, emit_options);
+ scope_t *fscope = (scope_t *)pns0->nodes[4];
+ fscope->scope_flags |= MP_SCOPE_FLAG_GENERATOR | MP_SCOPE_FLAG_ASYNC;
+ #endif
+ } else {
+ assert(MP_PARSE_NODE_STRUCT_KIND(pns_body) == PN_classdef); // should be
+ body_name = compile_classdef_helper(comp, pns_body, emit_options);
+ }
+
+ // call each decorator
+ // (built-in decorators consumed above emit no call)
+ for (size_t i = 0; i < n - num_built_in_decorators; i++) {
+ EMIT_ARG(call_function, 1, 0, 0);
+ }
+
+ // store func/class object into name
+ compile_store_id(comp, body_name);
+}
+
+STATIC void compile_funcdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // Compile the function definition (leaves the function object on
+    // the stack) and bind it to its name in the current scope.
+    qstr func_name = compile_funcdef_helper(comp, pns, comp->scope_cur->emit_options);
+    compile_store_id(comp, func_name);
+}
+
+// Compile a single `del` target: a plain identifier, a subscript or
+// attribute expression, or a parenthesised tuple of targets (handled
+// recursively).  Anything else is a syntax error.
+STATIC void c_del_stmt(compiler_t *comp, mp_parse_node_t pn) {
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ compile_delete_id(comp, MP_PARSE_NODE_LEAF_ARG(pn));
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_expr_normal)) {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+
+ compile_node(comp, pns->nodes[0]); // base of the atom_expr_normal node
+
+ if (MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])) {
+ mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_atom_expr_trailers) {
+ // several trailers: evaluate all but the last, which is the
+ // one actually deleted
+ int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1);
+ for (int i = 0; i < n - 1; i++) {
+ compile_node(comp, pns1->nodes[i]);
+ }
+ assert(MP_PARSE_NODE_IS_STRUCT(pns1->nodes[n - 1]));
+ pns1 = (mp_parse_node_struct_t *)pns1->nodes[n - 1];
+ }
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_bracket) {
+ // del obj[idx]
+ compile_node(comp, pns1->nodes[0]);
+ EMIT_ARG(subscr, MP_EMIT_SUBSCR_DELETE);
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_trailer_period) {
+ // del obj.attr
+ assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]), MP_EMIT_ATTR_DELETE);
+ } else {
+ goto cannot_delete;
+ }
+ } else {
+ goto cannot_delete;
+ }
+
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_paren)) {
+ // del (a, b, ...): recurse into each element of the tuple
+ pn = ((mp_parse_node_struct_t *)pn)->nodes[0];
+ if (MP_PARSE_NODE_IS_NULL(pn)) {
+ goto cannot_delete;
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_testlist_comp));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns)) {
+ goto cannot_delete;
+ }
+ for (size_t i = 0; i < MP_PARSE_NODE_STRUCT_NUM_NODES(pns); ++i) {
+ c_del_stmt(comp, pns->nodes[i]);
+ }
+ }
+ } else {
+ // some arbitrary statement that we can't delete (eg del 1)
+ goto cannot_delete;
+ }
+
+ return;
+
+cannot_delete:
+ compile_syntax_error(comp, (mp_parse_node_t)pn, MP_ERROR_TEXT("can't delete expression"));
+}
+
+// Compile `del x, y, ...`: apply c_del_stmt to each target in the
+// (possibly single-element) exprlist.
+STATIC void compile_del_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ apply_to_single_or_list(comp, pns->nodes[0], PN_exprlist, c_del_stmt);
+}
+
+// Compile a break or continue statement: emit an unwind_jump to the
+// enclosing loop's break/continue label, unwinding any exception
+// handlers entered since the loop started.
+STATIC void compile_break_cont_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ uint16_t label;
+ const compressed_string_t *error_msg;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_break_stmt) {
+ label = comp->break_label;
+ error_msg = MP_ERROR_TEXT("'break' outside loop");
+ } else {
+ label = comp->continue_label;
+ error_msg = MP_ERROR_TEXT("'continue' outside loop");
+ }
+ if (label == INVALID_LABEL) {
+ // records the error; code emission below is then moot
+ compile_syntax_error(comp, (mp_parse_node_t)pns, error_msg);
+ }
+ assert(comp->cur_except_level >= comp->break_continue_except_level);
+ EMIT_ARG(unwind_jump, label, comp->cur_except_level - comp->break_continue_except_level);
+}
+
+// Compile a return statement; a bare `return` returns None, and
+// `return a if c else b` is special-cased to match CPython's codegen.
+STATIC void compile_return_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ #if MICROPY_CPYTHON_COMPAT
+ if (comp->scope_cur->kind != SCOPE_FUNCTION) {
+ compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'return' outside function"));
+ return;
+ }
+ #endif
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // no argument to 'return', so return None
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ } else if (MICROPY_COMP_RETURN_IF_EXPR
+ && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_test_if_expr)) {
+ // special case when returning an if-expression; to match CPython optimisation
+ mp_parse_node_struct_t *pns_test_if_expr = (mp_parse_node_struct_t *)pns->nodes[0];
+ mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t *)pns_test_if_expr->nodes[1];
+
+ uint l_fail = comp_next_label(comp);
+ c_if_cond(comp, pns_test_if_else->nodes[0], false, l_fail); // condition
+ compile_node(comp, pns_test_if_expr->nodes[0]); // success value
+ EMIT(return_value);
+ EMIT_ARG(label_assign, l_fail);
+ compile_node(comp, pns_test_if_else->nodes[1]); // failure value
+ } else {
+ compile_node(comp, pns->nodes[0]);
+ }
+ // return the value left on the stack by whichever branch ran
+ EMIT(return_value);
+}
+
+STATIC void compile_yield_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // A yield used as a statement: evaluate the yield expression and
+    // discard the value it produces on the stack.
+    mp_parse_node_t yield_expr = pns->nodes[0];
+    compile_node(comp, yield_expr);
+    EMIT(pop_top);
+}
+
+// Compile a raise statement in its three forms: bare `raise`
+// (re-raise), `raise x from y`, and `raise x`.
+STATIC void compile_raise_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // raise
+ EMIT_ARG(raise_varargs, 0);
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_raise_stmt_arg)) {
+ // raise x from y
+ pns = (mp_parse_node_struct_t *)pns->nodes[0];
+ compile_node(comp, pns->nodes[0]);
+ compile_node(comp, pns->nodes[1]);
+ EMIT_ARG(raise_varargs, 2);
+ } else {
+ // raise x
+ compile_node(comp, pns->nodes[0]);
+ EMIT_ARG(raise_varargs, 1);
+ }
+}
+
+// q_base holds the base of the name
+// eg a -> q_base=a
+// a.b.c -> q_base=a
+// Emits the import of `pn` (possibly an `x as y` form) and sets
+// *q_base to the name the caller should bind.  For a dotted name the
+// full "a.b.c" qstr is assembled in a temporary buffer.
+STATIC void do_import_name(compiler_t *comp, mp_parse_node_t pn, qstr *q_base) {
+ bool is_as = false;
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_as_name)) {
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ // a name of the form x as y; unwrap it
+ *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[1]);
+ pn = pns->nodes[0];
+ is_as = true;
+ }
+ if (MP_PARSE_NODE_IS_NULL(pn)) {
+ // empty name (eg, from . import x)
+ *q_base = MP_QSTR_;
+ EMIT_ARG(import, MP_QSTR_, MP_EMIT_IMPORT_NAME); // import the empty string
+ } else if (MP_PARSE_NODE_IS_ID(pn)) {
+ // just a simple name
+ qstr q_full = MP_PARSE_NODE_LEAF_ARG(pn);
+ if (!is_as) {
+ *q_base = q_full;
+ }
+ EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME);
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_dotted_name)); // should be
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ {
+ // a name of the form a.b.c
+ if (!is_as) {
+ *q_base = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+ }
+ int n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+ // total length: component lengths plus n-1 separating dots
+ int len = n - 1;
+ for (int i = 0; i < n; i++) {
+ len += qstr_len(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]));
+ }
+ char *q_ptr = mp_local_alloc(len);
+ char *str_dest = q_ptr;
+ for (int i = 0; i < n; i++) {
+ if (i > 0) {
+ *str_dest++ = '.';
+ }
+ size_t str_src_len;
+ const byte *str_src = qstr_data(MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), &str_src_len);
+ memcpy(str_dest, str_src, str_src_len);
+ str_dest += str_src_len;
+ }
+ qstr q_full = qstr_from_strn(q_ptr, len);
+ mp_local_free(q_ptr);
+ EMIT_ARG(import, q_full, MP_EMIT_IMPORT_NAME);
+ if (is_as) {
+ // `import a.b.c as y` binds the innermost module: walk the
+ // attribute chain down from the top-level package
+ for (int i = 1; i < n; i++) {
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]), MP_EMIT_ATTR_LOAD);
+ }
+ }
+ }
+ }
+}
+
+// Compile one `import a.b.c [as y]` clause: emit the import (level 0,
+// no fromlist) and store the module under the base or as-name.
+STATIC void compile_dotted_as_name(compiler_t *comp, mp_parse_node_t pn) {
+ EMIT_ARG(load_const_small_int, 0); // level 0 import
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // not importing from anything
+ qstr q_base;
+ do_import_name(comp, pn, &q_base);
+ compile_store_id(comp, q_base);
+}
+
+// Compile `import a [, b.c as d, ...]`: handle each dotted name in turn.
+STATIC void compile_import_name(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ apply_to_single_or_list(comp, pns->nodes[0], PN_dotted_as_names, compile_dotted_as_name);
+}
+
+// Compile `from X import ...`, including relative imports (leading
+// dots/ellipses contribute to the import level) and `import *`.
+STATIC void compile_import_from(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ mp_parse_node_t pn_import_source = pns->nodes[0];
+
+ // extract the preceding .'s (if any) for a relative import, to compute the import level
+ uint import_level = 0;
+ do {
+ mp_parse_node_t pn_rel;
+ if (MP_PARSE_NODE_IS_TOKEN(pn_import_source) || MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_one_or_more_period_or_ellipsis)) {
+ // This covers relative imports with dots only like "from .. import"
+ pn_rel = pn_import_source;
+ pn_import_source = MP_PARSE_NODE_NULL;
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_import_source, PN_import_from_2b)) {
+ // This covers relative imports starting with dot(s) like "from .foo import"
+ mp_parse_node_struct_t *pns_2b = (mp_parse_node_struct_t *)pn_import_source;
+ pn_rel = pns_2b->nodes[0];
+ pn_import_source = pns_2b->nodes[1];
+ assert(!MP_PARSE_NODE_IS_NULL(pn_import_source)); // should not be
+ } else {
+ // Not a relative import
+ break;
+ }
+
+ // get the list of . and/or ...'s
+ mp_parse_node_t *nodes;
+ size_t n = mp_parse_node_extract_list(&pn_rel, PN_one_or_more_period_or_ellipsis, &nodes);
+
+ // count the total number of .'s
+ for (size_t i = 0; i < n; i++) {
+ if (MP_PARSE_NODE_IS_TOKEN_KIND(nodes[i], MP_TOKEN_DEL_PERIOD)) {
+ import_level++;
+ } else {
+ // should be an MP_TOKEN_ELLIPSIS
+ import_level += 3;
+ }
+ }
+ } while (0);
+
+ if (MP_PARSE_NODE_IS_TOKEN_KIND(pns->nodes[1], MP_TOKEN_OP_STAR)) {
+ // from X import *
+ #if MICROPY_CPYTHON_COMPAT
+ if (comp->scope_cur->kind != SCOPE_MODULE) {
+ compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("import * not at module level"));
+ return;
+ }
+ #endif
+
+ EMIT_ARG(load_const_small_int, import_level);
+
+ // build the "fromlist" tuple
+ EMIT_ARG(load_const_str, MP_QSTR__star_);
+ EMIT_ARG(build, 1, MP_EMIT_BUILD_TUPLE);
+
+ // do the import
+ qstr dummy_q;
+ do_import_name(comp, pn_import_source, &dummy_q);
+ EMIT_ARG(import, MP_QSTRnull, MP_EMIT_IMPORT_STAR);
+
+ } else {
+ // from X import a [as b], c [as d], ...
+ EMIT_ARG(load_const_small_int, import_level);
+
+ // build the "fromlist" tuple
+ mp_parse_node_t *pn_nodes;
+ size_t n = mp_parse_node_extract_list(&pns->nodes[1], PN_import_as_names, &pn_nodes);
+ for (size_t i = 0; i < n; i++) {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+ mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pn_nodes[i];
+ qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+ EMIT_ARG(load_const_str, id2);
+ }
+ EMIT_ARG(build, n, MP_EMIT_BUILD_TUPLE);
+
+ // do the import
+ qstr dummy_q;
+ do_import_name(comp, pn_import_source, &dummy_q);
+ // import each name from the module and bind it (to the as-name
+ // if one is given)
+ for (size_t i = 0; i < n; i++) {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_nodes[i], PN_import_as_name));
+ mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pn_nodes[i];
+ qstr id2 = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[0]); // should be id
+ EMIT_ARG(import, id2, MP_EMIT_IMPORT_FROM);
+ if (MP_PARSE_NODE_IS_NULL(pns3->nodes[1])) {
+ compile_store_id(comp, id2);
+ } else {
+ compile_store_id(comp, MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]));
+ }
+ }
+ // pop the module object itself
+ EMIT(pop_top);
+ }
+}
+
+// Handle a `global x` declaration for one identifier: mark it as an
+// explicit global in the current scope (error if already used
+// otherwise) and in the module scope if it exists there.
+STATIC void compile_declare_global(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) {
+ if (id_info->kind != ID_INFO_KIND_UNDECIDED && id_info->kind != ID_INFO_KIND_GLOBAL_EXPLICIT) {
+ compile_syntax_error(comp, pn, MP_ERROR_TEXT("identifier redefined as global"));
+ return;
+ }
+ id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+
+ // if the id exists in the global scope, set its kind to EXPLICIT_GLOBAL
+ id_info = scope_find_global(comp->scope_cur, id_info->qst);
+ if (id_info != NULL) {
+ id_info->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+ }
+}
+
+// Handle a `nonlocal x` declaration for one identifier: try to close
+// over a binding in an enclosing scope; error if no binding exists or
+// the id was already used in a conflicting way.
+STATIC void compile_declare_nonlocal(compiler_t *comp, mp_parse_node_t pn, id_info_t *id_info) {
+ if (id_info->kind == ID_INFO_KIND_UNDECIDED) {
+ id_info->kind = ID_INFO_KIND_GLOBAL_IMPLICIT;
+ scope_check_to_close_over(comp->scope_cur, id_info);
+ // if still implicit-global, no enclosing binding was found
+ if (id_info->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+ compile_syntax_error(comp, pn, MP_ERROR_TEXT("no binding for nonlocal found"));
+ }
+ } else if (id_info->kind != ID_INFO_KIND_FREE) {
+ compile_syntax_error(comp, pn, MP_ERROR_TEXT("identifier redefined as nonlocal"));
+ }
+}
+
+// Compile global/nonlocal statements.  These only affect the scope
+// pass (they emit no code): each listed name is declared in the
+// current scope's id table.
+STATIC void compile_global_nonlocal_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (comp->pass == MP_PASS_SCOPE) {
+ bool is_global = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_global_stmt;
+
+ if (!is_global && comp->scope_cur->kind == SCOPE_MODULE) {
+ compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("can't declare nonlocal in outer code"));
+ return;
+ }
+
+ mp_parse_node_t *nodes;
+ size_t n = mp_parse_node_extract_list(&pns->nodes[0], PN_name_list, &nodes);
+ for (size_t i = 0; i < n; i++) {
+ qstr qst = MP_PARSE_NODE_LEAF_ARG(nodes[i]);
+ id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, qst, ID_INFO_KIND_UNDECIDED);
+ if (is_global) {
+ compile_declare_global(comp, (mp_parse_node_t)pns, id_info);
+ } else {
+ compile_declare_nonlocal(comp, (mp_parse_node_t)pns, id_info);
+ }
+ }
+ }
+}
+
+// Compile an assert statement: jump past the raise when the condition
+// holds, otherwise raise AssertionError (with the optional message).
+STATIC void compile_assert_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // with optimisations enabled we don't compile assertions
+ if (MP_STATE_VM(mp_optimise_value) != 0) {
+ return;
+ }
+
+ uint l_end = comp_next_label(comp);
+ c_if_cond(comp, pns->nodes[0], true, l_end);
+ EMIT_LOAD_GLOBAL(MP_QSTR_AssertionError); // we load_global instead of load_id, to be consistent with CPython
+ if (!MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
+ // assertion message
+ compile_node(comp, pns->nodes[1]);
+ EMIT_ARG(call_function, 1, 0, 0);
+ }
+ EMIT_ARG(raise_varargs, 1);
+ EMIT_ARG(label_assign, l_end);
+}
+
+// Compile an if/elif/else statement, eliding dead branches for
+// constant conditions and redundant jumps after returns.
+STATIC void compile_if_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ uint l_end = comp_next_label(comp);
+
+ // optimisation: don't emit anything when "if False"
+ if (!mp_parse_node_is_const_false(pns->nodes[0])) {
+ uint l_fail = comp_next_label(comp);
+ c_if_cond(comp, pns->nodes[0], false, l_fail); // if condition
+
+ compile_node(comp, pns->nodes[1]); // if block
+
+ // optimisation: skip everything else when "if True"
+ if (mp_parse_node_is_const_true(pns->nodes[0])) {
+ goto done;
+ }
+
+ if (
+ // optimisation: don't jump over non-existent elif/else blocks
+ !(MP_PARSE_NODE_IS_NULL(pns->nodes[2]) && MP_PARSE_NODE_IS_NULL(pns->nodes[3]))
+ // optimisation: don't jump if last instruction was return
+ && !EMIT(last_emit_was_return_value)
+ ) {
+ // jump over elif/else blocks
+ EMIT_ARG(jump, l_end);
+ }
+
+ EMIT_ARG(label_assign, l_fail);
+ }
+
+ // compile elif blocks (if any)
+ mp_parse_node_t *pn_elif;
+ size_t n_elif = mp_parse_node_extract_list(&pns->nodes[2], PN_if_stmt_elif_list, &pn_elif);
+ for (size_t i = 0; i < n_elif; i++) {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_elif[i], PN_if_stmt_elif)); // should be
+ mp_parse_node_struct_t *pns_elif = (mp_parse_node_struct_t *)pn_elif[i];
+
+ // optimisation: don't emit anything when "if False"
+ if (!mp_parse_node_is_const_false(pns_elif->nodes[0])) {
+ uint l_fail = comp_next_label(comp);
+ c_if_cond(comp, pns_elif->nodes[0], false, l_fail); // elif condition
+
+ compile_node(comp, pns_elif->nodes[1]); // elif block
+
+ // optimisation: skip everything else when "elif True"
+ if (mp_parse_node_is_const_true(pns_elif->nodes[0])) {
+ goto done;
+ }
+
+ // optimisation: don't jump if last instruction was return
+ if (!EMIT(last_emit_was_return_value)) {
+ EMIT_ARG(jump, l_end);
+ }
+ EMIT_ARG(label_assign, l_fail);
+ }
+ }
+
+ // compile else block
+ compile_node(comp, pns->nodes[3]); // can be null
+
+done:
+ EMIT_ARG(label_assign, l_end);
+}
+
+// Begin a new loop: save the enclosing loop's break/continue labels and
+// except-nesting level, then install fresh labels for this loop.  Always
+// paired with END_BREAK_CONTINUE_BLOCK, which restores the saved state so
+// that break/continue inside a loop's `else` clause target the outer loop.
+// Declares break_label / continue_label in the invoking scope.
+#define START_BREAK_CONTINUE_BLOCK \
+    uint16_t old_break_label = comp->break_label; \
+    uint16_t old_continue_label = comp->continue_label; \
+    uint16_t old_break_continue_except_level = comp->break_continue_except_level; \
+    uint break_label = comp_next_label(comp); \
+    uint continue_label = comp_next_label(comp); \
+    comp->break_label = break_label; \
+    comp->continue_label = continue_label; \
+    comp->break_continue_except_level = comp->cur_except_level;
+
+// Restore the break/continue state saved by START_BREAK_CONTINUE_BLOCK.
+#define END_BREAK_CONTINUE_BLOCK \
+    comp->break_label = old_break_label; \
+    comp->continue_label = old_continue_label; \
+    comp->break_continue_except_level = old_break_continue_except_level;
+
+// Compile `while <cond>: <body> else: <else>`.
+//   nodes[0]=condition, [1]=body, [2]=else block (may be null)
+// Loop layout: jump down to the condition test (at continue_label), with the
+// body above it, so each iteration tests the condition then jumps back to top.
+STATIC void compile_while_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    START_BREAK_CONTINUE_BLOCK
+
+    if (!mp_parse_node_is_const_false(pns->nodes[0])) { // optimisation: don't emit anything for "while False"
+        uint top_label = comp_next_label(comp);
+        if (!mp_parse_node_is_const_true(pns->nodes[0])) { // optimisation: don't jump to cond for "while True"
+            EMIT_ARG(jump, continue_label);
+        }
+        EMIT_ARG(label_assign, top_label);
+        compile_node(comp, pns->nodes[1]); // body
+        EMIT_ARG(label_assign, continue_label);
+        c_if_cond(comp, pns->nodes[0], true, top_label); // condition
+    }
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[2]); // else
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// This function compiles an optimised for-loop of the form:
+// for <var> in range(<start>, <end>, <step>):
+// <body>
+// else:
+// <else>
+// <var> must be an identifier and <step> must be a small-int.
+//
+// Semantics of for-loop require:
+// - final failing value should not be stored in the loop variable
+// - if the loop never runs, the loop variable should never be assigned
+// - assignments to <var>, <end> or <step> in the body do not alter the loop
+// (<step> is a constant for us, so no need to worry about it changing)
+//
+// If <end> is a small-int, then the stack during the for-loop contains just
+// the current value of <var>. Otherwise, the stack contains <end> then the
+// current value of <var>.
+STATIC void compile_for_stmt_optimised_range(compiler_t *comp, mp_parse_node_t pn_var, mp_parse_node_t pn_start, mp_parse_node_t pn_end, mp_parse_node_t pn_step, mp_parse_node_t pn_body, mp_parse_node_t pn_else) {
+    START_BREAK_CONTINUE_BLOCK
+
+    uint top_label = comp_next_label(comp);
+    uint entry_label = comp_next_label(comp);
+
+    // put the end value on the stack if it's not a small-int constant
+    bool end_on_stack = !MP_PARSE_NODE_IS_SMALL_INT(pn_end);
+    if (end_on_stack) {
+        compile_node(comp, pn_end);
+    }
+
+    // compile: start
+    compile_node(comp, pn_start);
+
+    // the condition is tested first (at entry_label), so a loop that never
+    // runs never assigns to the loop variable
+    EMIT_ARG(jump, entry_label);
+    EMIT_ARG(label_assign, top_label);
+
+    // duplicate next value and store it to var
+    EMIT(dup_top);
+    c_assign(comp, pn_var, ASSIGN_STORE);
+
+    // compile body
+    compile_node(comp, pn_body);
+
+    EMIT_ARG(label_assign, continue_label);
+
+    // compile: var + step
+    compile_node(comp, pn_step);
+    EMIT_ARG(binary_op, MP_BINARY_OP_INPLACE_ADD);
+
+    EMIT_ARG(label_assign, entry_label);
+
+    // compile: if var <cond> end: goto top
+    if (end_on_stack) {
+        // stack is (end, var): duplicate both then reorder to (var, end)
+        EMIT(dup_top_two);
+        EMIT(rot_two);
+    } else {
+        EMIT(dup_top);
+        compile_node(comp, pn_end);
+    }
+    assert(MP_PARSE_NODE_IS_SMALL_INT(pn_step));
+    // direction of the comparison depends on the sign of the constant step
+    if (MP_PARSE_NODE_LEAF_SMALL_INT(pn_step) >= 0) {
+        EMIT_ARG(binary_op, MP_BINARY_OP_LESS);
+    } else {
+        EMIT_ARG(binary_op, MP_BINARY_OP_MORE);
+    }
+    EMIT_ARG(pop_jump_if, true, top_label);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    // Compile the else block. We must pop the iterator variables before
+    // executing the else code because it may contain break/continue statements.
+    uint end_label = 0;
+    if (!MP_PARSE_NODE_IS_NULL(pn_else)) {
+        // discard final value of "var", and possible "end" value
+        EMIT(pop_top);
+        if (end_on_stack) {
+            EMIT(pop_top);
+        }
+        compile_node(comp, pn_else);
+        end_label = comp_next_label(comp);
+        EMIT_ARG(jump, end_label);
+        // rebalance the compiler's stack-depth tracking for the path that
+        // arrives via break_label (which still has var [and end] on the stack)
+        EMIT_ARG(adjust_stack_size, 1 + end_on_stack);
+    }
+
+    EMIT_ARG(label_assign, break_label);
+
+    // discard final value of var that failed the loop condition
+    EMIT(pop_top);
+
+    // discard <end> value if it's on the stack
+    if (end_on_stack) {
+        EMIT(pop_top);
+    }
+
+    if (!MP_PARSE_NODE_IS_NULL(pn_else)) {
+        EMIT_ARG(label_assign, end_label);
+    }
+}
+
+// Compile `for <target> in <iter>: <body> else: <else>`.
+//   nodes[0]=target, [1]=iterable, [2]=body, [3]=else block (may be null)
+// First tries to detect `for <id> in range(...)` with compilable arguments and
+// a non-zero constant step, delegating to compile_for_stmt_optimised_range;
+// otherwise emits the generic get_iter/for_iter loop.
+STATIC void compile_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // this bit optimises: for <x> in range(...), turning it into an explicitly incremented variable
+    // this is actually slower, but uses no heap memory
+    // for viper it will be much, much faster
+    if (/*comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER &&*/ MP_PARSE_NODE_IS_ID(pns->nodes[0]) && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_atom_expr_normal)) {
+        mp_parse_node_struct_t *pns_it = (mp_parse_node_struct_t *)pns->nodes[1];
+        // the iterable must be a plain call of the global name `range`
+        if (MP_PARSE_NODE_IS_ID(pns_it->nodes[0])
+            && MP_PARSE_NODE_LEAF_ARG(pns_it->nodes[0]) == MP_QSTR_range
+            && MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pns_it->nodes[1]) == PN_trailer_paren) {
+            mp_parse_node_t pn_range_args = ((mp_parse_node_struct_t *)pns_it->nodes[1])->nodes[0];
+            mp_parse_node_t *args;
+            size_t n_args = mp_parse_node_extract_list(&pn_range_args, PN_arglist, &args);
+            mp_parse_node_t pn_range_start;
+            mp_parse_node_t pn_range_end;
+            mp_parse_node_t pn_range_step;
+            bool optimize = false;
+            if (1 <= n_args && n_args <= 3) {
+                optimize = true;
+                // normalise to explicit (start, end, step), defaulting start=0, step=1
+                if (n_args == 1) {
+                    pn_range_start = mp_parse_node_new_small_int(0);
+                    pn_range_end = args[0];
+                    pn_range_step = mp_parse_node_new_small_int(1);
+                } else if (n_args == 2) {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = mp_parse_node_new_small_int(1);
+                } else {
+                    pn_range_start = args[0];
+                    pn_range_end = args[1];
+                    pn_range_step = args[2];
+                    // the step must be a non-zero constant integer to do the optimisation
+                    if (!MP_PARSE_NODE_IS_SMALL_INT(pn_range_step)
+                        || MP_PARSE_NODE_LEAF_SMALL_INT(pn_range_step) == 0) {
+                        optimize = false;
+                    }
+                }
+                // arguments must be able to be compiled as standard expressions
+                // (star/double-star unpacking or keyword arguments disqualify)
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_start)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_range_start);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+                if (optimize && MP_PARSE_NODE_IS_STRUCT(pn_range_end)) {
+                    int k = MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_range_end);
+                    if (k == PN_arglist_star || k == PN_arglist_dbl_star || k == PN_argument) {
+                        optimize = false;
+                    }
+                }
+            }
+            if (optimize) {
+                compile_for_stmt_optimised_range(comp, pns->nodes[0], pn_range_start, pn_range_end, pn_range_step, pns->nodes[2], pns->nodes[3]);
+                return;
+            }
+        }
+    }
+
+    START_BREAK_CONTINUE_BLOCK
+    // mark the break label so the emitter knows this break unwinds a for-loop
+    comp->break_label |= MP_EMIT_BREAK_FROM_FOR;
+
+    uint pop_label = comp_next_label(comp);
+
+    compile_node(comp, pns->nodes[1]); // iterator
+    EMIT_ARG(get_iter, true);
+    EMIT_ARG(label_assign, continue_label);
+    EMIT_ARG(for_iter, pop_label);
+    c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable
+    compile_node(comp, pns->nodes[2]); // body
+    if (!EMIT(last_emit_was_return_value)) {
+        EMIT_ARG(jump, continue_label);
+    }
+    EMIT_ARG(label_assign, pop_label);
+    EMIT(for_iter_end);
+
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    compile_node(comp, pns->nodes[3]); // else (may be empty)
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// Compile the try/except(/else) part of a try statement.
+//   pn_body    - the try body
+//   pn_excepts - array of n_except PN_try_stmt_except clause nodes
+//   pn_else    - the else block (may be null)
+// The body runs under an exception handler; the handler compares the raised
+// exception against each except clause in turn, falling through to re-raise
+// (via end_except_handler) when none matches.
+STATIC void compile_try_except(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_excepts, mp_parse_node_t pn_else) {
+    // setup code
+    uint l1 = comp_next_label(comp);
+    uint success_label = comp_next_label(comp);
+
+    compile_increase_except_level(comp, l1, MP_EMIT_SETUP_BLOCK_EXCEPT);
+
+    compile_node(comp, pn_body); // body
+    EMIT_ARG(pop_except_jump, success_label, false); // jump over exception handler
+
+    EMIT_ARG(label_assign, l1); // start of exception handler
+    EMIT(start_except_handler);
+
+    // at this point the top of the stack contains the exception instance that was raised
+
+    uint l2 = comp_next_label(comp);
+
+    for (int i = 0; i < n_except; i++) {
+        assert(MP_PARSE_NODE_IS_STRUCT_KIND(pn_excepts[i], PN_try_stmt_except)); // should be
+        mp_parse_node_struct_t *pns_except = (mp_parse_node_struct_t *)pn_excepts[i];
+
+        qstr qstr_exception_local = 0; // 0 means no "as <name>" binding
+        uint end_finally_label = comp_next_label(comp);
+        #if MICROPY_PY_SYS_SETTRACE
+        EMIT_ARG(set_source_line, pns_except->source_line);
+        #endif
+
+        if (MP_PARSE_NODE_IS_NULL(pns_except->nodes[0])) {
+            // this is a catch all exception handler
+            if (i + 1 != n_except) {
+                compile_syntax_error(comp, pn_excepts[i], MP_ERROR_TEXT("default 'except' must be last"));
+                compile_decrease_except_level(comp);
+                return;
+            }
+        } else {
+            // this exception handler requires a match to a certain type of exception
+            mp_parse_node_t pns_exception_expr = pns_except->nodes[0];
+            if (MP_PARSE_NODE_IS_STRUCT(pns_exception_expr)) {
+                mp_parse_node_struct_t *pns3 = (mp_parse_node_struct_t *)pns_exception_expr;
+                if (MP_PARSE_NODE_STRUCT_KIND(pns3) == PN_try_stmt_as_name) {
+                    // handler binds the exception to a local
+                    pns_exception_expr = pns3->nodes[0];
+                    qstr_exception_local = MP_PARSE_NODE_LEAF_ARG(pns3->nodes[1]);
+                }
+            }
+            // test the raised exception against this clause's type, jumping to
+            // the next clause (end_finally_label) on mismatch
+            EMIT(dup_top);
+            compile_node(comp, pns_exception_expr);
+            EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+            EMIT_ARG(pop_jump_if, false, end_finally_label);
+        }
+
+        // either discard or store the exception instance
+        if (qstr_exception_local == 0) {
+            EMIT(pop_top);
+        } else {
+            compile_store_id(comp, qstr_exception_local);
+        }
+
+        // If the exception is bound to a variable <e> then the <body> of the
+        // exception handler is wrapped in a try-finally so that the name <e> can
+        // be deleted (per Python semantics) even if the <body> has an exception.
+        // In such a case the generated code for the exception handler is:
+        // try:
+        //     <body>
+        // finally:
+        //     <e> = None
+        //     del <e>
+        uint l3 = 0;
+        if (qstr_exception_local != 0) {
+            l3 = comp_next_label(comp);
+            compile_increase_except_level(comp, l3, MP_EMIT_SETUP_BLOCK_FINALLY);
+        }
+        compile_node(comp, pns_except->nodes[1]); // the <body>
+        if (qstr_exception_local != 0) {
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            EMIT_ARG(label_assign, l3);
+            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+            compile_store_id(comp, qstr_exception_local);
+            compile_delete_id(comp, qstr_exception_local);
+            compile_decrease_except_level(comp);
+        }
+
+        EMIT_ARG(pop_except_jump, l2, true);
+        EMIT_ARG(label_assign, end_finally_label);
+        EMIT_ARG(adjust_stack_size, 1); // stack adjust for the exception instance
+    }
+
+    compile_decrease_except_level(comp);
+    EMIT(end_except_handler);
+
+    EMIT_ARG(label_assign, success_label);
+    compile_node(comp, pn_else); // else block, can be null
+    EMIT_ARG(label_assign, l2);
+}
+
+// Compile try/finally, with optional except/else clauses nested inside (when
+// n_except > 0 the except part is delegated to compile_try_except).  The
+// finally block is entered via the SETUP_BLOCK_FINALLY protection however the
+// body exits.
+STATIC void compile_try_finally(compiler_t *comp, mp_parse_node_t pn_body, int n_except, mp_parse_node_t *pn_except, mp_parse_node_t pn_else, mp_parse_node_t pn_finally) {
+    uint l_finally_block = comp_next_label(comp);
+
+    compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY);
+
+    if (n_except == 0) {
+        // an else clause without except is impossible, per the grammar
+        assert(MP_PARSE_NODE_IS_NULL(pn_else));
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state
+        compile_node(comp, pn_body);
+        EMIT_ARG(adjust_stack_size, -3);
+    } else {
+        compile_try_except(comp, pn_body, n_except, pn_except, pn_else);
+    }
+    // None on the stack tells the finally epilogue there is no pending exception
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    EMIT_ARG(label_assign, l_finally_block);
+    compile_node(comp, pn_finally);
+
+    compile_decrease_except_level(comp);
+}
+
+// Compile a try statement, dispatching on its parsed form:
+//   try/finally, try/except[.../else][/finally], or plain try/except.
+//   nodes[0]=try body, nodes[1]=struct describing the clause combination
+STATIC void compile_try_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should be
+    {
+        mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)pns->nodes[1];
+        if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_finally) {
+            // just try-finally
+            compile_try_finally(comp, pns->nodes[0], 0, NULL, MP_PARSE_NODE_NULL, pns2->nodes[0]);
+        } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_try_stmt_except_and_more) {
+            // try-except and possibly else and/or finally
+            mp_parse_node_t *pn_excepts;
+            size_t n_except = mp_parse_node_extract_list(&pns2->nodes[0], PN_try_stmt_except_list, &pn_excepts);
+            if (MP_PARSE_NODE_IS_NULL(pns2->nodes[2])) {
+                // no finally
+                compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1]);
+            } else {
+                // have finally
+                compile_try_finally(comp, pns->nodes[0], n_except, pn_excepts, pns2->nodes[1], ((mp_parse_node_struct_t *)pns2->nodes[2])->nodes[0]);
+            }
+        } else {
+            // just try-except
+            mp_parse_node_t *pn_excepts;
+            size_t n_except = mp_parse_node_extract_list(&pns->nodes[1], PN_try_stmt_except_list, &pn_excepts);
+            compile_try_except(comp, pns->nodes[0], n_except, pn_excepts, MP_PARSE_NODE_NULL);
+        }
+    }
+}
+
+// Recursive helper for `with`: set up one context-manager item (with optional
+// "as" binding), recurse over the remaining n-1 items (the innermost recursion
+// compiles the body), then emit this item's cleanup.  Nesting in this way
+// unwinds the managers in reverse order of entry.
+//   n/nodes - remaining with-items; body - the with-statement body parse node
+STATIC void compile_with_stmt_helper(compiler_t *comp, size_t n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+    if (n == 0) {
+        // no more pre-bits, compile the body of the with
+        compile_node(comp, body);
+    } else {
+        uint l_end = comp_next_label(comp);
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) {
+            // this pre-bit is of the form "a as b"
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)nodes[0];
+            compile_node(comp, pns->nodes[0]);
+            compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH);
+            c_assign(comp, pns->nodes[1], ASSIGN_STORE);
+        } else {
+            // this pre-bit is just an expression
+            compile_node(comp, nodes[0]);
+            compile_increase_except_level(comp, l_end, MP_EMIT_SETUP_BLOCK_WITH);
+            EMIT(pop_top);
+        }
+        // compile additional pre-bits and the body
+        compile_with_stmt_helper(comp, n - 1, nodes + 1, body);
+        // finish this with block
+        EMIT_ARG(with_cleanup, l_end);
+        reserve_labels_for_native(comp, 3); // used by native's with_cleanup
+        compile_decrease_except_level(comp);
+    }
+}
+
+// Compile a `with` statement: extract its list of context-manager items
+// ("a as b, c as d, ...") and emit them as nested with-blocks, the innermost
+// one containing the body.
+STATIC void compile_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // collect the context-manager items that precede the body
+    mp_parse_node_t *pn_items;
+    size_t n_items = mp_parse_node_extract_list(&pns->nodes[0], PN_with_stmt_list, &pn_items);
+    assert(n_items > 0);
+    // recurse: one nested with-block per item, body compiled at the bottom
+    compile_with_stmt_helper(comp, n_items, pn_items, pns->nodes[1]);
+}
+
+// Emit the code for `yield from` applied to the object on the top of the
+// stack: get its iterator, prime it with None, then delegate via YIELD_FROM.
+STATIC void compile_yield_from(compiler_t *comp) {
+    EMIT_ARG(get_iter, false);
+    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+    EMIT_ARG(yield, MP_EMIT_YIELD_FROM);
+    // labels consumed internally by the native emitter's yield-from sequence
+    reserve_labels_for_native(comp, 3);
+}
+
+#if MICROPY_PY_ASYNC_AWAIT
+// Await a no-argument method call on the object at the top of the stack:
+// emits obj.<method>() followed by a yield-from of the returned awaitable.
+STATIC void compile_await_object_method(compiler_t *comp, qstr method) {
+    EMIT_ARG(load_method, method, false);
+    EMIT_ARG(call_method, 0, 0, 0);
+    compile_yield_from(comp);
+}
+
+// Compile `async for <target> in <iter>: <body> else: <else>`.
+//   nodes[0]=target, [1]=iterator expression, [2]=body, [3]=else
+// Each iteration awaits __anext__() inside a try/except that catches
+// StopAsyncIteration to terminate the loop.
+// NOTE(review): the name for storing the __aiter__ object is taken from
+// nodes[1] via MP_PARSE_NODE_LEAF_ARG, so the iterator expression is assumed
+// to be a plain identifier here — confirm against the caller/grammar.
+STATIC void compile_async_for_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // comp->break_label |= MP_EMIT_BREAK_FROM_FOR;
+    qstr context = MP_PARSE_NODE_LEAF_ARG(pns->nodes[1]);
+    uint while_else_label = comp_next_label(comp);
+    uint try_exception_label = comp_next_label(comp);
+    uint try_else_label = comp_next_label(comp);
+    uint try_finally_label = comp_next_label(comp);
+
+    // obtain the async iterator and stash it in a variable for reloading each iteration
+    compile_node(comp, pns->nodes[1]); // iterator
+    EMIT_ARG(load_method, MP_QSTR___aiter__, false);
+    EMIT_ARG(call_method, 0, 0, 0);
+    compile_store_id(comp, context);
+
+    START_BREAK_CONTINUE_BLOCK
+
+    EMIT_ARG(label_assign, continue_label);
+
+    compile_increase_except_level(comp, try_exception_label, MP_EMIT_SETUP_BLOCK_EXCEPT);
+
+    // await the next value and assign it to the loop target
+    compile_load_id(comp, context);
+    compile_await_object_method(comp, MP_QSTR___anext__);
+    c_assign(comp, pns->nodes[0], ASSIGN_STORE); // variable
+    EMIT_ARG(pop_except_jump, try_else_label, false);
+
+    // exception handler: swallow StopAsyncIteration (end of iteration),
+    // re-raise anything else
+    EMIT_ARG(label_assign, try_exception_label);
+    EMIT(start_except_handler);
+    EMIT(dup_top);
+    EMIT_LOAD_GLOBAL(MP_QSTR_StopAsyncIteration);
+    EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+    EMIT_ARG(pop_jump_if, false, try_finally_label);
+    EMIT(pop_top); // pop exception instance
+    EMIT_ARG(pop_except_jump, while_else_label, true);
+
+    EMIT_ARG(label_assign, try_finally_label);
+    EMIT_ARG(adjust_stack_size, 1); // if we jump here, the exc is on the stack
+    compile_decrease_except_level(comp);
+    EMIT(end_except_handler);
+
+    EMIT_ARG(label_assign, try_else_label);
+    compile_node(comp, pns->nodes[2]); // body
+
+    EMIT_ARG(jump, continue_label);
+    // break/continue apply to outer loop (if any) in the else block
+    END_BREAK_CONTINUE_BLOCK
+
+    EMIT_ARG(label_assign, while_else_label);
+    compile_node(comp, pns->nodes[3]); // else
+
+    EMIT_ARG(label_assign, break_label);
+}
+
+// Recursive helper for `async with`: enter one context manager (awaiting
+// __aenter__), recurse over the remaining items and the body, then emit a
+// finally-based epilogue that awaits __aexit__ for each of the three possible
+// exit paths (normal fall-through, propagating exception, return/unwind jump)
+// — the cases and stack layouts are documented inline below.
+STATIC void compile_async_with_stmt_helper(compiler_t *comp, size_t n, mp_parse_node_t *nodes, mp_parse_node_t body) {
+    if (n == 0) {
+        // no more pre-bits, compile the body of the with
+        compile_node(comp, body);
+    } else {
+        uint l_finally_block = comp_next_label(comp);
+        uint l_aexit_no_exc = comp_next_label(comp);
+        uint l_ret_unwind_jump = comp_next_label(comp);
+        uint l_end = comp_next_label(comp);
+
+        if (MP_PARSE_NODE_IS_STRUCT_KIND(nodes[0], PN_with_item)) {
+            // this pre-bit is of the form "a as b"
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)nodes[0];
+            compile_node(comp, pns->nodes[0]);
+            EMIT(dup_top);
+            compile_await_object_method(comp, MP_QSTR___aenter__);
+            c_assign(comp, pns->nodes[1], ASSIGN_STORE);
+        } else {
+            // this pre-bit is just an expression
+            compile_node(comp, nodes[0]);
+            EMIT(dup_top);
+            compile_await_object_method(comp, MP_QSTR___aenter__);
+            EMIT(pop_top);
+        }
+
+        // To keep the Python stack size down, and because we can't access values on
+        // this stack further down than 3 elements (via rot_three), we don't preload
+        // __aexit__ (as per normal with) but rather wait until we need it below.
+
+        // Start the try-finally statement
+        compile_increase_except_level(comp, l_finally_block, MP_EMIT_SETUP_BLOCK_FINALLY);
+
+        // Compile any additional pre-bits of the "async with", and also the body
+        EMIT_ARG(adjust_stack_size, 3); // stack adjust for possible UNWIND_JUMP state
+        compile_async_with_stmt_helper(comp, n - 1, nodes + 1, body);
+        EMIT_ARG(adjust_stack_size, -3);
+
+        // We have now finished the "try" block and fall through to the "finally"
+
+        // At this point, after the with body has executed, we have 3 cases:
+        // 1. no exception, we just fall through to this point; stack: (..., ctx_mgr)
+        // 2. exception propagating out, we get to the finally block; stack: (..., ctx_mgr, exc)
+        // 3. return or unwind jump, we get to the finally block; stack: (..., ctx_mgr, X, INT)
+
+        // Handle case 1: call __aexit__
+        // Stack: (..., ctx_mgr)
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // to tell end_finally there's no exception
+        EMIT(rot_two);
+        EMIT_ARG(jump, l_aexit_no_exc); // jump to code below to call __aexit__
+
+        // Start of "finally" block
+        // At this point we have case 2 or 3, we detect which one by the TOS being an exception or not
+        EMIT_ARG(label_assign, l_finally_block);
+
+        // Detect if TOS an exception or not
+        EMIT(dup_top);
+        EMIT_LOAD_GLOBAL(MP_QSTR_BaseException);
+        EMIT_ARG(binary_op, MP_BINARY_OP_EXCEPTION_MATCH);
+        EMIT_ARG(pop_jump_if, false, l_ret_unwind_jump); // if not an exception then we have case 3
+
+        // Handle case 2: call __aexit__ and either swallow or re-raise the exception
+        // Stack: (..., ctx_mgr, exc)
+        EMIT(dup_top);
+        EMIT(rot_three);
+        EMIT(rot_two);
+        EMIT_ARG(load_method, MP_QSTR___aexit__, false);
+        EMIT(rot_three);
+        EMIT(rot_three);
+        EMIT(dup_top);
+        #if MICROPY_CPYTHON_COMPAT
+        EMIT_ARG(attr, MP_QSTR___class__, MP_EMIT_ATTR_LOAD); // get type(exc)
+        #else
+        compile_load_id(comp, MP_QSTR_type);
+        EMIT(rot_two);
+        EMIT_ARG(call_function, 1, 0, 0); // get type(exc)
+        #endif
+        EMIT(rot_two);
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // dummy traceback value
+        // Stack: (..., exc, __aexit__, ctx_mgr, type(exc), exc, None)
+        EMIT_ARG(call_method, 3, 0, 0);
+        compile_yield_from(comp);
+        // a truthy __aexit__ result swallows the exception
+        EMIT_ARG(pop_jump_if, false, l_end);
+        EMIT(pop_top); // pop exception
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE); // replace with None to swallow exception
+        EMIT_ARG(jump, l_end);
+        EMIT_ARG(adjust_stack_size, 2);
+
+        // Handle case 3: call __aexit__
+        // Stack: (..., ctx_mgr, X, INT)
+        EMIT_ARG(label_assign, l_ret_unwind_jump);
+        EMIT(rot_three);
+        EMIT(rot_three);
+        EMIT_ARG(label_assign, l_aexit_no_exc);
+        EMIT_ARG(load_method, MP_QSTR___aexit__, false);
+        // __aexit__(None, None, None) for the no-exception paths
+        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+        EMIT(dup_top);
+        EMIT(dup_top);
+        EMIT_ARG(call_method, 3, 0, 0);
+        compile_yield_from(comp);
+        EMIT(pop_top);
+        EMIT_ARG(adjust_stack_size, -1);
+
+        // End of "finally" block
+        // Stack can have one of three configurations:
+        // a. (..., None) - from either case 1, or case 2 with swallowed exception
+        // b. (..., exc) - from case 2 with re-raised exception
+        // c. (..., X, INT) - from case 3
+        EMIT_ARG(label_assign, l_end);
+        compile_decrease_except_level(comp);
+    }
+}
+
+// Compile an `async with` statement: extract the context-manager items and
+// emit them as nested async-with blocks via compile_async_with_stmt_helper.
+STATIC void compile_async_with_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // collect the "a as b, c as d, ..." items that precede the body
+    mp_parse_node_t *pn_items;
+    size_t n_items = mp_parse_node_extract_list(&pns->nodes[0], PN_with_stmt_list, &pn_items);
+    assert(n_items > 0);
+    // one nested block per item; the innermost recursion compiles the body
+    compile_async_with_stmt_helper(comp, n_items, pn_items, pns->nodes[1]);
+}
+
+// Compile an `async` statement: `async def` marks the new function scope as a
+// generator/async; `async for` and `async with` are only valid inside an
+// async function and dispatch to their dedicated compilers.
+STATIC void compile_async_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[0]));
+    mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns->nodes[0];
+    if (MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_funcdef) {
+        // async def
+        compile_funcdef(comp, pns0);
+        // the scope was stored in the funcdef node; flag it as an async generator
+        scope_t *fscope = (scope_t *)pns0->nodes[4];
+        fscope->scope_flags |= MP_SCOPE_FLAG_GENERATOR | MP_SCOPE_FLAG_ASYNC;
+    } else {
+        // async for/with; first verify the scope is a generator
+        int scope_flags = comp->scope_cur->scope_flags;
+        if (!(scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+            compile_syntax_error(comp, (mp_parse_node_t)pns0,
+                MP_ERROR_TEXT("'await', 'async for' or 'async with' outside async function"));
+            return;
+        }
+
+        if (MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_for_stmt) {
+            // async for
+            compile_async_for_stmt(comp, pns0);
+        } else {
+            // async with
+            assert(MP_PARSE_NODE_STRUCT_KIND(pns0) == PN_with_stmt);
+            compile_async_with_stmt(comp, pns0);
+        }
+    }
+}
+#endif
+
+// Compile an expression statement.  nodes[0] is the left/only expression;
+// nodes[1] selects the form:
+//   null          - bare expression (printed at the REPL top level, otherwise
+//                   evaluated and discarded; lone constants emit nothing)
+//   PN_annassign  - annotated assignment "x: t [= v]" (annotation ignored)
+//   PN_expr_stmt_augassign   - augmented assignment "x <op>= v"
+//   PN_expr_stmt_assign_list - chained assignment "a = b = ... = v"
+//   otherwise     - plain assignment "lhs = rhs" (with an optimisation for
+//                   2/3-element tuple-to-tuple assignment)
+STATIC void compile_expr_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    mp_parse_node_t pn_rhs = pns->nodes[1];
+    if (MP_PARSE_NODE_IS_NULL(pn_rhs)) {
+        if (comp->is_repl && comp->scope_cur->kind == SCOPE_MODULE) {
+            // for REPL, evaluate then print the expression
+            compile_load_id(comp, MP_QSTR___repl_print__);
+            compile_node(comp, pns->nodes[0]);
+            EMIT_ARG(call_function, 1, 0, 0);
+            EMIT(pop_top);
+
+        } else {
+            // for non-REPL, evaluate then discard the expression
+            if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0]) && !MP_PARSE_NODE_IS_ID(pns->nodes[0]))
+                || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object)) {
+                // do nothing with a lonely constant
+            } else {
+                compile_node(comp, pns->nodes[0]); // just an expression
+                EMIT(pop_top); // discard last result since this is a statement and leaves nothing on the stack
+            }
+        }
+    } else if (MP_PARSE_NODE_IS_STRUCT(pn_rhs)) {
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pn_rhs;
+        int kind = MP_PARSE_NODE_STRUCT_KIND(pns1);
+        if (kind == PN_annassign) {
+            // the annotation is in pns1->nodes[0] and is ignored
+            if (MP_PARSE_NODE_IS_NULL(pns1->nodes[1])) {
+                // an annotation of the form "x: y"
+                // inside a function this declares "x" as a local
+                if (comp->scope_cur->kind == SCOPE_FUNCTION) {
+                    if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) {
+                        qstr lhs = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+                        scope_find_or_add_id(comp->scope_cur, lhs, ID_INFO_KIND_LOCAL);
+                    }
+                }
+            } else {
+                // an assigned annotation of the form "x: y = z"
+                pn_rhs = pns1->nodes[1];
+                goto plain_assign;
+            }
+        } else if (kind == PN_expr_stmt_augassign) {
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_LOAD); // lhs load for aug assign
+            compile_node(comp, pns1->nodes[1]); // rhs
+            assert(MP_PARSE_NODE_IS_TOKEN(pns1->nodes[0]));
+            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]);
+            // relies on the augmented-assign tokens and inplace binary ops
+            // being laid out in parallel, so the op is derived by offset
+            mp_binary_op_t op = MP_BINARY_OP_INPLACE_OR + (tok - MP_TOKEN_DEL_PIPE_EQUAL);
+            EMIT_ARG(binary_op, op);
+            c_assign(comp, pns->nodes[0], ASSIGN_AUG_STORE); // lhs store for aug assign
+        } else if (kind == PN_expr_stmt_assign_list) {
+            int rhs = MP_PARSE_NODE_STRUCT_NUM_NODES(pns1) - 1;
+            compile_node(comp, pns1->nodes[rhs]); // rhs
+            // following CPython, we store left-most first
+            if (rhs > 0) {
+                EMIT(dup_top);
+            }
+            c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+            for (int i = 0; i < rhs; i++) {
+                if (i + 1 < rhs) {
+                    EMIT(dup_top);
+                }
+                c_assign(comp, pns1->nodes[i], ASSIGN_STORE); // middle store
+            }
+        } else {
+        plain_assign:
+            #if MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+            if (MP_PARSE_NODE_IS_STRUCT_KIND(pn_rhs, PN_testlist_star_expr)
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_star_expr)) {
+                mp_parse_node_struct_t *pns0 = (mp_parse_node_struct_t *)pns->nodes[0];
+                pns1 = (mp_parse_node_struct_t *)pn_rhs;
+                uint32_t n_pns0 = MP_PARSE_NODE_STRUCT_NUM_NODES(pns0);
+                // Can only optimise a tuple-to-tuple assignment when all of the following hold:
+                // - equal number of items in LHS and RHS tuples
+                // - 2 or 3 items in the tuples
+                // - there are no star expressions in the LHS tuple
+                if (n_pns0 == MP_PARSE_NODE_STRUCT_NUM_NODES(pns1)
+                    && (n_pns0 == 2
+                        #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                        || n_pns0 == 3
+                        #endif
+                        )
+                    && !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[0], PN_star_expr)
+                    && !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[1], PN_star_expr)
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    && (n_pns0 == 2 || !MP_PARSE_NODE_IS_STRUCT_KIND(pns0->nodes[2], PN_star_expr))
+                    #endif
+                    ) {
+                    // Optimisation for a, b = c, d or a, b, c = d, e, f
+                    compile_node(comp, pns1->nodes[0]); // rhs
+                    compile_node(comp, pns1->nodes[1]); // rhs
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    if (n_pns0 == 3) {
+                        compile_node(comp, pns1->nodes[2]); // rhs
+                        EMIT(rot_three);
+                    }
+                    #endif
+                    EMIT(rot_two);
+                    c_assign(comp, pns0->nodes[0], ASSIGN_STORE); // lhs store
+                    c_assign(comp, pns0->nodes[1], ASSIGN_STORE); // lhs store
+                    #if MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+                    if (n_pns0 == 3) {
+                        c_assign(comp, pns0->nodes[2], ASSIGN_STORE); // lhs store
+                    }
+                    #endif
+                    return;
+                }
+            }
+            #endif
+
+            compile_node(comp, pn_rhs); // rhs
+            c_assign(comp, pns->nodes[0], ASSIGN_STORE); // lhs store
+        }
+    } else {
+        goto plain_assign;
+    }
+}
+
+// Compile a conditional (ternary) expression: `<a> if <cond> else <b>`.
+// nodes[0] is the success value; nodes[1] holds the condition and failure value.
+STATIC void compile_test_if_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_test_if_else));
+    mp_parse_node_struct_t *pns_test_if_else = (mp_parse_node_struct_t *)pns->nodes[1];
+
+    uint l_fail = comp_next_label(comp);
+    uint l_end = comp_next_label(comp);
+    c_if_cond(comp, pns_test_if_else->nodes[0], false, l_fail); // condition
+    compile_node(comp, pns->nodes[0]); // success value
+    EMIT_ARG(jump, l_end);
+    EMIT_ARG(label_assign, l_fail);
+    // only one of the two branches runs, so rebalance the tracked stack depth
+    EMIT_ARG(adjust_stack_size, -1); // adjust stack size
+    compile_node(comp, pns_test_if_else->nodes[1]); // failure value
+    EMIT_ARG(label_assign, l_end);
+}
+
+// Compile a lambda expression.  On the scope pass a new SCOPE_LAMBDA is
+// created and stashed in nodes[2] so later passes can retrieve it; the shared
+// funcdef/lambdef path then emits the code object.
+//   nodes[0]=parameter list, [2]=scope pointer (filled in on the scope pass)
+STATIC void compile_lambdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    if (comp->pass == MP_PASS_SCOPE) {
+        // create a new scope for this lambda
+        scope_t *s = scope_new_and_link(comp, SCOPE_LAMBDA, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
+        // store the lambda scope so the compiling function (this one) can use it at each pass
+        pns->nodes[2] = (mp_parse_node_t)s;
+    }
+
+    // get the scope for this lambda
+    scope_t *this_scope = (scope_t *)pns->nodes[2];
+
+    // compile the lambda definition
+    compile_funcdef_lambdef(comp, this_scope, pns->nodes[0], PN_varargslist);
+}
+
+#if MICROPY_PY_ASSIGN_EXPR
+// Compile an assignment expression (walrus operator) `<name> := <expr>`:
+// evaluate the expression, duplicate it (the expression's value), and store
+// one copy into <name>.  Inside a comprehension the store targets the parent
+// scope so the binding "escapes", per PEP 572.
+STATIC void compile_namedexpr_helper(compiler_t *comp, mp_parse_node_t pn_name, mp_parse_node_t pn_expr) {
+    if (!MP_PARSE_NODE_IS_ID(pn_name)) {
+        // NOTE(review): no early return here — compile_syntax_error records the
+        // failure and compilation continues; matches the pattern used elsewhere
+        // in this file, but confirm the leaf-arg access below stays harmless.
+        compile_syntax_error(comp, (mp_parse_node_t)pn_name, MP_ERROR_TEXT("can't assign to expression"));
+    }
+    compile_node(comp, pn_expr);
+    EMIT(dup_top);
+    scope_t *old_scope = comp->scope_cur;
+    if (SCOPE_IS_COMP_LIKE(comp->scope_cur->kind)) {
+        // Use parent's scope for assigned value so it can "escape"
+        comp->scope_cur = comp->scope_cur->parent;
+    }
+    compile_store_id(comp, MP_PARSE_NODE_LEAF_ARG(pn_name));
+    comp->scope_cur = old_scope;
+}
+
+// Parse-node entry point for the walrus operator: nodes[0]=name, nodes[1]=expr.
+STATIC void compile_namedexpr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_namedexpr_helper(comp, pns->nodes[0], pns->nodes[1]);
+}
+#endif
+
+// Compile a chain of `or` / `and` operands with short-circuit semantics:
+// every operand except the last conditionally jumps to the end, keeping the
+// deciding value on the stack (jump_if_or_pop).
+STATIC void compile_or_and_test(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    // for `or` we short-circuit on a truthy value; for `and` on a falsy one
+    bool jump_on_true = MP_PARSE_NODE_STRUCT_KIND(pns) == PN_or_test;
+    uint l_done = comp_next_label(comp);
+    int num_operands = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    for (int idx = 0; idx < num_operands; ++idx) {
+        compile_node(comp, pns->nodes[idx]);
+        // every operand but the final one gets a short-circuit test
+        if (idx != num_operands - 1) {
+            EMIT_ARG(jump_if_or_pop, jump_on_true, l_done);
+        }
+    }
+    EMIT_ARG(label_assign, l_done);
+}
+
+// Compile `not <expr>`: evaluate the operand then apply the boolean-not unary op.
+STATIC void compile_not_test_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_node(comp, pns->nodes[0]);
+    EMIT_ARG(unary_op, MP_UNARY_OP_NOT);
+}
+
+// Compile a (possibly chained) comparison such as `a < b <= c`.  Operands and
+// operator tokens alternate in pns->nodes.  For chains of more than one
+// operator the shared middle operand is duplicated and rotated so each pair
+// can be compared, short-circuiting to l_fail on the first false result.
+STATIC void compile_comparison(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    bool multi = (num_nodes > 3); // more than one comparison operator
+    uint l_fail = 0;
+    if (multi) {
+        l_fail = comp_next_label(comp);
+    }
+    for (int i = 1; i + 1 < num_nodes; i += 2) {
+        compile_node(comp, pns->nodes[i + 1]);
+        if (i + 2 < num_nodes) {
+            // keep a copy of the right operand for the next comparison in the chain
+            EMIT(dup_top);
+            EMIT(rot_three);
+        }
+        if (MP_PARSE_NODE_IS_TOKEN(pns->nodes[i])) {
+            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]);
+            mp_binary_op_t op;
+            if (tok == MP_TOKEN_KW_IN) {
+                op = MP_BINARY_OP_IN;
+            } else {
+                // comparison tokens and ops are laid out in parallel enums
+                op = MP_BINARY_OP_LESS + (tok - MP_TOKEN_OP_LESS);
+            }
+            EMIT_ARG(binary_op, op);
+        } else {
+            // `not in` and `is [not]` arrive as structs rather than tokens
+            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[i])); // should be
+            mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)pns->nodes[i];
+            int kind = MP_PARSE_NODE_STRUCT_KIND(pns2);
+            if (kind == PN_comp_op_not_in) {
+                EMIT_ARG(binary_op, MP_BINARY_OP_NOT_IN);
+            } else {
+                assert(kind == PN_comp_op_is); // should be
+                if (MP_PARSE_NODE_IS_NULL(pns2->nodes[0])) {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS);
+                } else {
+                    EMIT_ARG(binary_op, MP_BINARY_OP_IS_NOT);
+                }
+            }
+        }
+        if (i + 2 < num_nodes) {
+            // short-circuit: a false intermediate result ends the whole chain
+            EMIT_ARG(jump_if_or_pop, false, l_fail);
+        }
+    }
+    if (multi) {
+        uint l_end = comp_next_label(comp);
+        EMIT_ARG(jump, l_end);
+        EMIT_ARG(label_assign, l_fail);
+        // failure path: the saved middle operand is still under the result;
+        // discard it, leaving just the (false) result
+        EMIT_ARG(adjust_stack_size, 1);
+        EMIT(rot_two);
+        EMIT(pop_top);
+        EMIT_ARG(label_assign, l_end);
+    }
+}
+
+// A bare `*x` is only legal as an assignment target; reaching here as an
+// expression is a syntax error.
+STATIC void compile_star_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("*x must be assignment target"));
+}
+
+// Compile a left-associative chain of |, ^ or & (PN_expr / PN_xor_expr /
+// PN_and_expr).  The static asserts verify the parse-node kinds and binary-op
+// enums are laid out in parallel, so the op is derived by offset from PN_expr.
+STATIC void compile_binary_op(compiler_t *comp, mp_parse_node_struct_t *pns) {
+    MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_xor_expr - PN_expr == MP_BINARY_OP_XOR);
+    MP_STATIC_ASSERT(MP_BINARY_OP_OR + PN_and_expr - PN_expr == MP_BINARY_OP_AND);
+    mp_binary_op_t binary_op = MP_BINARY_OP_OR + MP_PARSE_NODE_STRUCT_KIND(pns) - PN_expr;
+    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+    compile_node(comp, pns->nodes[0]);
+    for (int i = 1; i < num_nodes; ++i) {
+        compile_node(comp, pns->nodes[i]);
+        EMIT_ARG(binary_op, binary_op);
+    }
+}
+
// Compile a left-associative chain of binary operations whose children
// alternate value, operator-token, value, ...  The operator is computed by
// offsetting from MP_TOKEN_OP_DBL_LESS, which assumes the MP_TOKEN_OP_* and
// MP_BINARY_OP_* enumerations run in the same order from <</MP_BINARY_OP_LSHIFT.
STATIC void compile_term(compiler_t *comp, mp_parse_node_struct_t *pns) {
    int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
    compile_node(comp, pns->nodes[0]); // leftmost operand
    for (int i = 1; i + 1 < num_nodes; i += 2) {
        compile_node(comp, pns->nodes[i + 1]); // operand to the right of the token
        mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns->nodes[i]);
        mp_binary_op_t op = MP_BINARY_OP_LSHIFT + (tok - MP_TOKEN_OP_DBL_LESS);
        EMIT_ARG(binary_op, op);
    }
}
+
+STATIC void compile_factor_2(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ compile_node(comp, pns->nodes[1]);
+ mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
+ mp_unary_op_t op;
+ if (tok == MP_TOKEN_OP_TILDE) {
+ op = MP_UNARY_OP_INVERT;
+ } else {
+ assert(tok == MP_TOKEN_OP_PLUS || tok == MP_TOKEN_OP_MINUS);
+ op = MP_UNARY_OP_POSITIVE + (tok - MP_TOKEN_OP_PLUS);
+ }
+ EMIT_ARG(unary_op, op);
+}
+
// Compile an atom followed by a chain of trailers: a.b, a[i], a(...).
// Two patterns are special-cased inline: a zero-argument super() call inside
// a function, and (when configured) the OrderedDict({...}) constructor.
STATIC void compile_atom_expr_normal(compiler_t *comp, mp_parse_node_struct_t *pns) {
    // compile the subject of the expression
    compile_node(comp, pns->nodes[0]);

    // compile_atom_expr_await may call us with a NULL node
    if (MP_PARSE_NODE_IS_NULL(pns->nodes[1])) {
        return;
    }

    // get the array of trailers (known to be an array of PARSE_NODE_STRUCT)
    size_t num_trail = 1;
    mp_parse_node_struct_t **pns_trail = (mp_parse_node_struct_t **)&pns->nodes[1];
    if (MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_atom_expr_trailers) {
        num_trail = MP_PARSE_NODE_STRUCT_NUM_NODES(pns_trail[0]);
        pns_trail = (mp_parse_node_struct_t **)&pns_trail[0]->nodes[0];
    }

    // the current index into the array of trailers
    size_t i = 0;

    // handle special super() call
    if (comp->scope_cur->kind == SCOPE_FUNCTION
        && MP_PARSE_NODE_IS_ID(pns->nodes[0])
        && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_super
        && MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_trailer_paren
        && MP_PARSE_NODE_IS_NULL(pns_trail[0]->nodes[0])) {
        // at this point we have matched "super()" within a function

        // load the class for super to search for a parent
        compile_load_id(comp, MP_QSTR___class__);

        // look for first argument to function (assumes it's "self")
        bool found = false;
        id_info_t *id = &comp->scope_cur->id_info[0];
        for (size_t n = comp->scope_cur->id_info_len; n > 0; --n, ++id) {
            if (id->flags & ID_FLAG_IS_PARAM) {
                // first argument found; load it
                compile_load_id(comp, id->qst);
                found = true;
                break;
            }
        }
        if (!found) {
            compile_syntax_error(comp, (mp_parse_node_t)pns_trail[0],
                MP_ERROR_TEXT("super() can't find self")); // really a TypeError
            return;
        }

        if (num_trail >= 3
            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[1]) == PN_trailer_period
            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[2]) == PN_trailer_paren) {
            // optimisation for method calls super().f(...), to eliminate heap allocation
            mp_parse_node_struct_t *pns_period = pns_trail[1];
            mp_parse_node_struct_t *pns_paren = pns_trail[2];
            EMIT_ARG(load_method, MP_PARSE_NODE_LEAF_ARG(pns_period->nodes[0]), true);
            compile_trailer_paren_helper(comp, pns_paren->nodes[0], true, 0);
            i = 3; // the first three trailers were consumed above
        } else {
            // a super() call
            EMIT_ARG(call_function, 2, 0, 0);
            i = 1; // the () trailer was consumed above
        }

        #if MICROPY_COMP_CONST_LITERAL && MICROPY_PY_COLLECTIONS_ORDEREDDICT
        // handle special OrderedDict constructor
    } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])
               && MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]) == MP_QSTR_OrderedDict
               && MP_PARSE_NODE_STRUCT_KIND(pns_trail[0]) == PN_trailer_paren
               && MP_PARSE_NODE_IS_STRUCT_KIND(pns_trail[0]->nodes[0], PN_atom_brace)) {
        // at this point we have matched "OrderedDict({...})"

        // call OrderedDict with no arguments, then fill the resulting map in place
        EMIT_ARG(call_function, 0, 0, 0);
        mp_parse_node_struct_t *pns_dict = (mp_parse_node_struct_t *)pns_trail[0]->nodes[0];
        compile_atom_brace_helper(comp, pns_dict, false);
        i = 1;
        #endif
    }

    // compile the remaining trailers
    for (; i < num_trail; i++) {
        if (i + 1 < num_trail
            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[i]) == PN_trailer_period
            && MP_PARSE_NODE_STRUCT_KIND(pns_trail[i + 1]) == PN_trailer_paren) {
            // optimisation for method calls a.f(...), following PyPy
            mp_parse_node_struct_t *pns_period = pns_trail[i];
            mp_parse_node_struct_t *pns_paren = pns_trail[i + 1];
            EMIT_ARG(load_method, MP_PARSE_NODE_LEAF_ARG(pns_period->nodes[0]), false);
            compile_trailer_paren_helper(comp, pns_paren->nodes[0], true, 0);
            i += 1; // skip the () trailer; it was handled together with the attribute
        } else {
            // node is one of: trailer_paren, trailer_bracket, trailer_period
            compile_node(comp, (mp_parse_node_t)pns_trail[i]);
        }
    }
}
+
+STATIC void compile_power(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ compile_generic_all_nodes(comp, pns); // 2 nodes, arguments of power
+ EMIT_ARG(binary_op, MP_BINARY_OP_POWER);
+}
+
// Compile the argument list of a call and emit the call itself.
// pn_arglist: the arglist parse node (or a single argument node);
// is_method_call: emit call_method instead of call_function;
// n_positional_extra: positional args already pushed by the caller
// (e.g. self for the super().f(...) optimisation).
STATIC void compile_trailer_paren_helper(compiler_t *comp, mp_parse_node_t pn_arglist, bool is_method_call, int n_positional_extra) {
    // function to call is on top of stack

    // get the list of arguments
    mp_parse_node_t *args;
    size_t n_args = mp_parse_node_extract_list(&pn_arglist, PN_arglist, &args);

    // compile the arguments
    // Rather than calling compile_node on the list, we go through the list of args
    // explicitly here so that we can count the number of arguments and give sensible
    // error messages.
    int n_positional = n_positional_extra;
    uint n_keyword = 0;
    uint star_flags = 0;
    mp_parse_node_struct_t *star_args_node = NULL, *dblstar_args_node = NULL;
    for (size_t i = 0; i < n_args; i++) {
        if (MP_PARSE_NODE_IS_STRUCT(args[i])) {
            mp_parse_node_struct_t *pns_arg = (mp_parse_node_struct_t *)args[i];
            if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_star) {
                // *args: only one allowed per call
                if (star_flags & MP_EMIT_STAR_FLAG_SINGLE) {
                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("can't have multiple *x"));
                    return;
                }
                star_flags |= MP_EMIT_STAR_FLAG_SINGLE;
                star_args_node = pns_arg;
            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_arglist_dbl_star) {
                // **kwargs: only one allowed per call
                if (star_flags & MP_EMIT_STAR_FLAG_DOUBLE) {
                    compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("can't have multiple **x"));
                    return;
                }
                star_flags |= MP_EMIT_STAR_FLAG_DOUBLE;
                dblstar_args_node = pns_arg;
            } else if (MP_PARSE_NODE_STRUCT_KIND(pns_arg) == PN_argument) {
                #if MICROPY_PY_ASSIGN_EXPR
                if (MP_PARSE_NODE_IS_STRUCT_KIND(pns_arg->nodes[1], PN_argument_3)) {
                    // named expression (walrus) used as a positional argument
                    compile_namedexpr_helper(comp, pns_arg->nodes[0], ((mp_parse_node_struct_t *)pns_arg->nodes[1])->nodes[0]);
                    n_positional++;
                } else
                #endif
                if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns_arg->nodes[1], PN_comp_for)) {
                    // keyword argument: name must be a plain identifier
                    if (!MP_PARSE_NODE_IS_ID(pns_arg->nodes[0])) {
                        compile_syntax_error(comp, (mp_parse_node_t)pns_arg, MP_ERROR_TEXT("LHS of keyword arg must be an id"));
                        return;
                    }
                    EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns_arg->nodes[0]));
                    compile_node(comp, pns_arg->nodes[1]);
                    n_keyword += 1;
                } else {
                    // bare generator expression passed as the argument
                    compile_comprehension(comp, pns_arg, SCOPE_GEN_EXPR);
                    n_positional++;
                }
            } else {
                goto normal_argument;
            }
        } else {
        normal_argument:
            // plain positional argument; must precede any star/keyword args
            if (star_flags) {
                compile_syntax_error(comp, args[i], MP_ERROR_TEXT("non-keyword arg after */**"));
                return;
            }
            if (n_keyword > 0) {
                compile_syntax_error(comp, args[i], MP_ERROR_TEXT("non-keyword arg after keyword arg"));
                return;
            }
            compile_node(comp, args[i]);
            n_positional++;
        }
    }

    // compile the star/double-star arguments if we had them
    // if we had one but not the other then we load "null" as a place holder
    if (star_flags != 0) {
        if (star_args_node == NULL) {
            EMIT(load_null);
        } else {
            compile_node(comp, star_args_node->nodes[0]);
        }
        if (dblstar_args_node == NULL) {
            EMIT(load_null);
        } else {
            compile_node(comp, dblstar_args_node->nodes[0]);
        }
    }

    // emit the function/method call
    if (is_method_call) {
        EMIT_ARG(call_method, n_positional, n_keyword, star_flags);
    } else {
        EMIT_ARG(call_function, n_positional, n_keyword, star_flags);
    }
}
+
// Compile a comprehension (list/set/dict comprehension or generator
// expression) of the given scope kind.
// pns needs to have 2 nodes, first is lhs of comprehension, second is PN_comp_for node
STATIC void compile_comprehension(compiler_t *comp, mp_parse_node_struct_t *pns, scope_kind_t kind) {
    assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
    assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
    mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t *)pns->nodes[1];

    if (comp->pass == MP_PASS_SCOPE) {
        // create a new scope for this comprehension
        scope_t *s = scope_new_and_link(comp, kind, (mp_parse_node_t)pns, comp->scope_cur->emit_options);
        // store the comprehension scope so the compiling function (this one) can use it at each pass
        // (nodes[3] of the PN_comp_for node is repurposed as the cache slot)
        pns_comp_for->nodes[3] = (mp_parse_node_t)s;
    }

    // get the scope for this comprehension
    scope_t *this_scope = (scope_t *)pns_comp_for->nodes[3];

    // compile the comprehension
    close_over_variables_etc(comp, this_scope, 0, 0);

    compile_node(comp, pns_comp_for->nodes[1]); // source of the iterator
    if (kind == SCOPE_GEN_EXPR) {
        EMIT_ARG(get_iter, false);
    }
    // call the comprehension function with the iterable as its single argument
    EMIT_ARG(call_function, 1, 0, 0);
}
+
+STATIC void compile_atom_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // an empty tuple
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_TUPLE);
+ } else {
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp));
+ pns = (mp_parse_node_struct_t *)pns->nodes[0];
+ if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns)) {
+ // generator expression
+ compile_comprehension(comp, pns, SCOPE_GEN_EXPR);
+ } else {
+ // tuple with N items
+ compile_generic_tuple(comp, pns);
+ }
+ }
+}
+
+STATIC void compile_atom_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
+ // empty list
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST);
+ } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+ mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)pns->nodes[0];
+ if (MP_PARSE_NODE_TESTLIST_COMP_HAS_COMP_FOR(pns2)) {
+ // list comprehension
+ compile_comprehension(comp, pns2, SCOPE_LIST_COMP);
+ } else {
+ // list with N items
+ compile_generic_all_nodes(comp, pns2);
+ EMIT_ARG(build, MP_PARSE_NODE_STRUCT_NUM_NODES(pns2), MP_EMIT_BUILD_LIST);
+ }
+ } else {
+ // list with 1 item
+ compile_node(comp, pns->nodes[0]);
+ EMIT_ARG(build, 1, MP_EMIT_BUILD_LIST);
+ }
+}
+
// Compile the contents of { ... }: a dict or set display, or a dict/set
// comprehension.  If create_map is false the caller has already created
// the (empty) map on the stack and this function only fills it in (used
// by the OrderedDict({...}) optimisation in compile_atom_expr_normal).
STATIC void compile_atom_brace_helper(compiler_t *comp, mp_parse_node_struct_t *pns, bool create_map) {
    mp_parse_node_t pn = pns->nodes[0];
    if (MP_PARSE_NODE_IS_NULL(pn)) {
        // empty dict
        if (create_map) {
            EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
        }
    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
        pns = (mp_parse_node_struct_t *)pn;
        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker_item) {
            // dict with one element
            if (create_map) {
                EMIT_ARG(build, 1, MP_EMIT_BUILD_MAP);
            }
            compile_node(comp, pn);
            EMIT(store_map);
        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
            mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
            if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
                // dict/set with multiple elements

                // get tail elements (2nd, 3rd, ...)
                mp_parse_node_t *nodes;
                size_t n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);

                // first element sets whether it's a dict or set
                bool is_dict;
                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
                    // a dictionary (always taken when sets are compiled out)
                    if (create_map) {
                        EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_MAP);
                    }
                    compile_node(comp, pns->nodes[0]);
                    EMIT(store_map);
                    is_dict = true;
                } else {
                    // a set
                    compile_node(comp, pns->nodes[0]); // 1st value of set
                    is_dict = false;
                }

                // process rest of elements, checking they are all of the same kind
                for (size_t i = 0; i < n; i++) {
                    mp_parse_node_t pn_i = nodes[i];
                    bool is_key_value = MP_PARSE_NODE_IS_STRUCT_KIND(pn_i, PN_dictorsetmaker_item);
                    compile_node(comp, pn_i);
                    if (is_dict) {
                        if (!is_key_value) {
                            // bare value inside a dict display
                            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("invalid syntax"));
                            #else
                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("expecting key:value for dict"));
                            #endif
                            return;
                        }
                        EMIT(store_map);
                    } else {
                        if (is_key_value) {
                            // key:value pair inside a set display
                            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("invalid syntax"));
                            #else
                            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("expecting just a value for set"));
                            #endif
                            return;
                        }
                    }
                }

                #if MICROPY_PY_BUILTINS_SET
                // if it's a set, build it
                if (!is_dict) {
                    EMIT_ARG(build, 1 + n, MP_EMIT_BUILD_SET);
                }
                #endif
            } else {
                assert(MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_comp_for); // should be
                // dict/set comprehension
                if (!MICROPY_PY_BUILTINS_SET || MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_dictorsetmaker_item)) {
                    // a dictionary comprehension
                    compile_comprehension(comp, pns, SCOPE_DICT_COMP);
                } else {
                    // a set comprehension
                    compile_comprehension(comp, pns, SCOPE_SET_COMP);
                }
            }
        } else {
            // set with one element
            goto set_with_one_element;
        }
    } else {
        // set with one element
        set_with_one_element:
        #if MICROPY_PY_BUILTINS_SET
        compile_node(comp, pn);
        EMIT_ARG(build, 1, MP_EMIT_BUILD_SET);
        #else
        assert(0); // sets are compiled out, so this is unreachable
        #endif
    }
}
+
+STATIC void compile_atom_brace(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ compile_atom_brace_helper(comp, pns, true);
+}
+
+STATIC void compile_trailer_paren(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ compile_trailer_paren_helper(comp, pns->nodes[0], false, 0);
+}
+
+STATIC void compile_trailer_bracket(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // object who's index we want is on top of stack
+ compile_node(comp, pns->nodes[0]); // the index
+ EMIT_ARG(subscr, MP_EMIT_SUBSCR_LOAD);
+}
+
+STATIC void compile_trailer_period(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // object who's attribute we want is on top of stack
+ EMIT_ARG(attr, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]), MP_EMIT_ATTR_LOAD); // attribute to get
+}
+
#if MICROPY_PY_BUILTINS_SLICE
// Compile a slice expression inside [], pushing start/stop(/step) and
// building the slice object.  Missing bounds are filled in with None.
// pns is a PN_subscript_2 (has a start) or PN_subscript_3 (no start).
STATIC void compile_subscript(compiler_t *comp, mp_parse_node_struct_t *pns) {
    if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_2) {
        compile_node(comp, pns->nodes[0]); // start of slice
        assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
        pns = (mp_parse_node_struct_t *)pns->nodes[1];
    } else {
        // pns is a PN_subscript_3, load None for start of slice
        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
    }

    assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3); // should always be
    mp_parse_node_t pn = pns->nodes[0];
    if (MP_PARSE_NODE_IS_NULL(pn)) {
        // [?:]
        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
        EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
    } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
        pns = (mp_parse_node_struct_t *)pn;
        if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3c) {
            // stop bound is absent but there is a second colon
            EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
            pn = pns->nodes[0];
            if (MP_PARSE_NODE_IS_NULL(pn)) {
                // [?::]
                EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
            } else {
                // [?::x]
                compile_node(comp, pn);
                EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE);
            }
        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_subscript_3d) {
            // stop bound present, followed by a sliceop (optional step)
            compile_node(comp, pns->nodes[0]);
            assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should always be
            pns = (mp_parse_node_struct_t *)pns->nodes[1];
            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_sliceop); // should always be
            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
                // [?:x:]
                EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
            } else {
                // [?:x:x]
                compile_node(comp, pns->nodes[0]);
                EMIT_ARG(build, 3, MP_EMIT_BUILD_SLICE);
            }
        } else {
            // [?:x]
            compile_node(comp, pn);
            EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
        }
    } else {
        // [?:x]
        compile_node(comp, pn);
        EMIT_ARG(build, 2, MP_EMIT_BUILD_SLICE);
    }
}
#endif // MICROPY_PY_BUILTINS_SLICE
+
+STATIC void compile_dictorsetmaker_item(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ // if this is called then we are compiling a dict key:value pair
+ compile_node(comp, pns->nodes[1]); // value
+ compile_node(comp, pns->nodes[0]); // key
+}
+
+STATIC void compile_classdef(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ qstr cname = compile_classdef_helper(comp, pns, comp->scope_cur->emit_options);
+ // store class object into class name
+ compile_store_id(comp, cname);
+}
+
// Compile a yield expression: bare yield, yield <expr>, or yield from <expr>.
// Only valid directly inside a function or lambda scope.
STATIC void compile_yield_expr(compiler_t *comp, mp_parse_node_struct_t *pns) {
    if (comp->scope_cur->kind != SCOPE_FUNCTION && comp->scope_cur->kind != SCOPE_LAMBDA) {
        compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'yield' outside function"));
        return;
    }
    if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
        // bare yield: yields None
        EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
        EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
        // NOTE: labels are reserved at each yield point for the native emitter
        reserve_labels_for_native(comp, 1);
    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_yield_arg_from)) {
        // yield from <expr>
        pns = (mp_parse_node_struct_t *)pns->nodes[0];
        #if MICROPY_PY_ASYNC_AWAIT
        if ((comp->scope_cur->scope_flags & MP_SCOPE_FLAG_ASYNC) != 0) {
            compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'yield from' inside async function"));
            return;
        }
        #endif
        compile_node(comp, pns->nodes[0]);
        compile_yield_from(comp);
    } else {
        // yield <expr>
        compile_node(comp, pns->nodes[0]);
        EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
        reserve_labels_for_native(comp, 1);
    }
}
+
#if MICROPY_PY_ASYNC_AWAIT
// Compile "await <expr>": compile the awaited expression as a normal atom
// expression, then call its __await__ method and delegate with yield-from.
// Only valid directly inside a function or lambda scope.
STATIC void compile_atom_expr_await(compiler_t *comp, mp_parse_node_struct_t *pns) {
    if (comp->scope_cur->kind != SCOPE_FUNCTION && comp->scope_cur->kind != SCOPE_LAMBDA) {
        compile_syntax_error(comp, (mp_parse_node_t)pns, MP_ERROR_TEXT("'await' outside function"));
        return;
    }
    compile_atom_expr_normal(comp, pns);

    // If it's an awaitable thing, need to reach for the __await__ method for the coroutine.
    // async def functions' __await__ return themselves, which are able to receive a send(),
    // while other types with custom __await__ implementations return async generators.
    EMIT_ARG(load_method, MP_QSTR___await__, false);
    EMIT_ARG(call_method, 0, 0, 0);
    EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
    EMIT_ARG(yield, MP_EMIT_YIELD_FROM);
    // NOTE: labels are reserved for the native emitter's yield handling
    reserve_labels_for_native(comp, 3);
}
#endif
+
+STATIC mp_obj_t get_const_object(mp_parse_node_struct_t *pns) {
+ #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+ // nodes are 32-bit pointers, but need to extract 64-bit object
+ return (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32);
+ #else
+ return (mp_obj_t)pns->nodes[0];
+ #endif
+}
+
+STATIC void compile_const_object(compiler_t *comp, mp_parse_node_struct_t *pns) {
+ EMIT_ARG(load_const_obj, get_const_object(pns));
+}
+
typedef void (*compile_function_t)(compiler_t *, mp_parse_node_struct_t *);
// Dispatch table indexed by parse-node (grammar rule) kind; see compile_node.
// Entries are generated by expanding py/grammar.h: DEF_RULE contributes the
// rule's compile function (via the c() macro), DEF_RULE_NC contributes
// nothing.  The final explicit entry handles PN_const_object nodes.
STATIC const compile_function_t compile_function[] = {
// only define rules with a compile function
#define c(f) compile_##f
#define DEF_RULE(rule, comp, kind, ...) comp,
#define DEF_RULE_NC(rule, kind, ...)
    #include "py/grammar.h"
#undef c
#undef DEF_RULE
#undef DEF_RULE_NC
    compile_const_object,
};
+
// Compile any parse node: NULL is a no-op, small ints and leaves (ids,
// strings, bytes, tokens) are compiled inline here, and struct nodes are
// dispatched through the compile_function table by rule kind.
STATIC void compile_node(compiler_t *comp, mp_parse_node_t pn) {
    if (MP_PARSE_NODE_IS_NULL(pn)) {
        // pass
    } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
        mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn);
        #if MICROPY_DYNAMIC_COMPILER
        // mask of the bits that must be all-0 or all-1 for the value to fit
        // in the target runtime's small int width
        mp_uint_t sign_mask = -((mp_uint_t)1 << (mp_dynamic_compiler.small_int_bits - 1));
        if ((arg & sign_mask) == 0 || (arg & sign_mask) == sign_mask) {
            // integer fits in target runtime's small-int
            EMIT_ARG(load_const_small_int, arg);
        } else {
            // integer doesn't fit, so create a multi-precision int object
            // (but only create the actual object on the last pass)
            if (comp->pass != MP_PASS_EMIT) {
                EMIT_ARG(load_const_obj, mp_const_none);
            } else {
                EMIT_ARG(load_const_obj, mp_obj_new_int_from_ll(arg));
            }
        }
        #else
        EMIT_ARG(load_const_small_int, arg);
        #endif
    } else if (MP_PARSE_NODE_IS_LEAF(pn)) {
        uintptr_t arg = MP_PARSE_NODE_LEAF_ARG(pn);
        switch (MP_PARSE_NODE_LEAF_KIND(pn)) {
            case MP_PARSE_NODE_ID:
                compile_load_id(comp, arg);
                break;
            case MP_PARSE_NODE_STRING:
                EMIT_ARG(load_const_str, arg);
                break;
            case MP_PARSE_NODE_BYTES:
                // only create and load the actual bytes object on the last pass
                if (comp->pass != MP_PASS_EMIT) {
                    EMIT_ARG(load_const_obj, mp_const_none);
                } else {
                    size_t len;
                    const byte *data = qstr_data(arg, &len);
                    EMIT_ARG(load_const_obj, mp_obj_new_bytes(data, len));
                }
                break;
            case MP_PARSE_NODE_TOKEN:
            default:
                if (arg == MP_TOKEN_NEWLINE) {
                    // this can occur when file_input lets through a NEWLINE (eg if file starts with a newline)
                    // or when single_input lets through a NEWLINE (user enters a blank line)
                    // do nothing
                } else {
                    EMIT_ARG(load_const_tok, arg);
                }
                break;
        }
    } else {
        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
        EMIT_ARG(set_source_line, pns->source_line);
        assert(MP_PARSE_NODE_STRUCT_KIND(pns) <= PN_const_object);
        compile_function_t f = compile_function[MP_PARSE_NODE_STRUCT_KIND(pns)];
        f(comp, pns);
    }
}
+
#if MICROPY_EMIT_NATIVE
// Resolve a viper type annotation (must be a single identifier, or absent)
// to an MP_NATIVE_TYPE_* value via mp_native_type_from_qstr.  An absent
// annotation defaults to MP_NATIVE_TYPE_OBJ.  An unknown type name stores a
// ViperTypeError in comp->compile_error and returns 0.
STATIC int compile_viper_type_annotation(compiler_t *comp, mp_parse_node_t pn_annotation) {
    int native_type = MP_NATIVE_TYPE_OBJ;
    if (MP_PARSE_NODE_IS_NULL(pn_annotation)) {
        // No annotation, type defaults to object
    } else if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
        qstr type_name = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
        native_type = mp_native_type_from_qstr(type_name);
        if (native_type < 0) {
            comp->compile_error = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, MP_ERROR_TEXT("unknown type '%q'"), type_name);
            native_type = 0;
        }
    } else {
        compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("annotation must be an identifier"));
    }
    return native_type;
}
#endif
+
// Process one parameter of a function or lambda during the scope pass:
// registers it in the scope's id_info, updates num_pos_args/num_kwonly_args,
// and sets the VARARGS/VARKEYWORDS scope flags for * and ** parameters.
// pn_name/pn_star/pn_dbl_star select the grammar rules for typed (def)
// versus untyped (lambda) parameter lists; comp->have_star tracks whether a
// bare/named * has already been seen (making later params keyword-only).
STATIC void compile_scope_func_lambda_param(compiler_t *comp, mp_parse_node_t pn, pn_kind_t pn_name, pn_kind_t pn_star, pn_kind_t pn_dbl_star) {
    (void)pn_dbl_star;

    // check that **kw is last
    if ((comp->scope_cur->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) != 0) {
        compile_syntax_error(comp, pn, MP_ERROR_TEXT("invalid syntax"));
        return;
    }

    qstr param_name = MP_QSTRnull;
    uint param_flag = ID_FLAG_IS_PARAM;
    mp_parse_node_struct_t *pns = NULL;
    if (MP_PARSE_NODE_IS_ID(pn)) {
        // bare name with no annotation
        param_name = MP_PARSE_NODE_LEAF_ARG(pn);
        if (comp->have_star) {
            // comes after a star, so counts as a keyword-only parameter
            comp->scope_cur->num_kwonly_args += 1;
        } else {
            // comes before a star, so counts as a positional parameter
            comp->scope_cur->num_pos_args += 1;
        }
    } else {
        assert(MP_PARSE_NODE_IS_STRUCT(pn));
        pns = (mp_parse_node_struct_t *)pn;
        if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_name) {
            // named parameter with possible annotation
            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
            if (comp->have_star) {
                // comes after a star, so counts as a keyword-only parameter
                comp->scope_cur->num_kwonly_args += 1;
            } else {
                // comes before a star, so counts as a positional parameter
                comp->scope_cur->num_pos_args += 1;
            }
        } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == pn_star) {
            if (comp->have_star) {
                // more than one star
                compile_syntax_error(comp, pn, MP_ERROR_TEXT("invalid syntax"));
                return;
            }
            comp->have_star = true;
            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_STAR_PARAM;
            if (MP_PARSE_NODE_IS_NULL(pns->nodes[0])) {
                // bare star
                // TODO see http://www.python.org/dev/peps/pep-3102/
                // assert(comp->scope_cur->num_dict_params == 0);
                pns = NULL;
            } else if (MP_PARSE_NODE_IS_ID(pns->nodes[0])) {
                // named star
                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
                pns = NULL;
            } else {
                assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_tfpdef)); // should be
                // named star with possible annotation
                comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARARGS;
                pns = (mp_parse_node_struct_t *)pns->nodes[0];
                param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
            }
        } else {
            // double star with possible annotation
            assert(MP_PARSE_NODE_STRUCT_KIND(pns) == pn_dbl_star); // should be
            param_name = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]);
            param_flag = ID_FLAG_IS_PARAM | ID_FLAG_IS_DBL_STAR_PARAM;
            comp->scope_cur->scope_flags |= MP_SCOPE_FLAG_VARKEYWORDS;
        }
    }

    if (param_name != MP_QSTRnull) {
        id_info_t *id_info = scope_find_or_add_id(comp->scope_cur, param_name, ID_INFO_KIND_UNDECIDED);
        if (id_info->kind != ID_INFO_KIND_UNDECIDED) {
            // the name was already registered by an earlier parameter
            compile_syntax_error(comp, pn, MP_ERROR_TEXT("argument name reused"));
            return;
        }
        id_info->kind = ID_INFO_KIND_LOCAL;
        id_info->flags = param_flag;

        #if MICROPY_EMIT_NATIVE
        // for viper code, fold the parameter's type annotation into its id flags
        if (comp->scope_cur->emit_options == MP_EMIT_OPT_VIPER && pn_name == PN_typedargslist_name && pns != NULL) {
            id_info->flags |= compile_viper_type_annotation(comp, pns->nodes[1]) << ID_FLAG_VIPER_TYPE_POS;
        }
        #else
        (void)pns;
        #endif
    }
}
+
+STATIC void compile_scope_func_param(compiler_t *comp, mp_parse_node_t pn) {
+ compile_scope_func_lambda_param(comp, pn, PN_typedargslist_name, PN_typedargslist_star, PN_typedargslist_dbl_star);
+}
+
+STATIC void compile_scope_lambda_param(compiler_t *comp, mp_parse_node_t pn) {
+ compile_scope_func_lambda_param(comp, pn, PN_varargslist_name, PN_varargslist_star, PN_varargslist_dbl_star);
+}
+
// Recursively compile the for/if clause chain of a comprehension body.
// pn_inner_expr is the expression on the left of the comprehension and
// for_depth counts the nesting of for clauses (0 for the outermost).
// Nested "if" clauses use goto tail_recursion to avoid growing the C stack.
STATIC void compile_scope_comp_iter(compiler_t *comp, mp_parse_node_struct_t *pns_comp_for, mp_parse_node_t pn_inner_expr, int for_depth) {
    uint l_top = comp_next_label(comp);
    uint l_end = comp_next_label(comp);
    EMIT_ARG(label_assign, l_top);
    EMIT_ARG(for_iter, l_end);
    // bind the loop variable(s) for this for clause
    c_assign(comp, pns_comp_for->nodes[0], ASSIGN_STORE);
    mp_parse_node_t pn_iter = pns_comp_for->nodes[2];

tail_recursion:
    if (MP_PARSE_NODE_IS_NULL(pn_iter)) {
        // no more nested if/for; compile inner expression
        compile_node(comp, pn_inner_expr);
        if (comp->scope_cur->kind == SCOPE_GEN_EXPR) {
            EMIT_ARG(yield, MP_EMIT_YIELD_VALUE);
            reserve_labels_for_native(comp, 1);
            EMIT(pop_top);
        } else {
            // stack offset back to the collection being built; each active
            // iterator occupies 4 slots (MP_OBJ_ITER_BUF_NSLOTS) —
            // NOTE(review): confirm the +5 constant against the emitter's
            // store_comp implementation
            EMIT_ARG(store_comp, comp->scope_cur->kind, 4 * for_depth + 5);
        }
    } else if (MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_iter) == PN_comp_if) {
        // if condition
        mp_parse_node_struct_t *pns_comp_if = (mp_parse_node_struct_t *)pn_iter;
        c_if_cond(comp, pns_comp_if->nodes[0], false, l_top);
        pn_iter = pns_comp_if->nodes[1];
        goto tail_recursion;
    } else {
        assert(MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)pn_iter) == PN_comp_for); // should be
        // for loop
        mp_parse_node_struct_t *pns_comp_for2 = (mp_parse_node_struct_t *)pn_iter;
        compile_node(comp, pns_comp_for2->nodes[1]); // the nested iterable
        EMIT_ARG(get_iter, true);
        compile_scope_comp_iter(comp, pns_comp_for2, pn_inner_expr, for_depth + 1);
    }

    EMIT_ARG(jump, l_top);
    EMIT_ARG(label_assign, l_end);
    EMIT(for_iter_end);
}
+
// If the first statement of the given body is a string constant, compile it
// and store it as __doc__ in the current scope (PEP 257 doc strings).
// Compiled out entirely when MICROPY_ENABLE_DOC_STRING is disabled.
STATIC void check_for_doc_string(compiler_t *comp, mp_parse_node_t pn) {
    #if MICROPY_ENABLE_DOC_STRING
    // see http://www.python.org/dev/peps/pep-0257/

    // look for the first statement
    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
        // a statement; fall through
    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_file_input_2)) {
        // file input; find the first non-newline node
        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
        int num_nodes = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
        for (int i = 0; i < num_nodes; i++) {
            pn = pns->nodes[i];
            if (!(MP_PARSE_NODE_IS_LEAF(pn) && MP_PARSE_NODE_LEAF_KIND(pn) == MP_PARSE_NODE_TOKEN && MP_PARSE_NODE_LEAF_ARG(pn) == MP_TOKEN_NEWLINE)) {
                // not a newline, so this is the first statement; finish search
                break;
            }
        }
        // if we didn't find a non-newline then it's okay to fall through; pn will be a newline and so doc-string test below will fail gracefully
    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_suite_block_stmts)) {
        // a list of statements; get the first one
        pn = ((mp_parse_node_struct_t *)pn)->nodes[0];
    } else {
        return;
    }

    // check the first statement for a doc string
    // (either a plain string leaf, or a pre-made constant string object)
    if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_expr_stmt)) {
        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
        if ((MP_PARSE_NODE_IS_LEAF(pns->nodes[0])
             && MP_PARSE_NODE_LEAF_KIND(pns->nodes[0]) == MP_PARSE_NODE_STRING)
            || (MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_const_object)
                && mp_obj_is_str(get_const_object((mp_parse_node_struct_t *)pns->nodes[0])))) {
            // compile the doc string
            compile_node(comp, pns->nodes[0]);
            // store the doc string
            compile_store_id(comp, MP_QSTR___doc__);
        }
    }
    #else
    (void)comp;
    (void)pn;
    #endif
}
+
+STATIC void compile_scope(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+ comp->pass = pass;
+ comp->scope_cur = scope;
+ comp->next_label = 0;
+ EMIT_ARG(start_pass, pass, scope);
+ reserve_labels_for_native(comp, 6); // used by native's start_pass
+
+ if (comp->pass == MP_PASS_SCOPE) {
+ // reset maximum stack sizes in scope
+ // they will be computed in this first pass
+ scope->stack_size = 0;
+ scope->exc_stack_size = 0;
+ }
+
+ // compile
+ if (MP_PARSE_NODE_IS_STRUCT_KIND(scope->pn, PN_eval_input)) {
+ assert(scope->kind == SCOPE_MODULE);
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ compile_node(comp, pns->nodes[0]); // compile the expression
+ EMIT(return_value);
+ } else if (scope->kind == SCOPE_MODULE) {
+ if (!comp->is_repl) {
+ check_for_doc_string(comp, scope->pn);
+ }
+ compile_node(comp, scope->pn);
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ EMIT(return_value);
+ } else if (scope->kind == SCOPE_FUNCTION) {
+ assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+ // work out number of parameters, keywords and default parameters, and add them to the id_info array
+ // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+ if (comp->pass == MP_PASS_SCOPE) {
+ comp->have_star = false;
+ apply_to_single_or_list(comp, pns->nodes[1], PN_typedargslist, compile_scope_func_param);
+
+ #if MICROPY_EMIT_NATIVE
+ if (scope->emit_options == MP_EMIT_OPT_VIPER) {
+ // Compile return type; pns->nodes[2] is return/whole function annotation
+ scope->scope_flags |= compile_viper_type_annotation(comp, pns->nodes[2]) << MP_SCOPE_FLAG_VIPERRET_POS;
+ }
+ #endif // MICROPY_EMIT_NATIVE
+ }
+
+ compile_node(comp, pns->nodes[3]); // 3 is function body
+ // emit return if it wasn't the last opcode
+ if (!EMIT(last_emit_was_return_value)) {
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ EMIT(return_value);
+ }
+ } else if (scope->kind == SCOPE_LAMBDA) {
+ assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 3);
+
+ // Set the source line number for the start of the lambda
+ EMIT_ARG(set_source_line, pns->source_line);
+
+ // work out number of parameters, keywords and default parameters, and add them to the id_info array
+ // must be done before compiling the body so that arguments are numbered first (for LOAD_FAST etc)
+ if (comp->pass == MP_PASS_SCOPE) {
+ comp->have_star = false;
+ apply_to_single_or_list(comp, pns->nodes[0], PN_varargslist, compile_scope_lambda_param);
+ }
+
+ compile_node(comp, pns->nodes[1]); // 1 is lambda body
+
+ // if the lambda is a generator, then we return None, not the result of the expression of the lambda
+ if (scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ EMIT(pop_top);
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ }
+ EMIT(return_value);
+ } else if (SCOPE_IS_COMP_LIKE(scope->kind)) {
+ // a bit of a hack at the moment
+
+ assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 2);
+ assert(MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[1], PN_comp_for));
+ mp_parse_node_struct_t *pns_comp_for = (mp_parse_node_struct_t *)pns->nodes[1];
+
+ // We need a unique name for the comprehension argument (the iterator).
+ // CPython uses .0, but we should be able to use anything that won't
+ // clash with a user defined variable. Best to use an existing qstr,
+ // so we use the blank qstr.
+ qstr qstr_arg = MP_QSTR_;
+ if (comp->pass == MP_PASS_SCOPE) {
+ scope_find_or_add_id(comp->scope_cur, qstr_arg, ID_INFO_KIND_LOCAL);
+ scope->num_pos_args = 1;
+ }
+
+ // Set the source line number for the start of the comprehension
+ EMIT_ARG(set_source_line, pns->source_line);
+
+ if (scope->kind == SCOPE_LIST_COMP) {
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_LIST);
+ } else if (scope->kind == SCOPE_DICT_COMP) {
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_MAP);
+ #if MICROPY_PY_BUILTINS_SET
+ } else if (scope->kind == SCOPE_SET_COMP) {
+ EMIT_ARG(build, 0, MP_EMIT_BUILD_SET);
+ #endif
+ }
+
+ // There are 4 slots on the stack for the iterator, and the first one is
+ // NULL to indicate that the second one points to the iterator object.
+ if (scope->kind == SCOPE_GEN_EXPR) {
+ MP_STATIC_ASSERT(MP_OBJ_ITER_BUF_NSLOTS == 4);
+ EMIT(load_null);
+ compile_load_id(comp, qstr_arg);
+ EMIT(load_null);
+ EMIT(load_null);
+ } else {
+ compile_load_id(comp, qstr_arg);
+ EMIT_ARG(get_iter, true);
+ }
+
+ compile_scope_comp_iter(comp, pns_comp_for, pns->nodes[0], 0);
+
+ if (scope->kind == SCOPE_GEN_EXPR) {
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ }
+ EMIT(return_value);
+ } else {
+ assert(scope->kind == SCOPE_CLASS);
+ assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_classdef);
+
+ if (comp->pass == MP_PASS_SCOPE) {
+ scope_find_or_add_id(scope, MP_QSTR___class__, ID_INFO_KIND_LOCAL);
+ }
+
+ #if MICROPY_PY_SYS_SETTRACE
+ EMIT_ARG(set_source_line, pns->source_line);
+ #endif
+ compile_load_id(comp, MP_QSTR___name__);
+ compile_store_id(comp, MP_QSTR___module__);
+ EMIT_ARG(load_const_str, MP_PARSE_NODE_LEAF_ARG(pns->nodes[0])); // 0 is class name
+ compile_store_id(comp, MP_QSTR___qualname__);
+
+ check_for_doc_string(comp, pns->nodes[2]);
+ compile_node(comp, pns->nodes[2]); // 2 is class body
+
+ id_info_t *id = scope_find(scope, MP_QSTR___class__);
+ assert(id != NULL);
+ if (id->kind == ID_INFO_KIND_LOCAL) {
+ EMIT_ARG(load_const_tok, MP_TOKEN_KW_NONE);
+ } else {
+ EMIT_LOAD_FAST(MP_QSTR___class__, id->local_num);
+ }
+ EMIT(return_value);
+ }
+
+ EMIT(end_pass);
+
+ // make sure we match all the exception levels
+ assert(comp->cur_except_level == 0);
+}
+
+#if MICROPY_EMIT_INLINE_ASM
+// Compile a scope whose body is inline assembler: each statement must look like
+// a call op(arg, ...), which is translated to one assembler instruction (or the
+// 'label'/'align'/'data' pseudo-instructions handled directly here).
+// requires 3 passes: SCOPE, CODE_SIZE, EMIT
+STATIC void compile_scope_inline_asm(compiler_t *comp, scope_t *scope, pass_kind_t pass) {
+ comp->pass = pass;
+ comp->scope_cur = scope;
+ comp->next_label = 0;
+
+ // only a plain function can hold inline assembler
+ if (scope->kind != SCOPE_FUNCTION) {
+ compile_syntax_error(comp, MP_PARSE_NODE_NULL, MP_ERROR_TEXT("inline assembler must be a function"));
+ return;
+ }
+
+ if (comp->pass > MP_PASS_SCOPE) {
+ EMIT_INLINE_ASM_ARG(start_pass, comp->pass, &comp->compile_error);
+ }
+
+ // get the function definition parse node
+ assert(MP_PARSE_NODE_IS_STRUCT(scope->pn));
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)scope->pn;
+ assert(MP_PARSE_NODE_STRUCT_KIND(pns) == PN_funcdef);
+
+ // qstr f_id = MP_PARSE_NODE_LEAF_ARG(pns->nodes[0]); // function name
+
+ // parameters are in pns->nodes[1]
+ if (comp->pass == MP_PASS_CODE_SIZE) {
+ mp_parse_node_t *pn_params;
+ size_t n_params = mp_parse_node_extract_list(&pns->nodes[1], PN_typedargslist, &pn_params);
+ scope->num_pos_args = EMIT_INLINE_ASM_ARG(count_params, n_params, pn_params);
+ if (comp->compile_error != MP_OBJ_NULL) {
+ goto inline_asm_error;
+ }
+ }
+
+ // pns->nodes[2] is function return annotation
+ // map the annotation identifier (if any) onto a native type signature; default is int
+ mp_uint_t type_sig = MP_NATIVE_TYPE_INT;
+ mp_parse_node_t pn_annotation = pns->nodes[2];
+ if (!MP_PARSE_NODE_IS_NULL(pn_annotation)) {
+ // nodes[2] can be null or a test-expr
+ if (MP_PARSE_NODE_IS_ID(pn_annotation)) {
+ qstr ret_type = MP_PARSE_NODE_LEAF_ARG(pn_annotation);
+ switch (ret_type) {
+ case MP_QSTR_object:
+ type_sig = MP_NATIVE_TYPE_OBJ;
+ break;
+ case MP_QSTR_bool:
+ type_sig = MP_NATIVE_TYPE_BOOL;
+ break;
+ case MP_QSTR_int:
+ type_sig = MP_NATIVE_TYPE_INT;
+ break;
+ case MP_QSTR_uint:
+ type_sig = MP_NATIVE_TYPE_UINT;
+ break;
+ default:
+ compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("unknown type"));
+ return;
+ }
+ } else {
+ compile_syntax_error(comp, pn_annotation, MP_ERROR_TEXT("return annotation must be an identifier"));
+ }
+ }
+
+ mp_parse_node_t pn_body = pns->nodes[3]; // body
+ mp_parse_node_t *nodes;
+ size_t num = mp_parse_node_extract_list(&pn_body, PN_suite_block_stmts, &nodes);
+
+ // validate and emit each statement of the body
+ for (size_t i = 0; i < num; i++) {
+ assert(MP_PARSE_NODE_IS_STRUCT(nodes[i]));
+ mp_parse_node_struct_t *pns2 = (mp_parse_node_struct_t *)nodes[i];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns2) == PN_pass_stmt) {
+ // no instructions
+ continue;
+ } else if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_expr_stmt) {
+ // not an instruction; error
+ not_an_instruction:
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("expecting an assembler instruction"));
+ return;
+ }
+
+ // check structure of parse node: must be a bare call ID(...) with no trailing part
+ assert(MP_PARSE_NODE_IS_STRUCT(pns2->nodes[0]));
+ if (!MP_PARSE_NODE_IS_NULL(pns2->nodes[1])) {
+ goto not_an_instruction;
+ }
+ pns2 = (mp_parse_node_struct_t *)pns2->nodes[0];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns2) != PN_atom_expr_normal) {
+ goto not_an_instruction;
+ }
+ if (!MP_PARSE_NODE_IS_ID(pns2->nodes[0])) {
+ goto not_an_instruction;
+ }
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns2->nodes[1], PN_trailer_paren)) {
+ goto not_an_instruction;
+ }
+
+ // parse node looks like an instruction
+ // get instruction name and args
+ qstr op = MP_PARSE_NODE_LEAF_ARG(pns2->nodes[0]);
+ pns2 = (mp_parse_node_struct_t *)pns2->nodes[1]; // PN_trailer_paren
+ mp_parse_node_t *pn_arg;
+ size_t n_args = mp_parse_node_extract_list(&pns2->nodes[0], PN_arglist, &pn_arg);
+
+ // emit instructions
+ // 'label'/'align'/'data' are pseudo-instructions handled here; anything
+ // else is dispatched to the architecture-specific inline-asm emitter
+ if (op == MP_QSTR_label) {
+ if (!(n_args == 1 && MP_PARSE_NODE_IS_ID(pn_arg[0]))) {
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'label' requires 1 argument"));
+ return;
+ }
+ uint lab = comp_next_label(comp);
+ if (pass > MP_PASS_SCOPE) {
+ if (!EMIT_INLINE_ASM_ARG(label, lab, MP_PARSE_NODE_LEAF_ARG(pn_arg[0]))) {
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("label redefined"));
+ return;
+ }
+ }
+ } else if (op == MP_QSTR_align) {
+ if (!(n_args == 1 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'align' requires 1 argument"));
+ return;
+ }
+ if (pass > MP_PASS_SCOPE) {
+ mp_asm_base_align((mp_asm_base_t *)comp->emit_inline_asm,
+ MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]));
+ }
+ } else if (op == MP_QSTR_data) {
+ if (!(n_args >= 2 && MP_PARSE_NODE_IS_SMALL_INT(pn_arg[0]))) {
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'data' requires at least 2 arguments"));
+ return;
+ }
+ if (pass > MP_PASS_SCOPE) {
+ // first arg is the element size in bytes, remaining args are the values
+ mp_int_t bytesize = MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[0]);
+ for (uint j = 1; j < n_args; j++) {
+ if (!MP_PARSE_NODE_IS_SMALL_INT(pn_arg[j])) {
+ compile_syntax_error(comp, nodes[i], MP_ERROR_TEXT("'data' requires integer arguments"));
+ return;
+ }
+ mp_asm_base_data((mp_asm_base_t *)comp->emit_inline_asm,
+ bytesize, MP_PARSE_NODE_LEAF_SMALL_INT(pn_arg[j]));
+ }
+ }
+ } else {
+ if (pass > MP_PASS_SCOPE) {
+ EMIT_INLINE_ASM_ARG(op, op, n_args, pn_arg);
+ }
+ }
+
+ if (comp->compile_error != MP_OBJ_NULL) {
+ pns = pns2; // this is the parse node that had the error
+ goto inline_asm_error;
+ }
+ }
+
+ if (comp->pass > MP_PASS_SCOPE) {
+ EMIT_INLINE_ASM_ARG(end_pass, type_sig);
+
+ if (comp->pass == MP_PASS_EMIT) {
+ // hand the assembled machine code over to the emit glue
+ void *f = mp_asm_base_get_code((mp_asm_base_t *)comp->emit_inline_asm);
+ mp_emit_glue_assign_native(comp->scope_cur->raw_code, MP_CODE_NATIVE_ASM,
+ f, mp_asm_base_get_code_size((mp_asm_base_t *)comp->emit_inline_asm),
+ NULL,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ 0, 0, 0, 0, NULL,
+ #endif
+ comp->scope_cur->num_pos_args, 0, type_sig);
+ }
+ }
+
+ if (comp->compile_error != MP_OBJ_NULL) {
+ // inline assembler had an error; set line for its exception
+ inline_asm_error:
+ comp->compile_error_line = pns->source_line;
+ }
+}
+#endif
+
+// Finalise identifier layout for a scope after the SCOPE pass:
+// position the *param, convert implicit globals to explicit in functions,
+// then assign local_num slots in the order: free vars, params/locals, cells.
+STATIC void scope_compute_things(scope_t *scope) {
+ // in MicroPython we put the *x parameter after all other parameters (except **y)
+ if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+ id_info_t *id_param = NULL;
+ for (int i = scope->id_info_len - 1; i >= 0; i--) {
+ id_info_t *id = &scope->id_info[i];
+ if (id->flags & ID_FLAG_IS_STAR_PARAM) {
+ if (id_param != NULL) {
+ // swap star param with last param
+ id_info_t temp = *id_param;
+ *id_param = *id;
+ *id = temp;
+ }
+ break;
+ } else if (id_param == NULL && id->flags == ID_FLAG_IS_PARAM) {
+ // note: exact equality — only a plain param (no other flags set)
+ // is a candidate for the swap above
+ id_param = id;
+ }
+ }
+ }
+
+ // in functions, turn implicit globals into explicit globals
+ // compute the index of each local
+ scope->num_locals = 0;
+ for (int i = 0; i < scope->id_info_len; i++) {
+ id_info_t *id = &scope->id_info[i];
+ if (scope->kind == SCOPE_CLASS && id->qst == MP_QSTR___class__) {
+ // __class__ is not counted as a local; if it's used then it becomes a ID_INFO_KIND_CELL
+ continue;
+ }
+ if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+ id->kind = ID_INFO_KIND_GLOBAL_EXPLICIT;
+ }
+ #if MICROPY_EMIT_NATIVE
+ if (id->kind == ID_INFO_KIND_GLOBAL_EXPLICIT) {
+ // This function makes a reference to a global variable
+ if (scope->emit_options == MP_EMIT_OPT_VIPER
+ && mp_native_type_from_qstr(id->qst) >= MP_NATIVE_TYPE_INT) {
+ // A casting operator in viper mode, not a real global reference
+ } else {
+ scope->scope_flags |= MP_SCOPE_FLAG_REFGLOBALS;
+ }
+ }
+ #endif
+ // params always count for 1 local, even if they are a cell
+ if (id->kind == ID_INFO_KIND_LOCAL || (id->flags & ID_FLAG_IS_PARAM)) {
+ id->local_num = scope->num_locals++;
+ }
+ }
+
+ // compute the index of cell vars
+ for (int i = 0; i < scope->id_info_len; i++) {
+ id_info_t *id = &scope->id_info[i];
+ // in MicroPython the cells come right after the fast locals
+ // parameters are not counted here, since they remain at the start
+ // of the locals, even if they are cell vars
+ if (id->kind == ID_INFO_KIND_CELL && !(id->flags & ID_FLAG_IS_PARAM)) {
+ id->local_num = scope->num_locals;
+ scope->num_locals += 1;
+ }
+ }
+
+ // compute the index of free vars
+ // make sure they are in the order of the parent scope
+ if (scope->parent != NULL) {
+ int num_free = 0;
+ for (int i = 0; i < scope->parent->id_info_len; i++) {
+ id_info_t *id = &scope->parent->id_info[i];
+ if (id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE) {
+ for (int j = 0; j < scope->id_info_len; j++) {
+ id_info_t *id2 = &scope->id_info[j];
+ if (id2->kind == ID_INFO_KIND_FREE && id->qst == id2->qst) {
+ assert(!(id2->flags & ID_FLAG_IS_PARAM)); // free vars should not be params
+ // in MicroPython the frees come first, before the params
+ id2->local_num = num_free;
+ num_free += 1;
+ }
+ }
+ }
+ }
+ // in MicroPython shift all other locals after the free locals
+ if (num_free > 0) {
+ for (int i = 0; i < scope->id_info_len; i++) {
+ id_info_t *id = &scope->id_info[i];
+ if (id->kind != ID_INFO_KIND_FREE || (id->flags & ID_FLAG_IS_PARAM)) {
+ id->local_num += num_free;
+ }
+ }
+ scope->num_pos_args += num_free; // free vars are counted as params for passing them into the function
+ scope->num_locals += num_free;
+ }
+ }
+}
+
+// Compile a whole parse tree to raw code.
+// Runs the SCOPE pass over every scope, computes identifier layout, then runs
+// the STACK_SIZE, CODE_SIZE and EMIT passes per scope with the appropriate
+// emitter (bytecode, native, or inline assembler).  Always frees the emitters,
+// scopes and the parse tree; raises the stored compile error via nlr_raise on
+// failure, otherwise returns the module's raw code.
+#if !MICROPY_PERSISTENT_CODE_SAVE
+STATIC
+#endif
+mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl) {
+ // put compiler state on the stack, it's relatively small
+ compiler_t comp_state = {0};
+ compiler_t *comp = &comp_state;
+
+ comp->source_file = source_file;
+ comp->is_repl = is_repl;
+ comp->break_label = INVALID_LABEL;
+ comp->continue_label = INVALID_LABEL;
+
+ // create the module scope
+ #if MICROPY_EMIT_NATIVE
+ const uint emit_opt = MP_STATE_VM(default_emit_opt);
+ #else
+ const uint emit_opt = MP_EMIT_OPT_NONE;
+ #endif
+ scope_t *module_scope = scope_new_and_link(comp, SCOPE_MODULE, parse_tree->root, emit_opt);
+
+ // create standard emitter; it's used at least for MP_PASS_SCOPE
+ emit_t *emit_bc = emit_bc_new();
+
+ // compile pass 1
+ // note: compiling a scope may append new scopes to the list, so this loop
+ // also visits scopes discovered while it runs
+ comp->emit = emit_bc;
+ #if MICROPY_EMIT_NATIVE
+ comp->emit_method_table = &emit_bc_method_table;
+ #endif
+ uint max_num_labels = 0;
+ for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+ #if MICROPY_EMIT_INLINE_ASM
+ if (s->emit_options == MP_EMIT_OPT_ASM) {
+ compile_scope_inline_asm(comp, s, MP_PASS_SCOPE);
+ } else
+ #endif
+ {
+ compile_scope(comp, s, MP_PASS_SCOPE);
+
+ // Check if any implicitly declared variables should be closed over
+ for (size_t i = 0; i < s->id_info_len; ++i) {
+ id_info_t *id = &s->id_info[i];
+ if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+ scope_check_to_close_over(s, id);
+ }
+ }
+ }
+
+ // update maximum number of labels needed
+ if (comp->next_label > max_num_labels) {
+ max_num_labels = comp->next_label;
+ }
+ }
+
+ // compute some things related to scope and identifiers
+ for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+ scope_compute_things(s);
+ }
+
+ // set max number of labels now that it's calculated
+ emit_bc_set_max_num_labels(emit_bc, max_num_labels);
+
+ // compile pass 2 and 3
+ #if MICROPY_EMIT_NATIVE
+ emit_t *emit_native = NULL;
+ #endif
+ for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
+ #if MICROPY_EMIT_INLINE_ASM
+ if (s->emit_options == MP_EMIT_OPT_ASM) {
+ // inline assembly
+ if (comp->emit_inline_asm == NULL) {
+ comp->emit_inline_asm = ASM_EMITTER(new)(max_num_labels);
+ }
+ comp->emit = NULL;
+ comp->emit_inline_asm_method_table = ASM_EMITTER_TABLE;
+ compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
+ #if MICROPY_EMIT_INLINE_XTENSA
+ // Xtensa requires an extra pass to compute size of l32r const table
+ // TODO this can be improved by calculating it during SCOPE pass
+ // but that requires some other structural changes to the asm emitters
+ #if MICROPY_DYNAMIC_COMPILER
+ if (mp_dynamic_compiler.native_arch == MP_NATIVE_ARCH_XTENSA)
+ #endif
+ {
+ compile_scope_inline_asm(comp, s, MP_PASS_CODE_SIZE);
+ }
+ #endif
+ if (comp->compile_error == MP_OBJ_NULL) {
+ compile_scope_inline_asm(comp, s, MP_PASS_EMIT);
+ }
+ } else
+ #endif
+ {
+
+ // choose the emit type
+
+ switch (s->emit_options) {
+
+ #if MICROPY_EMIT_NATIVE
+ case MP_EMIT_OPT_NATIVE_PYTHON:
+ case MP_EMIT_OPT_VIPER:
+ // native emitter is created lazily and shared by all native scopes
+ if (emit_native == NULL) {
+ emit_native = NATIVE_EMITTER(new)(&comp->compile_error, &comp->next_label, max_num_labels);
+ }
+ comp->emit_method_table = NATIVE_EMITTER_TABLE;
+ comp->emit = emit_native;
+ break;
+ #endif // MICROPY_EMIT_NATIVE
+
+ default:
+ comp->emit = emit_bc;
+ #if MICROPY_EMIT_NATIVE
+ comp->emit_method_table = &emit_bc_method_table;
+ #endif
+ break;
+ }
+
+ // need a pass to compute stack size
+ compile_scope(comp, s, MP_PASS_STACK_SIZE);
+
+ // second last pass: compute code size
+ if (comp->compile_error == MP_OBJ_NULL) {
+ compile_scope(comp, s, MP_PASS_CODE_SIZE);
+ }
+
+ // final pass: emit code
+ if (comp->compile_error == MP_OBJ_NULL) {
+ compile_scope(comp, s, MP_PASS_EMIT);
+ }
+ }
+ }
+
+ if (comp->compile_error != MP_OBJ_NULL) {
+ // if there is no line number for the error then use the line
+ // number for the start of this scope
+ compile_error_set_line(comp, comp->scope_cur->pn);
+ // add a traceback to the exception using relevant source info
+ mp_obj_exception_add_traceback(comp->compile_error, comp->source_file,
+ comp->compile_error_line, comp->scope_cur->simple_name);
+ }
+
+ // free the emitters
+
+ emit_bc_free(emit_bc);
+ #if MICROPY_EMIT_NATIVE
+ if (emit_native != NULL) {
+ NATIVE_EMITTER(free)(emit_native);
+ }
+ #endif
+ #if MICROPY_EMIT_INLINE_ASM
+ if (comp->emit_inline_asm != NULL) {
+ ASM_EMITTER(free)(comp->emit_inline_asm);
+ }
+ #endif
+
+ // free the parse tree
+ mp_parse_tree_clear(parse_tree);
+
+ // free the scopes
+ // (grab the module's raw code first, since it survives the scopes)
+ mp_raw_code_t *outer_raw_code = module_scope->raw_code;
+ for (scope_t *s = module_scope; s;) {
+ scope_t *next = s->next;
+ scope_free(s);
+ s = next;
+ }
+
+ if (comp->compile_error != MP_OBJ_NULL) {
+ nlr_raise(comp->compile_error);
+ } else {
+ return outer_raw_code;
+ }
+}
+
+// Compile a parse tree and wrap the result in a callable function object.
+// Raises on compile error; the parse tree is cleared by mp_compile_to_raw_code.
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl) {
+ mp_raw_code_t *rc = mp_compile_to_raw_code(parse_tree, source_file, is_repl);
+ // return function that executes the outer module
+ return mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
+}
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/compile.h b/circuitpython/py/compile.h
new file mode 100644
index 0000000..348beff
--- /dev/null
+++ b/circuitpython/py/compile.h
@@ -0,0 +1,45 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_COMPILE_H
+#define MICROPY_INCLUDED_PY_COMPILE_H
+
+#include "py/lexer.h"
+#include "py/parse.h"
+#include "py/emitglue.h"
+
+// the compiler will raise an exception if an error occurred
+// the compiler will clear the parse tree before it returns
+mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl);
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+// this has the same semantics as mp_compile
+mp_raw_code_t *mp_compile_to_raw_code(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl);
+#endif
+
+// this is implemented in runtime.c
+mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals);
+
+#endif // MICROPY_INCLUDED_PY_COMPILE_H
diff --git a/circuitpython/py/dynruntime.h b/circuitpython/py/dynruntime.h
new file mode 100644
index 0000000..608cdec
--- /dev/null
+++ b/circuitpython/py/dynruntime.h
@@ -0,0 +1,287 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_DYNRUNTIME_H
+#define MICROPY_INCLUDED_PY_DYNRUNTIME_H
+
+// This header file contains definitions to dynamically implement the static
+// MicroPython runtime API defined in py/obj.h and py/runtime.h.
+
+#include "py/nativeglue.h"
+#include "py/objstr.h"
+#include "py/objtype.h"
+
+#if !MICROPY_ENABLE_DYNRUNTIME
+#error "dynruntime.h included in non-dynamic-module build."
+#endif
+
+#undef MP_ROM_QSTR
+#undef MP_OBJ_QSTR_VALUE
+#undef MP_OBJ_NEW_QSTR
+#undef mp_const_none
+#undef mp_const_false
+#undef mp_const_true
+#undef mp_const_empty_tuple
+#undef nlr_raise
+
+/******************************************************************************/
+// Memory allocation
+
+#define m_malloc(n,_) (m_malloc_dyn((n)))
+#define m_free(ptr) (m_free_dyn((ptr)))
+#define m_realloc(ptr, new_num_bytes) (m_realloc_dyn((ptr), (new_num_bytes)))
+
+// Allocate n bytes via the runtime function table; returns NULL on failure.
+static inline void *m_malloc_dyn(size_t n) {
+ // TODO won't raise on OOM
+ return mp_fun_table.realloc_(NULL, n, false);
+}
+
+// Free a block previously obtained from m_malloc_dyn/m_realloc_dyn
+// (implemented as a realloc to size 0).
+static inline void m_free_dyn(void *ptr) {
+ mp_fun_table.realloc_(ptr, 0, false);
+}
+
+// Resize a block via the runtime function table; the true flag allows the
+// allocator to move the block.  Returns NULL on failure.
+static inline void *m_realloc_dyn(void *ptr, size_t new_num_bytes) {
+ // TODO won't raise on OOM
+ return mp_fun_table.realloc_(ptr, new_num_bytes, true);
+}
+
+/******************************************************************************/
+// Printing
+
+#define mp_plat_print (*mp_fun_table.plat_print)
+#define mp_printf(p, ...) (mp_fun_table.printf_((p), __VA_ARGS__))
+#define mp_vprintf(p, fmt, args) (mp_fun_table.vprintf_((p), (fmt), (args)))
+
+/******************************************************************************/
+// Types and objects
+
+#define MP_OBJ_NEW_QSTR(x) MP_OBJ_NEW_QSTR_##x
+
+#define mp_type_type (*mp_fun_table.type_type)
+#define mp_type_str (*mp_fun_table.type_str)
+#define mp_type_tuple (*((mp_obj_base_t *)mp_const_empty_tuple)->type)
+#define mp_type_list (*mp_fun_table.type_list)
+#define mp_type_EOFError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_EOFError)))
+#define mp_type_IndexError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_IndexError)))
+#define mp_type_KeyError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_KeyError)))
+#define mp_type_NotImplementedError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_NotImplementedError)))
+#define mp_type_RuntimeError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_RuntimeError)))
+#define mp_type_TypeError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_TypeError)))
+#define mp_type_ValueError (*(mp_obj_type_t *)(mp_load_global(MP_QSTR_ValueError)))
+
+#define mp_stream_read_obj (*mp_fun_table.stream_read_obj)
+#define mp_stream_readinto_obj (*mp_fun_table.stream_readinto_obj)
+#define mp_stream_unbuffered_readline_obj (*mp_fun_table.stream_unbuffered_readline_obj)
+#define mp_stream_write_obj (*mp_fun_table.stream_write_obj)
+
+#define mp_const_none ((mp_obj_t)mp_fun_table.const_none)
+#define mp_const_false ((mp_obj_t)mp_fun_table.const_false)
+#define mp_const_true ((mp_obj_t)mp_fun_table.const_true)
+#define mp_const_empty_tuple (mp_fun_table.new_tuple(0, NULL))
+
+#define mp_obj_new_bool(b) ((b) ? (mp_obj_t)mp_fun_table.const_true : (mp_obj_t)mp_fun_table.const_false)
+#define mp_obj_new_int(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_INT))
+#define mp_obj_new_int_from_uint(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_UINT))
+#define mp_obj_new_str(data, len) (mp_fun_table.obj_new_str((data), (len)))
+#define mp_obj_new_str_of_type(t, d, l) (mp_obj_new_str_of_type_dyn((t), (d), (l)))
+#define mp_obj_new_bytes(data, len) (mp_fun_table.obj_new_bytes((data), (len)))
+#define mp_obj_new_bytearray_by_ref(n, i) (mp_fun_table.obj_new_bytearray_by_ref((n), (i)))
+#define mp_obj_new_tuple(n, items) (mp_fun_table.new_tuple((n), (items)))
+#define mp_obj_new_list(n, items) (mp_fun_table.new_list((n), (items)))
+
+#define mp_obj_get_type(o) (mp_fun_table.obj_get_type((o)))
+#define mp_obj_cast_to_native_base(o, t) (mp_obj_cast_to_native_base_dyn((o), (t)))
+#define mp_obj_get_int(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_INT))
+#define mp_obj_get_int_truncated(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_UINT))
+#define mp_obj_str_get_str(s) (mp_obj_str_get_data_dyn((s), NULL))
+#define mp_obj_str_get_data(o, len) (mp_obj_str_get_data_dyn((o), (len)))
+#define mp_get_buffer_raise(o, bufinfo, fl) (mp_fun_table.get_buffer_raise((o), (bufinfo), (fl)))
+#define mp_get_stream_raise(s, flags) (mp_fun_table.get_stream_raise((s), (flags)))
+
+#define mp_obj_len(o) (mp_obj_len_dyn(o))
+#define mp_obj_subscr(base, index, val) (mp_fun_table.obj_subscr((base), (index), (val)))
+#define mp_obj_get_array(o, len, items) (mp_obj_get_array_dyn((o), (len), (items)))
+#define mp_obj_list_append(list, item) (mp_fun_table.list_append((list), (item)))
+
+#define mp_obj_assert_native_inited(o) (mp_fun_table.assert_native_inited((o)))
+
+// Create a str object when the requested type is str, otherwise a bytes object.
+static inline mp_obj_t mp_obj_new_str_of_type_dyn(const mp_obj_type_t *type, const byte *data, size_t len) {
+ if (type == &mp_type_str) {
+ return mp_obj_new_str((const char *)data, len);
+ } else {
+ return mp_obj_new_bytes(data, len);
+ }
+}
+
+// Cast self_in down to the given native base type.  Returns self_in if it is
+// already that type, the embedded native sub-object if its type directly
+// derives from native_type, or MP_OBJ_NULL if the cast fails.
+static inline mp_obj_t mp_obj_cast_to_native_base_dyn(mp_obj_t self_in, mp_const_obj_t native_type) {
+ const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+
+ if (MP_OBJ_FROM_PTR(self_type) == native_type) {
+ return self_in;
+ }
+ if (self_type->parent != native_type) {
+ // The self_in object is not a direct descendant of native_type, so fail the cast.
+ // This is a very simple version of mp_obj_is_subclass_fast that could be improved.
+ return MP_OBJ_NULL;
+ } else {
+ mp_obj_instance_t *self = (mp_obj_instance_t *)MP_OBJ_TO_PTR(self_in);
+ return self->subobj[0];
+ }
+}
+
+// Get the data pointer (and optionally length via l) of a str/bytes-like
+// object by going through the buffer protocol; raises if o has no buffer.
+static inline void *mp_obj_str_get_data_dyn(mp_obj_t o, size_t *l) {
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(o, &bufinfo, MP_BUFFER_READ);
+ if (l != NULL) {
+ *l = bufinfo.len;
+ }
+ return bufinfo.buf;
+}
+
+// Get the length of o by calling the builtin len() through the function table.
+static inline mp_obj_t mp_obj_len_dyn(mp_obj_t o) {
+ // If bytes implemented MP_UNARY_OP_LEN could use: mp_unary_op(MP_UNARY_OP_LEN, o)
+ return mp_fun_table.call_function_n_kw(mp_fun_table.load_name(MP_QSTR_len), 1, &o);
+}
+
+
+/******************************************************************************/
+// General runtime functions
+
+#define mp_load_name(qst) (mp_fun_table.load_name((qst)))
+#define mp_load_global(qst) (mp_fun_table.load_global((qst)))
+#define mp_load_attr(base, attr) (mp_fun_table.load_attr((base), (attr)))
+#define mp_load_method(base, attr, dest) (mp_fun_table.load_method((base), (attr), (dest)))
+#define mp_load_super_method(attr, dest) (mp_fun_table.load_super_method((attr), (dest)))
+#define mp_store_name(qst, obj) (mp_fun_table.store_name((qst), (obj)))
+#define mp_store_global(qst, obj) (mp_fun_table.store_global((qst), (obj)))
+#define mp_store_attr(base, attr, val) (mp_fun_table.store_attr((base), (attr), (val)))
+
+#define mp_unary_op(op, obj) (mp_fun_table.unary_op((op), (obj)))
+#define mp_binary_op(op, lhs, rhs) (mp_fun_table.binary_op((op), (lhs), (rhs)))
+
+#define mp_make_function_from_raw_code(rc, def_args, def_kw_args) \
+ (mp_fun_table.make_function_from_raw_code((rc), (def_args), (def_kw_args)))
+
+#define mp_call_function_n_kw(fun, n_args, n_kw, args) \
+ (mp_fun_table.call_function_n_kw((fun), (n_args) | ((n_kw) << 8), args))
+
+#define mp_arg_check_num(n_args, n_kw, n_args_min, n_args_max, takes_kw) \
+ (mp_fun_table.arg_check_num_sig((n_args), (n_kw), MP_OBJ_FUN_MAKE_SIG((n_args_min), (n_args_max), (takes_kw))))
+
+#define MP_DYNRUNTIME_INIT_ENTRY \
+ mp_obj_t old_globals = mp_fun_table.swap_globals(self->globals); \
+ mp_raw_code_t rc; \
+ rc.kind = MP_CODE_NATIVE_VIPER; \
+ rc.scope_flags = 0; \
+ rc.const_table = (void *)self->const_table; \
+ (void)rc;
+
+#define MP_DYNRUNTIME_INIT_EXIT \
+ mp_fun_table.swap_globals(old_globals); \
+ return mp_const_none;
+
+#define MP_DYNRUNTIME_MAKE_FUNCTION(f) \
+ (mp_make_function_from_raw_code((rc.fun_data = (f), &rc), MP_OBJ_NULL, MP_OBJ_NULL))
+
+// Import helpers, dispatched through the runtime function table.
+#define mp_import_name(name, fromlist, level) \
+ (mp_fun_table.import_name((name), (fromlist), (level)))
+#define mp_import_from(module, name) \
+ (mp_fun_table.import_from((module), (name)))
+#define mp_import_all(module) \
+ (mp_fun_table.import_all((module)))
+
+/******************************************************************************/
+// Exceptions
+
+#define mp_obj_new_exception(o) ((mp_obj_t)(o)) // Assumes returned object will be raised, will create instance then
+#define mp_obj_new_exception_arg1(e_type, arg) (mp_obj_new_exception_arg1_dyn((e_type), (arg)))
+
+#define nlr_raise(o) (mp_raise_dyn(o))
+#define mp_raise_type_arg(type, arg) (mp_raise_dyn(mp_obj_new_exception_arg1_dyn((type), (arg))))
+#define mp_raise_msg(type, msg) (mp_fun_table.raise_msg_str((type), (msg)))
+#define mp_raise_OSError(er) (mp_raise_OSError_dyn(er))
+#define mp_raise_NotImplementedError(msg) (mp_raise_msg(&mp_type_NotImplementedError, (msg)))
+#define mp_raise_TypeError(msg) (mp_raise_msg(&mp_type_TypeError, (msg)))
+#define mp_raise_ValueError(msg) (mp_raise_msg(&mp_type_ValueError, (msg)))
+
+// Create an exception instance by calling exc_type with a single argument.
+static inline mp_obj_t mp_obj_new_exception_arg1_dyn(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+ mp_obj_t args[1] = { arg };
+ return mp_call_function_n_kw(MP_OBJ_FROM_PTR(exc_type), 1, 0, &args[0]);
+}
+
+// Raise the given exception object via the function table.
+// mp_fun_table.raise does not return; the infinite loop satisfies NORETURN.
+static NORETURN inline void mp_raise_dyn(mp_obj_t o) {
+ mp_fun_table.raise(o);
+ for (;;) {
+ }
+}
+
+// Create an exception of exc_type with one argument and raise it.
+// mp_fun_table.raise does not return; the infinite loop satisfies NORETURN.
+static NORETURN inline void mp_raise_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+ mp_fun_table.raise(mp_obj_new_exception_arg1_dyn(exc_type, arg));
+ for (;;) {
+ }
+}
+
+
+// Raise OSError(er) by looking up the OSError type at runtime and calling it.
+static inline void mp_raise_OSError_dyn(int er) {
+ mp_obj_t args[1] = { MP_OBJ_NEW_SMALL_INT(er) };
+ nlr_raise(mp_call_function_n_kw(mp_load_global(MP_QSTR_OSError), 1, 0, &args[0]));
+}
+
+/******************************************************************************/
+// Floating point
+
+#define mp_obj_new_float_from_f(f) (mp_fun_table.obj_new_float_from_f((f)))
+#define mp_obj_new_float_from_d(d) (mp_fun_table.obj_new_float_from_d((d)))
+#define mp_obj_get_float_to_f(o) (mp_fun_table.obj_get_float_to_f((o)))
+#define mp_obj_get_float_to_d(o) (mp_fun_table.obj_get_float_to_d((o)))
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define mp_obj_new_float(f) (mp_obj_new_float_from_f((f)))
+#define mp_obj_get_float(o) (mp_obj_get_float_to_f((o)))
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define mp_obj_new_float(f) (mp_obj_new_float_from_d((f)))
+#define mp_obj_get_float(o) (mp_obj_get_float_to_d((o)))
+#endif
+
+/******************************************************************************/
+// Inline function definitions.
+
+// Extract the length and items array from a tuple or list; raises TypeError
+// for any other object.
+// *items may point inside a GC block
+static inline void mp_obj_get_array_dyn(mp_obj_t o, size_t *len, mp_obj_t **items) {
+ const mp_obj_type_t *type = mp_obj_get_type(o);
+ if (type == &mp_type_tuple) {
+ mp_obj_tuple_t *t = MP_OBJ_TO_PTR(o);
+ *len = t->len;
+ *items = &t->items[0];
+ } else if (type == &mp_type_list) {
+ mp_obj_list_t *l = MP_OBJ_TO_PTR(o);
+ *len = l->len;
+ *items = l->items;
+ } else {
+ mp_raise_TypeError("expected tuple/list");
+ }
+}
+
+#endif // MICROPY_INCLUDED_PY_DYNRUNTIME_H
diff --git a/circuitpython/py/dynruntime.mk b/circuitpython/py/dynruntime.mk
new file mode 100644
index 0000000..db06d41
--- /dev/null
+++ b/circuitpython/py/dynruntime.mk
@@ -0,0 +1,143 @@
+# Makefile fragment for generating native .mpy files from C source
+# MPY_DIR must be set to the top of the MicroPython source tree
+
+BUILD ?= build
+
+# Host tools; MPY_LD links object files into a native .mpy, MPY_TOOL merges
+# intermediate .mpy files, MPY_CROSS compiles .py sources to bytecode .mpy.
+ECHO = @echo
+RM = /bin/rm
+MKDIR = /bin/mkdir
+PYTHON = python3
+MPY_CROSS = $(MPY_DIR)/mpy-cross/mpy-cross
+MPY_TOOL = $(PYTHON) $(MPY_DIR)/tools/mpy-tool.py
+MPY_LD = $(PYTHON) $(MPY_DIR)/tools/mpy_ld.py
+
+# Quiet by default; `make V=1` on the command line echoes commands and makes
+# the linker verbose.
+Q = @
+ifeq ("$(origin V)", "command line")
+ifeq ($(V),1)
+Q =
+MPY_LD += '-vvv'
+endif
+endif
+
+ARCH_UPPER = $(shell echo $(ARCH) | tr '[:lower:]' '[:upper:]')
+# Per-module config header generated by mpy_ld.py --preprocess (qstr data).
+CONFIG_H = $(BUILD)/$(MOD).config.h
+
+CFLAGS += -I. -I$(MPY_DIR)
+CFLAGS += -std=c99
+CFLAGS += -Os
+CFLAGS += -Wall -Werror -DNDEBUG
+CFLAGS += -DNO_QSTR
+CFLAGS += -DMICROPY_ENABLE_DYNRUNTIME
+CFLAGS += -DMP_CONFIGFILE='<$(CONFIG_H)>'
+# Position-independent code is required so the .mpy can be loaded anywhere.
+CFLAGS += -fpic -fno-common
+CFLAGS += -U _FORTIFY_SOURCE # prevent use of __*_chk libc functions
+#CFLAGS += -fdata-sections -ffunction-sections
+
+MPY_CROSS_FLAGS += -march=$(ARCH)
+
+# Split SRC into C sources (compiled to .o) and Python sources (to .mpy).
+SRC_O += $(addprefix $(BUILD)/, $(patsubst %.c,%.o,$(filter %.c,$(SRC))))
+SRC_MPY += $(addprefix $(BUILD)/, $(patsubst %.py,%.mpy,$(filter %.py,$(SRC))))
+
+################################################################################
+# Architecture configuration
+
+# Select cross-compiler prefix, code-generation flags and the default float
+# implementation based on ARCH. Unsupported values are a hard error below.
+ifeq ($(ARCH),x86)
+
+# x86
+CROSS =
+CFLAGS += -m32 -fno-stack-protector
+MICROPY_FLOAT_IMPL ?= double
+
+else ifeq ($(ARCH),x64)
+
+# x64
+CROSS =
+CFLAGS += -fno-stack-protector
+MICROPY_FLOAT_IMPL ?= double
+
+else ifeq ($(ARCH),armv7m)
+
+# thumb
+CROSS = arm-none-eabi-
+CFLAGS += -mthumb -mcpu=cortex-m3
+MICROPY_FLOAT_IMPL ?= none
+
+else ifeq ($(ARCH),armv7emsp)
+
+# thumb
+CROSS = arm-none-eabi-
+CFLAGS += -mthumb -mcpu=cortex-m4
+CFLAGS += -mfpu=fpv4-sp-d16 -mfloat-abi=hard
+MICROPY_FLOAT_IMPL ?= float
+
+else ifeq ($(ARCH),armv7emdp)
+
+# thumb
+CROSS = arm-none-eabi-
+CFLAGS += -mthumb -mcpu=cortex-m7
+CFLAGS += -mfpu=fpv5-d16 -mfloat-abi=hard
+MICROPY_FLOAT_IMPL ?= double
+
+else ifeq ($(ARCH),xtensa)
+
+# xtensa
+CROSS = xtensa-lx106-elf-
+CFLAGS += -mforce-l32
+MICROPY_FLOAT_IMPL ?= none
+
+else ifeq ($(ARCH),xtensawin)
+
+# xtensawin
+CROSS = xtensa-esp32-elf-
+CFLAGS +=
+MICROPY_FLOAT_IMPL ?= float
+
+else
+$(error architecture '$(ARCH)' not supported)
+endif
+
+# Translate the chosen float impl (none/float/double) into the corresponding
+# MICROPY_FLOAT_IMPL_* C macro value.
+MICROPY_FLOAT_IMPL_UPPER = $(shell echo $(MICROPY_FLOAT_IMPL) | tr '[:lower:]' '[:upper:]')
+CFLAGS += -DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_$(MICROPY_FLOAT_IMPL_UPPER)
+
+CFLAGS += $(CFLAGS_EXTRA)
+
+################################################################################
+# Build rules
+
+.PHONY: all clean
+
+all: $(MOD).mpy
+
+clean:
+ $(RM) -rf $(BUILD) $(CLEAN_EXTRA)
+
+# Create build destination directories first
+# (order-only prerequisite, so directory timestamps don't trigger rebuilds)
+BUILD_DIRS = $(sort $(dir $(CONFIG_H) $(SRC_O) $(SRC_MPY)))
+$(CONFIG_H) $(SRC_O) $(SRC_MPY): | $(BUILD_DIRS)
+$(BUILD_DIRS):
+ $(Q)$(MKDIR) -p $@
+
+# Preprocess all source files to generate $(CONFIG_H)
+$(CONFIG_H): $(SRC)
+ $(ECHO) "GEN $@"
+ $(Q)$(MPY_LD) --arch $(ARCH) --preprocess -o $@ $^
+
+# Build .o from .c source files
+$(BUILD)/%.o: %.c $(CONFIG_H) Makefile
+ $(ECHO) "CC $<"
+ $(Q)$(CROSS)gcc $(CFLAGS) -o $@ -c $<
+
+# Build .mpy from .py source files
+$(BUILD)/%.mpy: %.py
+ $(ECHO) "MPY $<"
+ $(Q)$(MPY_CROSS) $(MPY_CROSS_FLAGS) -o $@ $<
+
+# Build native .mpy from object files
+$(BUILD)/$(MOD).native.mpy: $(SRC_O)
+ $(ECHO) "LINK $<"
+ $(Q)$(MPY_LD) --arch $(ARCH) --qstrs $(CONFIG_H) -o $@ $^
+
+# Build final .mpy from all intermediate .mpy files
+$(MOD).mpy: $(BUILD)/$(MOD).native.mpy $(SRC_MPY)
+ $(ECHO) "GEN $@"
+ $(Q)$(MPY_TOOL) --merge -o $@ $^
diff --git a/circuitpython/py/emit.h b/circuitpython/py/emit.h
new file mode 100644
index 0000000..2797fb8
--- /dev/null
+++ b/circuitpython/py/emit.h
@@ -0,0 +1,288 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_EMIT_H
+#define MICROPY_INCLUDED_PY_EMIT_H
+
+#include "py/lexer.h"
+#include "py/scope.h"
+
+/* Notes on passes:
+ * We don't know exactly the opcodes in pass 1 because they depend on the
+ * closing over of variables (LOAD_CLOSURE, BUILD_TUPLE, MAKE_CLOSURE), which
+ * depends on determining the scope of variables in each function, and this
+ * is not known until the end of pass 1.
+ * As a consequence, we don't know the maximum stack size until the end of pass 2.
+ * This is problematic for some emitters (x64) since they need to know the maximum
+ * stack size to compile the entry to the function, and this affects code size.
+ */
+
+// The compiler runs the emitter multiple times over the same scope; each
+// pass refines information needed by the next (see the notes above).
+typedef enum {
+ MP_PASS_SCOPE = 1, // work out id's and their kind, and number of labels
+ MP_PASS_STACK_SIZE = 2, // work out maximum stack size
+ MP_PASS_CODE_SIZE = 3, // work out code size and label offsets
+ MP_PASS_EMIT = 4, // emit code
+} pass_kind_t;
+
+// Flags for *args / **kwargs in call_function()/call_method().
+#define MP_EMIT_STAR_FLAG_SINGLE (0x01)
+#define MP_EMIT_STAR_FLAG_DOUBLE (0x02)
+
+// Flag bit or'd into the unwind label of a `break` out of a for loop.
+#define MP_EMIT_BREAK_FROM_FOR (0x8000)
+
+// Kind for emit_id_ops->local()
+#define MP_EMIT_IDOP_LOCAL_FAST (0)
+#define MP_EMIT_IDOP_LOCAL_DEREF (1)
+
+// Kind for emit_id_ops->global()
+#define MP_EMIT_IDOP_GLOBAL_NAME (0)
+#define MP_EMIT_IDOP_GLOBAL_GLOBAL (1)
+
+// Kind for emit->import()
+#define MP_EMIT_IMPORT_NAME (0)
+#define MP_EMIT_IMPORT_FROM (1)
+#define MP_EMIT_IMPORT_STAR (2)
+
+// Kind for emit->subscr()
+#define MP_EMIT_SUBSCR_LOAD (0)
+#define MP_EMIT_SUBSCR_STORE (1)
+#define MP_EMIT_SUBSCR_DELETE (2)
+
+// Kind for emit->attr()
+#define MP_EMIT_ATTR_LOAD (0)
+#define MP_EMIT_ATTR_STORE (1)
+#define MP_EMIT_ATTR_DELETE (2)
+
+// Kind for emit->setup_block()
+#define MP_EMIT_SETUP_BLOCK_WITH (0)
+#define MP_EMIT_SETUP_BLOCK_EXCEPT (1)
+#define MP_EMIT_SETUP_BLOCK_FINALLY (2)
+
+// Kind for emit->build()
+#define MP_EMIT_BUILD_TUPLE (0)
+#define MP_EMIT_BUILD_LIST (1)
+#define MP_EMIT_BUILD_MAP (2)
+#define MP_EMIT_BUILD_SET (3)
+#define MP_EMIT_BUILD_SLICE (4)
+
+// Kind for emit->yield()
+#define MP_EMIT_YIELD_VALUE (0)
+#define MP_EMIT_YIELD_FROM (1)
+
+// Opaque per-backend emitter state; each backend defines its own struct.
+typedef struct _emit_t emit_t;
+
+// Operations for emitting load/store/delete of an identifier; `kind` is one
+// of the MP_EMIT_IDOP_LOCAL_* / MP_EMIT_IDOP_GLOBAL_* values above.
+typedef struct _mp_emit_method_table_id_ops_t {
+ void (*local)(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+ void (*global)(emit_t *emit, qstr qst, int kind);
+} mp_emit_method_table_id_ops_t;
+
+// Virtual method table implemented by every code emitter (bytecode and the
+// native backends). The compiler drives code generation exclusively through
+// these function pointers, one call per source-level construct.
+typedef struct _emit_method_table_t {
+ #if MICROPY_DYNAMIC_COMPILER
+ // Constructor/destructor, needed when the backend is chosen at runtime.
+ emit_t *(*emit_new)(mp_obj_t * error_slot, uint *label_slot, mp_uint_t max_num_labels);
+ void (*emit_free)(emit_t *emit);
+ #endif
+
+ // Pass control (see pass_kind_t above).
+ void (*start_pass)(emit_t *emit, pass_kind_t pass, scope_t *scope);
+ void (*end_pass)(emit_t *emit);
+ bool (*last_emit_was_return_value)(emit_t *emit);
+ void (*adjust_stack_size)(emit_t *emit, mp_int_t delta);
+ void (*set_source_line)(emit_t *emit, mp_uint_t line);
+
+ // Identifier access, split by operation.
+ mp_emit_method_table_id_ops_t load_id;
+ mp_emit_method_table_id_ops_t store_id;
+ mp_emit_method_table_id_ops_t delete_id;
+
+ // One entry per emitted operation; `kind` parameters take the MP_EMIT_*
+ // values defined above.
+ void (*label_assign)(emit_t *emit, mp_uint_t l);
+ void (*import)(emit_t *emit, qstr qst, int kind);
+ void (*load_const_tok)(emit_t *emit, mp_token_kind_t tok);
+ void (*load_const_small_int)(emit_t *emit, mp_int_t arg);
+ void (*load_const_str)(emit_t *emit, qstr qst);
+ void (*load_const_obj)(emit_t *emit, mp_obj_t obj);
+ void (*load_null)(emit_t *emit);
+ void (*load_method)(emit_t *emit, qstr qst, bool is_super);
+ void (*load_build_class)(emit_t *emit);
+ void (*subscr)(emit_t *emit, int kind);
+ void (*attr)(emit_t *emit, qstr qst, int kind);
+ void (*dup_top)(emit_t *emit);
+ void (*dup_top_two)(emit_t *emit);
+ void (*pop_top)(emit_t *emit);
+ void (*rot_two)(emit_t *emit);
+ void (*rot_three)(emit_t *emit);
+ void (*jump)(emit_t *emit, mp_uint_t label);
+ void (*pop_jump_if)(emit_t *emit, bool cond, mp_uint_t label);
+ void (*jump_if_or_pop)(emit_t *emit, bool cond, mp_uint_t label);
+ void (*unwind_jump)(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+ void (*setup_block)(emit_t *emit, mp_uint_t label, int kind);
+ void (*with_cleanup)(emit_t *emit, mp_uint_t label);
+ void (*end_finally)(emit_t *emit);
+ void (*get_iter)(emit_t *emit, bool use_stack);
+ void (*for_iter)(emit_t *emit, mp_uint_t label);
+ void (*for_iter_end)(emit_t *emit);
+ void (*pop_except_jump)(emit_t *emit, mp_uint_t label, bool within_exc_handler);
+ void (*unary_op)(emit_t *emit, mp_unary_op_t op);
+ void (*binary_op)(emit_t *emit, mp_binary_op_t op);
+ void (*build)(emit_t *emit, mp_uint_t n_args, int kind);
+ void (*store_map)(emit_t *emit);
+ void (*store_comp)(emit_t *emit, scope_kind_t kind, mp_uint_t set_stack_index);
+ void (*unpack_sequence)(emit_t *emit, mp_uint_t n_args);
+ void (*unpack_ex)(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+ void (*make_function)(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+ void (*make_closure)(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+ void (*call_function)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+ void (*call_method)(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+ void (*return_value)(emit_t *emit);
+ void (*raise_varargs)(emit_t *emit, mp_uint_t n_args);
+ void (*yield)(emit_t *emit, int kind);
+
+ // these methods are used to control entry to/exit from an exception handler
+ // they may or may not emit code
+ void (*start_except_handler)(emit_t *emit);
+ void (*end_except_handler)(emit_t *emit);
+} emit_method_table_t;
+
+// Register a read of identifier qst in scope; an unknown id defaults to an
+// implicit global (normal Python name-resolution behaviour for loads).
+static inline void mp_emit_common_get_id_for_load(scope_t *scope, qstr qst) {
+ scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT);
+}
+
+// Register a write/delete of qst (assignments make a name local), and emit
+// an id operation via the given backend's id-op table, respectively.
+void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst);
+void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst);
+
+// Method tables: the bytecode emitter plus one native backend per supported
+// architecture (each only compiled in for its matching target).
+extern const emit_method_table_t emit_bc_method_table;
+extern const emit_method_table_t emit_native_x64_method_table;
+extern const emit_method_table_t emit_native_x86_method_table;
+extern const emit_method_table_t emit_native_thumb_method_table;
+extern const emit_method_table_t emit_native_arm_method_table;
+extern const emit_method_table_t emit_native_xtensa_method_table;
+extern const emit_method_table_t emit_native_xtensawin_method_table;
+
+// Stand-alone id-op tables for the bytecode emitter (used directly by the
+// compiler when the full method table is not required).
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops;
+extern const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops;
+
+// Backend constructors.
+emit_t *emit_bc_new(void);
+emit_t *emit_native_x64_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_x86_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_thumb_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_arm_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_xtensa_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+emit_t *emit_native_xtensawin_new(mp_obj_t *error_slot, uint *label_slot, mp_uint_t max_num_labels);
+
+void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels);
+
+// Backend destructors.
+void emit_bc_free(emit_t *emit);
+void emit_native_x64_free(emit_t *emit);
+void emit_native_x86_free(emit_t *emit);
+void emit_native_thumb_free(emit_t *emit);
+void emit_native_arm_free(emit_t *emit);
+void emit_native_xtensa_free(emit_t *emit);
+void emit_native_xtensawin_free(emit_t *emit);
+
+// Bytecode-emitter entry points; these implement the slots of
+// emit_method_table_t above (see emitbc.c for definitions).
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope);
+void mp_emit_bc_end_pass(emit_t *emit);
+bool mp_emit_bc_last_emit_was_return_value(emit_t *emit);
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta);
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t line);
+
+void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind);
+void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind);
+
+void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l);
+void mp_emit_bc_import(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok);
+void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg);
+void mp_emit_bc_load_const_str(emit_t *emit, qstr qst);
+void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj);
+void mp_emit_bc_load_null(emit_t *emit);
+void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super);
+void mp_emit_bc_load_build_class(emit_t *emit);
+void mp_emit_bc_subscr(emit_t *emit, int kind);
+void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind);
+void mp_emit_bc_dup_top(emit_t *emit);
+void mp_emit_bc_dup_top_two(emit_t *emit);
+void mp_emit_bc_pop_top(emit_t *emit);
+void mp_emit_bc_rot_two(emit_t *emit);
+void mp_emit_bc_rot_three(emit_t *emit);
+void mp_emit_bc_jump(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label);
+void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth);
+void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind);
+void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_end_finally(emit_t *emit);
+void mp_emit_bc_get_iter(emit_t *emit, bool use_stack);
+void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label);
+void mp_emit_bc_for_iter_end(emit_t *emit);
+void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler);
+void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op);
+void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op);
+void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind);
+void mp_emit_bc_store_map(emit_t *emit);
+void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t list_stack_index);
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right);
+void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults);
+void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags);
+void mp_emit_bc_return_value(emit_t *emit);
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args);
+void mp_emit_bc_yield(emit_t *emit, int kind);
+void mp_emit_bc_start_except_handler(emit_t *emit);
+void mp_emit_bc_end_except_handler(emit_t *emit);
+
+// Opaque state for the inline-assembler emitters (@micropython.asm_thumb etc).
+typedef struct _emit_inline_asm_t emit_inline_asm_t;
+
+// Method table implemented by each inline-assembler backend.
+typedef struct _emit_inline_asm_method_table_t {
+ #if MICROPY_DYNAMIC_COMPILER
+ emit_inline_asm_t *(*asm_new)(mp_uint_t max_num_labels);
+ void (*asm_free)(emit_inline_asm_t *emit);
+ #endif
+
+ void (*start_pass)(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot);
+ void (*end_pass)(emit_inline_asm_t *emit, mp_uint_t type_sig);
+ mp_uint_t (*count_params)(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params);
+ bool (*label)(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id);
+ void (*op)(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args);
+} emit_inline_asm_method_table_t;
+
+// Inline-assembler backends: Thumb and Xtensa.
+extern const emit_inline_asm_method_table_t emit_inline_thumb_method_table;
+extern const emit_inline_asm_method_table_t emit_inline_xtensa_method_table;
+
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels);
+emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels);
+
+void emit_inline_thumb_free(emit_inline_asm_t *emit);
+void emit_inline_xtensa_free(emit_inline_asm_t *emit);
+
+// Compile-time warnings are optional; without them the macro is a no-op.
+#if MICROPY_WARNINGS
+void mp_emitter_warning(pass_kind_t pass, const char *msg);
+#else
+#define mp_emitter_warning(pass, msg)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_EMIT_H
diff --git a/circuitpython/py/emitbc.c b/circuitpython/py/emitbc.c
new file mode 100644
index 0000000..8f7b1d5
--- /dev/null
+++ b/circuitpython/py/emitbc.c
@@ -0,0 +1,937 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpstate.h"
+#include "py/emit.h"
+#include "py/bc0.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// Max bytes needed for the 7-bits-per-byte varint encoding of a machine word.
+#define BYTES_FOR_INT ((MP_BYTES_PER_OBJ_WORD * 8 + 6) / 7)
+#define DUMMY_DATA_SIZE (BYTES_FOR_INT)
+
+// State of the bytecode emitter; a single instance is reused across passes.
+struct _emit_t {
+ // Accessed as mp_obj_t, so must be aligned as such, and we rely on the
+ // memory allocator returning a suitably aligned pointer.
+ // Should work for cases when mp_obj_t is 64-bit on a 32-bit machine.
+ // Sizing passes write here instead of into real code storage.
+ byte dummy_data[DUMMY_DATA_SIZE];
+
+ pass_kind_t pass : 8; // current compiler pass (pass_kind_t)
+ mp_uint_t last_emit_was_return_value : 8;
+
+ int stack_size; // current Python value-stack depth
+
+ scope_t *scope; // scope (function/class/module) being compiled
+
+ // Tracking for the bytecode-offset/line-number map in the code info.
+ mp_uint_t last_source_line_offset;
+ mp_uint_t last_source_line;
+
+ // label_offsets[i] = bytecode offset of label i, valid from pass 3 on.
+ mp_uint_t max_num_labels;
+ mp_uint_t *label_offsets;
+
+ // Write cursors and (after the sizing pass) final section sizes.
+ size_t code_info_offset;
+ size_t code_info_size;
+ size_t bytecode_offset;
+ size_t bytecode_size;
+ byte *code_base; // stores both byte code and code info
+
+ size_t n_info; // size in bytes of the source-info section
+ size_t n_cell; // number of locals converted to closure cells
+
+ #if MICROPY_PERSISTENT_CODE
+ // Counters indexing into const_table: objects then raw codes.
+ uint16_t ct_cur_obj;
+ uint16_t ct_num_obj;
+ uint16_t ct_cur_raw_code;
+ #endif
+ mp_uint_t *const_table;
+};
+
+// Allocate a zero-initialised bytecode emitter.
+emit_t *emit_bc_new(void) {
+ emit_t *emit = m_new0(emit_t, 1);
+ return emit;
+}
+
+// Allocate the label-offset table once the number of labels is known
+// (determined by the scope pass).
+void emit_bc_set_max_num_labels(emit_t *emit, mp_uint_t max_num_labels) {
+ emit->max_num_labels = max_num_labels;
+ emit->label_offsets = m_new(mp_uint_t, emit->max_num_labels);
+}
+
+// Free the emitter and its label table (but not any emitted code, whose
+// ownership was transferred in mp_emit_bc_end_pass).
+void emit_bc_free(emit_t *emit) {
+ m_del(mp_uint_t, emit->label_offsets, emit->max_num_labels);
+ m_del_obj(emit_t, emit);
+}
+
+// Allocator callback: reserves nbytes in either the code-info or bytecode
+// section and returns the write position.
+typedef byte *(*emit_allocator_t)(emit_t *emit, int nbytes);
+
+// Write val as a variable-length unsigned int: 7 payload bits per byte,
+// continuation bit 0x80 set on all but the last byte.
+STATIC void emit_write_uint(emit_t *emit, emit_allocator_t allocator, mp_uint_t val) {
+ // We store each 7 bits in a separate byte, and that's how many bytes needed
+ byte buf[BYTES_FOR_INT];
+ byte *p = buf + sizeof(buf);
+ // We encode in little-endian order, but store in big-endian, to help decoding
+ do {
+ *--p = val & 0x7f;
+ val >>= 7;
+ } while (val != 0);
+ byte *c = allocator(emit, buf + sizeof(buf) - p);
+ while (p != buf + sizeof(buf) - 1) {
+ *c++ = *p++ | 0x80;
+ }
+ *c = *p;
+}
+
+// all functions must go through this one to emit code info
+// During sizing passes only the offset is advanced and writes are redirected
+// into dummy_data; in the emit pass a real pointer into code_base is returned.
+STATIC byte *emit_get_cur_to_write_code_info(emit_t *emit, int num_bytes_to_write) {
+ if (emit->pass < MP_PASS_EMIT) {
+ emit->code_info_offset += num_bytes_to_write;
+ return emit->dummy_data;
+ } else {
+ assert(emit->code_info_offset + num_bytes_to_write <= emit->code_info_size);
+ byte *c = emit->code_base + emit->code_info_offset;
+ emit->code_info_offset += num_bytes_to_write;
+ return c;
+ }
+}
+
+// Append a single byte to the code-info section.
+STATIC void emit_write_code_info_byte(emit_t *emit, byte val) {
+ *emit_get_cur_to_write_code_info(emit, 1) = val;
+}
+
+// Append a qstr to the code-info section. With persistent code it's a fixed
+// 16-bit little-endian value (so the saver can rewrite it); otherwise it's a
+// varint.
+STATIC void emit_write_code_info_qstr(emit_t *emit, qstr qst) {
+ #if MICROPY_PERSISTENT_CODE
+ assert((qst >> 16) == 0);
+ byte *c = emit_get_cur_to_write_code_info(emit, 2);
+ c[0] = qst;
+ c[1] = qst >> 8;
+ #else
+ emit_write_uint(emit, emit_get_cur_to_write_code_info, qst);
+ #endif
+}
+
+#if MICROPY_ENABLE_SOURCE_LINE
+// Append entries to the bytecode-offset/line-number map, encoding a skip of
+// bytes_to_skip bytecode bytes and lines_to_skip source lines. Large skips
+// are split across multiple one- or two-byte entries.
+STATIC void emit_write_code_info_bytes_lines(emit_t *emit, mp_uint_t bytes_to_skip, mp_uint_t lines_to_skip) {
+ assert(bytes_to_skip > 0 || lines_to_skip > 0);
+ while (bytes_to_skip > 0 || lines_to_skip > 0) {
+ mp_uint_t b, l;
+ if (lines_to_skip <= 6 || bytes_to_skip > 0xf) {
+ // use 0b0LLBBBBB encoding
+ b = MIN(bytes_to_skip, 0x1f);
+ if (b < bytes_to_skip) {
+ // we can't skip any lines until we skip all the bytes
+ l = 0;
+ } else {
+ l = MIN(lines_to_skip, 0x3);
+ }
+ *emit_get_cur_to_write_code_info(emit, 1) = b | (l << 5);
+ } else {
+ // use 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+ b = MIN(bytes_to_skip, 0xf);
+ l = MIN(lines_to_skip, 0x7ff);
+ byte *ci = emit_get_cur_to_write_code_info(emit, 2);
+ ci[0] = 0x80 | b | ((l >> 4) & 0x70);
+ ci[1] = l;
+ }
+ bytes_to_skip -= b;
+ lines_to_skip -= l;
+ }
+}
+#endif
+
+// all functions must go through this one to emit byte code
+// Mirror of emit_get_cur_to_write_code_info, but for the bytecode section,
+// which lives after the code info in the same code_base allocation.
+STATIC byte *emit_get_cur_to_write_bytecode(emit_t *emit, int num_bytes_to_write) {
+ if (emit->pass < MP_PASS_EMIT) {
+ emit->bytecode_offset += num_bytes_to_write;
+ return emit->dummy_data;
+ } else {
+ assert(emit->bytecode_offset + num_bytes_to_write <= emit->bytecode_size);
+ byte *c = emit->code_base + emit->code_info_size + emit->bytecode_offset;
+ emit->bytecode_offset += num_bytes_to_write;
+ return c;
+ }
+}
+
+// Write one opcode byte with no stack-size adjustment.
+STATIC void emit_write_bytecode_raw_byte(emit_t *emit, byte b1) {
+ byte *c = emit_get_cur_to_write_bytecode(emit, 1);
+ c[0] = b1;
+}
+
+// Write one opcode byte and record its effect on the value stack.
+STATIC void emit_write_bytecode_byte(emit_t *emit, int stack_adj, byte b1) {
+ mp_emit_bc_adjust_stack_size(emit, stack_adj);
+ byte *c = emit_get_cur_to_write_bytecode(emit, 1);
+ c[0] = b1;
+}
+
+// Similar to emit_write_bytecode_uint(), just some extra handling to encode sign
+// (opcode byte followed by a sign-extending 7-bits-per-byte varint).
+STATIC void emit_write_bytecode_byte_int(emit_t *emit, int stack_adj, byte b1, mp_int_t num) {
+ emit_write_bytecode_byte(emit, stack_adj, b1);
+
+ // We store each 7 bits in a separate byte, and that's how many bytes needed
+ byte buf[BYTES_FOR_INT];
+ byte *p = buf + sizeof(buf);
+ // We encode in little-endian order, but store in big-endian, to help decoding
+ do {
+ *--p = num & 0x7f;
+ num >>= 7;
+ } while (num != 0 && num != -1);
+ // Make sure that highest bit we stored (mask 0x40) matches sign
+ // of the number. If not, store extra byte just to encode sign
+ if (num == -1 && (*p & 0x40) == 0) {
+ *--p = 0x7f;
+ } else if (num == 0 && (*p & 0x40) != 0) {
+ *--p = 0;
+ }
+
+ byte *c = emit_get_cur_to_write_bytecode(emit, buf + sizeof(buf) - p);
+ while (p != buf + sizeof(buf) - 1) {
+ *c++ = *p++ | 0x80;
+ }
+ *c = *p;
+}
+
+// Write an opcode byte followed by an unsigned varint argument.
+STATIC void emit_write_bytecode_byte_uint(emit_t *emit, int stack_adj, byte b, mp_uint_t val) {
+ emit_write_bytecode_byte(emit, stack_adj, b);
+ emit_write_uint(emit, emit_get_cur_to_write_bytecode, val);
+}
+
+#if MICROPY_PERSISTENT_CODE
+// Write an opcode whose argument is index n into the const table, and store
+// value c at that index (only during the final pass, when const_table exists).
+STATIC void emit_write_bytecode_byte_const(emit_t *emit, int stack_adj, byte b, mp_uint_t n, mp_uint_t c) {
+ if (emit->pass == MP_PASS_EMIT) {
+ emit->const_table[n] = c;
+ }
+ emit_write_bytecode_byte_uint(emit, stack_adj, b, n);
+}
+#endif
+
+// Write an opcode with a qstr argument: fixed 16-bit little-endian when
+// persistent code is enabled (rewritable on save/load), varint otherwise.
+STATIC void emit_write_bytecode_byte_qstr(emit_t *emit, int stack_adj, byte b, qstr qst) {
+ #if MICROPY_PERSISTENT_CODE
+ assert((qst >> 16) == 0);
+ mp_emit_bc_adjust_stack_size(emit, stack_adj);
+ byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+ c[0] = b;
+ c[1] = qst;
+ c[2] = qst >> 8;
+ #else
+ emit_write_bytecode_byte_uint(emit, stack_adj, b, qst);
+ #endif
+}
+
+// Write an opcode with an object argument: as a const-table index when
+// persistent code is enabled, otherwise inline in the bytecode stream,
+// aligned so the GC can see the pointer.
+STATIC void emit_write_bytecode_byte_obj(emit_t *emit, int stack_adj, byte b, mp_obj_t obj) {
+ #if MICROPY_PERSISTENT_CODE
+ // Objects are stored after the argument-name qstrs in the const table.
+ emit_write_bytecode_byte_const(emit, stack_adj, b,
+ emit->scope->num_pos_args + emit->scope->num_kwonly_args
+ + emit->ct_cur_obj++, (mp_uint_t)obj);
+ #else
+ // aligns the pointer so it is friendly to GC
+ emit_write_bytecode_byte(emit, stack_adj, b);
+ emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(mp_obj_t));
+ mp_obj_t *c = (mp_obj_t *)emit_get_cur_to_write_bytecode(emit, sizeof(mp_obj_t));
+ // Verify that c is already uint-aligned
+ assert(c == MP_ALIGN(c, sizeof(mp_obj_t)));
+ *c = obj;
+ #endif
+}
+
+// Write an opcode with a raw-code (child function) argument; stored after
+// all object entries in the const table, or inline (aligned) otherwise.
+STATIC void emit_write_bytecode_byte_raw_code(emit_t *emit, int stack_adj, byte b, mp_raw_code_t *rc) {
+ #if MICROPY_PERSISTENT_CODE
+ emit_write_bytecode_byte_const(emit, stack_adj, b,
+ emit->scope->num_pos_args + emit->scope->num_kwonly_args
+ + emit->ct_num_obj + emit->ct_cur_raw_code++, (mp_uint_t)(uintptr_t)rc);
+ #else
+ // aligns the pointer so it is friendly to GC
+ emit_write_bytecode_byte(emit, stack_adj, b);
+ emit->bytecode_offset = (size_t)MP_ALIGN(emit->bytecode_offset, sizeof(void *));
+ void **c = (void **)emit_get_cur_to_write_bytecode(emit, sizeof(void *));
+ // Verify that c is already uint-aligned
+ assert(c == MP_ALIGN(c, sizeof(void *)));
+ *c = rc;
+ #endif
+ #if MICROPY_PY_SYS_SETTRACE
+ rc->line_of_definition = emit->last_source_line;
+ #endif
+}
+
+// unsigned labels are relative to ip following this instruction, stored as 16 bits
+// Before the emit pass label offsets are unknown, so 0 is written; sizes
+// still match because the encoding is always 3 bytes.
+STATIC void emit_write_bytecode_byte_unsigned_label(emit_t *emit, int stack_adj, byte b1, mp_uint_t label) {
+ mp_emit_bc_adjust_stack_size(emit, stack_adj);
+ mp_uint_t bytecode_offset;
+ if (emit->pass < MP_PASS_EMIT) {
+ bytecode_offset = 0;
+ } else {
+ bytecode_offset = emit->label_offsets[label] - emit->bytecode_offset - 3;
+ }
+ byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+ c[0] = b1;
+ c[1] = bytecode_offset;
+ c[2] = bytecode_offset >> 8;
+}
+
+// signed labels are relative to ip following this instruction, stored as 16 bits, in excess
+// (biased by 0x8000 so backward jumps encode as unsigned values)
+STATIC void emit_write_bytecode_byte_signed_label(emit_t *emit, int stack_adj, byte b1, mp_uint_t label) {
+ mp_emit_bc_adjust_stack_size(emit, stack_adj);
+ int bytecode_offset;
+ if (emit->pass < MP_PASS_EMIT) {
+ bytecode_offset = 0;
+ } else {
+ bytecode_offset = emit->label_offsets[label] - emit->bytecode_offset - 3 + 0x8000;
+ }
+ byte *c = emit_get_cur_to_write_bytecode(emit, 3);
+ c[0] = b1;
+ c[1] = bytecode_offset;
+ c[2] = bytecode_offset >> 8;
+}
+
+// Begin a compiler pass over `scope`: reset per-pass state and emit the
+// bytecode prelude (signature, sizes, names, and — in the final pass — the
+// argument-name entries of the const table).
+void mp_emit_bc_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+ emit->pass = pass;
+ emit->stack_size = 0;
+ emit->last_emit_was_return_value = false;
+ emit->scope = scope;
+ emit->last_source_line_offset = 0;
+ emit->last_source_line = 1;
+ #ifndef NDEBUG
+ // With debugging enabled labels are checked for unique assignment
+ if (pass < MP_PASS_EMIT && emit->label_offsets != NULL) {
+ memset(emit->label_offsets, -1, emit->max_num_labels * sizeof(mp_uint_t));
+ }
+ #endif
+ emit->bytecode_offset = 0;
+ emit->code_info_offset = 0;
+
+ // Write local state size, exception stack size, scope flags and number of arguments
+ {
+ mp_uint_t n_state = scope->num_locals + scope->stack_size;
+ if (n_state == 0) {
+ // Need at least 1 entry in the state, in the case an exception is
+ // propagated through this function, the exception is returned in
+ // the highest slot in the state (fastn[0], see vm.c).
+ n_state = 1;
+ }
+ #if MICROPY_DEBUG_VM_STACK_OVERFLOW
+ // An extra slot in the stack is needed to detect VM stack overflow
+ n_state += 1;
+ #endif
+
+ size_t n_exc_stack = scope->exc_stack_size;
+ MP_BC_PRELUDE_SIG_ENCODE(n_state, n_exc_stack, scope, emit_write_code_info_byte, emit);
+ }
+
+ // Write number of cells and size of the source code info
+ // (n_info/n_cell are only known after pass 2 has completed).
+ if (pass >= MP_PASS_CODE_SIZE) {
+ MP_BC_PRELUDE_SIZE_ENCODE(emit->n_info, emit->n_cell, emit_write_code_info_byte, emit);
+ }
+
+ // Remember where the source-info section starts, so end_pass can size it.
+ emit->n_info = emit->code_info_offset;
+
+ // Write the name and source file of this function.
+ emit_write_code_info_qstr(emit, scope->simple_name);
+ emit_write_code_info_qstr(emit, scope->source_file);
+
+ #if MICROPY_PERSISTENT_CODE
+ emit->ct_cur_obj = 0;
+ emit->ct_cur_raw_code = 0;
+ #endif
+
+ if (pass == MP_PASS_EMIT) {
+ // Write argument names (needed to resolve positional args passed as
+ // keywords). We store them as full word-sized objects for efficient access
+ // in mp_setup_code_state this is the start of the prelude and is guaranteed
+ // to be aligned on a word boundary.
+
+ // For a given argument position (indexed by i) we need to find the
+ // corresponding id_info which is a parameter, as it has the correct
+ // qstr name to use as the argument name. Note that it's not a simple
+ // 1-1 mapping (ie i!=j in general) because of possible closed-over
+ // variables. In the case that the argument i has no corresponding
+ // parameter we use "*" as its name (since no argument can ever be named
+ // "*"). We could use a blank qstr but "*" is better for debugging.
+ // Note: there is some wasted RAM here for the case of storing a qstr
+ // for each closed-over variable, and maybe there is a better way to do
+ // it, but that would require changes to mp_setup_code_state.
+ for (int i = 0; i < scope->num_pos_args + scope->num_kwonly_args; i++) {
+ qstr qst = MP_QSTR__star_;
+ for (int j = 0; j < scope->id_info_len; ++j) {
+ id_info_t *id = &scope->id_info[j];
+ if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+ qst = id->qst;
+ break;
+ }
+ }
+ emit->const_table[i] = (mp_uint_t)MP_OBJ_NEW_QSTR(qst);
+ }
+ }
+}
+
+// Finish a compiler pass: close the source-info section, emit the closure
+// (cell) list, and — depending on the pass — allocate the final code buffer
+// or hand the finished bytecode over to the runtime.
+void mp_emit_bc_end_pass(emit_t *emit) {
+ if (emit->pass == MP_PASS_SCOPE) {
+ return;
+ }
+
+ // check stack is back to zero size
+ assert(emit->stack_size == 0);
+
+ emit_write_code_info_byte(emit, 0); // end of line number info
+
+ // Calculate size of source code info section
+ // (n_info held the section's start offset; now it holds its length).
+ emit->n_info = emit->code_info_offset - emit->n_info;
+
+ // Emit closure section of prelude
+ emit->n_cell = 0;
+ for (size_t i = 0; i < emit->scope->id_info_len; ++i) {
+ id_info_t *id = &emit->scope->id_info[i];
+ if (id->kind == ID_INFO_KIND_CELL) {
+ assert(id->local_num <= 255);
+ emit_write_code_info_byte(emit, id->local_num); // write the local which should be converted to a cell
+ ++emit->n_cell;
+ }
+ }
+
+ #if MICROPY_PERSISTENT_CODE
+ // The object count must be stable between sizing and emit passes.
+ assert(emit->pass <= MP_PASS_STACK_SIZE || (emit->ct_num_obj == emit->ct_cur_obj));
+ emit->ct_num_obj = emit->ct_cur_obj;
+ #endif
+
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ #if !MICROPY_PERSISTENT_CODE
+ // so bytecode is aligned
+ emit->code_info_offset = (size_t)MP_ALIGN(emit->code_info_offset, sizeof(mp_uint_t));
+ #endif
+
+ // calculate size of total code-info + bytecode, in bytes
+ emit->code_info_size = emit->code_info_offset;
+ emit->bytecode_size = emit->bytecode_offset;
+ emit->code_base = m_new0(byte, emit->code_info_size + emit->bytecode_size);
+
+ // Const table layout: argument-name qstrs, then (with persistent code)
+ // constant objects and child raw codes.
+ #if MICROPY_PERSISTENT_CODE
+ emit->const_table = m_new0(mp_uint_t,
+ emit->scope->num_pos_args + emit->scope->num_kwonly_args
+ + emit->ct_cur_obj + emit->ct_cur_raw_code);
+ #else
+ emit->const_table = m_new0(mp_uint_t,
+ emit->scope->num_pos_args + emit->scope->num_kwonly_args);
+ #endif
+
+ } else if (emit->pass == MP_PASS_EMIT) {
+ // Transfer ownership of the finished code to the scope's raw_code.
+ mp_emit_glue_assign_bytecode(emit->scope->raw_code, emit->code_base,
+ #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS
+ emit->code_info_size + emit->bytecode_size,
+ #endif
+ emit->const_table,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ emit->ct_cur_obj, emit->ct_cur_raw_code,
+ #endif
+ emit->scope->scope_flags);
+ }
+}
+
+bool mp_emit_bc_last_emit_was_return_value(emit_t *emit) { // true iff the previous opcode emitted was RETURN_VALUE
+    return emit->last_emit_was_return_value;
+}
+
+void mp_emit_bc_adjust_stack_size(emit_t *emit, mp_int_t delta) { // track Python value-stack depth; records the high-water mark in the scope
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+    assert((mp_int_t)emit->stack_size + delta >= 0); // stack depth must never go negative
+    emit->stack_size += delta;
+    if (emit->stack_size > emit->scope->stack_size) {
+        emit->scope->stack_size = emit->stack_size; // new maximum depth for this scope
+    }
+    emit->last_emit_was_return_value = false;
+}
+
+void mp_emit_bc_set_source_line(emit_t *emit, mp_uint_t source_line) { // record a bytecode-offset/source-line pair in the code-info section
+    #if MICROPY_ENABLE_SOURCE_LINE
+    if (MP_STATE_VM(mp_optimise_value) >= 3) {
+        // If we compile with -O3, don't store line numbers.
+        return;
+    }
+    if (source_line > emit->last_source_line) { // only forward line progress is encoded
+        mp_uint_t bytes_to_skip = emit->bytecode_offset - emit->last_source_line_offset;
+        mp_uint_t lines_to_skip = source_line - emit->last_source_line;
+        emit_write_code_info_bytes_lines(emit, bytes_to_skip, lines_to_skip); // delta-encoded (bytes, lines) pair
+        emit->last_source_line_offset = emit->bytecode_offset;
+        emit->last_source_line = source_line;
+    }
+    #else
+    (void)emit;
+    (void)source_line;
+    #endif
+}
+
+void mp_emit_bc_label_assign(emit_t *emit, mp_uint_t l) { // bind label l to the current bytecode offset
+    mp_emit_bc_adjust_stack_size(emit, 0); // resets last_emit_was_return_value (see mp_emit_bc_adjust_stack_size)
+    if (emit->pass == MP_PASS_SCOPE) {
+        return;
+    }
+    assert(l < emit->max_num_labels);
+    if (emit->pass < MP_PASS_EMIT) {
+        // assign label offset
+        assert(emit->label_offsets[l] == (mp_uint_t)-1); // each label may be assigned only once
+        emit->label_offsets[l] = emit->bytecode_offset;
+    } else {
+        // ensure label offset has not changed from MP_PASS_CODE_SIZE to MP_PASS_EMIT
+        assert(emit->label_offsets[l] == emit->bytecode_offset);
+    }
+}
+
+void mp_emit_bc_import(emit_t *emit, qstr qst, int kind) {
+    // Emit one of the three import opcodes.  IMPORT_FROM pushes one extra
+    // value (net +1); IMPORT_NAME and IMPORT_STAR each have a net stack
+    // effect of -1 (see the stack adjustments below).
+    if (kind == MP_EMIT_IMPORT_STAR) {
+        emit_write_bytecode_byte(emit, -1, MP_BC_IMPORT_STAR);
+    } else if (kind == MP_EMIT_IMPORT_FROM) {
+        emit_write_bytecode_byte_qstr(emit, 1, MP_BC_IMPORT_FROM, qst);
+    } else {
+        emit_write_bytecode_byte_qstr(emit, -1, MP_BC_IMPORT_NAME, qst);
+    }
+}
+
+void mp_emit_bc_load_const_tok(emit_t *emit, mp_token_kind_t tok) { // push the constant for a keyword token (False/None/True) or Ellipsis
+    MP_STATIC_ASSERT(MP_BC_LOAD_CONST_FALSE + (MP_TOKEN_KW_NONE - MP_TOKEN_KW_FALSE) == MP_BC_LOAD_CONST_NONE); // opcode layout mirrors token ordering
+    MP_STATIC_ASSERT(MP_BC_LOAD_CONST_FALSE + (MP_TOKEN_KW_TRUE - MP_TOKEN_KW_FALSE) == MP_BC_LOAD_CONST_TRUE);
+    if (tok == MP_TOKEN_ELLIPSIS) {
+        emit_write_bytecode_byte_obj(emit, 1, MP_BC_LOAD_CONST_OBJ, MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj)); // Ellipsis has no dedicated opcode
+    } else {
+        emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_CONST_FALSE + (tok - MP_TOKEN_KW_FALSE));
+    }
+}
+
+void mp_emit_bc_load_const_small_int(emit_t *emit, mp_int_t arg) { // push a small int; common values use a compact 1-byte opcode
+    if (-MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS <= arg
+        && arg < MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS) {
+        emit_write_bytecode_byte(emit, 1,
+            MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS + arg); // value encoded in the opcode itself
+    } else {
+        emit_write_bytecode_byte_int(emit, 1, MP_BC_LOAD_CONST_SMALL_INT, arg); // out-of-range: explicit operand
+    }
+}
+
+void mp_emit_bc_load_const_str(emit_t *emit, qstr qst) { // push an interned string constant
+    emit_write_bytecode_byte_qstr(emit, 1, MP_BC_LOAD_CONST_STRING, qst);
+}
+
+void mp_emit_bc_load_const_obj(emit_t *emit, mp_obj_t obj) { // push an arbitrary constant object
+    emit_write_bytecode_byte_obj(emit, 1, MP_BC_LOAD_CONST_OBJ, obj);
+}
+
+void mp_emit_bc_load_null(emit_t *emit) { // push a null placeholder (used e.g. by subscr/attr delete below)
+    emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_NULL);
+}
+
+void mp_emit_bc_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { // load a fast local or a closed-over (deref) variable
+    MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_LOAD_FAST_N); // opcode arithmetic relies on this layout
+    MP_STATIC_ASSERT(MP_BC_LOAD_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_LOAD_DEREF);
+    (void)qst;
+    if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) {
+        emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_FAST_MULTI + local_num); // compact 1-byte form for the first 16 locals
+    } else {
+        emit_write_bytecode_byte_uint(emit, 1, MP_BC_LOAD_FAST_N + kind, local_num);
+    }
+}
+
+void mp_emit_bc_load_global(emit_t *emit, qstr qst, int kind) { // load by name lookup (NAME) or directly from globals (GLOBAL)
+    MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_LOAD_NAME); // opcode arithmetic relies on this layout
+    MP_STATIC_ASSERT(MP_BC_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_LOAD_GLOBAL);
+    (void)qst;
+    emit_write_bytecode_byte_qstr(emit, 1, MP_BC_LOAD_NAME + kind, qst);
+}
+
+void mp_emit_bc_load_method(emit_t *emit, qstr qst, bool is_super) { // load a method (or super-method) for a following CALL_METHOD
+    int stack_adj = 1 - 2 * is_super; // net +1 for LOAD_METHOD, -1 for LOAD_SUPER_METHOD
+    emit_write_bytecode_byte_qstr(emit, stack_adj, is_super ? MP_BC_LOAD_SUPER_METHOD : MP_BC_LOAD_METHOD, qst);
+}
+
+void mp_emit_bc_load_build_class(emit_t *emit) { // push the builtin used to build class objects
+    emit_write_bytecode_byte(emit, 1, MP_BC_LOAD_BUILD_CLASS);
+}
+
+void mp_emit_bc_subscr(emit_t *emit, int kind) { // load/store/delete obj[index]
+    if (kind == MP_EMIT_SUBSCR_LOAD) {
+        emit_write_bytecode_byte(emit, -1, MP_BC_LOAD_SUBSCR);
+    } else {
+        if (kind == MP_EMIT_SUBSCR_DELETE) {
+            mp_emit_bc_load_null(emit); // delete is encoded as storing a null value
+            mp_emit_bc_rot_three(emit);
+        }
+        emit_write_bytecode_byte(emit, -3, MP_BC_STORE_SUBSCR);
+    }
+}
+
+void mp_emit_bc_attr(emit_t *emit, qstr qst, int kind) { // load/store/delete obj.attr
+    if (kind == MP_EMIT_ATTR_LOAD) {
+        emit_write_bytecode_byte_qstr(emit, 0, MP_BC_LOAD_ATTR, qst);
+    } else {
+        if (kind == MP_EMIT_ATTR_DELETE) {
+            mp_emit_bc_load_null(emit); // delete is encoded as storing a null value
+            mp_emit_bc_rot_two(emit);
+        }
+        emit_write_bytecode_byte_qstr(emit, -2, MP_BC_STORE_ATTR, qst);
+    }
+}
+
+void mp_emit_bc_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { // store TOS into a fast local or a closed-over (deref) variable
+    MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_STORE_FAST_N); // opcode arithmetic relies on this layout
+    MP_STATIC_ASSERT(MP_BC_STORE_FAST_N + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_STORE_DEREF);
+    (void)qst;
+    if (kind == MP_EMIT_IDOP_LOCAL_FAST && local_num <= 15) {
+        emit_write_bytecode_byte(emit, -1, MP_BC_STORE_FAST_MULTI + local_num); // compact 1-byte form for the first 16 locals
+    } else {
+        emit_write_bytecode_byte_uint(emit, -1, MP_BC_STORE_FAST_N + kind, local_num);
+    }
+}
+
+void mp_emit_bc_store_global(emit_t *emit, qstr qst, int kind) { // store TOS by name (NAME) or into globals (GLOBAL)
+    MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_STORE_NAME);
+    MP_STATIC_ASSERT(MP_BC_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_STORE_GLOBAL);
+    emit_write_bytecode_byte_qstr(emit, -1, MP_BC_STORE_NAME + kind, qst);
+}
+
+void mp_emit_bc_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) { // delete a fast local or deref variable
+    MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_FAST == MP_BC_DELETE_FAST);
+    MP_STATIC_ASSERT(MP_BC_DELETE_FAST + MP_EMIT_IDOP_LOCAL_DEREF == MP_BC_DELETE_DEREF);
+    (void)qst;
+    emit_write_bytecode_byte_uint(emit, 0, MP_BC_DELETE_FAST + kind, local_num);
+}
+
+void mp_emit_bc_delete_global(emit_t *emit, qstr qst, int kind) { // delete a name/global binding
+    MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_BC_DELETE_NAME);
+    MP_STATIC_ASSERT(MP_BC_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_BC_DELETE_GLOBAL);
+    emit_write_bytecode_byte_qstr(emit, 0, MP_BC_DELETE_NAME + kind, qst);
+}
+
+void mp_emit_bc_dup_top(emit_t *emit) { // duplicate TOS
+    emit_write_bytecode_byte(emit, 1, MP_BC_DUP_TOP);
+}
+
+void mp_emit_bc_dup_top_two(emit_t *emit) { // duplicate the top two stack items
+    emit_write_bytecode_byte(emit, 2, MP_BC_DUP_TOP_TWO);
+}
+
+void mp_emit_bc_pop_top(emit_t *emit) { // discard TOS
+    emit_write_bytecode_byte(emit, -1, MP_BC_POP_TOP);
+}
+
+void mp_emit_bc_rot_two(emit_t *emit) { // swap the top two stack items
+    emit_write_bytecode_byte(emit, 0, MP_BC_ROT_TWO);
+}
+
+void mp_emit_bc_rot_three(emit_t *emit) { // rotate the top three stack items
+    emit_write_bytecode_byte(emit, 0, MP_BC_ROT_THREE);
+}
+
+void mp_emit_bc_jump(emit_t *emit, mp_uint_t label) { // unconditional jump to label
+    emit_write_bytecode_byte_signed_label(emit, 0, MP_BC_JUMP, label);
+}
+
+void mp_emit_bc_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) { // pop TOS; jump if it matches cond
+    if (cond) {
+        emit_write_bytecode_byte_signed_label(emit, -1, MP_BC_POP_JUMP_IF_TRUE, label);
+    } else {
+        emit_write_bytecode_byte_signed_label(emit, -1, MP_BC_POP_JUMP_IF_FALSE, label);
+    }
+}
+
+void mp_emit_bc_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) { // short-circuit and/or: jump keeping TOS, otherwise pop it
+    if (cond) {
+        emit_write_bytecode_byte_signed_label(emit, -1, MP_BC_JUMP_IF_TRUE_OR_POP, label);
+    } else {
+        emit_write_bytecode_byte_signed_label(emit, -1, MP_BC_JUMP_IF_FALSE_OR_POP, label);
+    }
+}
+
+void mp_emit_bc_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) { // break/continue: jump that may unwind exception handlers
+    if (except_depth == 0) {
+        if (label & MP_EMIT_BREAK_FROM_FOR) {
+            // need to pop the iterator if we are breaking out of a for loop
+            emit_write_bytecode_raw_byte(emit, MP_BC_POP_TOP);
+            // also pop the iter_buf
+            for (size_t i = 0; i < MP_OBJ_ITER_BUF_NSLOTS - 1; ++i) {
+                emit_write_bytecode_raw_byte(emit, MP_BC_POP_TOP);
+            }
+        }
+        emit_write_bytecode_byte_signed_label(emit, 0, MP_BC_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR); // mask off the break-from-for flag bit
+    } else {
+        emit_write_bytecode_byte_signed_label(emit, 0, MP_BC_UNWIND_JUMP, label & ~MP_EMIT_BREAK_FROM_FOR);
+        emit_write_bytecode_raw_byte(emit, ((label & MP_EMIT_BREAK_FROM_FOR) ? 0x80 : 0) | except_depth); // high bit of extra byte: breaking out of a for loop
+    }
+}
+
+void mp_emit_bc_setup_block(emit_t *emit, mp_uint_t label, int kind) { // begin a with/except/finally protected block
+    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_WITH == MP_BC_SETUP_WITH); // opcode arithmetic relies on this layout
+    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_EXCEPT == MP_BC_SETUP_EXCEPT);
+    MP_STATIC_ASSERT(MP_BC_SETUP_WITH + MP_EMIT_SETUP_BLOCK_FINALLY == MP_BC_SETUP_FINALLY);
+    // The SETUP_WITH opcode pops ctx_mgr from the top of the stack
+    // and then pushes 3 entries: __exit__, ctx_mgr, as_value.
+    int stack_adj = kind == MP_EMIT_SETUP_BLOCK_WITH ? 2 : 0;
+    emit_write_bytecode_byte_unsigned_label(emit, stack_adj, MP_BC_SETUP_WITH + kind, label);
+}
+
+void mp_emit_bc_with_cleanup(emit_t *emit, mp_uint_t label) { // emit the cleanup epilogue of a with-statement
+    mp_emit_bc_load_const_tok(emit, MP_TOKEN_KW_NONE); // "no exception" marker consumed by WITH_CLEANUP
+    mp_emit_bc_label_assign(emit, label);
+    // The +2 is to ensure we have enough stack space to call the __exit__ method
+    emit_write_bytecode_byte(emit, 2, MP_BC_WITH_CLEANUP);
+    // Cancel the +2 above, plus the +2 from mp_emit_bc_setup_block(MP_EMIT_SETUP_BLOCK_WITH)
+    mp_emit_bc_adjust_stack_size(emit, -4);
+}
+
+void mp_emit_bc_end_finally(emit_t *emit) { // terminate a finally block; pops one value (inspected by the VM)
+    emit_write_bytecode_byte(emit, -1, MP_BC_END_FINALLY);
+}
+
+void mp_emit_bc_get_iter(emit_t *emit, bool use_stack) { // convert TOS to an iterator; optionally reserve an on-stack iter_buf
+    int stack_adj = use_stack ? MP_OBJ_ITER_BUF_NSLOTS - 1 : 0;
+    emit_write_bytecode_byte(emit, stack_adj, use_stack ? MP_BC_GET_ITER_STACK : MP_BC_GET_ITER);
+}
+
+void mp_emit_bc_for_iter(emit_t *emit, mp_uint_t label) { // push the next item, or jump to label when the iterator is exhausted
+    emit_write_bytecode_byte_unsigned_label(emit, 1, MP_BC_FOR_ITER, label);
+}
+
+void mp_emit_bc_for_iter_end(emit_t *emit) { // after the loop: account for the iterator slots removed on loop exit
+    mp_emit_bc_adjust_stack_size(emit, -MP_OBJ_ITER_BUF_NSLOTS);
+}
+
+void mp_emit_bc_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) { // pop the current exception state and jump
+    (void)within_exc_handler; // not needed by the bytecode backend
+    emit_write_bytecode_byte_unsigned_label(emit, 0, MP_BC_POP_EXCEPT_JUMP, label);
+}
+
+void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op) { // op index is folded into the opcode
+    emit_write_bytecode_byte(emit, 0, MP_BC_UNARY_OP_MULTI + op);
+}
+
+void mp_emit_bc_binary_op(emit_t *emit, mp_binary_op_t op) { // NOT_IN / IS_NOT are synthesized as IN / IS followed by unary NOT
+    bool invert = false;
+    if (op == MP_BINARY_OP_NOT_IN) {
+        invert = true;
+        op = MP_BINARY_OP_IN;
+    } else if (op == MP_BINARY_OP_IS_NOT) {
+        invert = true;
+        op = MP_BINARY_OP_IS;
+    }
+    emit_write_bytecode_byte(emit, -1, MP_BC_BINARY_OP_MULTI + op);
+    if (invert) {
+        emit_write_bytecode_byte(emit, 0, MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NOT);
+    }
+}
+
+void mp_emit_bc_build(emit_t *emit, mp_uint_t n_args, int kind) { // build a tuple/list/map/set/slice from stack items
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_BC_BUILD_TUPLE); // opcode arithmetic relies on this layout
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_BC_BUILD_LIST);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_BC_BUILD_MAP);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_BC_BUILD_SET);
+    MP_STATIC_ASSERT(MP_BC_BUILD_TUPLE + MP_EMIT_BUILD_SLICE == MP_BC_BUILD_SLICE);
+    int stack_adj = kind == MP_EMIT_BUILD_MAP ? 1 : 1 - n_args; // BUILD_MAP consumes nothing; entries are added via STORE_MAP
+    emit_write_bytecode_byte_uint(emit, stack_adj, MP_BC_BUILD_TUPLE + kind, n_args);
+}
+
+void mp_emit_bc_store_map(emit_t *emit) { // pop a key/value pair and store it into the map beneath them
+    emit_write_bytecode_byte(emit, -2, MP_BC_STORE_MAP);
+}
+
+void mp_emit_bc_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t collection_stack_index) {
+    // Emit MP_BC_STORE_COMP for a list/dict/set comprehension.
+    // t encodes the collection type in the low 2 bits of the opcode argument
+    // (0=list, 1=dict, 2=set); n is the number of extra stack items consumed
+    // beyond the first (a dict store pops 2 items: see stack_adj below).
+    int t;
+    int n;
+    if (kind == SCOPE_LIST_COMP) {
+        n = 0;
+        t = 0;
+    } else if (!MICROPY_PY_BUILTINS_SET || kind == SCOPE_DICT_COMP) {
+        n = 1;
+        t = 1;
+    } else {
+        // Remaining case: set comprehension (only reachable when
+        // MICROPY_PY_BUILTINS_SET is enabled).  A plain else guarantees t/n
+        // are always initialized, avoiding -Wmaybe-uninitialized warnings.
+        n = 0;
+        t = 2;
+    }
+    // the lower 2 bits of the opcode argument indicate the collection type
+    emit_write_bytecode_byte_uint(emit, -1 - n, MP_BC_STORE_COMP, ((collection_stack_index + n) << 2) | t);
+}
+
+void mp_emit_bc_unpack_sequence(emit_t *emit, mp_uint_t n_args) { // unpack a sequence into exactly n_args values
+    emit_write_bytecode_byte_uint(emit, -1 + n_args, MP_BC_UNPACK_SEQUENCE, n_args);
+}
+
+void mp_emit_bc_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) { // starred unpack: n_left values, the starred remainder, n_right values
+    emit_write_bytecode_byte_uint(emit, -1 + n_left + n_right + 1, MP_BC_UNPACK_EX, n_left | (n_right << 8)); // both counts packed into one operand
+}
+
+void mp_emit_bc_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) { // create a function object from a compiled scope
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        emit_write_bytecode_byte_raw_code(emit, 1, MP_BC_MAKE_FUNCTION, scope->raw_code);
+    } else {
+        emit_write_bytecode_byte_raw_code(emit, -1, MP_BC_MAKE_FUNCTION_DEFARGS, scope->raw_code); // net -1: consumes the default-arg objects
+    }
+}
+
+void mp_emit_bc_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) { // create a closure, consuming n_closed_over cells from the stack
+    if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+        int stack_adj = -n_closed_over + 1;
+        emit_write_bytecode_byte_raw_code(emit, stack_adj, MP_BC_MAKE_CLOSURE, scope->raw_code);
+        emit_write_bytecode_raw_byte(emit, n_closed_over); // cell count follows the opcode as a raw byte
+    } else {
+        assert(n_closed_over <= 255); // must fit in the raw byte below
+        int stack_adj = -2 - (mp_int_t)n_closed_over + 1; // the extra -2 accounts for the default-arg objects
+        emit_write_bytecode_byte_raw_code(emit, stack_adj, MP_BC_MAKE_CLOSURE_DEFARGS, scope->raw_code);
+        emit_write_bytecode_raw_byte(emit, n_closed_over);
+    }
+}
+
+STATIC void emit_bc_call_function_method_helper(emit_t *emit, int stack_adj, mp_uint_t bytecode_base, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) { // shared encoder for CALL_FUNCTION/CALL_METHOD and their VAR_KW variants
+    if (star_flags) {
+        stack_adj -= (int)n_positional + 2 * (int)n_keyword + 2; // extra -2 for the star-arg objects consumed by the VAR_KW form
+        emit_write_bytecode_byte_uint(emit, stack_adj, bytecode_base + 1, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
+    } else {
+        stack_adj -= (int)n_positional + 2 * (int)n_keyword; // each keyword arg occupies two stack slots
+        emit_write_bytecode_byte_uint(emit, stack_adj, bytecode_base, (n_keyword << 8) | n_positional); // TODO make it 2 separate uints?
+    }
+}
+
+void mp_emit_bc_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) { // call: pops args + callee, pushes result
+    emit_bc_call_function_method_helper(emit, 0, MP_BC_CALL_FUNCTION, n_positional, n_keyword, star_flags);
+}
+
+void mp_emit_bc_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) { // method call: extra -1 for the self slot pushed by LOAD_METHOD
+    emit_bc_call_function_method_helper(emit, -1, MP_BC_CALL_METHOD, n_positional, n_keyword, star_flags);
+}
+
+void mp_emit_bc_return_value(emit_t *emit) { // emit RETURN_VALUE and remember it was the most recent opcode
+    emit_write_bytecode_byte(emit, -1, MP_BC_RETURN_VALUE);
+    emit->last_emit_was_return_value = true;
+}
+
+void mp_emit_bc_raise_varargs(emit_t *emit, mp_uint_t n_args) {
+    // Emit RAISE_LAST / RAISE_OBJ / RAISE_FROM for a raise statement with
+    // 0, 1 or 2 arguments; the static asserts guarantee the three opcodes
+    // are consecutive so they can be selected by indexing.
+    MP_STATIC_ASSERT(MP_BC_RAISE_LAST + 1 == MP_BC_RAISE_OBJ);
+    MP_STATIC_ASSERT(MP_BC_RAISE_LAST + 2 == MP_BC_RAISE_FROM);
+    assert(n_args <= 2);
+    // Cast before negating: negating the unsigned mp_uint_t directly would
+    // wrap around before conversion to the signed stack-adjust parameter.
+    emit_write_bytecode_byte(emit, -(mp_int_t)n_args, MP_BC_RAISE_LAST + n_args);
+}
+
+void mp_emit_bc_yield(emit_t *emit, int kind) { // kind: 0 = YIELD_VALUE, 1 = YIELD_FROM (static assert guarantees adjacency)
+    MP_STATIC_ASSERT(MP_BC_YIELD_VALUE + 1 == MP_BC_YIELD_FROM);
+    emit_write_bytecode_byte(emit, -kind, MP_BC_YIELD_VALUE + kind);
+    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR; // any yield makes the enclosing scope a generator
+}
+
+void mp_emit_bc_start_except_handler(emit_t *emit) { // reserve stack space at the entry of an exception handler
+    mp_emit_bc_adjust_stack_size(emit, 4); // stack adjust for the exception instance, +3 for possible UNWIND_JUMP state
+}
+
+void mp_emit_bc_end_except_handler(emit_t *emit) { // undo the handler reservation (the exception instance slot remains accounted)
+    mp_emit_bc_adjust_stack_size(emit, -3); // stack adjust
+}
+
+#if MICROPY_EMIT_NATIVE
+const emit_method_table_t emit_bc_method_table = { // full backend vtable, needed when the native emitter can also be selected
+    #if MICROPY_DYNAMIC_COMPILER
+    NULL, // slots used only by other emitters -- TODO confirm against emit.h
+    NULL,
+    #endif
+
+    mp_emit_bc_start_pass,
+    mp_emit_bc_end_pass,
+    mp_emit_bc_last_emit_was_return_value,
+    mp_emit_bc_adjust_stack_size,
+    mp_emit_bc_set_source_line,
+
+    { // load id-ops: local then global
+    mp_emit_bc_load_local,
+    mp_emit_bc_load_global,
+    },
+    { // store id-ops
+    mp_emit_bc_store_local,
+    mp_emit_bc_store_global,
+    },
+    { // delete id-ops
+    mp_emit_bc_delete_local,
+    mp_emit_bc_delete_global,
+    },
+
+    mp_emit_bc_label_assign,
+    mp_emit_bc_import,
+    mp_emit_bc_load_const_tok,
+    mp_emit_bc_load_const_small_int,
+    mp_emit_bc_load_const_str,
+    mp_emit_bc_load_const_obj,
+    mp_emit_bc_load_null,
+    mp_emit_bc_load_method,
+    mp_emit_bc_load_build_class,
+    mp_emit_bc_subscr,
+    mp_emit_bc_attr,
+    mp_emit_bc_dup_top,
+    mp_emit_bc_dup_top_two,
+    mp_emit_bc_pop_top,
+    mp_emit_bc_rot_two,
+    mp_emit_bc_rot_three,
+    mp_emit_bc_jump,
+    mp_emit_bc_pop_jump_if,
+    mp_emit_bc_jump_if_or_pop,
+    mp_emit_bc_unwind_jump,
+    mp_emit_bc_setup_block,
+    mp_emit_bc_with_cleanup,
+    mp_emit_bc_end_finally,
+    mp_emit_bc_get_iter,
+    mp_emit_bc_for_iter,
+    mp_emit_bc_for_iter_end,
+    mp_emit_bc_pop_except_jump,
+    mp_emit_bc_unary_op,
+    mp_emit_bc_binary_op,
+    mp_emit_bc_build,
+    mp_emit_bc_store_map,
+    mp_emit_bc_store_comp,
+    mp_emit_bc_unpack_sequence,
+    mp_emit_bc_unpack_ex,
+    mp_emit_bc_make_function,
+    mp_emit_bc_make_closure,
+    mp_emit_bc_call_function,
+    mp_emit_bc_call_method,
+    mp_emit_bc_return_value,
+    mp_emit_bc_raise_varargs,
+    mp_emit_bc_yield,
+
+    mp_emit_bc_start_except_handler,
+    mp_emit_bc_end_except_handler,
+};
+#else
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_load_id_ops = { // bytecode-only build: just the id-op dispatch tables are needed
+    mp_emit_bc_load_local,
+    mp_emit_bc_load_global,
+};
+
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_store_id_ops = {
+    mp_emit_bc_store_local,
+    mp_emit_bc_store_global,
+};
+
+const mp_emit_method_table_id_ops_t mp_emit_bc_method_table_delete_id_ops = {
+    mp_emit_bc_delete_local,
+    mp_emit_bc_delete_global,
+};
+#endif
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/emitcommon.c b/circuitpython/py/emitcommon.c
new file mode 100644
index 0000000..177418c
--- /dev/null
+++ b/circuitpython/py/emitcommon.c
@@ -0,0 +1,61 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "py/emit.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+void mp_emit_common_get_id_for_modification(scope_t *scope, qstr qst) { // ensure qst exists in scope; in function-like scopes an implicit global becomes a local when assigned
+    // name adding/lookup
+    id_info_t *id = scope_find_or_add_id(scope, qst, ID_INFO_KIND_GLOBAL_IMPLICIT);
+    if (SCOPE_IS_FUNC_LIKE(scope->kind) && id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
+        // rebind as a local variable
+        id->kind = ID_INFO_KIND_LOCAL;
+    }
+}
+
+void mp_emit_common_id_op(emit_t *emit, const mp_emit_method_table_id_ops_t *emit_method_table, scope_t *scope, qstr qst) {
+    // Dispatch an identifier access for `qst` to the appropriate backend
+    // entry, based on how the identifier is classified in this scope.
+    // Assumes the pass is greater than 1, i.e. all identifiers have
+    // already been defined in the scope.
+
+    id_info_t *id = scope_find(scope, qst);
+    assert(id != NULL);
+
+    switch (id->kind) {
+        case ID_INFO_KIND_GLOBAL_IMPLICIT:
+            emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_NAME);
+            break;
+        case ID_INFO_KIND_GLOBAL_EXPLICIT:
+            emit_method_table->global(emit, qst, MP_EMIT_IDOP_GLOBAL_GLOBAL);
+            break;
+        case ID_INFO_KIND_LOCAL:
+            emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_FAST);
+            break;
+        default:
+            // closed-over variables (cells and free variables) use DEREF access
+            assert(id->kind == ID_INFO_KIND_CELL || id->kind == ID_INFO_KIND_FREE);
+            emit_method_table->local(emit, qst, id->local_num, MP_EMIT_IDOP_LOCAL_DEREF);
+            break;
+    }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/emitglue.c b/circuitpython/py/emitglue.c
new file mode 100644
index 0000000..e701d08
--- /dev/null
+++ b/circuitpython/py/emitglue.c
@@ -0,0 +1,232 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// This code glues the code emitters to the runtime.
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emitglue.h"
+#include "py/runtime0.h"
+#include "py/bc.h"
+#include "py/profile.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define WRITE_CODE (1)
+#define DEBUG_printf DEBUG_printf
+#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#define DEBUG_OP_printf(...) (void)0
+#endif
+
+#if MICROPY_DEBUG_PRINTERS
+mp_uint_t mp_verbose_flag = 0;
+#endif
+
+mp_raw_code_t *mp_emit_glue_new_raw_code(void) { // allocate a zeroed raw-code container, marked reserved until code is assigned
+    mp_raw_code_t *rc = m_new0(mp_raw_code_t, 1);
+    rc->kind = MP_CODE_RESERVED;
+    #if MICROPY_PY_SYS_SETTRACE
+    rc->line_of_definition = 0; // redundant after m_new0, but documents the field
+    #endif
+    return rc;
+}
+
+void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code,
+    #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS
+    size_t len,
+    #endif
+    const mp_uint_t *const_table,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t n_obj, uint16_t n_raw_code,
+    #endif
+    mp_uint_t scope_flags) { // attach finished bytecode and its constant table to a raw-code container
+
+    rc->kind = MP_CODE_BYTECODE;
+    rc->scope_flags = scope_flags;
+    rc->fun_data = code;
+    rc->const_table = const_table;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    rc->fun_data_len = len;
+    rc->n_obj = n_obj;
+    rc->n_raw_code = n_raw_code;
+    #endif
+
+    #if MICROPY_PY_SYS_SETTRACE
+    mp_bytecode_prelude_t *prelude = &rc->prelude;
+    mp_prof_extract_prelude(code, prelude); // cache prelude info for profiling/tracing
+    #endif
+
+    #ifdef DEBUG_PRINT
+    #if !MICROPY_DEBUG_PRINTERS
+    const size_t len = 0; // no len parameter exists in this configuration
+    #endif
+    DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " flags=%x\n", code, len, (uint)scope_flags);
+    #endif
+    #if MICROPY_DEBUG_PRINTERS
+    if (mp_verbose_flag >= 2) {
+        mp_bytecode_print(&mp_plat_print, rc, code, len, const_table);
+    }
+    #endif
+}
+
+#if MICROPY_EMIT_MACHINE_CODE
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, const mp_uint_t *const_table,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t prelude_offset,
+    uint16_t n_obj, uint16_t n_raw_code,
+    uint16_t n_qstr, mp_qstr_link_entry_t *qstr_link,
+    #endif
+    mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig) { // attach generated machine code to a raw-code container, flushing caches where required
+
+    assert(kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER || kind == MP_CODE_NATIVE_ASM);
+
+    // Some architectures require flushing/invalidation of the I/D caches,
+    // so that the generated native code which was created in data RAM will
+    // be available for execution from instruction RAM.
+    #if MICROPY_EMIT_THUMB || MICROPY_EMIT_INLINE_THUMB
+    #if defined(__ICACHE_PRESENT) && __ICACHE_PRESENT == 1
+    // Flush D-cache, so the code emitted is stored in RAM.
+    MP_HAL_CLEAN_DCACHE(fun_data, fun_len);
+    // Invalidate I-cache, so the newly-created code is reloaded from RAM.
+    SCB_InvalidateICache();
+    #endif
+    #elif MICROPY_EMIT_ARM
+    #if (defined(__linux__) && defined(__GNUC__)) || __ARM_ARCH == 7
+    __builtin___clear_cache(fun_data, (uint8_t *)fun_data + fun_len);
+    #elif defined(__arm__)
+    // Flush I-cache and D-cache.
+    asm volatile (
+        "0:"
+        "mrc p15, 0, r15, c7, c10, 3\n" // test and clean D-cache
+        "bne 0b\n"
+        "mov r0, #0\n"
+        "mcr p15, 0, r0, c7, c7, 0\n" // invalidate I-cache and D-cache
+        : : : "r0", "cc");
+    #endif
+    #endif
+
+    rc->kind = kind;
+    rc->scope_flags = scope_flags;
+    rc->n_pos_args = n_pos_args;
+    rc->fun_data = fun_data;
+    rc->const_table = const_table;
+    rc->type_sig = type_sig;
+
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    rc->fun_data_len = fun_len;
+    rc->prelude_offset = prelude_offset;
+    rc->n_obj = n_obj;
+    rc->n_raw_code = n_raw_code;
+    rc->n_qstr = n_qstr;
+    rc->qstr_link = qstr_link;
+    #endif
+
+    #ifdef DEBUG_PRINT
+    DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " flags=%x\n", kind, fun_data, fun_len, n_pos_args, (uint)scope_flags);
+    for (mp_uint_t i = 0; i < fun_len; i++) { // hex dump of the emitted machine code
+        if (i > 0 && i % 16 == 0) {
+            DEBUG_printf("\n");
+        }
+        DEBUG_printf(" %02x", ((byte *)fun_data)[i]);
+    }
+    DEBUG_printf("\n");
+
+    #ifdef WRITE_CODE
+    FILE *fp_write_code = fopen("out-code", "wb"); // dump raw machine code for offline disassembly
+    fwrite(fun_data, fun_len, 1, fp_write_code);
+    fclose(fp_write_code);
+    #endif
+    #else
+    (void)fun_len;
+    #endif
+}
+#endif
+
+mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args) { // instantiate a callable object from compiled raw code
+    DEBUG_OP_printf("make_function_from_raw_code %p\n", rc);
+    assert(rc != NULL);
+
+    // def_args must be MP_OBJ_NULL or a tuple
+    assert(def_args == MP_OBJ_NULL || mp_obj_is_type(def_args, &mp_type_tuple));
+
+    // def_kw_args must be MP_OBJ_NULL or a dict
+    assert(def_kw_args == MP_OBJ_NULL || mp_obj_is_type(def_kw_args, &mp_type_dict));
+
+    // make the function, depending on the raw code kind
+    mp_obj_t fun;
+    switch (rc->kind) {
+        #if MICROPY_EMIT_NATIVE
+        case MP_CODE_NATIVE_PY:
+        case MP_CODE_NATIVE_VIPER:
+            fun = mp_obj_new_fun_native(def_args, def_kw_args, rc->fun_data, rc->const_table);
+            break;
+        #endif
+        #if MICROPY_EMIT_INLINE_ASM
+        case MP_CODE_NATIVE_ASM:
+            fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->fun_data, rc->type_sig);
+            break;
+        #endif
+        default:
+            // rc->kind should always be set and BYTECODE is the only remaining case
+            assert(rc->kind == MP_CODE_BYTECODE);
+            fun = mp_obj_new_fun_bc(def_args, def_kw_args, rc->fun_data, rc->const_table);
+            break;
+    }
+
+    // check for generator functions and if so wrap in generator object
+    if ((rc->scope_flags & MP_SCOPE_FLAG_GENERATOR) != 0) {
+        fun = mp_obj_new_gen_wrap(fun, (rc->scope_flags & MP_SCOPE_FLAG_ASYNC) != 0);
+    }
+
+    #if MICROPY_PY_SYS_SETTRACE
+    if (rc->kind == MP_CODE_BYTECODE) {
+        mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)MP_OBJ_TO_PTR(fun);
+        self_fun->rc = rc; // keep the raw code reachable for trace support -- TODO confirm against profile.c
+    }
+    #endif
+
+    return fun;
+}
+
+mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args) { // create a closure; bit 8 of n_closed_over signals that args[0..1] hold default pos/kw args
+    DEBUG_OP_printf("make_closure_from_raw_code %p " UINT_FMT " %p\n", rc, n_closed_over, args);
+    // make function object
+    mp_obj_t ffun;
+    if (n_closed_over & 0x100) {
+        // default positional and keyword args given
+        ffun = mp_make_function_from_raw_code(rc, args[0], args[1]);
+    } else {
+        // default positional and keyword args not given
+        ffun = mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
+    }
+    // wrap function in closure object
+    return mp_obj_new_closure(ffun, n_closed_over & 0xff, args + ((n_closed_over >> 7) & 2)); // (n_closed_over >> 7) & 2 is 2 when bit 8 is set: skip the two default-arg slots
+}
diff --git a/circuitpython/py/emitglue.h b/circuitpython/py/emitglue.h
new file mode 100644
index 0000000..cbe69d6
--- /dev/null
+++ b/circuitpython/py/emitglue.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_EMITGLUE_H
+#define MICROPY_INCLUDED_PY_EMITGLUE_H
+
+#include "py/obj.h"
+#include "py/bc.h"
+
+// These variables and functions glue the code emitters to the runtime.
+
+// These must fit in 8 bits; see scope.h
+enum { // per-function emitter selection options -- TODO confirm usage in scope.h
+    MP_EMIT_OPT_NONE,
+    MP_EMIT_OPT_BYTECODE,
+    MP_EMIT_OPT_NATIVE_PYTHON,
+    MP_EMIT_OPT_VIPER,
+    MP_EMIT_OPT_ASM,
+};
+
+typedef enum { // what kind of code a raw-code container's fun_data holds
+    MP_CODE_UNUSED,
+    MP_CODE_RESERVED, // allocated but not yet assigned (see mp_emit_glue_new_raw_code)
+    MP_CODE_BYTECODE,
+    MP_CODE_NATIVE_PY,
+    MP_CODE_NATIVE_VIPER,
+    MP_CODE_NATIVE_ASM,
+} mp_raw_code_kind_t;
+
+typedef struct _mp_qstr_link_entry_t { // (offset, qstr) pair used to relink qstrs in saved native code -- TODO confirm
+    uint16_t off;
+    uint16_t qst;
+} mp_qstr_link_entry_t;
+
+typedef struct _mp_raw_code_t { // container binding emitted code, its constant table and metadata
+    uint8_t kind; // of type mp_raw_code_kind_t
+    uint8_t scope_flags;
+    uint16_t n_pos_args;
+    const void *fun_data; // the bytecode or machine code itself
+    const mp_uint_t *const_table;
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    size_t fun_data_len;
+    uint16_t n_obj;
+    uint16_t n_raw_code;
+    #if MICROPY_PY_SYS_SETTRACE
+    mp_bytecode_prelude_t prelude;
+    // line_of_definition is a Python source line where the raw_code was
+    // created e.g. MP_BC_MAKE_FUNCTION. This is different from lineno info
+    // stored in prelude, which provides line number for first statement of
+    // a function. Required to properly implement "call" trace event.
+    mp_uint_t line_of_definition;
+    #endif
+    #if MICROPY_EMIT_MACHINE_CODE
+    uint16_t prelude_offset;
+    uint16_t n_qstr;
+    mp_qstr_link_entry_t *qstr_link;
+    #endif
+    #endif
+    #if MICROPY_EMIT_MACHINE_CODE
+    mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc
+    #endif
+} mp_raw_code_t;
+
+mp_raw_code_t *mp_emit_glue_new_raw_code(void); // allocate an empty (reserved) raw-code container
+
+void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, const byte *code, // attach finished bytecode to rc
+    #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS
+    size_t len,
+    #endif
+    const mp_uint_t *const_table,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t n_obj, uint16_t n_raw_code,
+    #endif
+    mp_uint_t scope_flags);
+
+void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void *fun_data, mp_uint_t fun_len, // attach generated machine code to rc
+    const mp_uint_t *const_table,
+    #if MICROPY_PERSISTENT_CODE_SAVE
+    uint16_t prelude_offset,
+    uint16_t n_obj, uint16_t n_raw_code,
+    uint16_t n_qstr, mp_qstr_link_entry_t *qstr_link,
+    #endif
+    mp_uint_t n_pos_args, mp_uint_t scope_flags, mp_uint_t type_sig);
+
+mp_obj_t mp_make_function_from_raw_code(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args); // raw code -> callable object
+mp_obj_t mp_make_closure_from_raw_code(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args); // raw code -> closure object
+
+#endif // MICROPY_INCLUDED_PY_EMITGLUE_H
diff --git a/circuitpython/py/emitinlinethumb.c b/circuitpython/py/emitinlinethumb.c
new file mode 100644
index 0000000..65168a5
--- /dev/null
+++ b/circuitpython/py/emitinlinethumb.c
@@ -0,0 +1,847 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/asmthumb.h"
+
+#if MICROPY_EMIT_INLINE_THUMB
+
+typedef enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) PN_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ PN_const_object, // special node for a constant, generic Python object
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) PN_##rule,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+} pn_kind_t;
+
+struct _emit_inline_asm_t {
+ asm_thumb_t as;
+ uint16_t pass;
+ mp_obj_t *error_slot;
+ mp_uint_t max_num_labels;
+ qstr *label_lookup;
+};
+
+STATIC void emit_inline_thumb_error_msg(emit_inline_asm_t *emit, const compressed_string_t *msg) {
+ *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
+STATIC void emit_inline_thumb_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
+ *emit->error_slot = exc;
+}
+
+emit_inline_asm_t *emit_inline_thumb_new(mp_uint_t max_num_labels) {
+ emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+ memset(&emit->as, 0, sizeof(emit->as));
+ mp_asm_base_init(&emit->as.base, max_num_labels);
+ emit->max_num_labels = max_num_labels;
+ emit->label_lookup = m_new(qstr, max_num_labels);
+ return emit;
+}
+
+void emit_inline_thumb_free(emit_inline_asm_t *emit) {
+ m_del(qstr, emit->label_lookup, emit->max_num_labels);
+ mp_asm_base_deinit(&emit->as.base, false);
+ m_del_obj(emit_inline_asm_t, emit);
+}
+
+STATIC void emit_inline_thumb_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) {
+ emit->pass = pass;
+ emit->error_slot = error_slot;
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
+ }
+ mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+ asm_thumb_entry(&emit->as, 0);
+}
+
+STATIC void emit_inline_thumb_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+ asm_thumb_exit(&emit->as);
+ asm_thumb_end_pass(&emit->as);
+}
+
+STATIC mp_uint_t emit_inline_thumb_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
+ if (n_params > 4) {
+ emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Thumb assembly"));
+ return 0;
+ }
+ for (mp_uint_t i = 0; i < n_params; i++) {
+ if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
+ emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
+ return 0;
+ }
+ const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
+ if (!(strlen(p) == 2 && p[0] == 'r' && (mp_uint_t)p[1] == '0' + i)) {
+ emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence r0 to r3"));
+ return 0;
+ }
+ }
+ return n_params;
+}
+
+STATIC bool emit_inline_thumb_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+ assert(label_num < emit->max_num_labels);
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // check for duplicate label on first pass
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_id) {
+ return false;
+ }
+ }
+ }
+ emit->label_lookup[label_num] = label_id;
+ mp_asm_base_label_assign(&emit->as.base, label_num);
+ return true;
+}
+
+typedef struct _reg_name_t { byte reg;
+ byte name[3];
+} reg_name_t;
+STATIC const reg_name_t reg_name_table[] = {
+ {0, "r0\0"},
+ {1, "r1\0"},
+ {2, "r2\0"},
+ {3, "r3\0"},
+ {4, "r4\0"},
+ {5, "r5\0"},
+ {6, "r6\0"},
+ {7, "r7\0"},
+ {8, "r8\0"},
+ {9, "r9\0"},
+ {10, "r10"},
+ {11, "r11"},
+ {12, "r12"},
+ {13, "r13"},
+ {14, "r14"},
+ {15, "r15"},
+ {10, "sl\0"},
+ {11, "fp\0"},
+ {13, "sp\0"},
+ {14, "lr\0"},
+ {15, "pc\0"},
+};
+
+#define MAX_SPECIAL_REGISTER_NAME_LENGTH 7
+typedef struct _special_reg_name_t { byte reg;
+ char name[MAX_SPECIAL_REGISTER_NAME_LENGTH + 1];
+} special_reg_name_t;
+STATIC const special_reg_name_t special_reg_name_table[] = {
+ {5, "IPSR"},
+ {17, "BASEPRI"},
+};
+
+// Return an empty string on error, so callers can attempt to parse the
+// result without first having to check whether it really was a string.
+STATIC const char *get_arg_str(mp_parse_node_t pn) {
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+ return qstr_str(qst);
+ } else {
+ return "";
+ }
+}
+
+STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_uint_t max_reg) {
+ const char *reg_str = get_arg_str(pn);
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+ const reg_name_t *r = &reg_name_table[i];
+ if (reg_str[0] == r->name[0]
+ && reg_str[1] == r->name[1]
+ && reg_str[2] == r->name[2]
+ && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+ if (r->reg > max_reg) {
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects at most r%d"), op, max_reg));
+ return 0;
+ } else {
+ return r->reg;
+ }
+ }
+ }
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects a register"), op));
+ return 0;
+}
+
+STATIC mp_uint_t get_arg_special_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ const char *reg_str = get_arg_str(pn);
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(special_reg_name_table); i++) {
+ const special_reg_name_t *r = &special_reg_name_table[i];
+ if (strcmp(r->name, reg_str) == 0) {
+ return r->reg;
+ }
+ }
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects a special register"), op));
+ return 0;
+}
+
+#if MICROPY_EMIT_INLINE_THUMB_FLOAT
+STATIC mp_uint_t get_arg_vfpreg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ const char *reg_str = get_arg_str(pn);
+ if (reg_str[0] == 's' && reg_str[1] != '\0') {
+ mp_uint_t regno = 0;
+ for (++reg_str; *reg_str; ++reg_str) {
+ mp_uint_t v = *reg_str;
+ if (!('0' <= v && v <= '9')) {
+ goto malformed;
+ }
+ regno = 10 * regno + v - '0';
+ }
+ if (regno > 31) {
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects at most r%d"), op, 31));
+ return 0;
+ } else {
+ return regno;
+ }
+ }
+malformed:
+ emit_inline_thumb_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects an FPU register"), op));
+ return 0;
+}
+#endif
+
+STATIC mp_uint_t get_arg_reglist(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ // a register list looks like {r0, r1, r2} and is parsed as a Python set
+
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_brace)) {
+ goto bad_arg;
+ }
+
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ assert(MP_PARSE_NODE_STRUCT_NUM_NODES(pns) == 1); // should always be
+ pn = pns->nodes[0];
+
+ mp_uint_t reglist = 0;
+
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ // set with one element
+ reglist |= 1 << get_arg_reg(emit, op, pn, 15);
+ } else if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+ pns = (mp_parse_node_struct_t *)pn;
+ if (MP_PARSE_NODE_STRUCT_KIND(pns) == PN_dictorsetmaker) {
+ assert(MP_PARSE_NODE_IS_STRUCT(pns->nodes[1])); // should succeed
+ mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pns->nodes[1];
+ if (MP_PARSE_NODE_STRUCT_KIND(pns1) == PN_dictorsetmaker_list) {
+ // set with multiple elements
+
+ // get first element of set (we rely on get_arg_reg to catch syntax errors)
+ reglist |= 1 << get_arg_reg(emit, op, pns->nodes[0], 15);
+
+ // get tail elements (2nd, 3rd, ...)
+ mp_parse_node_t *nodes;
+ int n = mp_parse_node_extract_list(&pns1->nodes[0], PN_dictorsetmaker_list2, &nodes);
+
+ // process rest of elements
+ for (int i = 0; i < n; i++) {
+ reglist |= 1 << get_arg_reg(emit, op, nodes[i], 15);
+ }
+ } else {
+ goto bad_arg;
+ }
+ } else {
+ goto bad_arg;
+ }
+ } else {
+ goto bad_arg;
+ }
+
+ return reglist;
+
+bad_arg:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects {r0, r1, ...}"), op));
+ return 0;
+}
+
+STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, uint32_t fit_mask) {
+ mp_obj_t o;
+ if (!mp_parse_node_get_int_maybe(pn, &o)) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
+ return 0;
+ }
+ uint32_t i = mp_obj_get_int_truncated(o);
+ if ((i & (~fit_mask)) != 0) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer 0x%x doesn't fit in mask 0x%x"), op, i, fit_mask));
+ return 0;
+ }
+ return i;
+}
+
+STATIC bool get_arg_addr(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, mp_parse_node_t *pn_base, mp_parse_node_t *pn_offset) {
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pn, PN_atom_bracket)) {
+ goto bad_arg;
+ }
+ mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+ if (!MP_PARSE_NODE_IS_STRUCT_KIND(pns->nodes[0], PN_testlist_comp)) {
+ goto bad_arg;
+ }
+ pns = (mp_parse_node_struct_t *)pns->nodes[0];
+ if (MP_PARSE_NODE_STRUCT_NUM_NODES(pns) != 2) {
+ goto bad_arg;
+ }
+
+ *pn_base = pns->nodes[0];
+ *pn_offset = pns->nodes[1];
+ return true;
+
+bad_arg:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an address of the form [a, b]"), op));
+ return false;
+}
+
+STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ if (!MP_PARSE_NODE_IS_ID(pn)) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
+ return 0;
+ }
+ qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_qstr) {
+ return i;
+ }
+ }
+ // only need to have the labels on the last pass
+ if (emit->pass == MP_PASS_EMIT) {
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
+ }
+ return 0;
+}
+
+typedef struct _cc_name_t { byte cc;
+ byte name[2];
+} cc_name_t;
+STATIC const cc_name_t cc_name_table[] = {
+ { ASM_THUMB_CC_EQ, "eq" },
+ { ASM_THUMB_CC_NE, "ne" },
+ { ASM_THUMB_CC_CS, "cs" },
+ { ASM_THUMB_CC_CC, "cc" },
+ { ASM_THUMB_CC_MI, "mi" },
+ { ASM_THUMB_CC_PL, "pl" },
+ { ASM_THUMB_CC_VS, "vs" },
+ { ASM_THUMB_CC_VC, "vc" },
+ { ASM_THUMB_CC_HI, "hi" },
+ { ASM_THUMB_CC_LS, "ls" },
+ { ASM_THUMB_CC_GE, "ge" },
+ { ASM_THUMB_CC_LT, "lt" },
+ { ASM_THUMB_CC_GT, "gt" },
+ { ASM_THUMB_CC_LE, "le" },
+};
+
+typedef struct _format_4_op_t { byte op;
+ char name[3];
+} format_4_op_t;
+#define X(x) (((x) >> 4) & 0xff) // only need 1 byte to distinguish these ops
+STATIC const format_4_op_t format_4_op_table[] = {
+ { X(ASM_THUMB_FORMAT_4_EOR), "eor" },
+ { X(ASM_THUMB_FORMAT_4_LSL), "lsl" },
+ { X(ASM_THUMB_FORMAT_4_LSR), "lsr" },
+ { X(ASM_THUMB_FORMAT_4_ASR), "asr" },
+ { X(ASM_THUMB_FORMAT_4_ADC), "adc" },
+ { X(ASM_THUMB_FORMAT_4_SBC), "sbc" },
+ { X(ASM_THUMB_FORMAT_4_ROR), "ror" },
+ { X(ASM_THUMB_FORMAT_4_TST), "tst" },
+ { X(ASM_THUMB_FORMAT_4_NEG), "neg" },
+ { X(ASM_THUMB_FORMAT_4_CMP), "cmp" },
+ { X(ASM_THUMB_FORMAT_4_CMN), "cmn" },
+ { X(ASM_THUMB_FORMAT_4_ORR), "orr" },
+ { X(ASM_THUMB_FORMAT_4_MUL), "mul" },
+ { X(ASM_THUMB_FORMAT_4_BIC), "bic" },
+ { X(ASM_THUMB_FORMAT_4_MVN), "mvn" },
+};
+#undef X
+
+// name is actually a qstr, which should fit in 16 bits
+typedef struct _format_9_10_op_t { uint16_t op;
+ uint16_t name;
+} format_9_10_op_t;
+#define X(x) (x)
+STATIC const format_9_10_op_t format_9_10_op_table[] = {
+ { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_ldr },
+ { X(ASM_THUMB_FORMAT_9_LDR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_ldrb },
+ { X(ASM_THUMB_FORMAT_10_LDRH), MP_QSTR_ldrh },
+ { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_WORD_TRANSFER), MP_QSTR_str },
+ { X(ASM_THUMB_FORMAT_9_STR | ASM_THUMB_FORMAT_9_BYTE_TRANSFER), MP_QSTR_strb },
+ { X(ASM_THUMB_FORMAT_10_STRH), MP_QSTR_strh },
+};
+#undef X
+
+#if MICROPY_EMIT_INLINE_THUMB_FLOAT
+// actual opcodes are: 0xee00 | op.hi_nibble, 0x0a00 | op.lo_nibble
+typedef struct _format_vfp_op_t { byte op;
+ char name[3];
+} format_vfp_op_t;
+STATIC const format_vfp_op_t format_vfp_op_table[] = {
+ { 0x30, "add" },
+ { 0x34, "sub" },
+ { 0x20, "mul" },
+ { 0x80, "div" },
+};
+#endif
+
+// shorthand alias for whether we allow ARMv7-M instructions
+#define ARMV7M MICROPY_EMIT_INLINE_THUMB_ARMV7M
+
+STATIC void emit_inline_thumb_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
+ // TODO perhaps make two tables:
+ // one_args =
+ // "b", LAB, asm_thumb_b_n,
+ // "bgt", LAB, asm_thumb_bgt_n,
+ // two_args =
+ // "movs", RLO, I8, asm_thumb_movs_reg_i8
+ // "movw", REG, REG, asm_thumb_movw_reg_i16
+ // three_args =
+ // "subs", RLO, RLO, I3, asm_thumb_subs_reg_reg_i3
+
+ size_t op_len;
+ const char *op_str = (const char *)qstr_data(op, &op_len);
+
+ #if MICROPY_EMIT_INLINE_THUMB_FLOAT
+ if (op_str[0] == 'v') {
+ // floating point operations
+ if (n_args == 2) {
+ mp_uint_t op_code = 0x0ac0, op_code_hi;
+ if (op == MP_QSTR_vcmp) {
+ op_code_hi = 0xeeb4;
+ op_vfp_twoargs:;
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ asm_thumb_op32(&emit->as,
+ op_code_hi | ((vd & 1) << 6),
+ op_code | ((vd & 0x1e) << 11) | ((vm & 1) << 5) | (vm & 0x1e) >> 1);
+ } else if (op == MP_QSTR_vsqrt) {
+ op_code_hi = 0xeeb1;
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vneg) {
+ op_code_hi = 0xeeb1;
+ op_code = 0x0a40;
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vcvt_f32_s32) {
+ op_code_hi = 0xeeb8; // int to float
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vcvt_s32_f32) {
+ op_code_hi = 0xeebd; // float to int
+ goto op_vfp_twoargs;
+ } else if (op == MP_QSTR_vmrs) {
+ mp_uint_t reg_dest;
+ const char *reg_str0 = get_arg_str(pn_args[0]);
+ if (strcmp(reg_str0, "APSR_nzcv") == 0) {
+ reg_dest = 15;
+ } else {
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ }
+ const char *reg_str1 = get_arg_str(pn_args[1]);
+ if (strcmp(reg_str1, "FPSCR") == 0) {
+ // FP status to ARM reg
+ asm_thumb_op32(&emit->as, 0xeef1, 0x0a10 | (reg_dest << 12));
+ } else {
+ goto unknown_op;
+ }
+ } else if (op == MP_QSTR_vmov) {
+ op_code_hi = 0xee00;
+ mp_uint_t r_arm, vm;
+ const char *reg_str = get_arg_str(pn_args[0]);
+ if (reg_str[0] == 'r') {
+ r_arm = get_arg_reg(emit, op_str, pn_args[0], 15);
+ vm = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ op_code_hi |= 0x10;
+ } else {
+ vm = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ r_arm = get_arg_reg(emit, op_str, pn_args[1], 15);
+ }
+ asm_thumb_op32(&emit->as,
+ op_code_hi | ((vm & 0x1e) >> 1),
+ 0x0a10 | (r_arm << 12) | ((vm & 1) << 7));
+ } else if (op == MP_QSTR_vldr) {
+ op_code_hi = 0xed90;
+ op_vldr_vstr:;
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+ mp_uint_t i8;
+ i8 = get_arg_i(emit, op_str, pn_offset, 0x3fc) >> 2;
+ asm_thumb_op32(&emit->as,
+ op_code_hi | rlo_base | ((vd & 1) << 6),
+ 0x0a00 | ((vd & 0x1e) << 11) | i8);
+ }
+ } else if (op == MP_QSTR_vstr) {
+ op_code_hi = 0xed80;
+ goto op_vldr_vstr;
+ } else {
+ goto unknown_op;
+ }
+ } else if (n_args == 3) {
+ // search table for arith ops
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_vfp_op_table); i++) {
+ if (strncmp(op_str + 1, format_vfp_op_table[i].name, 3) == 0 && op_str[4] == '\0') {
+ mp_uint_t op_code_hi = 0xee00 | (format_vfp_op_table[i].op & 0xf0);
+ mp_uint_t op_code = 0x0a00 | ((format_vfp_op_table[i].op & 0x0f) << 4);
+ mp_uint_t vd = get_arg_vfpreg(emit, op_str, pn_args[0]);
+ mp_uint_t vn = get_arg_vfpreg(emit, op_str, pn_args[1]);
+ mp_uint_t vm = get_arg_vfpreg(emit, op_str, pn_args[2]);
+ asm_thumb_op32(&emit->as,
+ op_code_hi | ((vd & 1) << 6) | (vn >> 1),
+ op_code | (vm >> 1) | ((vm & 1) << 5) | ((vd & 0x1e) << 11) | ((vn & 1) << 7));
+ return;
+ }
+ }
+ goto unknown_op;
+ } else {
+ goto unknown_op;
+ }
+ return;
+ }
+ #endif
+
+ if (n_args == 0) {
+ if (op == MP_QSTR_nop) {
+ asm_thumb_op16(&emit->as, ASM_THUMB_OP_NOP);
+ } else if (op == MP_QSTR_wfi) {
+ asm_thumb_op16(&emit->as, ASM_THUMB_OP_WFI);
+ } else {
+ goto unknown_op;
+ }
+
+ } else if (n_args == 1) {
+ if (op == MP_QSTR_b) {
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ if (!asm_thumb_b_n_label(&emit->as, label_num)) {
+ goto branch_not_in_range;
+ }
+ } else if (op == MP_QSTR_bl) {
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ if (!asm_thumb_bl_label(&emit->as, label_num)) {
+ goto branch_not_in_range;
+ }
+ } else if (op == MP_QSTR_bx) {
+ mp_uint_t r = get_arg_reg(emit, op_str, pn_args[0], 15);
+ asm_thumb_op16(&emit->as, 0x4700 | (r << 3));
+ } else if (op_str[0] == 'b' && (op_len == 3
+ || (op_len == 5 && op_str[3] == '_'
+ && (op_str[4] == 'n' || (ARMV7M && op_str[4] == 'w'))))) {
+ mp_uint_t cc = -1;
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+ if (op_str[1] == cc_name_table[i].name[0] && op_str[2] == cc_name_table[i].name[1]) {
+ cc = cc_name_table[i].cc;
+ }
+ }
+ if (cc == (mp_uint_t)-1) {
+ goto unknown_op;
+ }
+ int label_num = get_arg_label(emit, op_str, pn_args[0]);
+ bool wide = op_len == 5 && op_str[4] == 'w';
+ if (wide && !ARMV7M) {
+ goto unknown_op;
+ }
+ if (!asm_thumb_bcc_nw_label(&emit->as, cc, label_num, wide)) {
+ goto branch_not_in_range;
+ }
+ } else if (ARMV7M && op_str[0] == 'i' && op_str[1] == 't') {
+ const char *arg_str = get_arg_str(pn_args[0]);
+ mp_uint_t cc = -1;
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(cc_name_table); i++) {
+ if (arg_str[0] == cc_name_table[i].name[0]
+ && arg_str[1] == cc_name_table[i].name[1]
+ && arg_str[2] == '\0') {
+ cc = cc_name_table[i].cc;
+ break;
+ }
+ }
+ if (cc == (mp_uint_t)-1) {
+ goto unknown_op;
+ }
+ const char *os = op_str + 2;
+ while (*os != '\0') {
+ os++;
+ }
+ if (os > op_str + 5) {
+ goto unknown_op;
+ }
+ mp_uint_t it_mask = 8;
+ while (--os >= op_str + 2) {
+ it_mask >>= 1;
+ if (*os == 't') {
+ it_mask |= (cc & 1) << 3;
+ } else if (*os == 'e') {
+ it_mask |= ((~cc) & 1) << 3;
+ } else {
+ goto unknown_op;
+ }
+ }
+ asm_thumb_it_cc(&emit->as, cc, it_mask);
+ } else if (op == MP_QSTR_cpsid) {
+ // TODO check pn_args[0] == i
+ asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSID_I);
+ } else if (op == MP_QSTR_cpsie) {
+ // TODO check pn_args[0] == i
+ asm_thumb_op16(&emit->as, ASM_THUMB_OP_CPSIE_I);
+ } else if (op == MP_QSTR_push) {
+ mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+ if ((reglist & 0xff00) == 0) {
+ asm_thumb_op16(&emit->as, 0xb400 | reglist);
+ } else {
+ if (!ARMV7M) {
+ goto unknown_op;
+ }
+ asm_thumb_op32(&emit->as, 0xe92d, reglist);
+ }
+ } else if (op == MP_QSTR_pop) {
+ mp_uint_t reglist = get_arg_reglist(emit, op_str, pn_args[0]);
+ if ((reglist & 0xff00) == 0) {
+ asm_thumb_op16(&emit->as, 0xbc00 | reglist);
+ } else {
+ if (!ARMV7M) {
+ goto unknown_op;
+ }
+ asm_thumb_op32(&emit->as, 0xe8bd, reglist);
+ }
+ } else {
+ goto unknown_op;
+ }
+
+ } else if (n_args == 2) {
+ if (MP_PARSE_NODE_IS_ID(pn_args[1])) {
+ // second arg is a register (or should be)
+ mp_uint_t op_code, op_code_hi;
+ if (op == MP_QSTR_mov) {
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_uint_t reg_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+ asm_thumb_mov_reg_reg(&emit->as, reg_dest, reg_src);
+ } else if (ARMV7M && op == MP_QSTR_clz) {
+ op_code_hi = 0xfab0;
+ op_code = 0xf080;
+ mp_uint_t rd, rm;
+ op_clz_rbit:
+ rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+ rm = get_arg_reg(emit, op_str, pn_args[1], 15);
+ asm_thumb_op32(&emit->as, op_code_hi | rm, op_code | (rd << 8) | rm);
+ } else if (ARMV7M && op == MP_QSTR_rbit) {
+ op_code_hi = 0xfa90;
+ op_code = 0xf0a0;
+ goto op_clz_rbit;
+ } else if (ARMV7M && op == MP_QSTR_mrs) {
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 12);
+ mp_uint_t reg_src = get_arg_special_reg(emit, op_str, pn_args[1]);
+ asm_thumb_op32(&emit->as, 0xf3ef, 0x8000 | (reg_dest << 8) | reg_src);
+ } else {
+ if (op == MP_QSTR_and_) {
+ op_code = ASM_THUMB_FORMAT_4_AND;
+ mp_uint_t reg_dest, reg_src;
+ op_format_4:
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ reg_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ asm_thumb_format_4(&emit->as, op_code, reg_dest, reg_src);
+ return;
+ }
+ // search table for ALU ops
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_4_op_table); i++) {
+ if (strncmp(op_str, format_4_op_table[i].name, 3) == 0 && op_str[3] == '\0') {
+ op_code = 0x4000 | (format_4_op_table[i].op << 4);
+ goto op_format_4;
+ }
+ }
+ goto unknown_op;
+ }
+ } else {
+ // second arg is not a register
+ mp_uint_t op_code;
+ if (op == MP_QSTR_mov) {
+ op_code = ASM_THUMB_FORMAT_3_MOV;
+ mp_uint_t rlo_dest, i8_src;
+ op_format_3:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ i8_src = get_arg_i(emit, op_str, pn_args[1], 0xff);
+ asm_thumb_format_3(&emit->as, op_code, rlo_dest, i8_src);
+ } else if (op == MP_QSTR_cmp) {
+ op_code = ASM_THUMB_FORMAT_3_CMP;
+ goto op_format_3;
+ } else if (op == MP_QSTR_add) {
+ op_code = ASM_THUMB_FORMAT_3_ADD;
+ goto op_format_3;
+ } else if (op == MP_QSTR_sub) {
+ op_code = ASM_THUMB_FORMAT_3_SUB;
+ goto op_format_3;
+ #if ARMV7M
+ } else if (op == MP_QSTR_movw) {
+ op_code = ASM_THUMB_OP_MOVW;
+ mp_uint_t reg_dest;
+ op_movw_movt:
+ reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ int i_src = get_arg_i(emit, op_str, pn_args[1], 0xffff);
+ asm_thumb_mov_reg_i16(&emit->as, op_code, reg_dest, i_src);
+ } else if (op == MP_QSTR_movt) {
+ op_code = ASM_THUMB_OP_MOVT;
+ goto op_movw_movt;
+ } else if (op == MP_QSTR_movwt) {
+ // this is a convenience instruction
+ mp_uint_t reg_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ uint32_t i_src = get_arg_i(emit, op_str, pn_args[1], 0xffffffff);
+ asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVW, reg_dest, i_src & 0xffff);
+ asm_thumb_mov_reg_i16(&emit->as, ASM_THUMB_OP_MOVT, reg_dest, (i_src >> 16) & 0xffff);
+ } else if (op == MP_QSTR_ldrex) {
+ mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+ mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+ asm_thumb_op32(&emit->as, 0xe850 | r_base, 0x0f00 | (r_dest << 12) | i8);
+ }
+ #endif
+ } else {
+ // search table for ldr/str instructions
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(format_9_10_op_table); i++) {
+ if (op == format_9_10_op_table[i].name) {
+ op_code = format_9_10_op_table[i].op;
+ mp_parse_node_t pn_base, pn_offset;
+ mp_uint_t rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ if (get_arg_addr(emit, op_str, pn_args[1], &pn_base, &pn_offset)) {
+ mp_uint_t rlo_base = get_arg_reg(emit, op_str, pn_base, 7);
+ mp_uint_t i5;
+ if (op_code & ASM_THUMB_FORMAT_9_BYTE_TRANSFER) {
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x1f);
+ } else if (op_code & ASM_THUMB_FORMAT_10_STRH) { // also catches LDRH
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x3e) >> 1;
+ } else {
+ i5 = get_arg_i(emit, op_str, pn_offset, 0x7c) >> 2;
+ }
+ asm_thumb_format_9_10(&emit->as, op_code, rlo_dest, rlo_base, i5);
+ return;
+ }
+ break;
+ }
+ }
+ goto unknown_op;
+ }
+ }
+
+ } else if (n_args == 3) {
+ mp_uint_t op_code;
+ if (op == MP_QSTR_lsl) {
+ op_code = ASM_THUMB_FORMAT_1_LSL;
+ mp_uint_t rlo_dest, rlo_src, i5;
+ op_format_1:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ i5 = get_arg_i(emit, op_str, pn_args[2], 0x1f);
+ asm_thumb_format_1(&emit->as, op_code, rlo_dest, rlo_src, i5);
+ } else if (op == MP_QSTR_lsr) {
+ op_code = ASM_THUMB_FORMAT_1_LSR;
+ goto op_format_1;
+ } else if (op == MP_QSTR_asr) {
+ op_code = ASM_THUMB_FORMAT_1_ASR;
+ goto op_format_1;
+ } else if (op == MP_QSTR_add) {
+ op_code = ASM_THUMB_FORMAT_2_ADD;
+ mp_uint_t rlo_dest, rlo_src;
+ op_format_2:
+ rlo_dest = get_arg_reg(emit, op_str, pn_args[0], 7);
+ rlo_src = get_arg_reg(emit, op_str, pn_args[1], 7);
+ int src_b;
+ if (MP_PARSE_NODE_IS_ID(pn_args[2])) {
+ op_code |= ASM_THUMB_FORMAT_2_REG_OPERAND;
+ src_b = get_arg_reg(emit, op_str, pn_args[2], 7);
+ } else {
+ op_code |= ASM_THUMB_FORMAT_2_IMM_OPERAND;
+ src_b = get_arg_i(emit, op_str, pn_args[2], 0x7);
+ }
+ asm_thumb_format_2(&emit->as, op_code, rlo_dest, rlo_src, src_b);
+ } else if (ARMV7M && op == MP_QSTR_sdiv) {
+ op_code = 0xfb90; // sdiv high part
+ mp_uint_t rd, rn, rm;
+ op_sdiv_udiv:
+ rd = get_arg_reg(emit, op_str, pn_args[0], 15);
+ rn = get_arg_reg(emit, op_str, pn_args[1], 15);
+ rm = get_arg_reg(emit, op_str, pn_args[2], 15);
+ asm_thumb_op32(&emit->as, op_code | rn, 0xf0f0 | (rd << 8) | rm);
+ } else if (ARMV7M && op == MP_QSTR_udiv) {
+ op_code = 0xfbb0; // udiv high part
+ goto op_sdiv_udiv;
+ } else if (op == MP_QSTR_sub) {
+ op_code = ASM_THUMB_FORMAT_2_SUB;
+ goto op_format_2;
+ } else if (ARMV7M && op == MP_QSTR_strex) {
+ mp_uint_t r_dest = get_arg_reg(emit, op_str, pn_args[0], 15);
+ mp_uint_t r_src = get_arg_reg(emit, op_str, pn_args[1], 15);
+ mp_parse_node_t pn_base, pn_offset;
+ if (get_arg_addr(emit, op_str, pn_args[2], &pn_base, &pn_offset)) {
+ mp_uint_t r_base = get_arg_reg(emit, op_str, pn_base, 15);
+ mp_uint_t i8 = get_arg_i(emit, op_str, pn_offset, 0xff) >> 2;
+ asm_thumb_op32(&emit->as, 0xe840 | r_base, (r_src << 12) | (r_dest << 8) | i8);
+ }
+ } else {
+ goto unknown_op;
+ }
+
+ } else {
+ goto unknown_op;
+ }
+
+ return;
+
+unknown_op:
+ emit_inline_thumb_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Thumb instruction '%s' with %d arguments"), op_str, n_args));
+ return;
+
+branch_not_in_range:
+ emit_inline_thumb_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
+ return;
+}
+
+const emit_inline_asm_method_table_t emit_inline_thumb_method_table = {
+ #if MICROPY_DYNAMIC_COMPILER
+ emit_inline_thumb_new,
+ emit_inline_thumb_free,
+ #endif
+
+ emit_inline_thumb_start_pass,
+ emit_inline_thumb_end_pass,
+ emit_inline_thumb_count_params,
+ emit_inline_thumb_label,
+ emit_inline_thumb_op,
+};
+
+#endif // MICROPY_EMIT_INLINE_THUMB
diff --git a/circuitpython/py/emitinlinextensa.c b/circuitpython/py/emitinlinextensa.c
new file mode 100644
index 0000000..17b9116
--- /dev/null
+++ b/circuitpython/py/emitinlinextensa.c
@@ -0,0 +1,352 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/asmxtensa.h"
+
+#if MICROPY_EMIT_INLINE_XTENSA
+
// State for the Xtensa inline assembler.
struct _emit_inline_asm_t {
    asm_xtensa_t as;          // underlying Xtensa instruction assembler
    uint16_t pass;            // current compiler pass (a pass_kind_t value)
    mp_obj_t *error_slot;     // where an exception object is stored on error
    mp_uint_t max_num_labels; // capacity of label_lookup
    qstr *label_lookup;       // maps label number -> label name qstr
};
+
+STATIC void emit_inline_xtensa_error_msg(emit_inline_asm_t *emit, const compressed_string_t *msg) {
+ *emit->error_slot = mp_obj_new_exception_msg(&mp_type_SyntaxError, msg);
+}
+
// Store a pre-built exception object in the compiler's error slot; it is
// raised once the current compiler pass finishes.
STATIC void emit_inline_xtensa_error_exc(emit_inline_asm_t *emit, mp_obj_t exc) {
    *emit->error_slot = exc;
}
+
+emit_inline_asm_t *emit_inline_xtensa_new(mp_uint_t max_num_labels) {
+ emit_inline_asm_t *emit = m_new_obj(emit_inline_asm_t);
+ memset(&emit->as, 0, sizeof(emit->as));
+ mp_asm_base_init(&emit->as.base, max_num_labels);
+ emit->max_num_labels = max_num_labels;
+ emit->label_lookup = m_new(qstr, max_num_labels);
+ return emit;
+}
+
+void emit_inline_xtensa_free(emit_inline_asm_t *emit) {
+ m_del(qstr, emit->label_lookup, emit->max_num_labels);
+ mp_asm_base_deinit(&emit->as.base, false);
+ m_del_obj(emit_inline_asm_t, emit);
+}
+
// Begin a compiler pass over an inline-assembly function.
// On the code-size pass the label table is cleared so that stale labels
// from a previous function cannot match; the underlying assembler is then
// started and the function entry sequence is emitted.
STATIC void emit_inline_xtensa_start_pass(emit_inline_asm_t *emit, pass_kind_t pass, mp_obj_t *error_slot) {
    emit->pass = pass;
    emit->error_slot = error_slot;
    if (emit->pass == MP_PASS_CODE_SIZE) {
        // first pass for this function: reset the label table
        memset(emit->label_lookup, 0, emit->max_num_labels * sizeof(qstr));
    }
    mp_asm_base_start_pass(&emit->as.base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
    asm_xtensa_entry(&emit->as, 0);
}
+
+STATIC void emit_inline_xtensa_end_pass(emit_inline_asm_t *emit, mp_uint_t type_sig) {
+ asm_xtensa_exit(&emit->as);
+ asm_xtensa_end_pass(&emit->as);
+}
+
+STATIC mp_uint_t emit_inline_xtensa_count_params(emit_inline_asm_t *emit, mp_uint_t n_params, mp_parse_node_t *pn_params) {
+ if (n_params > 4) {
+ emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("can only have up to 4 parameters to Xtensa assembly"));
+ return 0;
+ }
+ for (mp_uint_t i = 0; i < n_params; i++) {
+ if (!MP_PARSE_NODE_IS_ID(pn_params[i])) {
+ emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
+ return 0;
+ }
+ const char *p = qstr_str(MP_PARSE_NODE_LEAF_ARG(pn_params[i]));
+ if (!(strlen(p) == 2 && p[0] == 'a' && (mp_uint_t)p[1] == '2' + i)) {
+ emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("parameters must be registers in sequence a2 to a5"));
+ return 0;
+ }
+ }
+ return n_params;
+}
+
+STATIC bool emit_inline_xtensa_label(emit_inline_asm_t *emit, mp_uint_t label_num, qstr label_id) {
+ assert(label_num < emit->max_num_labels);
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // check for duplicate label on first pass
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_id) {
+ return false;
+ }
+ }
+ }
+ emit->label_lookup[label_num] = label_id;
+ mp_asm_base_label_assign(&emit->as.base, label_num);
+ return true;
+}
+
// Mapping from register name to register number.  The name field is
// exactly 3 bytes: 2-character names carry an explicit NUL pad, while the
// 3-character names (a10..a15) have NO terminator, so lookups must compare
// at most 3 bytes and then check the argument's length (see get_arg_reg).
typedef struct _reg_name_t { byte reg;
    byte name[3];
} reg_name_t;
STATIC const reg_name_t reg_name_table[] = {
    {0, "a0\0"},
    {1, "a1\0"},
    {2, "a2\0"},
    {3, "a3\0"},
    {4, "a4\0"},
    {5, "a5\0"},
    {6, "a6\0"},
    {7, "a7\0"},
    {8, "a8\0"},
    {9, "a9\0"},
    {10, "a10"},
    {11, "a11"},
    {12, "a12"},
    {13, "a13"},
    {14, "a14"},
    {15, "a15"},
};
+
+// return empty string in case of error, so we can attempt to parse the string
+// without a special check if it was in fact a string
+STATIC const char *get_arg_str(mp_parse_node_t pn) {
+ if (MP_PARSE_NODE_IS_ID(pn)) {
+ qstr qst = MP_PARSE_NODE_LEAF_ARG(pn);
+ return qstr_str(qst);
+ } else {
+ return "";
+ }
+}
+
+STATIC mp_uint_t get_arg_reg(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ const char *reg_str = get_arg_str(pn);
+ for (mp_uint_t i = 0; i < MP_ARRAY_SIZE(reg_name_table); i++) {
+ const reg_name_t *r = &reg_name_table[i];
+ if (reg_str[0] == r->name[0]
+ && reg_str[1] == r->name[1]
+ && reg_str[2] == r->name[2]
+ && (reg_str[2] == '\0' || reg_str[3] == '\0')) {
+ return r->reg;
+ }
+ }
+ emit_inline_xtensa_error_exc(emit,
+ mp_obj_new_exception_msg_varg(&mp_type_SyntaxError,
+ MP_ERROR_TEXT("'%s' expects a register"), op));
+ return 0;
+}
+
+STATIC uint32_t get_arg_i(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn, int min, int max) {
+ mp_obj_t o;
+ if (!mp_parse_node_get_int_maybe(pn, &o)) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects an integer"), op));
+ return 0;
+ }
+ uint32_t i = mp_obj_get_int_truncated(o);
+ if (min != max && ((int)i < min || (int)i > max)) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' integer %d isn't within range %d..%d"), op, i, min, max));
+ return 0;
+ }
+ return i;
+}
+
+STATIC int get_arg_label(emit_inline_asm_t *emit, const char *op, mp_parse_node_t pn) {
+ if (!MP_PARSE_NODE_IS_ID(pn)) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("'%s' expects a label"), op));
+ return 0;
+ }
+ qstr label_qstr = MP_PARSE_NODE_LEAF_ARG(pn);
+ for (uint i = 0; i < emit->max_num_labels; i++) {
+ if (emit->label_lookup[i] == label_qstr) {
+ return i;
+ }
+ }
+ // only need to have the labels on the last pass
+ if (emit->pass == MP_PASS_EMIT) {
+ emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("label '%q' not defined"), label_qstr));
+ }
+ return 0;
+}
+
// Instruction formats used by opcode_table_3arg below.
#define RRR (0)    // reg, reg, reg
#define RRI8 (1)   // reg, reg, 8-bit immediate
#define RRI8_B (2) // reg, reg, branch label

typedef struct _opcode_table_3arg_t {
    uint16_t name; // actually a qstr, which should fit in 16 bits
    uint8_t type;  // RRR/RRI8/RRI8_B; for RRI8 the upper nibble encodes the immediate range/shift
    uint8_t a0 : 4; // opcode encoding fields; for RRI8_B entries a0 is the condition code
    uint8_t a1 : 4;
} opcode_table_3arg_t;

// Lookup table for all 3-operand instructions, dispatched by qstr name.
STATIC const opcode_table_3arg_t opcode_table_3arg[] = {
    // arithmetic opcodes: reg, reg, reg
    {MP_QSTR_and_, RRR, 0, 1},
    {MP_QSTR_or_, RRR, 0, 2},
    {MP_QSTR_xor, RRR, 0, 3},
    {MP_QSTR_add, RRR, 0, 8},
    {MP_QSTR_sub, RRR, 0, 12},
    {MP_QSTR_mull, RRR, 2, 8},

    // load/store/addi opcodes: reg, reg, imm
    // upper nibble of type encodes the range of the immediate arg
    {MP_QSTR_l8ui, RRI8 | 0x10, 2, 0},
    {MP_QSTR_l16ui, RRI8 | 0x30, 2, 1},
    {MP_QSTR_l32i, RRI8 | 0x50, 2, 2},
    {MP_QSTR_s8i, RRI8 | 0x10, 2, 4},
    {MP_QSTR_s16i, RRI8 | 0x30, 2, 5},
    {MP_QSTR_s32i, RRI8 | 0x50, 2, 6},
    {MP_QSTR_l16si, RRI8 | 0x30, 2, 9},
    {MP_QSTR_addi, RRI8 | 0x00, 2, 12},

    // branch opcodes: reg, reg, label
    {MP_QSTR_ball, RRI8_B, ASM_XTENSA_CC_ALL, 0},
    {MP_QSTR_bany, RRI8_B, ASM_XTENSA_CC_ANY, 0},
    {MP_QSTR_bbc, RRI8_B, ASM_XTENSA_CC_BC, 0},
    {MP_QSTR_bbs, RRI8_B, ASM_XTENSA_CC_BS, 0},
    {MP_QSTR_beq, RRI8_B, ASM_XTENSA_CC_EQ, 0},
    {MP_QSTR_bge, RRI8_B, ASM_XTENSA_CC_GE, 0},
    {MP_QSTR_bgeu, RRI8_B, ASM_XTENSA_CC_GEU, 0},
    {MP_QSTR_blt, RRI8_B, ASM_XTENSA_CC_LT, 0},
    {MP_QSTR_bnall, RRI8_B, ASM_XTENSA_CC_NALL, 0},
    {MP_QSTR_bne, RRI8_B, ASM_XTENSA_CC_NE, 0},
    {MP_QSTR_bnone, RRI8_B, ASM_XTENSA_CC_NONE, 0},
};
+
// Assemble one parsed inline-assembler instruction.
// Dispatches first on argument count, then on the opcode qstr; 3-operand
// instructions are table-driven via opcode_table_3arg.  On any error an
// exception is stored in the error slot (argument parsers return 0 in
// that case, so assembly continues harmlessly for the rest of the pass).
STATIC void emit_inline_xtensa_op(emit_inline_asm_t *emit, qstr op, mp_uint_t n_args, mp_parse_node_t *pn_args) {
    size_t op_len;
    const char *op_str = (const char *)qstr_data(op, &op_len);

    if (n_args == 0) {
        if (op == MP_QSTR_ret_n) {
            asm_xtensa_op_ret_n(&emit->as);
        } else {
            goto unknown_op;
        }

    } else if (n_args == 1) {
        if (op == MP_QSTR_callx0) {
            uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
            asm_xtensa_op_callx0(&emit->as, r0);
        } else if (op == MP_QSTR_j) {
            int label = get_arg_label(emit, op_str, pn_args[0]);
            asm_xtensa_j_label(&emit->as, label);
        } else if (op == MP_QSTR_jx) {
            uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
            asm_xtensa_op_jx(&emit->as, r0);
        } else {
            goto unknown_op;
        }

    } else if (n_args == 2) {
        // all 2-operand instructions take a register as first operand
        uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
        if (op == MP_QSTR_beqz) {
            int label = get_arg_label(emit, op_str, pn_args[1]);
            asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_EQ, r0, label);
        } else if (op == MP_QSTR_bnez) {
            int label = get_arg_label(emit, op_str, pn_args[1]);
            asm_xtensa_bccz_reg_label(&emit->as, ASM_XTENSA_CCZ_NE, r0, label);
        } else if (op == MP_QSTR_mov || op == MP_QSTR_mov_n) {
            // we emit mov.n for both "mov" and "mov_n" opcodes
            uint r1 = get_arg_reg(emit, op_str, pn_args[1]);
            asm_xtensa_op_mov_n(&emit->as, r0, r1);
        } else if (op == MP_QSTR_movi) {
            // for convenience we emit l32r if the integer doesn't fit in movi
            uint32_t imm = get_arg_i(emit, op_str, pn_args[1], 0, 0);
            asm_xtensa_mov_reg_i32(&emit->as, r0, imm);
        } else {
            goto unknown_op;
        }

    } else if (n_args == 3) {
        // search table for 3 arg instructions
        for (uint i = 0; i < MP_ARRAY_SIZE(opcode_table_3arg); i++) {
            const opcode_table_3arg_t *o = &opcode_table_3arg[i];
            if (op == o->name) {
                uint r0 = get_arg_reg(emit, op_str, pn_args[0]);
                uint r1 = get_arg_reg(emit, op_str, pn_args[1]);
                if (o->type == RRR) {
                    uint r2 = get_arg_reg(emit, op_str, pn_args[2]);
                    asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRR(0, o->a0, o->a1, r0, r1, r2));
                } else if (o->type == RRI8_B) {
                    int label = get_arg_label(emit, op_str, pn_args[2]);
                    asm_xtensa_bcc_reg_reg_label(&emit->as, o->a0, r0, r1, label);
                } else {
                    // RRI8: the upper nibble of type selects the allowed
                    // immediate range and its scaling shift
                    int shift, min, max;
                    if ((o->type & 0xf0) == 0) {
                        shift = 0;
                        min = -128;
                        max = 127;
                    } else {
                        shift = (o->type & 0xf0) >> 5;
                        min = 0;
                        max = 0xff << shift;
                    }
                    uint32_t imm = get_arg_i(emit, op_str, pn_args[2], min, max);
                    asm_xtensa_op24(&emit->as, ASM_XTENSA_ENCODE_RRI8(o->a0, o->a1, r1, r0, (imm >> shift) & 0xff));
                }
                return;
            }
        }
        goto unknown_op;

    } else {
        goto unknown_op;
    }

    return;

unknown_op:
    emit_inline_xtensa_error_exc(emit, mp_obj_new_exception_msg_varg(&mp_type_SyntaxError, MP_ERROR_TEXT("unsupported Xtensa instruction '%s' with %d arguments"), op_str, n_args));
    return;

    /*
branch_not_in_range:
    emit_inline_xtensa_error_msg(emit, MP_ERROR_TEXT("branch not in range"));
    return;
    */
}
+
// Method table binding the Xtensa inline-assembler callbacks into the
// generic inline-assembler interface.  Entry order is fixed by
// emit_inline_asm_method_table_t and must not be changed.
const emit_inline_asm_method_table_t emit_inline_xtensa_method_table = {
    #if MICROPY_DYNAMIC_COMPILER
    // allocation/deallocation entries only exist for the dynamic compiler
    emit_inline_xtensa_new,
    emit_inline_xtensa_free,
    #endif

    emit_inline_xtensa_start_pass,
    emit_inline_xtensa_end_pass,
    emit_inline_xtensa_count_params,
    emit_inline_xtensa_label,
    emit_inline_xtensa_op,
};
+
+#endif // MICROPY_EMIT_INLINE_XTENSA
diff --git a/circuitpython/py/emitnarm.c b/circuitpython/py/emitnarm.c
new file mode 100644
index 0000000..8297ad6
--- /dev/null
+++ b/circuitpython/py/emitnarm.c
@@ -0,0 +1,20 @@
+// ARM specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_ARM
+
+// This is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#include "py/asmarm.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (3) // r4
+#define NLR_BUF_IDX_LOCAL_2 (4) // r5
+#define NLR_BUF_IDX_LOCAL_3 (5) // r6
+
+#define N_ARM (1)
+#define EXPORT_FUN(name) emit_native_arm_##name
+#include "py/emitnative.c"
+
+#endif
diff --git a/circuitpython/py/emitnative.c b/circuitpython/py/emitnative.c
new file mode 100644
index 0000000..5946fcd
--- /dev/null
+++ b/circuitpython/py/emitnative.c
@@ -0,0 +1,3045 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// Essentially normal Python has 1 type: Python objects
+// Viper has more than 1 type, and is just a more complicated (a superset of) Python.
+// If you declare everything in Viper as a Python object (ie omit type decls) then
+// it should in principle be exactly the same as Python native.
+// Having types means having more opcodes, like binary_op_nat_nat, binary_op_nat_obj etc.
+// In practice we won't have a VM but rather do this in asm which is actually very minimal.
+
+// Because it breaks strict Python equivalence it should be a completely separate
+// decorator. It breaks equivalence because overflow on integers wraps around.
+// It shouldn't break equivalence if you don't use the new types, but since the
+// type decls might be used in normal Python for other reasons, it's probably safest,
+// cleanest and clearest to make it a separate decorator.
+
+// Actually, it does break equivalence because integers default to native integers,
+// not Python objects.
+
+// for x in l[0:8]: can be compiled into a native loop if l has pointer type
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emit.h"
+#include "py/nativeglue.h"
+#include "py/objstr.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#ifndef N_X64
+#define N_X64 (0)
+#endif
+
+#ifndef N_X86
+#define N_X86 (0)
+#endif
+
+#ifndef N_THUMB
+#define N_THUMB (0)
+#endif
+
+#ifndef N_ARM
+#define N_ARM (0)
+#endif
+
+#ifndef N_XTENSA
+#define N_XTENSA (0)
+#endif
+
+#ifndef N_NLR_SETJMP
+#define N_NLR_SETJMP (0)
+#endif
+
+#ifndef N_PRELUDE_AS_BYTES_OBJ
+#define N_PRELUDE_AS_BYTES_OBJ (0)
+#endif
+
+// wrapper around everything in this file
+#if N_X64 || N_X86 || N_THUMB || N_ARM || N_XTENSA || N_XTENSAWIN
+
+// C stack layout for native functions:
+// 0: nlr_buf_t [optional]
+// emit->code_state_start: mp_code_state_t
+// emit->stack_start: Python object stack | emit->n_state
+// locals (reversed, L0 at end) |
+//
+// C stack layout for native generator functions:
+// 0=emit->stack_start: nlr_buf_t
+//
+// Then REG_GENERATOR_STATE points to:
+// 0=emit->code_state_start: mp_code_state_t
+// emit->stack_start: Python object stack | emit->n_state
+// locals (reversed, L0 at end) |
+//
+// C stack layout for viper functions:
+// 0: nlr_buf_t [optional]
+// emit->code_state_start: fun_obj, old_globals [optional]
+// emit->stack_start: Python object stack | emit->n_state
+// locals (reversed, L0 at end) |
+// (L0-L2 may be in regs instead)
+
+// Native emitter needs to know the following sizes and offsets of C structs (on the target):
+#if MICROPY_DYNAMIC_COMPILER
+#define SIZEOF_NLR_BUF (2 + mp_dynamic_compiler.nlr_buf_num_regs + 1) // the +1 is conservative in case MICROPY_ENABLE_PYSTACK enabled
+#else
+#define SIZEOF_NLR_BUF (sizeof(nlr_buf_t) / sizeof(uintptr_t))
+#endif
+#define SIZEOF_CODE_STATE (sizeof(mp_code_state_t) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_STATE (offsetof(mp_code_state_t, state) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_FUN_BC (offsetof(mp_code_state_t, fun_bc) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_IP (offsetof(mp_code_state_t, ip) / sizeof(uintptr_t))
+#define OFFSETOF_CODE_STATE_SP (offsetof(mp_code_state_t, sp) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_GLOBALS (offsetof(mp_obj_fun_bc_t, globals) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_BYTECODE (offsetof(mp_obj_fun_bc_t, bytecode) / sizeof(uintptr_t))
+#define OFFSETOF_OBJ_FUN_BC_CONST_TABLE (offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t))
+
+// If not already defined, set parent args to same as child call registers
+#ifndef REG_PARENT_RET
+#define REG_PARENT_RET REG_RET
+#define REG_PARENT_ARG_1 REG_ARG_1
+#define REG_PARENT_ARG_2 REG_ARG_2
+#define REG_PARENT_ARG_3 REG_ARG_3
+#define REG_PARENT_ARG_4 REG_ARG_4
+#endif
+
+// Word index of nlr_buf_t.ret_val
+#define NLR_BUF_IDX_RET_VAL (1)
+
+// Whether the viper function needs access to fun_obj
+#define NEED_FUN_OBJ(emit) ((emit)->scope->exc_stack_size > 0 \
+ || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_REFGLOBALS | MP_SCOPE_FLAG_HASCONSTS)))
+
+// Whether the native/viper function needs to be wrapped in an exception handler
+#define NEED_GLOBAL_EXC_HANDLER(emit) ((emit)->scope->exc_stack_size > 0 \
+ || ((emit)->scope->scope_flags & (MP_SCOPE_FLAG_GENERATOR | MP_SCOPE_FLAG_REFGLOBALS)))
+
+// Whether registers can be used to store locals (only true if there are no
+// exception handlers, because otherwise an nlr_jump will restore registers to
+// their state at the start of the function and updates to locals will be lost)
+#define CAN_USE_REGS_FOR_LOCALS(emit) ((emit)->scope->exc_stack_size == 0 && !(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR))
+
+// Indices within the local C stack for various variables
+#define LOCAL_IDX_EXC_VAL(emit) (NLR_BUF_IDX_RET_VAL)
+#define LOCAL_IDX_EXC_HANDLER_PC(emit) (NLR_BUF_IDX_LOCAL_1)
+#define LOCAL_IDX_EXC_HANDLER_UNWIND(emit) (NLR_BUF_IDX_LOCAL_2)
+#define LOCAL_IDX_RET_VAL(emit) (NLR_BUF_IDX_LOCAL_3)
+#define LOCAL_IDX_FUN_OBJ(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_FUN_BC)
+#define LOCAL_IDX_OLD_GLOBALS(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_IP)
+#define LOCAL_IDX_GEN_PC(emit) ((emit)->code_state_start + OFFSETOF_CODE_STATE_IP)
+#define LOCAL_IDX_LOCAL_VAR(emit, local_num) ((emit)->stack_start + (emit)->n_state - 1 - (local_num))
+
+#define REG_GENERATOR_STATE (REG_LOCAL_3)
+
+#define EMIT_NATIVE_VIPER_TYPE_ERROR(emit, ...) do { \
+ *emit->error_slot = mp_obj_new_exception_msg_varg(&mp_type_ViperTypeError, __VA_ARGS__); \
+} while (0)
+
// How a value on the emulated Python stack is currently represented
// (see stack_info_t.data for the payload of the latter two kinds).
typedef enum {
    STACK_VALUE, // not held in a register or as an immediate
    STACK_REG,   // held in a machine register
    STACK_IMM,   // a compile-time immediate constant
} stack_info_kind_t;

// these enums must be distinct and the bottom 4 bits
// must correspond to the correct MP_NATIVE_TYPE_xxx value
typedef enum {
    VTYPE_PYOBJ = 0x00 | MP_NATIVE_TYPE_OBJ,
    VTYPE_BOOL = 0x00 | MP_NATIVE_TYPE_BOOL,
    VTYPE_INT = 0x00 | MP_NATIVE_TYPE_INT,
    VTYPE_UINT = 0x00 | MP_NATIVE_TYPE_UINT,
    VTYPE_PTR = 0x00 | MP_NATIVE_TYPE_PTR,
    VTYPE_PTR8 = 0x00 | MP_NATIVE_TYPE_PTR8,
    VTYPE_PTR16 = 0x00 | MP_NATIVE_TYPE_PTR16,
    VTYPE_PTR32 = 0x00 | MP_NATIVE_TYPE_PTR32,

    // the high nibbles distinguish special viper states that share a
    // native-type low nibble with the plain kinds above
    VTYPE_PTR_NONE = 0x50 | MP_NATIVE_TYPE_PTR,

    VTYPE_UNBOUND = 0x60 | MP_NATIVE_TYPE_OBJ,
    VTYPE_BUILTIN_CAST = 0x70 | MP_NATIVE_TYPE_OBJ,
} vtype_kind_t;
+
+STATIC qstr vtype_to_qstr(vtype_kind_t vtype) {
+ switch (vtype) {
+ case VTYPE_PYOBJ:
+ return MP_QSTR_object;
+ case VTYPE_BOOL:
+ return MP_QSTR_bool;
+ case VTYPE_INT:
+ return MP_QSTR_int;
+ case VTYPE_UINT:
+ return MP_QSTR_uint;
+ case VTYPE_PTR:
+ return MP_QSTR_ptr;
+ case VTYPE_PTR8:
+ return MP_QSTR_ptr8;
+ case VTYPE_PTR16:
+ return MP_QSTR_ptr16;
+ case VTYPE_PTR32:
+ return MP_QSTR_ptr32;
+ case VTYPE_PTR_NONE:
+ default:
+ return MP_QSTR_None;
+ }
+}
+
// Per-entry bookkeeping for the emulated Python value stack.
typedef struct _stack_info_t {
    vtype_kind_t vtype;     // viper type of the value
    stack_info_kind_t kind; // where the value currently lives
    union {
        int u_reg;    // valid when kind == STACK_REG
        mp_int_t u_imm; // valid when kind == STACK_IMM
    } data;
} stack_info_t;

// Sentinel values for exc_stack_entry_t.unwind_label (a 15-bit field).
#define UNWIND_LABEL_UNUSED (0x7fff)
#define UNWIND_LABEL_DO_FINAL_UNWIND (0x7ffe)

// One entry of the compile-time exception-handler stack.
typedef struct _exc_stack_entry_t {
    uint16_t label : 15;        // label of the handler
    uint16_t is_finally : 1;    // handler is a finally block
    uint16_t unwind_label : 15; // unwind target, or a sentinel above
    uint16_t is_active : 1;
} exc_stack_entry_t;
+
// State of the native emitter for one compilation.
struct _emit_t {
    mp_obj_t *error_slot; // where an exception object is stored on error
    uint *label_slot;     // scratch labels shared with the compiler
    uint exit_label;
    int pass;             // current compiler pass

    bool do_viper_types;  // true when compiling with MP_EMIT_OPT_VIPER
    bool prelude_offset_uses_u16_encoding;

    // viper type of each local variable
    mp_uint_t local_vtype_alloc;
    vtype_kind_t *local_vtype;

    // emulated Python value stack (grows on demand)
    mp_uint_t stack_info_alloc;
    stack_info_t *stack_info;
    vtype_kind_t saved_stack_vtype;

    // compile-time exception-handler stack (grows on demand)
    size_t exc_stack_alloc;
    size_t exc_stack_size;
    exc_stack_entry_t *exc_stack;

    int prelude_offset;
    int start_offset;
    int n_state;               // number of state slots (locals + stack)
    uint16_t code_state_start; // C-stack word index of the code state
    uint16_t stack_start;      // C-stack word index of the Python stack
    int stack_size;
    uint16_t n_cell;

    // cursors into the constant table being built
    uint16_t const_table_cur_obj;
    uint16_t const_table_num_obj;
    uint16_t const_table_cur_raw_code;
    mp_uint_t *const_table;

    #if MICROPY_PERSISTENT_CODE_SAVE
    // qstr fixup records for saving persistent code
    uint16_t qstr_link_cur;
    mp_qstr_link_entry_t *qstr_link;
    #endif

    bool last_emit_was_return_value;

    scope_t *scope; // scope being compiled

    ASM_T *as; // architecture-specific assembler (selected by including file)
};
+
+STATIC const uint8_t reg_local_table[REG_LOCAL_NUM] = {REG_LOCAL_1, REG_LOCAL_2, REG_LOCAL_3};
+
+STATIC void emit_native_global_exc_entry(emit_t *emit);
+STATIC void emit_native_global_exc_exit(emit_t *emit);
+STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj);
+
+emit_t *EXPORT_FUN(new)(mp_obj_t * error_slot, uint *label_slot, mp_uint_t max_num_labels) {
+ emit_t *emit = m_new0(emit_t, 1);
+ emit->error_slot = error_slot;
+ emit->label_slot = label_slot;
+ emit->stack_info_alloc = 8;
+ emit->stack_info = m_new(stack_info_t, emit->stack_info_alloc);
+ emit->exc_stack_alloc = 8;
+ emit->exc_stack = m_new(exc_stack_entry_t, emit->exc_stack_alloc);
+ emit->as = m_new0(ASM_T, 1);
+ mp_asm_base_init(&emit->as->base, max_num_labels);
+ return emit;
+}
+
// Free all memory owned by the emitter.
// NOTE(review): the 'false' arg to mp_asm_base_deinit presumably leaves
// the emitted machine code alive — confirm against its definition.
void EXPORT_FUN(free)(emit_t * emit) {
    mp_asm_base_deinit(&emit->as->base, false);
    m_del_obj(ASM_T, emit->as);
    m_del(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc);
    m_del(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc);
    m_del(stack_info_t, emit->stack_info, emit->stack_info_alloc);
    m_del_obj(emit_t, emit);
}
+
+STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg);
+
// Load a constant from the runtime function table:
// reg_dest = REG_FUN_TABLE[const_val].
STATIC void emit_native_mov_reg_const(emit_t *emit, int reg_dest, int const_val) {
    ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_FUN_TABLE, const_val);
}
+
+STATIC void emit_native_mov_state_reg(emit_t *emit, int local_num, int reg_src) {
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, REG_GENERATOR_STATE, local_num);
+ } else {
+ ASM_MOV_LOCAL_REG(emit->as, local_num, reg_src);
+ }
+}
+
+STATIC void emit_native_mov_reg_state(emit_t *emit, int reg_dest, int local_num) {
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ ASM_LOAD_REG_REG_OFFSET(emit->as, reg_dest, REG_GENERATOR_STATE, local_num);
+ } else {
+ ASM_MOV_REG_LOCAL(emit->as, reg_dest, local_num);
+ }
+}
+
+STATIC void emit_native_mov_reg_state_addr(emit_t *emit, int reg_dest, int local_num) {
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ ASM_MOV_REG_IMM(emit->as, reg_dest, local_num * ASM_WORD_SIZE);
+ ASM_ADD_REG_REG(emit->as, reg_dest, REG_GENERATOR_STATE);
+ } else {
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, reg_dest, local_num);
+ }
+}
+
// Load the qstr value qst into arg_reg.
// When saving persistent code, the immediate's location is recorded in
// the qstr link table so the loader can relink it; the low bits of off
// tag the fixup kind (1 here, vs 2 for the boxed-object form).
STATIC void emit_native_mov_reg_qstr(emit_t *emit, int arg_reg, qstr qst) {
    #if MICROPY_PERSISTENT_CODE_SAVE
    size_t loc = ASM_MOV_REG_IMM_FIX_U16(emit->as, arg_reg, qst);
    size_t link_idx = emit->qstr_link_cur++;
    if (emit->pass == MP_PASS_EMIT) {
        emit->qstr_link[link_idx].off = loc << 2 | 1;
        emit->qstr_link[link_idx].qst = qst;
    }
    #else
    ASM_MOV_REG_IMM(emit->as, arg_reg, qst);
    #endif
}
+
// Load the boxed qstr object MP_OBJ_NEW_QSTR(qst) into reg_dest.
// When saving persistent code the immediate's location is recorded in the
// qstr link table with fixup tag 2 (full-word boxed form).
STATIC void emit_native_mov_reg_qstr_obj(emit_t *emit, int reg_dest, qstr qst) {
    #if MICROPY_PERSISTENT_CODE_SAVE
    size_t loc = ASM_MOV_REG_IMM_FIX_WORD(emit->as, reg_dest, (mp_uint_t)MP_OBJ_NEW_QSTR(qst));
    size_t link_idx = emit->qstr_link_cur++;
    if (emit->pass == MP_PASS_EMIT) {
        emit->qstr_link[link_idx].off = loc << 2 | 2;
        emit->qstr_link[link_idx].qst = qst;
    }
    #else
    ASM_MOV_REG_IMM(emit->as, reg_dest, (mp_uint_t)MP_OBJ_NEW_QSTR(qst));
    #endif
}
+
+#define emit_native_mov_state_imm_via(emit, local_num, imm, reg_temp) \
+ do { \
+ ASM_MOV_REG_IMM((emit)->as, (reg_temp), (imm)); \
+ emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
+ } while (false)
+
+#define emit_native_mov_state_imm_fix_u16_via(emit, local_num, imm, reg_temp) \
+ do { \
+ ASM_MOV_REG_IMM_FIX_U16((emit)->as, (reg_temp), (imm)); \
+ emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
+ } while (false)
+
+#define emit_native_mov_state_imm_fix_word_via(emit, local_num, imm, reg_temp) \
+ do { \
+ ASM_MOV_REG_IMM_FIX_WORD((emit)->as, (reg_temp), (imm)); \
+ emit_native_mov_state_reg((emit), (local_num), (reg_temp)); \
+ } while (false)
+
+STATIC void emit_native_start_pass(emit_t *emit, pass_kind_t pass, scope_t *scope) {
+ DEBUG_printf("start_pass(pass=%u, scope=%p)\n", pass, scope);
+
+ emit->pass = pass;
+ emit->do_viper_types = scope->emit_options == MP_EMIT_OPT_VIPER;
+ emit->stack_size = 0;
+ #if N_PRELUDE_AS_BYTES_OBJ
+ emit->const_table_cur_obj = emit->do_viper_types ? 0 : 1; // reserve first obj for prelude bytes obj
+ #else
+ emit->const_table_cur_obj = 0;
+ #endif
+ emit->const_table_cur_raw_code = 0;
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ emit->qstr_link_cur = 0;
+ #endif
+ emit->last_emit_was_return_value = false;
+ emit->scope = scope;
+
+ // allocate memory for keeping track of the types of locals
+ if (emit->local_vtype_alloc < scope->num_locals) {
+ emit->local_vtype = m_renew(vtype_kind_t, emit->local_vtype, emit->local_vtype_alloc, scope->num_locals);
+ emit->local_vtype_alloc = scope->num_locals;
+ }
+
+ // set default type for arguments
+ mp_uint_t num_args = emit->scope->num_pos_args + emit->scope->num_kwonly_args;
+ if (scope->scope_flags & MP_SCOPE_FLAG_VARARGS) {
+ num_args += 1;
+ }
+ if (scope->scope_flags & MP_SCOPE_FLAG_VARKEYWORDS) {
+ num_args += 1;
+ }
+ for (mp_uint_t i = 0; i < num_args; i++) {
+ emit->local_vtype[i] = VTYPE_PYOBJ;
+ }
+
+ // Set viper type for arguments
+ if (emit->do_viper_types) {
+ for (int i = 0; i < emit->scope->id_info_len; ++i) {
+ id_info_t *id = &emit->scope->id_info[i];
+ if (id->flags & ID_FLAG_IS_PARAM) {
+ assert(id->local_num < emit->local_vtype_alloc);
+ emit->local_vtype[id->local_num] = id->flags >> ID_FLAG_VIPER_TYPE_POS;
+ }
+ }
+ }
+
+ // local variables begin unbound, and have unknown type
+ for (mp_uint_t i = num_args; i < emit->local_vtype_alloc; i++) {
+ emit->local_vtype[i] = VTYPE_UNBOUND;
+ }
+
+ // values on stack begin unbound
+ for (mp_uint_t i = 0; i < emit->stack_info_alloc; i++) {
+ emit->stack_info[i].kind = STACK_VALUE;
+ emit->stack_info[i].vtype = VTYPE_UNBOUND;
+ }
+
+ mp_asm_base_start_pass(&emit->as->base, pass == MP_PASS_EMIT ? MP_ASM_PASS_EMIT : MP_ASM_PASS_COMPUTE);
+
+ // generate code for entry to function
+
+ // Work out start of code state (mp_code_state_t or reduced version for viper)
+ emit->code_state_start = 0;
+ if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+ emit->code_state_start = SIZEOF_NLR_BUF;
+ }
+
+ if (emit->do_viper_types) {
+ // Work out size of state (locals plus stack)
+ // n_state counts all stack and locals, even those in registers
+ emit->n_state = scope->num_locals + scope->stack_size;
+ int num_locals_in_regs = 0;
+ if (CAN_USE_REGS_FOR_LOCALS(emit)) {
+ num_locals_in_regs = scope->num_locals;
+ if (num_locals_in_regs > REG_LOCAL_NUM) {
+ num_locals_in_regs = REG_LOCAL_NUM;
+ }
+ // Need a spot for REG_LOCAL_3 if 4 or more args (see below)
+ if (scope->num_pos_args >= 4) {
+ --num_locals_in_regs;
+ }
+ }
+
+ // Work out where the locals and Python stack start within the C stack
+ if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+ // Reserve 2 words for function object and old globals
+ emit->stack_start = emit->code_state_start + 2;
+ } else if (scope->scope_flags & MP_SCOPE_FLAG_HASCONSTS) {
+ // Reserve 1 word for function object, to access const table
+ emit->stack_start = emit->code_state_start + 1;
+ } else {
+ emit->stack_start = emit->code_state_start + 0;
+ }
+
+ // Entry to function
+ ASM_ENTRY(emit->as, emit->stack_start + emit->n_state - num_locals_in_regs);
+
+ #if N_X86
+ asm_x86_mov_arg_to_r32(emit->as, 0, REG_PARENT_ARG_1);
+ #endif
+
+ // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_CONST_TABLE);
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_LOCAL_3, 0);
+
+ // Store function object (passed as first arg) to stack if needed
+ if (NEED_FUN_OBJ(emit)) {
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);
+ }
+
+ // Put n_args in REG_ARG_1, n_kw in REG_ARG_2, args array in REG_LOCAL_3
+ #if N_X86
+ asm_x86_mov_arg_to_r32(emit->as, 1, REG_ARG_1);
+ asm_x86_mov_arg_to_r32(emit->as, 2, REG_ARG_2);
+ asm_x86_mov_arg_to_r32(emit->as, 3, REG_LOCAL_3);
+ #else
+ ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_PARENT_ARG_2);
+ ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_PARENT_ARG_3);
+ ASM_MOV_REG_REG(emit->as, REG_LOCAL_3, REG_PARENT_ARG_4);
+ #endif
+
+ // Check number of args matches this function, and call mp_arg_check_num_sig if not
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_ARG_2, *emit->label_slot + 4, true);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_3, scope->num_pos_args);
+ ASM_JUMP_IF_REG_EQ(emit->as, REG_ARG_1, REG_ARG_3, *emit->label_slot + 5);
+ mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 4);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_3, MP_OBJ_FUN_MAKE_SIG(scope->num_pos_args, scope->num_pos_args, false));
+ ASM_CALL_IND(emit->as, MP_F_ARG_CHECK_NUM_SIG);
+ mp_asm_base_label_assign(&emit->as->base, *emit->label_slot + 5);
+
+ // Store arguments into locals (reg or stack), converting to native if needed
+ for (int i = 0; i < emit->scope->num_pos_args; i++) {
+ int r = REG_ARG_1;
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_LOCAL_3, i);
+ if (emit->local_vtype[i] != VTYPE_PYOBJ) {
+ emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, emit->local_vtype[i], REG_ARG_2);
+ r = REG_RET;
+ }
+ // REG_LOCAL_3 points to the args array so be sure not to overwrite it if it's still needed
+ if (i < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit) && (i != 2 || emit->scope->num_pos_args == 3)) {
+ ASM_MOV_REG_REG(emit->as, reg_local_table[i], r);
+ } else {
+ emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, i), r);
+ }
+ }
+ // Get 3rd local from the stack back into REG_LOCAL_3 if this reg couldn't be written to above
+ if (emit->scope->num_pos_args >= 4 && CAN_USE_REGS_FOR_LOCALS(emit)) {
+ ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_3, LOCAL_IDX_LOCAL_VAR(emit, 2));
+ }
+
+ emit_native_global_exc_entry(emit);
+
+ } else {
+ // work out size of state (locals plus stack)
+ emit->n_state = scope->num_locals + scope->stack_size;
+
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ emit->code_state_start = 0;
+ emit->stack_start = SIZEOF_CODE_STATE;
+ #if N_PRELUDE_AS_BYTES_OBJ
+ // Load index of prelude bytes object in const_table
+ mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)(emit->scope->num_pos_args + emit->scope->num_kwonly_args + 1));
+ #else
+ mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->prelude_offset);
+ #endif
+ mp_asm_base_data(&emit->as->base, ASM_WORD_SIZE, (uintptr_t)emit->start_offset);
+ ASM_ENTRY(emit->as, SIZEOF_NLR_BUF);
+
+ // Put address of code_state into REG_GENERATOR_STATE
+ #if N_X86
+ asm_x86_mov_arg_to_r32(emit->as, 0, REG_GENERATOR_STATE);
+ #else
+ ASM_MOV_REG_REG(emit->as, REG_GENERATOR_STATE, REG_PARENT_ARG_1);
+ #endif
+
+ // Put throw value into LOCAL_IDX_EXC_VAL slot, for yield/yield-from
+ #if N_X86
+ asm_x86_mov_arg_to_r32(emit->as, 1, REG_PARENT_ARG_2);
+ #endif
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_PARENT_ARG_2);
+
+ // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, LOCAL_IDX_FUN_OBJ(emit));
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_OBJ_FUN_BC_CONST_TABLE);
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_TEMP0, emit->scope->num_pos_args + emit->scope->num_kwonly_args);
+ } else {
+ // The locals and stack start after the code_state structure
+ emit->stack_start = emit->code_state_start + SIZEOF_CODE_STATE;
+
+ // Allocate space on C-stack for code_state structure, which includes state
+ ASM_ENTRY(emit->as, emit->stack_start + emit->n_state);
+
+ // Prepare incoming arguments for call to mp_setup_code_state
+
+ #if N_X86
+ asm_x86_mov_arg_to_r32(emit->as, 0, REG_PARENT_ARG_1);
+ asm_x86_mov_arg_to_r32(emit->as, 1, REG_PARENT_ARG_2);
+ asm_x86_mov_arg_to_r32(emit->as, 2, REG_PARENT_ARG_3);
+ asm_x86_mov_arg_to_r32(emit->as, 3, REG_PARENT_ARG_4);
+ #endif
+
+ // Load REG_FUN_TABLE with a pointer to mp_fun_table, found in the const_table
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_CONST_TABLE);
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_LOCAL_3, emit->scope->num_pos_args + emit->scope->num_kwonly_args);
+
+ // Set code_state.fun_bc
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_FUN_OBJ(emit), REG_PARENT_ARG_1);
+
+ // Set code_state.ip (offset from start of this function to prelude info)
+ int code_state_ip_local = emit->code_state_start + OFFSETOF_CODE_STATE_IP;
+ #if N_PRELUDE_AS_BYTES_OBJ
+ // Prelude is a bytes object in const_table; store ip = prelude->data - fun_bc->bytecode
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, emit->scope->num_pos_args + emit->scope->num_kwonly_args + 1);
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_3, REG_LOCAL_3, offsetof(mp_obj_str_t, data) / sizeof(uintptr_t));
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_PARENT_ARG_1, REG_PARENT_ARG_1, OFFSETOF_OBJ_FUN_BC_BYTECODE);
+ ASM_SUB_REG_REG(emit->as, REG_LOCAL_3, REG_PARENT_ARG_1);
+ emit_native_mov_state_reg(emit, code_state_ip_local, REG_LOCAL_3);
+ #else
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // Commit to the encoding size based on the value of prelude_offset in this pass.
+ // By using 32768 as the cut-off it is highly unlikely that prelude_offset will
+ // grow beyond 65535 by the end of this pass, and so require the larger encoding.
+ emit->prelude_offset_uses_u16_encoding = emit->prelude_offset < 32768;
+ }
+ if (emit->prelude_offset_uses_u16_encoding) {
+ assert(emit->prelude_offset <= 65535);
+ emit_native_mov_state_imm_fix_u16_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
+ } else {
+ emit_native_mov_state_imm_fix_word_via(emit, code_state_ip_local, emit->prelude_offset, REG_PARENT_ARG_1);
+ }
+ #endif
+
+ // Set code_state.n_state (only works on little endian targets due to n_state being uint16_t)
+ emit_native_mov_state_imm_via(emit, emit->code_state_start + offsetof(mp_code_state_t, n_state) / sizeof(uintptr_t), emit->n_state, REG_ARG_1);
+
+ // Put address of code_state into first arg
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, emit->code_state_start);
+
+ // Copy next 3 args if needed
+ #if REG_ARG_2 != REG_PARENT_ARG_2
+ ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_PARENT_ARG_2);
+ #endif
+ #if REG_ARG_3 != REG_PARENT_ARG_3
+ ASM_MOV_REG_REG(emit->as, REG_ARG_3, REG_PARENT_ARG_3);
+ #endif
+ #if REG_ARG_4 != REG_PARENT_ARG_4
+ ASM_MOV_REG_REG(emit->as, REG_ARG_4, REG_PARENT_ARG_4);
+ #endif
+
+ // Call mp_setup_code_state to prepare code_state structure
+ #if N_THUMB
+ asm_thumb_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_THUMB_REG_R4);
+ #elif N_ARM
+ asm_arm_bl_ind(emit->as, MP_F_SETUP_CODE_STATE, ASM_ARM_REG_R4);
+ #else
+ ASM_CALL_IND(emit->as, MP_F_SETUP_CODE_STATE);
+ #endif
+ }
+
+ emit_native_global_exc_entry(emit);
+
+ // cache some locals in registers, but only if no exception handlers
+ if (CAN_USE_REGS_FOR_LOCALS(emit)) {
+ for (int i = 0; i < REG_LOCAL_NUM && i < scope->num_locals; ++i) {
+ ASM_MOV_REG_LOCAL(emit->as, reg_local_table[i], LOCAL_IDX_LOCAL_VAR(emit, i));
+ }
+ }
+
+ // set the type of closed over variables
+ for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
+ id_info_t *id = &scope->id_info[i];
+ if (id->kind == ID_INFO_KIND_CELL) {
+ emit->local_vtype[id->local_num] = VTYPE_PYOBJ;
+ }
+ }
+
+ if (pass == MP_PASS_EMIT) {
+ // write argument names as qstr objects
+ // see comment in corresponding part of emitbc.c about the logic here
+ for (int i = 0; i < scope->num_pos_args + scope->num_kwonly_args; i++) {
+ qstr qst = MP_QSTR__star_;
+ for (int j = 0; j < scope->id_info_len; ++j) {
+ id_info_t *id = &scope->id_info[j];
+ if ((id->flags & ID_FLAG_IS_PARAM) && id->local_num == i) {
+ qst = id->qst;
+ break;
+ }
+ }
+ emit->const_table[i] = (mp_uint_t)MP_OBJ_NEW_QSTR(qst);
+ }
+ }
+ }
+
+}
+
+// Emit a single byte of the bytecode-style prelude into the output stream.
+// Used as the write callback for the MP_BC_PRELUDE_*_ENCODE macros.
+static inline void emit_native_write_code_info_byte(emit_t *emit, byte val) {
+ mp_asm_base_data(&emit->as->base, 1, val);
+}
+
+// Called at the end of each compiler pass.  For non-viper code it appends a
+// bytecode-style prelude (signature, size, source info, cell-variable list)
+// after the machine code, then closes the assembler pass.  On the CODE_SIZE
+// pass it allocates the const table; on the EMIT pass it hands the finished
+// machine code to the emit glue.
+STATIC void emit_native_end_pass(emit_t *emit) {
+ emit_native_global_exc_exit(emit);
+
+ if (!emit->do_viper_types) {
+ // Prelude is placed at the current (end-of-code) position
+ emit->prelude_offset = mp_asm_base_get_code_pos(&emit->as->base);
+
+ size_t n_state = emit->n_state;
+ size_t n_exc_stack = 0; // exc-stack not needed for native code
+ MP_BC_PRELUDE_SIG_ENCODE(n_state, n_exc_stack, emit->scope, emit_native_write_code_info_byte, emit);
+
+ #if MICROPY_PERSISTENT_CODE
+ size_t n_info = 4;
+ #else
+ size_t n_info = 1;
+ #endif
+ MP_BC_PRELUDE_SIZE_ENCODE(n_info, emit->n_cell, emit_native_write_code_info_byte, emit);
+
+ // Source info: simple_name/source_file qstrs (16-bit each), or a dummy byte
+ #if MICROPY_PERSISTENT_CODE
+ mp_asm_base_data(&emit->as->base, 1, emit->scope->simple_name);
+ mp_asm_base_data(&emit->as->base, 1, emit->scope->simple_name >> 8);
+ mp_asm_base_data(&emit->as->base, 1, emit->scope->source_file);
+ mp_asm_base_data(&emit->as->base, 1, emit->scope->source_file >> 8);
+ #else
+ mp_asm_base_data(&emit->as->base, 1, 1);
+ #endif
+
+ // bytecode prelude: initialise closed over variables
+ size_t cell_start = mp_asm_base_get_code_pos(&emit->as->base);
+ for (int i = 0; i < emit->scope->id_info_len; i++) {
+ id_info_t *id = &emit->scope->id_info[i];
+ if (id->kind == ID_INFO_KIND_CELL) {
+ assert(id->local_num <= 255);
+ mp_asm_base_data(&emit->as->base, 1, id->local_num); // write the local which should be converted to a cell
+ }
+ }
+ emit->n_cell = mp_asm_base_get_code_pos(&emit->as->base) - cell_start;
+
+ #if N_PRELUDE_AS_BYTES_OBJ
+ // Prelude bytes object is after qstr arg names and mp_fun_table
+ size_t table_off = emit->scope->num_pos_args + emit->scope->num_kwonly_args + 1;
+ if (emit->pass == MP_PASS_EMIT) {
+ void *buf = emit->as->base.code_base + emit->prelude_offset;
+ size_t n = emit->as->base.code_offset - emit->prelude_offset;
+ emit->const_table[table_off] = (uintptr_t)mp_obj_new_bytes(buf, n);
+ }
+ #endif
+ }
+
+ ASM_END_PASS(emit->as);
+
+ // check stack is back to zero size
+ assert(emit->stack_size == 0);
+ assert(emit->exc_stack_size == 0);
+
+ // Deal with const table accounting
+ assert(emit->pass <= MP_PASS_STACK_SIZE || (emit->const_table_num_obj == emit->const_table_cur_obj));
+ emit->const_table_num_obj = emit->const_table_cur_obj;
+ if (emit->pass == MP_PASS_CODE_SIZE) {
+ // Layout: [arg-name qstrs][mp_fun_table][objects][raw codes]
+ size_t const_table_alloc = 1 + emit->const_table_num_obj + emit->const_table_cur_raw_code;
+ size_t nqstr = 0;
+ if (!emit->do_viper_types) {
+ // Add room for qstr names of arguments
+ nqstr = emit->scope->num_pos_args + emit->scope->num_kwonly_args;
+ const_table_alloc += nqstr;
+ }
+ emit->const_table = m_new(mp_uint_t, const_table_alloc);
+ #if !MICROPY_DYNAMIC_COMPILER
+ // Store mp_fun_table pointer just after qstrs
+ // (but in dynamic-compiler mode eliminate dependency on mp_fun_table)
+ emit->const_table[nqstr] = (mp_uint_t)(uintptr_t)&mp_fun_table;
+ #endif
+
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ size_t qstr_link_alloc = emit->qstr_link_cur;
+ if (qstr_link_alloc > 0) {
+ emit->qstr_link = m_new(mp_qstr_link_entry_t, qstr_link_alloc);
+ }
+ #endif
+ }
+
+ if (emit->pass == MP_PASS_EMIT) {
+ // Final pass: register the generated machine code with the runtime
+ void *f = mp_asm_base_get_code(&emit->as->base);
+ mp_uint_t f_len = mp_asm_base_get_code_size(&emit->as->base);
+
+ mp_emit_glue_assign_native(emit->scope->raw_code,
+ emit->do_viper_types ? MP_CODE_NATIVE_VIPER : MP_CODE_NATIVE_PY,
+ f, f_len, emit->const_table,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ emit->prelude_offset,
+ emit->const_table_cur_obj, emit->const_table_cur_raw_code,
+ emit->qstr_link_cur, emit->qstr_link,
+ #endif
+ emit->scope->num_pos_args, emit->scope->scope_flags, 0);
+ }
+}
+
+// Return whether the most recently emitted opcode was a return-value.
+STATIC bool emit_native_last_emit_was_return_value(emit_t *emit) {
+ return emit->last_emit_was_return_value;
+}
+
+// Grow the stack_info array, if needed, so it can hold delta more entries.
+STATIC void ensure_extra_stack(emit_t *emit, size_t delta) {
+ if (emit->stack_size + delta > emit->stack_info_alloc) {
+ // Grow with some headroom (+8, then rounded down to a multiple of 4)
+ size_t new_alloc = (emit->stack_size + delta + 8) & ~3;
+ emit->stack_info = m_renew(stack_info_t, emit->stack_info, emit->stack_info_alloc, new_alloc);
+ emit->stack_info_alloc = new_alloc;
+ }
+}
+
+// Adjust the emulated Python stack size by stack_size_delta (may be negative),
+// recording the maximum stack depth reached in scope->stack_size so later
+// passes know how much state to allocate.
+STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
+ assert((mp_int_t)emit->stack_size + stack_size_delta >= 0);
+ assert((mp_int_t)emit->stack_size + stack_size_delta <= (mp_int_t)emit->stack_info_alloc);
+ emit->stack_size += stack_size_delta;
+ if (emit->pass > MP_PASS_SCOPE && emit->stack_size > emit->scope->stack_size) {
+ emit->scope->stack_size = emit->stack_size;
+ }
+ #ifdef DEBUG_PRINT
+ DEBUG_printf(" adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
+ for (int i = 0; i < emit->stack_size; i++) {
+ stack_info_t *si = &emit->stack_info[i];
+ DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
+ }
+ DEBUG_printf("\n");
+ #endif
+}
+
+// Public emitter hook: adjust the stack size by delta (e.g. at a jump target),
+// filling in kind/vtype for any newly pushed entries.
+STATIC void emit_native_adjust_stack_size(emit_t *emit, mp_int_t delta) {
+ DEBUG_printf("adjust_stack_size(" INT_FMT ")\n", delta);
+ if (delta > 0) {
+ ensure_extra_stack(emit, delta);
+ }
+ // If we are adjusting the stack in a positive direction (pushing) then we
+ // need to fill in values for the stack kind and vtype of the newly-pushed
+ // entries. These should be set to "value" (ie not reg or imm) because we
+ // should only need to adjust the stack due to a jump to this part in the
+ // code (and hence we have settled the stack before the jump).
+ for (mp_int_t i = 0; i < delta; i++) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size + i];
+ si->kind = STACK_VALUE;
+ // TODO we don't know the vtype to use here. At the moment this is a
+ // hack to get the case of multi comparison working.
+ if (delta == 1) {
+ si->vtype = emit->saved_stack_vtype;
+ } else {
+ si->vtype = VTYPE_PYOBJ;
+ }
+ }
+ adjust_stack(emit, delta);
+}
+
+// Source-line tracking is not implemented by the native emitter.
+STATIC void emit_native_set_source_line(emit_t *emit, mp_uint_t source_line) {
+ (void)emit;
+ (void)source_line;
+}
+
+// this must be called at start of emit functions
+STATIC void emit_native_pre(emit_t *emit) {
+ emit->last_emit_was_return_value = false;
+}
+
+// depth==0 is top, depth==1 is before top, etc
+STATIC stack_info_t *peek_stack(emit_t *emit, mp_uint_t depth) {
+ return &emit->stack_info[emit->stack_size - 1 - depth];
+}
+
+// depth==0 is top, depth==1 is before top, etc
+STATIC vtype_kind_t peek_vtype(emit_t *emit, mp_uint_t depth) {
+ if (emit->do_viper_types) {
+ return peek_stack(emit, depth)->vtype;
+ } else {
+ // Type is always PYOBJ even if the intermediate stored value is not
+ return VTYPE_PYOBJ;
+ }
+}
+
+// Free up the single register reg_needed: any stack entry currently cached in
+// that register (except the one at skip_stack_pos) is spilled to the concrete
+// Python stack.
+// pos=1 is TOS, pos=2 is next, etc
+// use pos=0 for no skipping
+STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
+ skip_stack_pos = emit->stack_size - skip_stack_pos;
+ for (int i = 0; i < emit->stack_size; i++) {
+ if (i != skip_stack_pos) {
+ stack_info_t *si = &emit->stack_info[i];
+ if (si->kind == STACK_REG && si->data.u_reg == reg_needed) {
+ si->kind = STACK_VALUE;
+ emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
+ }
+ }
+ }
+}
+
+// Ensures all unsettled registers that hold Python values are copied to the
+// concrete Python stack. All registers are then free to use.
+// (Immediates are left as-is; see need_stack_settled for those.)
+STATIC void need_reg_all(emit_t *emit) {
+ for (int i = 0; i < emit->stack_size; i++) {
+ stack_info_t *si = &emit->stack_info[i];
+ if (si->kind == STACK_REG) {
+ DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
+ si->kind = STACK_VALUE;
+ emit_native_mov_state_reg(emit, emit->stack_start + i, si->data.u_reg);
+ }
+ }
+}
+
+// Load the immediate stacked value *si into reg_dest.  With viper types on
+// and convert_to_pyobj false the raw value is loaded as-is; otherwise the
+// value is boxed into a Python object according to its vtype.  Returns the
+// vtype of the value now held in reg_dest.
+STATIC vtype_kind_t load_reg_stack_imm(emit_t *emit, int reg_dest, const stack_info_t *si, bool convert_to_pyobj) {
+ if (!convert_to_pyobj && emit->do_viper_types) {
+ ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm);
+ return si->vtype;
+ } else {
+ if (si->vtype == VTYPE_PYOBJ) {
+ ASM_MOV_REG_IMM(emit->as, reg_dest, si->data.u_imm);
+ } else if (si->vtype == VTYPE_BOOL) {
+ // u_imm is 0 or 1, selecting mp_const_false/mp_const_true
+ emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_FALSE_OBJ + si->data.u_imm);
+ } else if (si->vtype == VTYPE_INT || si->vtype == VTYPE_UINT) {
+ ASM_MOV_REG_IMM(emit->as, reg_dest, (uintptr_t)MP_OBJ_NEW_SMALL_INT(si->data.u_imm));
+ } else if (si->vtype == VTYPE_PTR_NONE) {
+ emit_native_mov_reg_const(emit, reg_dest, MP_F_CONST_NONE_OBJ);
+ } else {
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("conversion to object"));
+ }
+ return VTYPE_PYOBJ;
+ }
+}
+
+// Copies all unsettled registers and immediates that are Python values into the
+// concrete Python stack. This ensures the concrete Python stack holds valid
+// values for the current stack_size.
+// This function may clobber REG_TEMP1.
+STATIC void need_stack_settled(emit_t *emit) {
+ DEBUG_printf(" need_stack_settled; stack_size=%d\n", emit->stack_size);
+ // First spill register-cached entries, then materialise immediates
+ need_reg_all(emit);
+ for (int i = 0; i < emit->stack_size; i++) {
+ stack_info_t *si = &emit->stack_info[i];
+ if (si->kind == STACK_IMM) {
+ DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
+ si->kind = STACK_VALUE;
+ // using REG_TEMP1 to avoid clobbering REG_TEMP0 (aka REG_RET)
+ si->vtype = load_reg_stack_imm(emit, REG_TEMP1, si, false);
+ emit_native_mov_state_reg(emit, emit->stack_start + i, REG_TEMP1);
+ }
+ }
+}
+
+// Load the stack entry at position pos into reg_dest without popping it,
+// storing the entry's vtype in *vtype.  Any other entry cached in reg_dest is
+// spilled first so the register is safe to overwrite.
+// pos=1 is TOS, pos=2 is next, etc
+STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int reg_dest) {
+ need_reg_single(emit, reg_dest, pos);
+ stack_info_t *si = &emit->stack_info[emit->stack_size - pos];
+ *vtype = si->vtype;
+ switch (si->kind) {
+ case STACK_VALUE:
+ // Value lives on the concrete stack: load it
+ emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - pos);
+ break;
+
+ case STACK_REG:
+ if (si->data.u_reg != reg_dest) {
+ ASM_MOV_REG_REG(emit->as, reg_dest, si->data.u_reg);
+ }
+ break;
+
+ case STACK_IMM:
+ *vtype = load_reg_stack_imm(emit, reg_dest, si, false);
+ break;
+ }
+}
+
+// does an efficient X=pop(); discard(); push(X)
+// needs a (non-temp) register in case the popped element was stored in the stack
+STATIC void emit_fold_stack_top(emit_t *emit, int reg_dest) {
+ // Overwrite the second-from-top entry with the top entry, then shrink by one
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 2];
+ si[0] = si[1];
+ if (si->kind == STACK_VALUE) {
+ // if folded element was on the stack we need to put it in a register
+ emit_native_mov_reg_state(emit, reg_dest, emit->stack_start + emit->stack_size - 1);
+ si->kind = STACK_REG;
+ si->data.u_reg = reg_dest;
+ }
+ adjust_stack(emit, -1);
+}
+
+// Pop TOS into a register, avoiding a register move when possible.
+// If stacked value is in a register and the register is not r1 or r2, then
+// *reg_dest is set to that register. Otherwise the value is put in *reg_dest.
+STATIC void emit_pre_pop_reg_flexible(emit_t *emit, vtype_kind_t *vtype, int *reg_dest, int not_r1, int not_r2) {
+ emit->last_emit_was_return_value = false;
+ stack_info_t *si = peek_stack(emit, 0);
+ if (si->kind == STACK_REG && si->data.u_reg != not_r1 && si->data.u_reg != not_r2) {
+ *vtype = si->vtype;
+ *reg_dest = si->data.u_reg;
+ need_reg_single(emit, *reg_dest, 1);
+ } else {
+ emit_access_stack(emit, 1, vtype, *reg_dest);
+ }
+ adjust_stack(emit, -1);
+}
+
+// Pop TOS and discard its value (no code emitted, only bookkeeping).
+STATIC void emit_pre_pop_discard(emit_t *emit) {
+ emit->last_emit_was_return_value = false;
+ adjust_stack(emit, -1);
+}
+
+// Pop TOS into reg_dest, returning its vtype in *vtype.
+STATIC void emit_pre_pop_reg(emit_t *emit, vtype_kind_t *vtype, int reg_dest) {
+ emit->last_emit_was_return_value = false;
+ emit_access_stack(emit, 1, vtype, reg_dest);
+ adjust_stack(emit, -1);
+}
+
+// Pop two values: TOS into rega, next into regb.
+STATIC void emit_pre_pop_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb) {
+ emit_pre_pop_reg(emit, vtypea, rega);
+ emit_pre_pop_reg(emit, vtypeb, regb);
+}
+
+// Pop three values: TOS into rega, next into regb, next into regc.
+STATIC void emit_pre_pop_reg_reg_reg(emit_t *emit, vtype_kind_t *vtypea, int rega, vtype_kind_t *vtypeb, int regb, vtype_kind_t *vtypec, int regc) {
+ emit_pre_pop_reg(emit, vtypea, rega);
+ emit_pre_pop_reg(emit, vtypeb, regb);
+ emit_pre_pop_reg(emit, vtypec, regc);
+}
+
+// Hook called at the end of each emit function; currently a no-op.
+STATIC void emit_post(emit_t *emit) {
+ (void)emit;
+}
+
+// Change the recorded vtype of the current TOS entry.
+STATIC void emit_post_top_set_vtype(emit_t *emit, vtype_kind_t new_vtype) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 1];
+ si->vtype = new_vtype;
+}
+
+// Push a value that currently lives in register reg (no code emitted).
+STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) {
+ ensure_extra_stack(emit, 1);
+ stack_info_t *si = &emit->stack_info[emit->stack_size];
+ si->vtype = vtype;
+ si->kind = STACK_REG;
+ si->data.u_reg = reg;
+ adjust_stack(emit, 1);
+}
+
+// Push an immediate value (no code emitted until it must be settled).
+STATIC void emit_post_push_imm(emit_t *emit, vtype_kind_t vtype, mp_int_t imm) {
+ ensure_extra_stack(emit, 1);
+ stack_info_t *si = &emit->stack_info[emit->stack_size];
+ si->vtype = vtype;
+ si->kind = STACK_IMM;
+ si->data.u_imm = imm;
+ adjust_stack(emit, 1);
+}
+
+// Push two register values; rega is pushed first (ends up below regb).
+STATIC void emit_post_push_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb) {
+ emit_post_push_reg(emit, vtypea, rega);
+ emit_post_push_reg(emit, vtypeb, regb);
+}
+
+// Push three register values, rega lowest.
+STATIC void emit_post_push_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc) {
+ emit_post_push_reg(emit, vtypea, rega);
+ emit_post_push_reg(emit, vtypeb, regb);
+ emit_post_push_reg(emit, vtypec, regc);
+}
+
+// Push four register values, rega lowest.
+STATIC void emit_post_push_reg_reg_reg_reg(emit_t *emit, vtype_kind_t vtypea, int rega, vtype_kind_t vtypeb, int regb, vtype_kind_t vtypec, int regc, vtype_kind_t vtyped, int regd) {
+ emit_post_push_reg(emit, vtypea, rega);
+ emit_post_push_reg(emit, vtypeb, regb);
+ emit_post_push_reg(emit, vtypec, regc);
+ emit_post_push_reg(emit, vtyped, regd);
+}
+
+// Spill all register-cached stack entries, then call runtime function fun_kind
+// indirectly (through the function table).
+STATIC void emit_call(emit_t *emit, mp_fun_kind_t fun_kind) {
+ need_reg_all(emit);
+ ASM_CALL_IND(emit->as, fun_kind);
+}
+
+// As emit_call, but first load immediate arg_val into register arg_reg.
+STATIC void emit_call_with_imm_arg(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val, int arg_reg) {
+ need_reg_all(emit);
+ ASM_MOV_REG_IMM(emit->as, arg_reg, arg_val);
+ ASM_CALL_IND(emit->as, fun_kind);
+}
+
+// As emit_call, but first load two immediate arguments into their registers.
+STATIC void emit_call_with_2_imm_args(emit_t *emit, mp_fun_kind_t fun_kind, mp_int_t arg_val1, int arg_reg1, mp_int_t arg_val2, int arg_reg2) {
+ need_reg_all(emit);
+ ASM_MOV_REG_IMM(emit->as, arg_reg1, arg_val1);
+ ASM_MOV_REG_IMM(emit->as, arg_reg2, arg_val2);
+ ASM_CALL_IND(emit->as, fun_kind);
+}
+
+// As emit_call, but first load qstr qst into register arg_reg.
+STATIC void emit_call_with_qstr_arg(emit_t *emit, mp_fun_kind_t fun_kind, qstr qst, int arg_reg) {
+ need_reg_all(emit);
+ emit_native_mov_reg_qstr(emit, arg_reg, qst);
+ ASM_CALL_IND(emit->as, fun_kind);
+}
+
+// Pop n_pop items by settling them on the concrete stack and loading the
+// address of the lowest one into reg_dest.
+// vtype of all n_pop objects is VTYPE_PYOBJ
+// Will convert any items that are not VTYPE_PYOBJ to this type and put them back on the stack.
+// If any conversions of non-immediate values are needed, then it uses REG_ARG_1, REG_ARG_2 and REG_RET.
+// Otherwise, it does not use any temporary registers (but may use reg_dest before loading it with stack pointer).
+STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_pop) {
+ need_reg_all(emit);
+
+ // First, store any immediate values to their respective place on the stack.
+ for (mp_uint_t i = 0; i < n_pop; i++) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+ // must push any imm's to stack
+ // must convert them to VTYPE_PYOBJ for viper code
+ if (si->kind == STACK_IMM) {
+ si->kind = STACK_VALUE;
+ si->vtype = load_reg_stack_imm(emit, reg_dest, si, true);
+ emit_native_mov_state_reg(emit, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
+ }
+
+ // verify that this value is on the stack
+ assert(si->kind == STACK_VALUE);
+ }
+
+ // Second, convert any non-VTYPE_PYOBJ to that type.
+ for (mp_uint_t i = 0; i < n_pop; i++) {
+ stack_info_t *si = &emit->stack_info[emit->stack_size - 1 - i];
+ if (si->vtype != VTYPE_PYOBJ) {
+ mp_uint_t local_num = emit->stack_start + emit->stack_size - 1 - i;
+ emit_native_mov_reg_state(emit, REG_ARG_1, local_num);
+ emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, si->vtype, REG_ARG_2); // arg2 = type
+ emit_native_mov_state_reg(emit, local_num, REG_RET);
+ si->vtype = VTYPE_PYOBJ;
+ DEBUG_printf(" convert_native_to_obj(local_num=" UINT_FMT ")\n", local_num);
+ }
+ }
+
+ // Adjust the stack for a pop of n_pop items, and load the stack pointer into reg_dest.
+ adjust_stack(emit, -n_pop);
+ emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size);
+}
+
+// Reserve n_push stack entries (marked VTYPE_PYOBJ, to be filled by the
+// callee) and load the address of the lowest one into reg_dest.
+// vtype of all n_push objects is VTYPE_PYOBJ
+STATIC void emit_get_stack_pointer_to_reg_for_push(emit_t *emit, mp_uint_t reg_dest, mp_uint_t n_push) {
+ need_reg_all(emit);
+ ensure_extra_stack(emit, n_push);
+ for (mp_uint_t i = 0; i < n_push; i++) {
+ emit->stack_info[emit->stack_size + i].kind = STACK_VALUE;
+ emit->stack_info[emit->stack_size + i].vtype = VTYPE_PYOBJ;
+ }
+ emit_native_mov_reg_state_addr(emit, reg_dest, emit->stack_start + emit->stack_size);
+ adjust_stack(emit, n_push);
+}
+
+// Push a new entry onto the compile-time exception-handler stack, growing the
+// array as needed, and store the PC-relative address of the handler's label
+// into the runtime exc-handler-PC slot.
+STATIC void emit_native_push_exc_stack(emit_t *emit, uint label, bool is_finally) {
+ if (emit->exc_stack_size + 1 > emit->exc_stack_alloc) {
+ size_t new_alloc = emit->exc_stack_alloc + 4;
+ emit->exc_stack = m_renew(exc_stack_entry_t, emit->exc_stack, emit->exc_stack_alloc, new_alloc);
+ emit->exc_stack_alloc = new_alloc;
+ }
+
+ exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size++];
+ e->label = label;
+ e->is_finally = is_finally;
+ e->unwind_label = UNWIND_LABEL_UNUSED;
+ e->is_active = true;
+
+ // Make the new handler the current one at runtime
+ ASM_MOV_REG_PCREL(emit->as, REG_RET, label);
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
+}
+
+// Deactivate the top exception handler and restore the next innermost active
+// handler as the current one (or clear the handler PC when none remain).
+STATIC void emit_native_leave_exc_stack(emit_t *emit, bool start_of_handler) {
+ assert(emit->exc_stack_size > 0);
+
+ // Get current exception handler and deactivate it
+ exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
+ e->is_active = false;
+
+ // Find next innermost active exception handler, to restore as current handler
+ for (--e; e >= emit->exc_stack && !e->is_active; --e) {
+ }
+
+ // Update the PC of the new exception handler
+ if (e < emit->exc_stack) {
+ // No active handler, clear handler PC to zero
+ if (start_of_handler) {
+ // Optimisation: PC is already cleared by global exc handler
+ return;
+ }
+ ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET);
+ } else {
+ // Found new active handler, get its PC
+ ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
+ }
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
+}
+
+// Pop an (already deactivated) entry off the exception-handler stack,
+// returning a pointer to it.
+STATIC exc_stack_entry_t *emit_native_pop_exc_stack(emit_t *emit) {
+ assert(emit->exc_stack_size > 0);
+ exc_stack_entry_t *e = &emit->exc_stack[--emit->exc_stack_size];
+ assert(e->is_active == false);
+ return e;
+}
+
+// Load register reg with the pointer stored at index table_off in this
+// function's const_table.  The store into the table itself only happens on
+// the EMIT pass; the load code is emitted on every pass.
+STATIC void emit_load_reg_with_ptr(emit_t *emit, int reg, mp_uint_t ptr, size_t table_off) {
+ if (!emit->do_viper_types) {
+ // Skip qstr names of arguments
+ table_off += emit->scope->num_pos_args + emit->scope->num_kwonly_args;
+ }
+ if (emit->pass == MP_PASS_EMIT) {
+ emit->const_table[table_off] = ptr;
+ }
+ // const_table is reached via the function object stored in the state
+ emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_FUN_OBJ(emit));
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_TEMP0, OFFSETOF_OBJ_FUN_BC_CONST_TABLE);
+ ASM_LOAD_REG_REG_OFFSET(emit->as, reg, REG_TEMP0, table_off);
+}
+
+// Load register reg with a constant object, allocating it the next object
+// slot in the const_table.
+STATIC void emit_load_reg_with_object(emit_t *emit, int reg, mp_obj_t obj) {
+ // First entry is for mp_fun_table
+ size_t table_off = 1 + emit->const_table_cur_obj++;
+ emit_load_reg_with_ptr(emit, reg, (mp_uint_t)obj, table_off);
+}
+
+// Load register reg with a raw-code pointer, allocating it the next raw-code
+// slot in the const_table.
+STATIC void emit_load_reg_with_raw_code(emit_t *emit, int reg, mp_raw_code_t *rc) {
+ // First entry is for mp_fun_table, then constant objects
+ size_t table_off = 1 + emit->const_table_num_obj + emit->const_table_cur_raw_code++;
+ emit_load_reg_with_ptr(emit, reg, (mp_uint_t)rc, table_off);
+}
+
+// Assign label l to the current code position.  If the label marks the start
+// of a finally handler, the TOS (the exception value) is first saved into the
+// exception slot, and afterwards the handler is left from the exc stack.
+STATIC void emit_native_label_assign(emit_t *emit, mp_uint_t l) {
+ DEBUG_printf("label_assign(" UINT_FMT ")\n", l);
+
+ bool is_finally = false;
+ if (emit->exc_stack_size > 0) {
+ exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
+ is_finally = e->is_finally && e->label == l;
+ }
+
+ if (is_finally) {
+ // Label is at start of finally handler: store TOS into exception slot
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
+ }
+
+ emit_native_pre(emit);
+ // need to commit stack because we can jump here from elsewhere
+ need_stack_settled(emit);
+ mp_asm_base_label_assign(&emit->as->base, l);
+ emit_post(emit);
+
+ if (is_finally) {
+ // Label is at start of finally handler: pop exception stack
+ emit_native_leave_exc_stack(emit, false);
+ }
+}
+
+// Emit the function-entry code that swaps in the function's globals and, when
+// a global exception handler is needed, wraps the whole body in an nlr
+// context.  For generators this also emits the resume dispatch (jump to the
+// saved generator PC) and the re-raise of any injected exception.
+STATIC void emit_native_global_exc_entry(emit_t *emit) {
+ // Note: 4 labels are reserved for this function, starting at *emit->label_slot
+
+ emit->exit_label = *emit->label_slot;
+
+ if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+ mp_uint_t nlr_label = *emit->label_slot + 1;
+ mp_uint_t start_label = *emit->label_slot + 2;
+ mp_uint_t global_except_label = *emit->label_slot + 3;
+
+ if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+ // Set new globals
+ emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_FUN_OBJ(emit));
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_1, REG_ARG_1, OFFSETOF_OBJ_FUN_BC_GLOBALS);
+ emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);
+
+ // Save old globals (or NULL if globals didn't change)
+ emit_native_mov_state_reg(emit, LOCAL_IDX_OLD_GLOBALS(emit), REG_RET);
+ }
+
+ if (emit->scope->exc_stack_size == 0) {
+ if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+ // Optimisation: if globals didn't change don't push the nlr context
+ ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, start_label, false);
+ }
+
+ // Wrap everything in an nlr context
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0);
+ emit_call(emit, MP_F_NLR_PUSH);
+ #if N_NLR_SETJMP
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 2);
+ emit_call(emit, MP_F_SETJMP);
+ #endif
+ ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, start_label, true);
+ } else {
+ // Clear the unwind state
+ ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0);
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_TEMP0);
+
+ // Put PC of start code block into REG_LOCAL_1
+ ASM_MOV_REG_PCREL(emit->as, REG_LOCAL_1, start_label);
+
+ // Wrap everything in an nlr context
+ emit_native_label_assign(emit, nlr_label);
+ ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_2, LOCAL_IDX_EXC_HANDLER_UNWIND(emit));
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 0);
+ emit_call(emit, MP_F_NLR_PUSH);
+ #if N_NLR_SETJMP
+ ASM_MOV_REG_LOCAL_ADDR(emit->as, REG_ARG_1, 2);
+ emit_call(emit, MP_F_SETJMP);
+ #endif
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_LOCAL_2);
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, global_except_label, true);
+
+ // Clear PC of current code block, and jump there to resume execution
+ ASM_XOR_REG_REG(emit->as, REG_TEMP0, REG_TEMP0);
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_TEMP0);
+ ASM_JUMP_REG(emit->as, REG_LOCAL_1);
+
+ // Global exception handler: check for valid exception handler
+ emit_native_label_assign(emit, global_except_label);
+ #if N_NLR_SETJMP
+ // Reload REG_FUN_TABLE, since it may be clobbered by longjmp
+ emit_native_mov_reg_state(emit, REG_LOCAL_1, LOCAL_IDX_FUN_OBJ(emit));
+ // NOTE(review): this offsetof expression is presumably the same value as
+ // OFFSETOF_OBJ_FUN_BC_CONST_TABLE used elsewhere — confirm
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_LOCAL_1, REG_LOCAL_1, offsetof(mp_obj_fun_bc_t, const_table) / sizeof(uintptr_t));
+ ASM_LOAD_REG_REG_OFFSET(emit->as, REG_FUN_TABLE, REG_LOCAL_1, emit->scope->num_pos_args + emit->scope->num_kwonly_args);
+ #endif
+ ASM_MOV_REG_LOCAL(emit->as, REG_LOCAL_1, LOCAL_IDX_EXC_HANDLER_PC(emit));
+ ASM_JUMP_IF_REG_NONZERO(emit->as, REG_LOCAL_1, nlr_label, false);
+ }
+
+ if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+ // Restore old globals
+ emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit));
+ emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);
+ }
+
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ // Store return value in state[0]
+ ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit));
+ ASM_STORE_REG_REG_OFFSET(emit->as, REG_TEMP0, REG_GENERATOR_STATE, OFFSETOF_CODE_STATE_STATE);
+
+ // Load return kind
+ ASM_MOV_REG_IMM(emit->as, REG_PARENT_RET, MP_VM_RETURN_EXCEPTION);
+
+ ASM_EXIT(emit->as);
+ } else {
+ // Re-raise exception out to caller
+ ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
+ emit_call(emit, MP_F_NATIVE_RAISE);
+ }
+
+ // Label for start of function
+ emit_native_label_assign(emit, start_label);
+
+ if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
+ // Resume a generator: jump to its saved PC
+ emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_GEN_PC(emit));
+ ASM_JUMP_REG(emit->as, REG_TEMP0);
+ emit->start_offset = mp_asm_base_get_code_pos(&emit->as->base);
+
+ // This is the first entry of the generator
+
+ // Check LOCAL_IDX_EXC_VAL for any injected value
+ ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
+ emit_call(emit, MP_F_NATIVE_RAISE);
+ }
+ }
+}
+
+// Emit the common function-exit code: restore the caller's globals, pop the
+// nlr context (when one was pushed), load the return value and exit.
+STATIC void emit_native_global_exc_exit(emit_t *emit) {
+ // Label for end of function
+ emit_native_label_assign(emit, emit->exit_label);
+
+ if (NEED_GLOBAL_EXC_HANDLER(emit)) {
+ // Get old globals
+ if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+ emit_native_mov_reg_state(emit, REG_ARG_1, LOCAL_IDX_OLD_GLOBALS(emit));
+
+ if (emit->scope->exc_stack_size == 0) {
+ // Optimisation: if globals didn't change then don't restore them and don't do nlr_pop
+ ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, emit->exit_label + 1, false);
+ }
+
+ // Restore old globals
+ emit_call(emit, MP_F_NATIVE_SWAP_GLOBALS);
+ }
+
+ // Pop the nlr context
+ emit_call(emit, MP_F_NLR_POP);
+
+ if (!(emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR)) {
+ if (emit->scope->exc_stack_size == 0) {
+ // Destination label for above optimisation
+ emit_native_label_assign(emit, emit->exit_label + 1);
+ }
+ }
+
+ // Load return value
+ ASM_MOV_REG_LOCAL(emit->as, REG_PARENT_RET, LOCAL_IDX_RET_VAL(emit));
+ }
+
+ ASM_EXIT(emit->as);
+}
+
+// Emit code for IMPORT_NAME: pops fromlist and level, calls mp_import_name
+// with the given qstr and pushes the resulting module object.
+STATIC void emit_native_import_name(emit_t *emit, qstr qst) {
+ DEBUG_printf("import_name %s\n", qstr_str(qst));
+
+ // get arguments from stack: arg2 = fromlist, arg3 = level
+ // If using viper types these arguments must be converted to proper objects, and
+ // to accomplish this viper types are turned off for the emit_pre_pop_reg_reg call.
+ bool orig_do_viper_types = emit->do_viper_types;
+ emit->do_viper_types = false;
+ vtype_kind_t vtype_fromlist;
+ vtype_kind_t vtype_level;
+ emit_pre_pop_reg_reg(emit, &vtype_fromlist, REG_ARG_2, &vtype_level, REG_ARG_3);
+ assert(vtype_fromlist == VTYPE_PYOBJ);
+ assert(vtype_level == VTYPE_PYOBJ);
+ emit->do_viper_types = orig_do_viper_types;
+
+ emit_call_with_qstr_arg(emit, MP_F_IMPORT_NAME, qst, REG_ARG_1); // arg1 = import name
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
STATIC void emit_native_import_from(emit_t *emit, qstr qst) {
    // Emit code for IMPORT_FROM: peeks the module at the top of the stack
    // (it stays there for further IMPORT_FROMs) and pushes the named attribute.
    DEBUG_printf("import_from %s\n", qstr_str(qst));
    emit_native_pre(emit);
    vtype_kind_t vtype_module;
    emit_access_stack(emit, 1, &vtype_module, REG_ARG_1); // arg1 = module
    assert(vtype_module == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_IMPORT_FROM, qst, REG_ARG_2); // arg2 = import name
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
STATIC void emit_native_import_star(emit_t *emit) {
    // Emit code for IMPORT_STAR: pops the module and calls mp_import_all,
    // which copies its public names into the current globals.
    DEBUG_printf("import_star\n");
    vtype_kind_t vtype_module;
    emit_pre_pop_reg(emit, &vtype_module, REG_ARG_1); // arg1 = module
    assert(vtype_module == VTYPE_PYOBJ);
    emit_call(emit, MP_F_IMPORT_ALL);
    emit_post(emit);
}
+
+STATIC void emit_native_import(emit_t *emit, qstr qst, int kind) {
+ if (kind == MP_EMIT_IMPORT_NAME) {
+ emit_native_import_name(emit, qst);
+ } else if (kind == MP_EMIT_IMPORT_FROM) {
+ emit_native_import_from(emit, qst);
+ } else {
+ emit_native_import_star(emit);
+ }
+}
+
STATIC void emit_native_load_const_tok(emit_t *emit, mp_token_kind_t tok) {
    // Push the constant corresponding to a keyword token:
    // Ellipsis, None, True or False.
    DEBUG_printf("load_const_tok(tok=%u)\n", tok);
    if (tok == MP_TOKEN_ELLIPSIS) {
        #if MICROPY_PERSISTENT_CODE_SAVE
        // must go via the const table so the object can be saved/relocated
        emit_native_load_const_obj(emit, MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj));
        #else
        emit_post_push_imm(emit, VTYPE_PYOBJ, (mp_uint_t)MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj));
        #endif
    } else {
        emit_native_pre(emit);
        if (tok == MP_TOKEN_KW_NONE) {
            // None is represented as a tagged zero immediate
            emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
        } else {
            // True/False pushed as a native boolean immediate
            emit_post_push_imm(emit, VTYPE_BOOL, tok == MP_TOKEN_KW_FALSE ? 0 : 1);
        }
    }
}
+
STATIC void emit_native_load_const_small_int(emit_t *emit, mp_int_t arg) {
    // Push a small-int constant as a native integer immediate.
    DEBUG_printf("load_const_small_int(int=" INT_FMT ")\n", arg);
    emit_native_pre(emit);
    emit_post_push_imm(emit, VTYPE_INT, arg);
}
+
STATIC void emit_native_load_const_str(emit_t *emit, qstr qst) {
    // Push a string constant (by qstr) as a Python object.
    emit_native_pre(emit);
    // TODO: Eventually we want to be able to work with raw pointers in viper to
    // do native array access. For now we just load them as any other object.
    /*
    if (emit->do_viper_types) {
        // load a pointer to the asciiz string?
        emit_post_push_imm(emit, VTYPE_PTR, (mp_uint_t)qstr_str(qst));
    } else
    */
    {
        need_reg_single(emit, REG_TEMP0, 0);
        emit_native_mov_reg_qstr_obj(emit, REG_TEMP0, qst);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0);
    }
}
+
STATIC void emit_native_load_const_obj(emit_t *emit, mp_obj_t obj) {
    // Push an arbitrary constant object; marks the scope as having consts
    // so the object is kept in the scope's constant table.
    emit->scope->scope_flags |= MP_SCOPE_FLAG_HASCONSTS;
    emit_native_pre(emit);
    need_reg_single(emit, REG_RET, 0);
    emit_load_reg_with_object(emit, REG_RET, obj);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
STATIC void emit_native_load_null(emit_t *emit) {
    // Push MP_OBJ_NULL (value 0) typed as a Python object.
    emit_native_pre(emit);
    emit_post_push_imm(emit, VTYPE_PYOBJ, 0);
}
+
STATIC void emit_native_load_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
    // Push the value of a fast (function-scope) local variable.
    // Locals with a small index may live in registers; otherwise they are
    // loaded from the native state area.
    DEBUG_printf("load_fast(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    vtype_kind_t vtype = emit->local_vtype[local_num];
    if (vtype == VTYPE_UNBOUND) {
        // viper requires the local's type to be known before first use
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("local '%q' used before type known"), qst);
    }
    emit_native_pre(emit);
    if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) {
        // local is held in a dedicated register
        emit_post_push_reg(emit, vtype, reg_local_table[local_num]);
    } else {
        // local is held in the state slots; load via a temporary
        need_reg_single(emit, REG_TEMP0, 0);
        emit_native_mov_reg_state(emit, REG_TEMP0, LOCAL_IDX_LOCAL_VAR(emit, local_num));
        emit_post_push_reg(emit, vtype, REG_TEMP0);
    }
}
+
STATIC void emit_native_load_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
    // Push the value of a closed-over variable: load the cell object stored
    // in the local slot, then dereference it (field at offset 1).
    DEBUG_printf("load_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    need_reg_single(emit, REG_RET, 0);
    emit_native_load_fast(emit, qst, local_num);
    vtype_kind_t vtype;
    int reg_base = REG_RET;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_RET, reg_base, 1);
    // closed over vars are always Python objects
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
+STATIC void emit_native_load_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
+ if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
+ emit_native_load_fast(emit, qst, local_num);
+ } else {
+ emit_native_load_deref(emit, qst, local_num);
+ }
+}
+
STATIC void emit_native_load_global(emit_t *emit, qstr qst, int kind) {
    // Push a name or global: calls mp_load_name or mp_load_global depending
    // on `kind`, which is added directly to the runtime-function index.
    MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_LOAD_NAME);
    MP_STATIC_ASSERT(MP_F_LOAD_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_LOAD_GLOBAL);
    emit_native_pre(emit);
    if (kind == MP_EMIT_IDOP_GLOBAL_NAME) {
        DEBUG_printf("load_name(%s)\n", qstr_str(qst));
    } else {
        DEBUG_printf("load_global(%s)\n", qstr_str(qst));
        if (emit->do_viper_types) {
            // check for builtin casting operators
            // (e.g. int/ptr8/... loaded as a special cast marker, no runtime call)
            int native_type = mp_native_type_from_qstr(qst);
            if (native_type >= MP_NATIVE_TYPE_BOOL) {
                emit_post_push_imm(emit, VTYPE_BUILTIN_CAST, native_type);
                return;
            }
        }
    }
    emit_call_with_qstr_arg(emit, MP_F_LOAD_NAME + kind, qst, REG_ARG_1);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
STATIC void emit_native_load_attr(emit_t *emit, qstr qst) {
    // Pop the base object and push base.<qst> via mp_load_attr.
    // depends on type of subject:
    //  - integer, function, pointer to integers: error
    //  - pointer to structure: get member, quite easy
    //  - Python object: call mp_load_attr, and needs to be typed to convert result
    vtype_kind_t vtype_base;
    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
    assert(vtype_base == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_LOAD_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
STATIC void emit_native_load_method(emit_t *emit, qstr qst, bool is_super) {
    // Push a bound-method pair (method, self) for a subsequent CALL_METHOD.
    // For super() lookups 3 stack slots are consumed and 2 produced; the
    // runtime helper fills the destination slots in-place.
    if (is_super) {
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, 3); // arg2 = dest ptr
        emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_2, 2); // arg2 = dest ptr
        emit_call_with_qstr_arg(emit, MP_F_LOAD_SUPER_METHOD, qst, REG_ARG_1); // arg1 = method name
    } else {
        vtype_kind_t vtype_base;
        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
        assert(vtype_base == VTYPE_PYOBJ);
        emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
        emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, qst, REG_ARG_2); // arg2 = method name
    }
}
+
STATIC void emit_native_load_build_class(emit_t *emit) {
    // Push the __build_class__ builtin via the runtime helper.
    emit_native_pre(emit);
    emit_call(emit, MP_F_LOAD_BUILD_CLASS);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
STATIC void emit_native_load_subscr(emit_t *emit) {
    // Emit code for base[index] as an rvalue.
    // Python objects go through mp_obj_subscr; viper pointer types
    // (ptr8/ptr16/ptr32) are compiled to direct native loads, with a special
    // path when the index is a compile-time immediate.
    DEBUG_printf("load_subscr\n");
    // need to compile: base[index]

    // pop: index, base
    // optimise case where index is an immediate
    vtype_kind_t vtype_base = peek_vtype(emit, 1);

    if (vtype_base == VTYPE_PYOBJ) {
        // standard Python subscr
        // TODO factor this implicit cast code with other uses of it
        vtype_kind_t vtype_index = peek_vtype(emit, 0);
        if (vtype_index == VTYPE_PYOBJ) {
            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_2);
        } else {
            // index is a native value: box it to an object first
            emit_pre_pop_reg(emit, &vtype_index, REG_ARG_1);
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype_index, REG_ARG_2); // arg2 = type
            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
        }
        emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
        // MP_OBJ_SENTINEL as the value selects "load" in mp_obj_subscr
        emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_SENTINEL, REG_ARG_3);
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        // viper load
        // TODO The different machine architectures have very different
        // capabilities and requirements for loads, so probably best to
        // write a completely separate load-optimiser for each one.
        stack_info_t *top = peek_stack(emit, 0);
        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
            // index is an immediate
            mp_int_t index_value = top->data.u_imm;
            emit_pre_pop_discard(emit); // discard index
            int reg_base = REG_ARG_1;
            int reg_index = REG_ARG_2;
            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_index);
            need_reg_single(emit, REG_RET, 0);
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb ldrb r1, [r2, r3]
                    if (index_value != 0) {
                        // index is non-zero
                        #if N_THUMB
                        // Thumb has a 5-bit immediate-offset load
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldrb_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD8_REG_REG(emit->as, REG_RET, reg_base); // load from (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldrh_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        // scale index by element size (2 bytes)
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD16_REG_REG(emit->as, REG_RET, reg_base); // load from (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_ldr_rlo_rlo_i5(emit->as, REG_RET, reg_base, index_value);
                            break;
                        }
                        #endif
                        // scale index by element size (4 bytes)
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
                        reg_base = reg_index;
                    }
                    ASM_LOAD32_REG_REG(emit->as, REG_RET, reg_base); // load from (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
            }
        } else {
            // index is not an immediate
            vtype_kind_t vtype_index;
            int reg_index = REG_ARG_2;
            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, REG_ARG_1);
            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
            need_reg_single(emit, REG_RET, 0);
            if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't load with '%q' index"), vtype_to_qstr(vtype_index));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb ldrb r1, [r2, r3]
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_LOAD8_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    // add index twice to get base + 2*index
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_LOAD16_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to word-size memory
                    // add index four times to get base + 4*index
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_LOAD32_REG_REG(emit->as, REG_RET, REG_ARG_1); // load from (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't load from '%q'"), vtype_to_qstr(vtype_base));
            }
        }
        emit_post_push_reg(emit, VTYPE_INT, REG_RET);
    }
}
+
STATIC void emit_native_store_fast(emit_t *emit, qstr qst, mp_uint_t local_num) {
    // Pop the top of stack into a fast local; on first assignment the
    // local's viper type is fixed to the value's type.
    vtype_kind_t vtype;
    if (local_num < REG_LOCAL_NUM && CAN_USE_REGS_FOR_LOCALS(emit)) {
        // local lives in a dedicated register
        emit_pre_pop_reg(emit, &vtype, reg_local_table[local_num]);
    } else {
        // local lives in a state slot
        emit_pre_pop_reg(emit, &vtype, REG_TEMP0);
        emit_native_mov_state_reg(emit, LOCAL_IDX_LOCAL_VAR(emit, local_num), REG_TEMP0);
    }
    emit_post(emit);

    // check types
    if (emit->local_vtype[local_num] == VTYPE_UNBOUND) {
        // first time this local is assigned, so give it a type of the object stored in it
        emit->local_vtype[local_num] = vtype;
    } else if (emit->local_vtype[local_num] != vtype) {
        // type of local is not the same as object stored in it
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
            MP_ERROR_TEXT("local '%q' has type '%q' but source is '%q'"),
            qst, vtype_to_qstr(emit->local_vtype[local_num]), vtype_to_qstr(vtype));
    }
}
+
STATIC void emit_native_store_deref(emit_t *emit, qstr qst, mp_uint_t local_num) {
    // Store the top of stack into a closed-over variable: load the cell
    // object from the local slot, then write into it (field at offset 1).
    DEBUG_printf("store_deref(%s, " UINT_FMT ")\n", qstr_str(qst), local_num);
    need_reg_single(emit, REG_TEMP0, 0);
    need_reg_single(emit, REG_TEMP1, 0);
    emit_native_load_fast(emit, qst, local_num);
    vtype_kind_t vtype;
    int reg_base = REG_TEMP0;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_base, -1, -1);
    int reg_src = REG_TEMP1;
    // pop the value to store, avoiding reg_base
    emit_pre_pop_reg_flexible(emit, &vtype, &reg_src, reg_base, reg_base);
    ASM_STORE_REG_REG_OFFSET(emit->as, reg_src, reg_base, 1);
    emit_post(emit);
}
+
+STATIC void emit_native_store_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
+ if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
+ emit_native_store_fast(emit, qst, local_num);
+ } else {
+ emit_native_store_deref(emit, qst, local_num);
+ }
+}
+
STATIC void emit_native_store_global(emit_t *emit, qstr qst, int kind) {
    // Pop the top of stack and store it as a name or global, boxing native
    // values to objects first on the global path.
    MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_STORE_NAME);
    MP_STATIC_ASSERT(MP_F_STORE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_STORE_GLOBAL);
    if (kind == MP_EMIT_IDOP_GLOBAL_NAME) {
        // mp_store_name, but needs conversion of object (maybe have mp_viper_store_name(obj, type))
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
        assert(vtype == VTYPE_PYOBJ);
    } else {
        vtype_kind_t vtype = peek_vtype(emit, 0);
        if (vtype == VTYPE_PYOBJ) {
            emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
        } else {
            // box the native value to an object before storing
            emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, vtype, REG_ARG_2); // arg2 = type
            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
        }
    }
    emit_call_with_qstr_arg(emit, MP_F_STORE_NAME + kind, qst, REG_ARG_1); // arg1 = name
    emit_post(emit);
}
+
STATIC void emit_native_store_attr(emit_t *emit, qstr qst) {
    // Pop value and base and emit base.<qst> = value via mp_store_attr.
    vtype_kind_t vtype_base, vtype_val;
    emit_pre_pop_reg_reg(emit, &vtype_base, REG_ARG_1, &vtype_val, REG_ARG_3); // arg1 = base, arg3 = value
    assert(vtype_base == VTYPE_PYOBJ);
    assert(vtype_val == VTYPE_PYOBJ);
    emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post(emit);
}
+
STATIC void emit_native_store_subscr(emit_t *emit) {
    // Emit code for base[index] = value.
    // Python objects go through mp_obj_subscr; viper pointer types
    // (ptr8/ptr16/ptr32) are compiled to direct native stores, with a
    // special path when the index is a compile-time immediate and
    // architecture-specific forms for Thumb and ARM.
    DEBUG_printf("store_subscr\n");
    // need to compile: base[index] = value

    // pop: index, base, value
    // optimise case where index is an immediate
    vtype_kind_t vtype_base = peek_vtype(emit, 1);

    if (vtype_base == VTYPE_PYOBJ) {
        // standard Python subscr
        vtype_kind_t vtype_index = peek_vtype(emit, 0);
        vtype_kind_t vtype_value = peek_vtype(emit, 2);
        if (vtype_index != VTYPE_PYOBJ || vtype_value != VTYPE_PYOBJ) {
            // need to implicitly convert non-objects to objects
            // TODO do this properly
            emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, 3);
            adjust_stack(emit, 3);
        }
        emit_pre_pop_reg_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1, &vtype_value, REG_ARG_3);
        emit_call(emit, MP_F_OBJ_SUBSCR);
    } else {
        // viper store
        // TODO The different machine architectures have very different
        // capabilities and requirements for stores, so probably best to
        // write a completely separate store-optimiser for each one.
        stack_info_t *top = peek_stack(emit, 0);
        if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
            // index is an immediate
            mp_int_t index_value = top->data.u_imm;
            emit_pre_pop_discard(emit); // discard index
            vtype_kind_t vtype_value;
            int reg_base = REG_ARG_1;
            int reg_index = REG_ARG_2;
            int reg_value = REG_ARG_3;
            emit_pre_pop_reg_flexible(emit, &vtype_base, &reg_base, reg_index, reg_value);
            #if N_X64 || N_X86
            // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
            emit_pre_pop_reg(emit, &vtype_value, reg_value);
            #else
            emit_pre_pop_reg_flexible(emit, &vtype_value, &reg_value, reg_base, reg_index);
            #endif
            if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb strb r1, [r2, r3]
                    if (index_value != 0) {
                        // index is non-zero
                        #if N_THUMB
                        // Thumb has a 5-bit immediate-offset store
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_strb_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        #if N_ARM
                        // ARM has a register-offset store; it completes the whole op
                        asm_arm_strb_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
                        return;
                        #endif
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE8_REG_REG(emit->as, reg_value, reg_base); // store value to (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_strh_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        // scale index by element size (2 bytes)
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 1);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 2*index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE16_REG_REG(emit->as, reg_value, reg_base); // store value to (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    if (index_value != 0) {
                        // index is a non-zero immediate
                        #if N_THUMB
                        if (index_value > 0 && index_value < 32) {
                            asm_thumb_str_rlo_rlo_i5(emit->as, reg_value, reg_base, index_value);
                            break;
                        }
                        #endif
                        #if N_ARM
                        // ARM register-offset store; completes the whole op
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value);
                        asm_arm_str_reg_reg_reg(emit->as, reg_value, reg_base, reg_index);
                        return;
                        #endif
                        // scale index by element size (4 bytes)
                        ASM_MOV_REG_IMM(emit->as, reg_index, index_value << 2);
                        ASM_ADD_REG_REG(emit->as, reg_index, reg_base); // add 4*index to base
                        reg_base = reg_index;
                    }
                    ASM_STORE32_REG_REG(emit->as, reg_value, reg_base); // store value to (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
            }
        } else {
            // index is not an immediate
            vtype_kind_t vtype_index, vtype_value;
            int reg_index = REG_ARG_2;
            int reg_value = REG_ARG_3;
            emit_pre_pop_reg_flexible(emit, &vtype_index, &reg_index, REG_ARG_1, reg_value);
            emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1);
            if (vtype_index != VTYPE_INT && vtype_index != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store with '%q' index"), vtype_to_qstr(vtype_index));
            }
            #if N_X64 || N_X86
            // special case: x86 needs byte stores to be from lower 4 regs (REG_ARG_3 is EDX)
            emit_pre_pop_reg(emit, &vtype_value, reg_value);
            #else
            emit_pre_pop_reg_flexible(emit, &vtype_value, &reg_value, REG_ARG_1, reg_index);
            #endif
            if (vtype_value != VTYPE_BOOL && vtype_value != VTYPE_INT && vtype_value != VTYPE_UINT) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("can't store '%q'"), vtype_to_qstr(vtype_value));
            }
            switch (vtype_base) {
                case VTYPE_PTR8: {
                    // pointer to 8-bit memory
                    // TODO optimise to use thumb strb r1, [r2, r3]
                    #if N_ARM
                    asm_arm_strb_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE8_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+index)
                    break;
                }
                case VTYPE_PTR16: {
                    // pointer to 16-bit memory
                    #if N_ARM
                    asm_arm_strh_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    // add index twice to get base + 2*index
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE16_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+2*index)
                    break;
                }
                case VTYPE_PTR32: {
                    // pointer to 32-bit memory
                    #if N_ARM
                    asm_arm_str_reg_reg_reg(emit->as, reg_value, REG_ARG_1, reg_index);
                    break;
                    #endif
                    // add index four times to get base + 4*index
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_ADD_REG_REG(emit->as, REG_ARG_1, reg_index); // add index to base
                    ASM_STORE32_REG_REG(emit->as, reg_value, REG_ARG_1); // store value to (base+4*index)
                    break;
                }
                default:
                    EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                        MP_ERROR_TEXT("can't store to '%q'"), vtype_to_qstr(vtype_base));
            }
        }

    }
}
+
STATIC void emit_native_delete_local(emit_t *emit, qstr qst, mp_uint_t local_num, int kind) {
    // Delete a local variable (best-effort for native code).
    if (kind == MP_EMIT_IDOP_LOCAL_FAST) {
        // TODO: This is not compliant implementation. We could use MP_OBJ_SENTINEL
        // to mark deleted vars but then every var would need to be checked on
        // each access. Very inefficient, so just set value to None to enable GC.
        emit_native_load_const_tok(emit, MP_TOKEN_KW_NONE);
        emit_native_store_fast(emit, qst, local_num);
    } else {
        // TODO implement me!
        // (deleting a closed-over variable is currently a silent no-op)
    }
}
+
STATIC void emit_native_delete_global(emit_t *emit, qstr qst, int kind) {
    // Delete a name or global via mp_delete_name / mp_delete_global;
    // `kind` is added directly to the runtime-function index.
    MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_NAME == MP_F_DELETE_NAME);
    MP_STATIC_ASSERT(MP_F_DELETE_NAME + MP_EMIT_IDOP_GLOBAL_GLOBAL == MP_F_DELETE_GLOBAL);
    emit_native_pre(emit);
    emit_call_with_qstr_arg(emit, MP_F_DELETE_NAME + kind, qst, REG_ARG_1);
    emit_post(emit);
}
+
STATIC void emit_native_delete_attr(emit_t *emit, qstr qst) {
    // Delete base.<qst>: mp_store_attr with a null value performs a delete.
    vtype_kind_t vtype_base;
    emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = base
    assert(vtype_base == VTYPE_PYOBJ);
    ASM_XOR_REG_REG(emit->as, REG_ARG_3, REG_ARG_3); // arg3 = value (null for delete)
    emit_call_with_qstr_arg(emit, MP_F_STORE_ATTR, qst, REG_ARG_2); // arg2 = attribute name
    emit_post(emit);
}
+
STATIC void emit_native_delete_subscr(emit_t *emit) {
    // Delete base[index]: mp_obj_subscr with MP_OBJ_NULL as the value
    // performs a delete.
    vtype_kind_t vtype_index, vtype_base;
    emit_pre_pop_reg_reg(emit, &vtype_index, REG_ARG_2, &vtype_base, REG_ARG_1); // index, base
    assert(vtype_index == VTYPE_PYOBJ);
    assert(vtype_base == VTYPE_PYOBJ);
    emit_call_with_imm_arg(emit, MP_F_OBJ_SUBSCR, (mp_uint_t)MP_OBJ_NULL, REG_ARG_3);
}
+
+STATIC void emit_native_subscr(emit_t *emit, int kind) {
+ if (kind == MP_EMIT_SUBSCR_LOAD) {
+ emit_native_load_subscr(emit);
+ } else if (kind == MP_EMIT_SUBSCR_STORE) {
+ emit_native_store_subscr(emit);
+ } else {
+ emit_native_delete_subscr(emit);
+ }
+}
+
+STATIC void emit_native_attr(emit_t *emit, qstr qst, int kind) {
+ if (kind == MP_EMIT_ATTR_LOAD) {
+ emit_native_load_attr(emit, qst);
+ } else if (kind == MP_EMIT_ATTR_STORE) {
+ emit_native_store_attr(emit, qst);
+ } else {
+ emit_native_delete_attr(emit, qst);
+ }
+}
+
STATIC void emit_native_dup_top(emit_t *emit) {
    // Duplicate the top stack item (pop into a register, push it twice).
    DEBUG_printf("dup_top\n");
    vtype_kind_t vtype;
    int reg = REG_TEMP0;
    emit_pre_pop_reg_flexible(emit, &vtype, &reg, -1, -1);
    emit_post_push_reg_reg(emit, vtype, reg, vtype, reg);
}
+
STATIC void emit_native_dup_top_two(emit_t *emit) {
    // Duplicate the top two stack items, preserving their order:
    // (..., a, b) -> (..., a, b, a, b).
    vtype_kind_t vtype0, vtype1;
    emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
    emit_post_push_reg_reg_reg_reg(emit, vtype1, REG_TEMP1, vtype0, REG_TEMP0, vtype1, REG_TEMP1, vtype0, REG_TEMP0);
}
+
STATIC void emit_native_pop_top(emit_t *emit) {
    // Discard the top stack item.
    DEBUG_printf("pop_top\n");
    emit_pre_pop_discard(emit);
    emit_post(emit);
}
+
STATIC void emit_native_rot_two(emit_t *emit) {
    // Swap the top two stack items: (..., a, b) -> (..., b, a).
    DEBUG_printf("rot_two\n");
    vtype_kind_t vtype0, vtype1;
    emit_pre_pop_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1);
    emit_post_push_reg_reg(emit, vtype0, REG_TEMP0, vtype1, REG_TEMP1);
}
+
STATIC void emit_native_rot_three(emit_t *emit) {
    // Rotate the top three stack items: (..., a, b, c) -> (..., c, a, b).
    DEBUG_printf("rot_three\n");
    vtype_kind_t vtype0, vtype1, vtype2;
    emit_pre_pop_reg_reg_reg(emit, &vtype0, REG_TEMP0, &vtype1, REG_TEMP1, &vtype2, REG_TEMP2);
    emit_post_push_reg_reg_reg(emit, vtype0, REG_TEMP0, vtype2, REG_TEMP2, vtype1, REG_TEMP1);
}
+
STATIC void emit_native_jump(emit_t *emit, mp_uint_t label) {
    // Emit an unconditional jump to `label`.
    DEBUG_printf("jump(label=" UINT_FMT ")\n", label);
    emit_native_pre(emit);
    // need to commit stack because we are jumping elsewhere
    need_stack_settled(emit);
    ASM_JUMP(emit->as, label);
    emit_post(emit);
}
+
STATIC void emit_native_jump_helper(emit_t *emit, bool cond, mp_uint_t label, bool pop) {
    // Common code for conditional jumps: evaluate the truthiness of the top
    // of stack and jump to `label` when it matches `cond`. If `pop` is false
    // the value is kept on the stack for the fall-through path.
    vtype_kind_t vtype = peek_vtype(emit, 0);
    if (vtype == VTYPE_PYOBJ) {
        emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
        if (!pop) {
            // keep the value on the (conceptual) stack during the call
            adjust_stack(emit, 1);
        }
        emit_call(emit, MP_F_OBJ_IS_TRUE);
    } else {
        emit_pre_pop_reg(emit, &vtype, REG_RET);
        if (!pop) {
            adjust_stack(emit, 1);
        }
        if (!(vtype == VTYPE_BOOL || vtype == VTYPE_INT || vtype == VTYPE_UINT)) {
            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                MP_ERROR_TEXT("can't implicitly convert '%q' to 'bool'"), vtype_to_qstr(vtype));
        }
    }
    // For non-pop need to save the vtype so that emit_native_adjust_stack_size
    // can use it. This is a bit of a hack.
    if (!pop) {
        emit->saved_stack_vtype = vtype;
    }
    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
    // Emit the jump
    if (cond) {
        ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ);
    } else {
        ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, vtype == VTYPE_PYOBJ);
    }
    if (!pop) {
        // undo the temporary stack adjustment for the fall-through path
        adjust_stack(emit, -1);
    }
    emit_post(emit);
}
+
STATIC void emit_native_pop_jump_if(emit_t *emit, bool cond, mp_uint_t label) {
    // Pop the top of stack and jump to `label` if its truthiness equals `cond`.
    DEBUG_printf("pop_jump_if(cond=%u, label=" UINT_FMT ")\n", cond, label);
    emit_native_jump_helper(emit, cond, label, true);
}
+
STATIC void emit_native_jump_if_or_pop(emit_t *emit, bool cond, mp_uint_t label) {
    // Jump to `label` keeping the value on the stack if its truthiness
    // equals `cond`; otherwise pop it (used for `and`/`or` short-circuit).
    DEBUG_printf("jump_if_or_pop(cond=%u, label=" UINT_FMT ")\n", cond, label);
    emit_native_jump_helper(emit, cond, label, false);
}
+
STATIC void emit_native_unwind_jump(emit_t *emit, mp_uint_t label, mp_uint_t except_depth) {
    // Emit a break/continue-style jump that must unwind `except_depth`
    // levels of exception handlers, routing through any active finally
    // blocks in innermost-to-outermost order before reaching `label`.
    // The MP_EMIT_BREAK_FROM_FOR flag bit is masked off the label value.
    if (except_depth > 0) {
        exc_stack_entry_t *first_finally = NULL;
        exc_stack_entry_t *prev_finally = NULL;
        exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
        // Walk the handlers being unwound, chaining active finallys together
        for (; except_depth > 0; --except_depth, --e) {
            if (e->is_finally && e->is_active) {
                // Found an active finally handler
                if (first_finally == NULL) {
                    first_finally = e;
                }
                if (prev_finally != NULL) {
                    // Mark prev finally as needed to unwind a jump
                    prev_finally->unwind_label = e->label;
                }
                prev_finally = e;
            }
        }
        if (prev_finally == NULL) {
            // No finally, handle the jump ourselves
            // First, restore the exception handler address for the jump
            if (e < emit->exc_stack) {
                // unwound past the outermost handler: clear the handler PC
                ASM_XOR_REG_REG(emit->as, REG_RET, REG_RET);
            } else {
                ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
            }
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
        } else {
            // Last finally should do our jump for us
            // Mark finally as needing to decide the type of jump
            prev_finally->unwind_label = UNWIND_LABEL_DO_FINAL_UNWIND;
            ASM_MOV_REG_PCREL(emit->as, REG_RET, label & ~MP_EMIT_BREAK_FROM_FOR);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_UNWIND(emit), REG_RET);
            // Cancel any active exception (see also emit_native_pop_except_jump)
            ASM_MOV_REG_IMM(emit->as, REG_RET, (mp_uint_t)MP_OBJ_NULL);
            ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_RET);
            // Jump to the innermost active finally
            label = first_finally->label;
        }
    }
    emit_native_jump(emit, label & ~MP_EMIT_BREAK_FROM_FOR);
}
+
STATIC void emit_native_setup_with(emit_t *emit, mp_uint_t label) {
    // Emit the entry sequence for a `with` statement: look up __exit__ and
    // __enter__ on the context manager, call __enter__, install the
    // exception handler at `label`, and leave (__exit__, self, as_value,
    // as_value) on the stack.

    // the context manager is on the top of the stack
    // stack: (..., ctx_mgr)

    // get __exit__ method
    vtype_kind_t vtype;
    emit_access_stack(emit, 1, &vtype, REG_ARG_1); // arg1 = ctx_mgr
    assert(vtype == VTYPE_PYOBJ);
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___exit__, REG_ARG_2);
    // stack: (..., ctx_mgr, __exit__, self)

    // shuffle the stack to drop ctx_mgr, keeping (__exit__, self)
    emit_pre_pop_reg(emit, &vtype, REG_ARG_3); // self
    emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // __exit__
    emit_pre_pop_reg(emit, &vtype, REG_ARG_1); // ctx_mgr
    emit_post_push_reg(emit, vtype, REG_ARG_2); // __exit__
    emit_post_push_reg(emit, vtype, REG_ARG_3); // self
    // stack: (..., __exit__, self)
    // REG_ARG_1=ctx_mgr

    // get __enter__ method
    emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, 2); // arg3 = dest ptr
    emit_call_with_qstr_arg(emit, MP_F_LOAD_METHOD, MP_QSTR___enter__, REG_ARG_2); // arg2 = method name
    // stack: (..., __exit__, self, __enter__, self)

    // call __enter__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2); // pointer to items, including meth and self
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 0, REG_ARG_1, 0, REG_ARG_2);
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // push return value of __enter__
    // stack: (..., __exit__, self, as_value)

    // need to commit stack because we may jump elsewhere
    need_stack_settled(emit);
    // install the with-block's exception handler (is_finally = true)
    emit_native_push_exc_stack(emit, label, true);

    emit_native_dup_top(emit);
    // stack: (..., __exit__, self, as_value, as_value)
}
+
STATIC void emit_native_setup_block(emit_t *emit, mp_uint_t label, int kind) {
    // Set up a with/except/finally block whose handler is at `label`.
    if (kind == MP_EMIT_SETUP_BLOCK_WITH) {
        emit_native_setup_with(emit, label);
    } else {
        // Set up except and finally
        emit_native_pre(emit);
        need_stack_settled(emit);
        emit_native_push_exc_stack(emit, label, kind == MP_EMIT_SETUP_BLOCK_FINALLY);
        emit_post(emit);
    }
}
+
STATIC void emit_native_with_cleanup(emit_t *emit, mp_uint_t label) {
    // Emit the exit sequence of a `with` statement: call __exit__ on both
    // the normal path (with None args) and the exception path (with
    // type(exc), exc, traceback), swallowing the exception if __exit__
    // returns true. `label` is the nlr-catch entry for this with block.

    // Note: 3 labels are reserved for this function, starting at *emit->label_slot

    // stack: (..., __exit__, self, as_value)
    emit_native_pre(emit);
    emit_native_leave_exc_stack(emit, false);
    adjust_stack(emit, -1);
    // stack: (..., __exit__, self)

    // Label for case where __exit__ is called from an unwind jump
    emit_native_label_assign(emit, *emit->label_slot + 2);

    // call __exit__ on the normal (no exception) path: __exit__(None, None, None)
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0);
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);

    // Replace exc with None and finish
    emit_native_jump(emit, *emit->label_slot);

    // nlr_catch
    // Don't use emit_native_label_assign because this isn't a real finally label
    mp_asm_base_label_assign(&emit->as->base, label);

    // Leave with's exception handler
    emit_native_leave_exc_stack(emit, true);

    // Adjust stack counter for: __exit__, self (implicitly discard as_value which is above self)
    emit_native_adjust_stack_size(emit, 2);
    // stack: (..., __exit__, self)

    ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit)); // get exc

    // Check if exc is MP_OBJ_NULL (i.e. zero) and jump to non-exc handler if it is
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_ARG_1, *emit->label_slot + 2, false);

    ASM_LOAD_REG_REG_OFFSET(emit->as, REG_ARG_2, REG_ARG_1, 0); // get type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_2); // push type(exc)
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_1); // push exc value
    emit_post_push_imm(emit, VTYPE_PTR_NONE, 0); // traceback info
    // Stack: (..., __exit__, self, type(exc), exc, traceback)

    // call __exit__ method
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 5);
    emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, 3, REG_ARG_1, 0, REG_ARG_2);
    // Stack: (...)

    // If REG_RET is true then we need to replace exception with None (swallow exception)
    if (REG_ARG_1 != REG_RET) {
        ASM_MOV_REG_REG(emit->as, REG_ARG_1, REG_RET);
    }
    emit_call(emit, MP_F_OBJ_IS_TRUE);
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot + 1, true);

    // Replace exception with MP_OBJ_NULL.
    emit_native_label_assign(emit, *emit->label_slot);
    ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);

    // end of with cleanup nlr_catch block
    emit_native_label_assign(emit, *emit->label_slot + 1);

    // Exception is in nlr_buf.ret_val slot
}
+
// Emit code for the end of a finally block: re-raise any pending exception,
// then resume any unwind (break/continue/return) that was in progress when
// the finally block was entered.
STATIC void emit_native_end_finally(emit_t *emit) {
    // logic:
    //   exc = pop_stack
    //   if exc == None: pass
    //   else: raise exc
    // the check if exc is None is done in the MP_F_NATIVE_RAISE stub
    emit_native_pre(emit);
    ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
    emit_call(emit, MP_F_NATIVE_RAISE);

    // Get state for this finally and see if we need to unwind
    exc_stack_entry_t *e = emit_native_pop_exc_stack(emit);
    if (e->unwind_label != UNWIND_LABEL_UNUSED) {
        // A zero unwind value means no unwind is in progress; skip past the jump
        ASM_MOV_REG_LOCAL(emit->as, REG_RET, LOCAL_IDX_EXC_HANDLER_UNWIND(emit));
        ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, *emit->label_slot, false);
        if (e->unwind_label == UNWIND_LABEL_DO_FINAL_UNWIND) {
            // Unwind target address is held in the register itself (computed jump)
            ASM_JUMP_REG(emit->as, REG_RET);
        } else {
            emit_native_jump(emit, e->unwind_label);
        }
        emit_native_label_assign(emit, *emit->label_slot);
    }

    emit_post(emit);
}
+
+STATIC void emit_native_get_iter(emit_t *emit, bool use_stack) {
+ // perhaps the difficult one, as we want to rewrite for loops using native code
+ // in cases where we iterate over a Python object, can we use normal runtime calls?
+
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
+ assert(vtype == VTYPE_PYOBJ);
+ if (use_stack) {
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_2, MP_OBJ_ITER_BUF_NSLOTS);
+ emit_call(emit, MP_F_NATIVE_GETITER);
+ } else {
+ // mp_getiter will allocate the iter_buf on the heap
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_2, 0);
+ emit_call(emit, MP_F_NATIVE_GETITER);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ }
+}
+
// Emit the per-iteration step of a for loop: call iternext and jump to
// `label` (the loop-end label) when the iterator is exhausted, otherwise
// push the next value.
STATIC void emit_native_for_iter(emit_t *emit, mp_uint_t label) {
    emit_native_pre(emit);
    // Point REG_ARG_1 at the iterator slots on the stack; they stay live for
    // the whole loop, so immediately restore the stack counter.
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_1, MP_OBJ_ITER_BUF_NSLOTS);
    adjust_stack(emit, MP_OBJ_ITER_BUF_NSLOTS);
    emit_call(emit, MP_F_NATIVE_ITERNEXT);
    #if MICROPY_DEBUG_MP_OBJ_SENTINELS
    ASM_MOV_REG_IMM(emit->as, REG_TEMP1, (mp_uint_t)MP_OBJ_STOP_ITERATION);
    ASM_JUMP_IF_REG_EQ(emit->as, REG_RET, REG_TEMP1, label);
    #else
    // MP_OBJ_STOP_ITERATION is 0, so exhaustion is just a zero test
    MP_STATIC_ASSERT(MP_OBJ_STOP_ITERATION == 0);
    ASM_JUMP_IF_REG_ZERO(emit->as, REG_RET, label, false);
    #endif
    emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
}
+
+STATIC void emit_native_for_iter_end(emit_t *emit) {
+ // adjust stack counter (we get here from for_iter ending, which popped the value for us)
+ emit_native_pre(emit);
+ adjust_stack(emit, -MP_OBJ_ITER_BUF_NSLOTS);
+ emit_post(emit);
+}
+
+STATIC void emit_native_pop_except_jump(emit_t *emit, mp_uint_t label, bool within_exc_handler) {
+ if (within_exc_handler) {
+ // Cancel any active exception so subsequent handlers don't see it
+ ASM_MOV_REG_IMM(emit->as, REG_TEMP0, (mp_uint_t)MP_OBJ_NULL);
+ ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_VAL(emit), REG_TEMP0);
+ } else {
+ emit_native_leave_exc_stack(emit, false);
+ }
+ emit_native_jump(emit, label);
+}
+
+STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
+ vtype_kind_t vtype;
+ emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
+ if (vtype == VTYPE_PYOBJ) {
+ emit_call_with_imm_arg(emit, MP_F_UNARY_OP, op, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ } else {
+ adjust_stack(emit, 1);
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
+ MP_ERROR_TEXT("unary op %q not implemented"), mp_unary_op_method_name[op]);
+ }
+}
+
// Emit code for a binary operation.  Operands are on the Python stack with
// lhs at depth 1 and rhs at depth 0.  int/uint operand pairs are compiled to
// inline machine code; Python-object pairs dispatch through mp_binary_op.
STATIC void emit_native_binary_op(emit_t *emit, mp_binary_op_t op) {
    DEBUG_printf("binary_op(" UINT_FMT ")\n", op);
    vtype_kind_t vtype_lhs = peek_vtype(emit, 1);
    vtype_kind_t vtype_rhs = peek_vtype(emit, 0);
    if ((vtype_lhs == VTYPE_INT || vtype_lhs == VTYPE_UINT)
        && (vtype_rhs == VTYPE_INT || vtype_rhs == VTYPE_UINT)) {
        // for integers, inplace and normal ops are equivalent, so use just normal ops
        if (MP_BINARY_OP_INPLACE_OR <= op && op <= MP_BINARY_OP_INPLACE_POWER) {
            op += MP_BINARY_OP_OR - MP_BINARY_OP_INPLACE_OR;
        }

        #if N_X64 || N_X86
        // special cases for x86 and shifting: the variable shift count must be
        // in CL, hence the fixed RCX/ECX operand register below
        if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
            #if N_X64
            emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X64_REG_RCX, &vtype_lhs, REG_RET);
            #else
            emit_pre_pop_reg_reg(emit, &vtype_rhs, ASM_X86_REG_ECX, &vtype_lhs, REG_RET);
            #endif
            if (op == MP_BINARY_OP_LSHIFT) {
                ASM_LSL_REG(emit->as, REG_RET);
            } else {
                // logical shift for unsigned lhs, arithmetic (sign-extending) for signed
                if (vtype_lhs == VTYPE_UINT) {
                    ASM_LSR_REG(emit->as, REG_RET);
                } else {
                    ASM_ASR_REG(emit->as, REG_RET);
                }
            }
            emit_post_push_reg(emit, vtype_lhs, REG_RET);
            return;
        }
        #endif

        // special cases for floor-divide and modulo because we dispatch to helper functions
        if (op == MP_BINARY_OP_FLOOR_DIVIDE || op == MP_BINARY_OP_MODULO) {
            emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_2, &vtype_lhs, REG_ARG_1);
            if (vtype_lhs != VTYPE_INT) {
                // NOTE(review): the trailing mp_binary_op_method_name[op] argument has
                // no matching %q in this message; harmless but looks like a leftover
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("div/mod not implemented for uint"), mp_binary_op_method_name[op]);
            }
            if (op == MP_BINARY_OP_FLOOR_DIVIDE) {
                emit_call(emit, MP_F_SMALL_INT_FLOOR_DIVIDE);
            } else {
                emit_call(emit, MP_F_SMALL_INT_MODULO);
            }
            emit_post_push_reg(emit, VTYPE_INT, REG_RET);
            return;
        }

        // Generic integer path: rhs in a flexible register, lhs in REG_ARG_2
        int reg_rhs = REG_ARG_3;
        emit_pre_pop_reg_flexible(emit, &vtype_rhs, &reg_rhs, REG_RET, REG_ARG_2);
        emit_pre_pop_reg(emit, &vtype_lhs, REG_ARG_2);

        #if !(N_X64 || N_X86)
        // Non-x86 targets take the shift count in an arbitrary register
        if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_RSHIFT) {
            if (op == MP_BINARY_OP_LSHIFT) {
                ASM_LSL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            } else {
                if (vtype_lhs == VTYPE_UINT) {
                    ASM_LSR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
                } else {
                    ASM_ASR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
                }
            }
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
            return;
        }
        #endif

        if (op == MP_BINARY_OP_OR) {
            ASM_OR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (op == MP_BINARY_OP_XOR) {
            ASM_XOR_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (op == MP_BINARY_OP_AND) {
            ASM_AND_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (op == MP_BINARY_OP_ADD) {
            ASM_ADD_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (op == MP_BINARY_OP_SUBTRACT) {
            ASM_SUB_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (op == MP_BINARY_OP_MULTIPLY) {
            ASM_MUL_REG_REG(emit->as, REG_ARG_2, reg_rhs);
            emit_post_push_reg(emit, vtype_lhs, REG_ARG_2);
        } else if (MP_BINARY_OP_LESS <= op && op <= MP_BINARY_OP_NOT_EQUAL) {
            // comparison ops are (in enum order):
            //  MP_BINARY_OP_LESS
            //  MP_BINARY_OP_MORE
            //  MP_BINARY_OP_EQUAL
            //  MP_BINARY_OP_LESS_EQUAL
            //  MP_BINARY_OP_MORE_EQUAL
            //  MP_BINARY_OP_NOT_EQUAL

            if (vtype_lhs != vtype_rhs) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("comparison of int and uint"));
            }

            // Index into the 12-entry tables below: first 6 entries are the
            // unsigned condition codes, the next 6 the signed ones
            size_t op_idx = op - MP_BINARY_OP_LESS + (vtype_lhs == VTYPE_UINT ? 0 : 6);

            need_reg_single(emit, REG_RET, 0);
            #if N_X64
            asm_x64_xor_r64_r64(emit->as, REG_RET, REG_RET);
            asm_x64_cmp_r64_with_r64(emit->as, reg_rhs, REG_ARG_2);
            static byte ops[6 + 6] = {
                // unsigned
                ASM_X64_CC_JB,
                ASM_X64_CC_JA,
                ASM_X64_CC_JE,
                ASM_X64_CC_JBE,
                ASM_X64_CC_JAE,
                ASM_X64_CC_JNE,
                // signed
                ASM_X64_CC_JL,
                ASM_X64_CC_JG,
                ASM_X64_CC_JE,
                ASM_X64_CC_JLE,
                ASM_X64_CC_JGE,
                ASM_X64_CC_JNE,
            };
            asm_x64_setcc_r8(emit->as, ops[op_idx], REG_RET);
            #elif N_X86
            asm_x86_xor_r32_r32(emit->as, REG_RET, REG_RET);
            asm_x86_cmp_r32_with_r32(emit->as, reg_rhs, REG_ARG_2);
            static byte ops[6 + 6] = {
                // unsigned
                ASM_X86_CC_JB,
                ASM_X86_CC_JA,
                ASM_X86_CC_JE,
                ASM_X86_CC_JBE,
                ASM_X86_CC_JAE,
                ASM_X86_CC_JNE,
                // signed
                ASM_X86_CC_JL,
                ASM_X86_CC_JG,
                ASM_X86_CC_JE,
                ASM_X86_CC_JLE,
                ASM_X86_CC_JGE,
                ASM_X86_CC_JNE,
            };
            asm_x86_setcc_r8(emit->as, ops[op_idx], REG_RET);
            #elif N_THUMB
            asm_thumb_cmp_rlo_rlo(emit->as, REG_ARG_2, reg_rhs);
            #if MICROPY_EMIT_THUMB_ARMV7M
            // Use an IT/ITE block to conditionally select 1 or 0
            static uint16_t ops[6 + 6] = {
                // unsigned
                ASM_THUMB_OP_ITE_CC,
                ASM_THUMB_OP_ITE_HI,
                ASM_THUMB_OP_ITE_EQ,
                ASM_THUMB_OP_ITE_LS,
                ASM_THUMB_OP_ITE_CS,
                ASM_THUMB_OP_ITE_NE,
                // signed
                ASM_THUMB_OP_ITE_LT,
                ASM_THUMB_OP_ITE_GT,
                ASM_THUMB_OP_ITE_EQ,
                ASM_THUMB_OP_ITE_LE,
                ASM_THUMB_OP_ITE_GE,
                ASM_THUMB_OP_ITE_NE,
            };
            asm_thumb_op16(emit->as, ops[op_idx]);
            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
            #else
            // No IT blocks on ARMv6-M: use a short conditional-branch sequence
            static uint16_t ops[6 + 6] = {
                // unsigned
                ASM_THUMB_CC_CC,
                ASM_THUMB_CC_HI,
                ASM_THUMB_CC_EQ,
                ASM_THUMB_CC_LS,
                ASM_THUMB_CC_CS,
                ASM_THUMB_CC_NE,
                // signed
                ASM_THUMB_CC_LT,
                ASM_THUMB_CC_GT,
                ASM_THUMB_CC_EQ,
                ASM_THUMB_CC_LE,
                ASM_THUMB_CC_GE,
                ASM_THUMB_CC_NE,
            };
            asm_thumb_bcc_rel9(emit->as, ops[op_idx], 6);
            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 0);
            asm_thumb_b_rel12(emit->as, 4);
            asm_thumb_mov_rlo_i8(emit->as, REG_RET, 1);
            #endif
            #elif N_ARM
            asm_arm_cmp_reg_reg(emit->as, REG_ARG_2, reg_rhs);
            static uint ccs[6 + 6] = {
                // unsigned
                ASM_ARM_CC_CC,
                ASM_ARM_CC_HI,
                ASM_ARM_CC_EQ,
                ASM_ARM_CC_LS,
                ASM_ARM_CC_CS,
                ASM_ARM_CC_NE,
                // signed
                ASM_ARM_CC_LT,
                ASM_ARM_CC_GT,
                ASM_ARM_CC_EQ,
                ASM_ARM_CC_LE,
                ASM_ARM_CC_GE,
                ASM_ARM_CC_NE,
            };
            asm_arm_setcc_reg(emit->as, REG_RET, ccs[op_idx]);
            #elif N_XTENSA || N_XTENSAWIN
            // Xtensa lacks some condition codes; the 0x80 flag marks entries
            // whose operands must be swapped to synthesize the comparison
            static uint8_t ccs[6 + 6] = {
                // unsigned
                ASM_XTENSA_CC_LTU,
                0x80 | ASM_XTENSA_CC_LTU, // for GTU we'll swap args
                ASM_XTENSA_CC_EQ,
                0x80 | ASM_XTENSA_CC_GEU, // for LEU we'll swap args
                ASM_XTENSA_CC_GEU,
                ASM_XTENSA_CC_NE,
                // signed
                ASM_XTENSA_CC_LT,
                0x80 | ASM_XTENSA_CC_LT, // for GT we'll swap args
                ASM_XTENSA_CC_EQ,
                0x80 | ASM_XTENSA_CC_GE, // for LE we'll swap args
                ASM_XTENSA_CC_GE,
                ASM_XTENSA_CC_NE,
            };
            uint8_t cc = ccs[op_idx];
            if ((cc & 0x80) == 0) {
                asm_xtensa_setcc_reg_reg_reg(emit->as, cc, REG_RET, REG_ARG_2, reg_rhs);
            } else {
                asm_xtensa_setcc_reg_reg_reg(emit->as, cc & ~0x80, REG_RET, reg_rhs, REG_ARG_2);
            }
            #else
            #error not implemented
            #endif
            emit_post_push_reg(emit, VTYPE_BOOL, REG_RET);
        } else {
            // TODO other ops not yet implemented
            adjust_stack(emit, 1);
            EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                MP_ERROR_TEXT("binary op %q not implemented"), mp_binary_op_method_name[op]);
        }
    } else if (vtype_lhs == VTYPE_PYOBJ && vtype_rhs == VTYPE_PYOBJ) {
        emit_pre_pop_reg_reg(emit, &vtype_rhs, REG_ARG_3, &vtype_lhs, REG_ARG_2);
        // `not in` and `is not` are implemented as the base op followed by a
        // unary not on the result
        bool invert = false;
        if (op == MP_BINARY_OP_NOT_IN) {
            invert = true;
            op = MP_BINARY_OP_IN;
        } else if (op == MP_BINARY_OP_IS_NOT) {
            invert = true;
            op = MP_BINARY_OP_IS;
        }
        emit_call_with_imm_arg(emit, MP_F_BINARY_OP, op, REG_ARG_1);
        if (invert) {
            ASM_MOV_REG_REG(emit->as, REG_ARG_2, REG_RET);
            emit_call_with_imm_arg(emit, MP_F_UNARY_OP, MP_UNARY_OP_NOT, REG_ARG_1);
        }
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
    } else {
        adjust_stack(emit, -1);
        EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
            MP_ERROR_TEXT("can't do binary op between '%q' and '%q'"),
            vtype_to_qstr(vtype_lhs), vtype_to_qstr(vtype_rhs));
    }
}
+
+#if MICROPY_PY_BUILTINS_SLICE
+STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args);
+#endif
+
+STATIC void emit_native_build(emit_t *emit, mp_uint_t n_args, int kind) {
+ // for viper: call runtime, with types of args
+ // if wrapped in byte_array, or something, allocates memory and fills it
+ MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_TUPLE == MP_F_BUILD_TUPLE);
+ MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_LIST == MP_F_BUILD_LIST);
+ MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_MAP == MP_F_BUILD_MAP);
+ MP_STATIC_ASSERT(MP_F_BUILD_TUPLE + MP_EMIT_BUILD_SET == MP_F_BUILD_SET);
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (kind == MP_EMIT_BUILD_SLICE) {
+ emit_native_build_slice(emit, n_args);
+ return;
+ }
+ #endif
+ emit_native_pre(emit);
+ if (kind == MP_EMIT_BUILD_TUPLE || kind == MP_EMIT_BUILD_LIST || kind == MP_EMIT_BUILD_SET) {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_2, n_args); // pointer to items
+ }
+ emit_call_with_imm_arg(emit, MP_F_BUILD_TUPLE + kind, n_args, REG_ARG_1);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // new tuple/list/map/set
+}
+
+STATIC void emit_native_store_map(emit_t *emit) {
+ vtype_kind_t vtype_key, vtype_value, vtype_map;
+ emit_pre_pop_reg_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3, &vtype_map, REG_ARG_1); // key, value, map
+ assert(vtype_key == VTYPE_PYOBJ);
+ assert(vtype_value == VTYPE_PYOBJ);
+ assert(vtype_map == VTYPE_PYOBJ);
+ emit_call(emit, MP_F_STORE_MAP);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET); // map
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+STATIC void emit_native_build_slice(emit_t *emit, mp_uint_t n_args) {
+ DEBUG_printf("build_slice %d\n", n_args);
+ if (n_args == 2) {
+ vtype_kind_t vtype_start, vtype_stop;
+ emit_pre_pop_reg_reg(emit, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop
+ assert(vtype_start == VTYPE_PYOBJ);
+ assert(vtype_stop == VTYPE_PYOBJ);
+ emit_native_mov_reg_const(emit, REG_ARG_3, MP_F_CONST_NONE_OBJ); // arg3 = step
+ } else {
+ assert(n_args == 3);
+ vtype_kind_t vtype_start, vtype_stop, vtype_step;
+ emit_pre_pop_reg_reg_reg(emit, &vtype_step, REG_ARG_3, &vtype_stop, REG_ARG_2, &vtype_start, REG_ARG_1); // arg1 = start, arg2 = stop, arg3 = step
+ assert(vtype_start == VTYPE_PYOBJ);
+ assert(vtype_stop == VTYPE_PYOBJ);
+ assert(vtype_step == VTYPE_PYOBJ);
+ }
+ emit_call(emit, MP_F_NEW_SLICE);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+#endif
+
// Store an item into the collection being built by a comprehension.  The
// collection object lives at `collection_index` on the Python stack; the
// item (or key/value pair for a dict comp) is popped from the top.
STATIC void emit_native_store_comp(emit_t *emit, scope_kind_t kind, mp_uint_t collection_index) {
    mp_fun_kind_t f;
    if (kind == SCOPE_LIST_COMP) {
        vtype_kind_t vtype_item;
        emit_pre_pop_reg(emit, &vtype_item, REG_ARG_2);
        assert(vtype_item == VTYPE_PYOBJ);
        f = MP_F_LIST_APPEND;
    #if MICROPY_PY_BUILTINS_SET
    } else if (kind == SCOPE_SET_COMP) {
        vtype_kind_t vtype_item;
        emit_pre_pop_reg(emit, &vtype_item, REG_ARG_2);
        assert(vtype_item == VTYPE_PYOBJ);
        f = MP_F_STORE_SET;
    #endif
    } else {
        // SCOPE_DICT_COMP
        vtype_kind_t vtype_key, vtype_value;
        emit_pre_pop_reg_reg(emit, &vtype_key, REG_ARG_2, &vtype_value, REG_ARG_3);
        assert(vtype_key == VTYPE_PYOBJ);
        assert(vtype_value == VTYPE_PYOBJ);
        f = MP_F_STORE_MAP;
    }
    vtype_kind_t vtype_collection;
    // Access (don't pop) the collection: it stays on the stack so further
    // items can be added on subsequent iterations
    emit_access_stack(emit, collection_index, &vtype_collection, REG_ARG_1);
    assert(vtype_collection == VTYPE_PYOBJ);
    emit_call(emit, f);
    emit_post(emit);
}
+
+STATIC void emit_native_unpack_sequence(emit_t *emit, mp_uint_t n_args) {
+ DEBUG_printf("unpack_sequence %d\n", n_args);
+ vtype_kind_t vtype_base;
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_args); // arg3 = dest ptr
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_SEQUENCE, n_args, REG_ARG_2); // arg2 = n_args
+}
+
+STATIC void emit_native_unpack_ex(emit_t *emit, mp_uint_t n_left, mp_uint_t n_right) {
+ DEBUG_printf("unpack_ex %d %d\n", n_left, n_right);
+ vtype_kind_t vtype_base;
+ emit_pre_pop_reg(emit, &vtype_base, REG_ARG_1); // arg1 = seq
+ assert(vtype_base == VTYPE_PYOBJ);
+ emit_get_stack_pointer_to_reg_for_push(emit, REG_ARG_3, n_left + n_right + 1); // arg3 = dest ptr
+ emit_call_with_imm_arg(emit, MP_F_UNPACK_EX, n_left | (n_right << 8), REG_ARG_2); // arg2 = n_left + n_right
+}
+
+STATIC void emit_native_make_function(emit_t *emit, scope_t *scope, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+ // call runtime, with type info for args, or don't support dict/default params, or only support Python objects for them
+ emit_native_pre(emit);
+ if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+ need_reg_all(emit);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_2, (mp_uint_t)MP_OBJ_NULL);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_3, (mp_uint_t)MP_OBJ_NULL);
+ } else {
+ vtype_kind_t vtype_def_tuple, vtype_def_dict;
+ emit_pre_pop_reg_reg(emit, &vtype_def_dict, REG_ARG_3, &vtype_def_tuple, REG_ARG_2);
+ assert(vtype_def_tuple == VTYPE_PYOBJ);
+ assert(vtype_def_dict == VTYPE_PYOBJ);
+ need_reg_all(emit);
+ }
+ emit_load_reg_with_raw_code(emit, REG_ARG_1, scope->raw_code);
+ ASM_CALL_IND(emit->as, MP_F_MAKE_FUNCTION_FROM_RAW_CODE);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
+STATIC void emit_native_make_closure(emit_t *emit, scope_t *scope, mp_uint_t n_closed_over, mp_uint_t n_pos_defaults, mp_uint_t n_kw_defaults) {
+ emit_native_pre(emit);
+ if (n_pos_defaults == 0 && n_kw_defaults == 0) {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_2, n_closed_over);
+ } else {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_closed_over + 2);
+ ASM_MOV_REG_IMM(emit->as, REG_ARG_2, 0x100 | n_closed_over);
+ }
+ emit_load_reg_with_raw_code(emit, REG_ARG_1, scope->raw_code);
+ ASM_CALL_IND(emit->as, MP_F_MAKE_CLOSURE_FROM_RAW_CODE);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+}
+
// Emit a function call.  Handles three cases: a viper cast "call" like
// int(x) (compile-time retype, no runtime call), a */** call, and a plain
// positional/keyword call.
STATIC void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
    DEBUG_printf("call_function(n_pos=" UINT_FMT ", n_kw=" UINT_FMT ", star_flags=" UINT_FMT ")\n", n_positional, n_keyword, star_flags);

    // TODO: in viper mode, call special runtime routine with type info for args,
    // and wanted type info for return, to remove need for boxing/unboxing

    emit_native_pre(emit);
    // The callee sits below the arguments on the stack
    vtype_kind_t vtype_fun = peek_vtype(emit, n_positional + 2 * n_keyword);
    if (vtype_fun == VTYPE_BUILTIN_CAST) {
        // casting operator
        assert(n_positional == 1 && n_keyword == 0);
        assert(!star_flags);
        DEBUG_printf(" cast to %d\n", vtype_fun);
        vtype_kind_t vtype_cast = peek_stack(emit, 1)->data.u_imm;
        switch (peek_vtype(emit, 0)) {
            case VTYPE_PYOBJ: {
                // Object -> native: needs a runtime conversion call
                vtype_kind_t vtype;
                emit_pre_pop_reg(emit, &vtype, REG_ARG_1);
                emit_pre_pop_discard(emit);
                emit_call_with_imm_arg(emit, MP_F_CONVERT_OBJ_TO_NATIVE, vtype_cast, REG_ARG_2); // arg2 = type
                emit_post_push_reg(emit, vtype_cast, REG_RET);
                break;
            }
            case VTYPE_BOOL:
            case VTYPE_INT:
            case VTYPE_UINT:
            case VTYPE_PTR:
            case VTYPE_PTR8:
            case VTYPE_PTR16:
            case VTYPE_PTR32:
            case VTYPE_PTR_NONE:
                // Native -> native: the bits are unchanged, just retype the slot
                emit_fold_stack_top(emit, REG_ARG_1);
                emit_post_top_set_vtype(emit, vtype_cast);
                break;
            default:
                // this can happen when casting a cast: int(int)
                mp_raise_NotImplementedError(MP_ERROR_TEXT("casting"));
        }
    } else {
        assert(vtype_fun == VTYPE_PYOBJ);
        if (star_flags) {
            // * and/or ** call: extra slots on the stack hold the star args
            emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 3); // pointer to args
            emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 0, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
        } else {
            if (n_positional != 0 || n_keyword != 0) {
                emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword); // pointer to args
            }
            emit_pre_pop_reg(emit, &vtype_fun, REG_ARG_1); // the function
            emit_call_with_imm_arg(emit, MP_F_NATIVE_CALL_FUNCTION_N_KW, n_positional | (n_keyword << 8), REG_ARG_2);
            emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
        }
    }
}
+
+STATIC void emit_native_call_method(emit_t *emit, mp_uint_t n_positional, mp_uint_t n_keyword, mp_uint_t star_flags) {
+ if (star_flags) {
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, n_positional + 2 * n_keyword + 4); // pointer to args
+ emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW_VAR, 1, REG_ARG_1, n_positional | (n_keyword << 8), REG_ARG_2);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ } else {
+ emit_native_pre(emit);
+ emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 2 + n_positional + 2 * n_keyword); // pointer to items, including meth and self
+ emit_call_with_2_imm_args(emit, MP_F_CALL_METHOD_N_KW, n_positional, REG_ARG_1, n_keyword, REG_ARG_2);
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
+ }
+}
+
// Emit code for `return`: generators store the value via the code-state
// stack pointer and unwind; viper functions convert the native return value
// to the declared type; everything funnels through the exit label.
STATIC void emit_native_return_value(emit_t *emit) {
    DEBUG_printf("return_value\n");

    if (emit->scope->scope_flags & MP_SCOPE_FLAG_GENERATOR) {
        // Save pointer to current stack position for caller to access return value
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1);
        emit_native_mov_state_reg(emit, OFFSETOF_CODE_STATE_SP, REG_TEMP0);

        // Put return type in return value slot
        ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_NORMAL);
        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_TEMP0);

        // Do the unwinding jump to get to the return handler
        emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size);
        emit->last_emit_was_return_value = true;
        return;
    }

    if (emit->do_viper_types) {
        // Declared viper return type is encoded in the scope flags
        vtype_kind_t return_vtype = emit->scope->scope_flags >> MP_SCOPE_FLAG_VIPERRET_POS;
        if (peek_vtype(emit, 0) == VTYPE_PTR_NONE) {
            // Implicit `return None`: synthesize a value of the declared type
            emit_pre_pop_discard(emit);
            if (return_vtype == VTYPE_PYOBJ) {
                emit_native_mov_reg_const(emit, REG_PARENT_RET, MP_F_CONST_NONE_OBJ);
            } else {
                ASM_MOV_REG_IMM(emit->as, REG_ARG_1, 0);
            }
        } else {
            vtype_kind_t vtype;
            emit_pre_pop_reg(emit, &vtype, return_vtype == VTYPE_PYOBJ ? REG_PARENT_RET : REG_ARG_1);
            if (vtype != return_vtype) {
                EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
                    MP_ERROR_TEXT("return expected '%q' but got '%q'"),
                    vtype_to_qstr(return_vtype), vtype_to_qstr(vtype));
            }
        }
        if (return_vtype != VTYPE_PYOBJ) {
            // Box the native value so the caller always receives an object
            emit_call_with_imm_arg(emit, MP_F_CONVERT_NATIVE_TO_OBJ, return_vtype, REG_ARG_2);
            #if REG_RET != REG_PARENT_RET
            ASM_MOV_REG_REG(emit->as, REG_PARENT_RET, REG_RET);
            #endif
        }
    } else {
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_PARENT_RET);
        assert(vtype == VTYPE_PYOBJ);
    }
    if (NEED_GLOBAL_EXC_HANDLER(emit)) {
        // Save return value for the global exception handler to use
        ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_PARENT_RET);
    }
    emit_native_unwind_jump(emit, emit->exit_label, emit->exc_stack_size);
    emit->last_emit_was_return_value = true;
}
+
+STATIC void emit_native_raise_varargs(emit_t *emit, mp_uint_t n_args) {
+ (void)n_args;
+ assert(n_args == 1);
+ vtype_kind_t vtype_exc;
+ emit_pre_pop_reg(emit, &vtype_exc, REG_ARG_1); // arg1 = object to raise
+ if (vtype_exc != VTYPE_PYOBJ) {
+ EMIT_NATIVE_VIPER_TYPE_ERROR(emit, MP_ERROR_TEXT("must raise an object"));
+ }
+ // TODO probably make this 1 call to the runtime (which could even call convert, native_raise(obj, type))
+ emit_call(emit, MP_F_NATIVE_RAISE);
+}
+
// Emit code for `yield` / `yield from`.  The generated code saves the stack
// pointer and re-entry PC into the code state, jumps to the exit handler,
// and resumes at *emit->label_slot when the generator is next called.
STATIC void emit_native_yield(emit_t *emit, int kind) {
    // Note: 1 (yield) or 3 (yield from) labels are reserved for this function, starting at *emit->label_slot

    if (emit->do_viper_types) {
        mp_raise_NotImplementedError(MP_ERROR_TEXT("native yield"));
    }
    emit->scope->scope_flags |= MP_SCOPE_FLAG_GENERATOR;

    need_stack_settled(emit);

    if (kind == MP_EMIT_YIELD_FROM) {

        // Top of yield-from loop, conceptually implementing:
        //   for item in generator:
        //       yield item

        // Jump to start of loop
        emit_native_jump(emit, *emit->label_slot + 2);

        // Label for top of loop
        emit_native_label_assign(emit, *emit->label_slot + 1);
    }

    // Save pointer to current stack position for caller to access yielded value
    emit_get_stack_pointer_to_reg_for_pop(emit, REG_TEMP0, 1);
    emit_native_mov_state_reg(emit, OFFSETOF_CODE_STATE_SP, REG_TEMP0);

    // Put return type in return value slot
    ASM_MOV_REG_IMM(emit->as, REG_TEMP0, MP_VM_RETURN_YIELD);
    ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_RET_VAL(emit), REG_TEMP0);

    // Save re-entry PC
    ASM_MOV_REG_PCREL(emit->as, REG_TEMP0, *emit->label_slot);
    emit_native_mov_state_reg(emit, LOCAL_IDX_GEN_PC(emit), REG_TEMP0);

    // Jump to exit handler
    ASM_JUMP(emit->as, emit->exit_label);

    // Label re-entry point
    mp_asm_base_label_assign(&emit->as->base, *emit->label_slot);

    // Re-open any active exception handler
    if (emit->exc_stack_size > 0) {
        // Find innermost active exception handler, to restore as current handler
        exc_stack_entry_t *e = &emit->exc_stack[emit->exc_stack_size - 1];
        for (; e >= emit->exc_stack; --e) {
            if (e->is_active) {
                // Found active handler, get its PC
                ASM_MOV_REG_PCREL(emit->as, REG_RET, e->label);
                ASM_MOV_LOCAL_REG(emit->as, LOCAL_IDX_EXC_HANDLER_PC(emit), REG_RET);
                break;
            }
        }
    }

    emit_native_adjust_stack_size(emit, 1); // send_value

    if (kind == MP_EMIT_YIELD_VALUE) {
        // Check LOCAL_IDX_EXC_VAL for any injected value; MP_F_NATIVE_RAISE
        // re-raises it inside the generator
        ASM_MOV_REG_LOCAL(emit->as, REG_ARG_1, LOCAL_IDX_EXC_VAL(emit));
        emit_call(emit, MP_F_NATIVE_RAISE);
    } else {
        // Label loop entry
        emit_native_label_assign(emit, *emit->label_slot + 2);

        // Get the next item from the delegate generator
        vtype_kind_t vtype;
        emit_pre_pop_reg(emit, &vtype, REG_ARG_2); // send_value
        emit_access_stack(emit, 1, &vtype, REG_ARG_1); // generator
        ASM_MOV_REG_LOCAL(emit->as, REG_ARG_3, LOCAL_IDX_EXC_VAL(emit)); // throw_value
        emit_post_push_reg(emit, VTYPE_PYOBJ, REG_ARG_3);
        emit_get_stack_pointer_to_reg_for_pop(emit, REG_ARG_3, 1); // ret_value
        emit_call(emit, MP_F_NATIVE_YIELD_FROM);

        // If returned non-zero then generator continues
        ASM_JUMP_IF_REG_NONZERO(emit->as, REG_RET, *emit->label_slot + 1, true);

        // Pop exhausted gen, replace with ret_value
        emit_native_adjust_stack_size(emit, 1); // ret_value
        emit_fold_stack_top(emit, REG_ARG_1);
    }
}
+
+STATIC void emit_native_start_except_handler(emit_t *emit) {
+ // Protected block has finished so leave the current exception handler
+ emit_native_leave_exc_stack(emit, true);
+
+ // Get and push nlr_buf.ret_val
+ ASM_MOV_REG_LOCAL(emit->as, REG_TEMP0, LOCAL_IDX_EXC_VAL(emit));
+ emit_post_push_reg(emit, VTYPE_PYOBJ, REG_TEMP0);
+}
+
// End of an except handler body: discard the exception object that
// start_except_handler pushed.
STATIC void emit_native_end_except_handler(emit_t *emit) {
    adjust_stack(emit, -1); // pop the exception (end_finally didn't use it)
}
+
// Method table for this native emitter, exported under an arch-specific name
// via EXPORT_FUN (e.g. emit_native_x64_method_table).  This is a positional
// initializer, so entry order must match emit_method_table_t exactly.
const emit_method_table_t EXPORT_FUN(method_table) = {
    #if MICROPY_DYNAMIC_COMPILER
    EXPORT_FUN(new),
    EXPORT_FUN(free),
    #endif

    // Pass management and bookkeeping
    emit_native_start_pass,
    emit_native_end_pass,
    emit_native_last_emit_was_return_value,
    emit_native_adjust_stack_size,
    emit_native_set_source_line,

    // Load/store/delete each have a local and a global variant
    {
        emit_native_load_local,
        emit_native_load_global,
    },
    {
        emit_native_store_local,
        emit_native_store_global,
    },
    {
        emit_native_delete_local,
        emit_native_delete_global,
    },

    emit_native_label_assign,
    emit_native_import,
    emit_native_load_const_tok,
    emit_native_load_const_small_int,
    emit_native_load_const_str,
    emit_native_load_const_obj,
    emit_native_load_null,
    emit_native_load_method,
    emit_native_load_build_class,
    emit_native_subscr,
    emit_native_attr,
    emit_native_dup_top,
    emit_native_dup_top_two,
    emit_native_pop_top,
    emit_native_rot_two,
    emit_native_rot_three,
    emit_native_jump,
    emit_native_pop_jump_if,
    emit_native_jump_if_or_pop,
    emit_native_unwind_jump,
    emit_native_setup_block,
    emit_native_with_cleanup,
    emit_native_end_finally,
    emit_native_get_iter,
    emit_native_for_iter,
    emit_native_for_iter_end,
    emit_native_pop_except_jump,
    emit_native_unary_op,
    emit_native_binary_op,
    emit_native_build,
    emit_native_store_map,
    emit_native_store_comp,
    emit_native_unpack_sequence,
    emit_native_unpack_ex,
    emit_native_make_function,
    emit_native_make_closure,
    emit_native_call_function,
    emit_native_call_method,
    emit_native_return_value,
    emit_native_raise_varargs,
    emit_native_yield,

    emit_native_start_except_handler,
    emit_native_end_except_handler,
};
+
+#endif
diff --git a/circuitpython/py/emitnthumb.c b/circuitpython/py/emitnthumb.c
new file mode 100644
index 0000000..1c33e7a
--- /dev/null
+++ b/circuitpython/py/emitnthumb.c
@@ -0,0 +1,20 @@
// thumb specific stuff
//
// This wrapper instantiates the generic native emitter (py/emitnative.c)
// for the Thumb target by defining the arch selector and symbol prefix
// before including it.

#include "py/mpconfig.h"

#if MICROPY_EMIT_THUMB

// this is defined so that the assembler exports generic assembler API macros
#define GENERIC_ASM_API (1)
#include "py/asmthumb.h"

// Word indices of REG_LOCAL_x in nlr_buf_t
#define NLR_BUF_IDX_LOCAL_1 (3) // r4
#define NLR_BUF_IDX_LOCAL_2 (4) // r5
#define NLR_BUF_IDX_LOCAL_3 (5) // r6

#define N_THUMB (1)
// Prefix every exported symbol so multiple native emitters can be linked together
#define EXPORT_FUN(name) emit_native_thumb_##name
#include "py/emitnative.c"

#endif
diff --git a/circuitpython/py/emitnx64.c b/circuitpython/py/emitnx64.c
new file mode 100644
index 0000000..4abb3ec
--- /dev/null
+++ b/circuitpython/py/emitnx64.c
@@ -0,0 +1,20 @@
// x64 specific stuff
//
// This wrapper instantiates the generic native emitter (py/emitnative.c)
// for the x86-64 target by defining the arch selector and symbol prefix
// before including it.

#include "py/mpconfig.h"

#if MICROPY_EMIT_X64

// This is defined so that the assembler exports generic assembler API macros
#define GENERIC_ASM_API (1)
#include "py/asmx64.h"

// Word indices of REG_LOCAL_x in nlr_buf_t
#define NLR_BUF_IDX_LOCAL_1 (5) // rbx
#define NLR_BUF_IDX_LOCAL_2 (6) // r12
#define NLR_BUF_IDX_LOCAL_3 (7) // r13

#define N_X64 (1)
// Prefix every exported symbol so multiple native emitters can be linked together
#define EXPORT_FUN(name) emit_native_x64_##name
#include "py/emitnative.c"

#endif
diff --git a/circuitpython/py/emitnx86.c b/circuitpython/py/emitnx86.c
new file mode 100644
index 0000000..f0553f0
--- /dev/null
+++ b/circuitpython/py/emitnx86.c
@@ -0,0 +1,72 @@
// x86 specific stuff
//
// This wrapper instantiates the generic native emitter (py/emitnative.c)
// for the 32-bit x86 target.

#include "py/mpconfig.h"
#include "py/nativeglue.h"

#if MICROPY_EMIT_X86

// This is defined so that the assembler exports generic assembler API macros
#define GENERIC_ASM_API (1)
#include "py/asmx86.h"

// Word indices of REG_LOCAL_x in nlr_buf_t
#define NLR_BUF_IDX_LOCAL_1 (5) // ebx
#define NLR_BUF_IDX_LOCAL_2 (7) // esi
#define NLR_BUF_IDX_LOCAL_3 (6) // edi

// x86 needs a table to know how many args a given function has
// Indexed by mp_fun_kind_t; must be kept in sync with the runtime helper
// signatures referenced through py/nativeglue.h.
STATIC byte mp_f_n_args[MP_F_NUMBER_OF] = {
    [MP_F_CONVERT_OBJ_TO_NATIVE] = 2,
    [MP_F_CONVERT_NATIVE_TO_OBJ] = 2,
    [MP_F_NATIVE_SWAP_GLOBALS] = 1,
    [MP_F_LOAD_NAME] = 1,
    [MP_F_LOAD_GLOBAL] = 1,
    [MP_F_LOAD_BUILD_CLASS] = 0,
    [MP_F_LOAD_ATTR] = 2,
    [MP_F_LOAD_METHOD] = 3,
    [MP_F_LOAD_SUPER_METHOD] = 2,
    [MP_F_STORE_NAME] = 2,
    [MP_F_STORE_GLOBAL] = 2,
    [MP_F_STORE_ATTR] = 3,
    [MP_F_OBJ_SUBSCR] = 3,
    [MP_F_OBJ_IS_TRUE] = 1,
    [MP_F_UNARY_OP] = 2,
    [MP_F_BINARY_OP] = 3,
    [MP_F_BUILD_TUPLE] = 2,
    [MP_F_BUILD_LIST] = 2,
    [MP_F_BUILD_MAP] = 1,
    [MP_F_BUILD_SET] = 2,
    [MP_F_STORE_SET] = 2,
    [MP_F_LIST_APPEND] = 2,
    [MP_F_STORE_MAP] = 3,
    [MP_F_MAKE_FUNCTION_FROM_RAW_CODE] = 3,
    [MP_F_NATIVE_CALL_FUNCTION_N_KW] = 3,
    [MP_F_CALL_METHOD_N_KW] = 3,
    [MP_F_CALL_METHOD_N_KW_VAR] = 3,
    [MP_F_NATIVE_GETITER] = 2,
    [MP_F_NATIVE_ITERNEXT] = 1,
    [MP_F_NLR_PUSH] = 1,
    [MP_F_NLR_POP] = 0,
    [MP_F_NATIVE_RAISE] = 1,
    [MP_F_IMPORT_NAME] = 3,
    [MP_F_IMPORT_FROM] = 2,
    [MP_F_IMPORT_ALL] = 1,
    [MP_F_NEW_SLICE] = 3,
    [MP_F_UNPACK_SEQUENCE] = 3,
    [MP_F_UNPACK_EX] = 3,
    [MP_F_DELETE_NAME] = 1,
    [MP_F_DELETE_GLOBAL] = 1,
    [MP_F_MAKE_CLOSURE_FROM_RAW_CODE] = 3,
    [MP_F_ARG_CHECK_NUM_SIG] = 3,
    [MP_F_SETUP_CODE_STATE] = 4,
    [MP_F_SMALL_INT_FLOOR_DIVIDE] = 2,
    [MP_F_SMALL_INT_MODULO] = 2,
    [MP_F_NATIVE_YIELD_FROM] = 3,
    [MP_F_SETJMP] = 1,
};

#define N_X86 (1)
// Prefix every exported symbol so multiple native emitters can be linked together
#define EXPORT_FUN(name) emit_native_x86_##name
#include "py/emitnative.c"

#endif
diff --git a/circuitpython/py/emitnxtensa.c b/circuitpython/py/emitnxtensa.c
new file mode 100644
index 0000000..34089e9
--- /dev/null
+++ b/circuitpython/py/emitnxtensa.c
@@ -0,0 +1,20 @@
// Xtensa specific stuff
//
// This wrapper instantiates the generic native emitter (py/emitnative.c)
// for the Xtensa (call0 ABI) target.

#include "py/mpconfig.h"

#if MICROPY_EMIT_XTENSA

// this is defined so that the assembler exports generic assembler API macros
#define GENERIC_ASM_API (1)
#include "py/asmxtensa.h"

// Word indices of REG_LOCAL_x in nlr_buf_t
#define NLR_BUF_IDX_LOCAL_1 (8) // a12
#define NLR_BUF_IDX_LOCAL_2 (9) // a13
#define NLR_BUF_IDX_LOCAL_3 (10) // a14

#define N_XTENSA (1)
// Prefix every exported symbol so multiple native emitters can be linked together
#define EXPORT_FUN(name) emit_native_xtensa_##name
#include "py/emitnative.c"

#endif
diff --git a/circuitpython/py/emitnxtensawin.c b/circuitpython/py/emitnxtensawin.c
new file mode 100644
index 0000000..38d5db1
--- /dev/null
+++ b/circuitpython/py/emitnxtensawin.c
@@ -0,0 +1,23 @@
+// Xtensa-Windowed specific stuff
+
+#include "py/mpconfig.h"
+
+#if MICROPY_EMIT_XTENSAWIN
+
+// this is defined so that the assembler exports generic assembler API macros
+#define GENERIC_ASM_API (1)
+#define GENERIC_ASM_API_WIN (1)
+#include "py/asmxtensa.h"
+
+// Word indices of REG_LOCAL_x in nlr_buf_t
+#define NLR_BUF_IDX_LOCAL_1 (2 + 4) // a4
+#define NLR_BUF_IDX_LOCAL_2 (2 + 5) // a5
+#define NLR_BUF_IDX_LOCAL_3 (2 + 6) // a6
+
+#define N_NLR_SETJMP (1)
+#define N_PRELUDE_AS_BYTES_OBJ (1)
+#define N_XTENSAWIN (1)
+#define EXPORT_FUN(name) emit_native_xtensawin_##name
+#include "py/emitnative.c"
+
+#endif
diff --git a/circuitpython/py/enum.c b/circuitpython/py/enum.c
new file mode 100644
index 0000000..4728c7f
--- /dev/null
+++ b/circuitpython/py/enum.c
@@ -0,0 +1,52 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2020 Jeff Epler for Adafruit Industries
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/enum.h"
+#include "py/runtime.h"
+
+// Reverse lookup: linearly scan the enum type's locals dict for the member
+// whose integer value equals `value`.  Returns that singleton member object,
+// or mp_const_none when no member matches.  O(n) in the member count.
+mp_obj_t cp_enum_find(const mp_obj_type_t *type, int value) {
+ const mp_obj_dict_t *dict = type->locals_dict;
+ for (size_t i = 0; i < dict->map.used; i++) {
+ // Assumes every entry in the locals dict is a cp_enum_obj_t created
+ // by MAKE_ENUM_VALUE -- TODO confirm no non-member entries are added.
+ const cp_enum_obj_t *v = MP_OBJ_TO_PTR(dict->map.table[i].value);
+ if (v->value == value) {
+ // Cast drops const: members live in ROM, callers must not mutate.
+ return (mp_obj_t)v;
+ }
+ }
+ return mp_const_none;
+}
+
+// Return the integer value carried by enum member `obj`.
+// Raises TypeError (naming the expected type) when `obj` is not an instance
+// of `type`, so the cast below is safe once the check passes.
+int cp_enum_value(const mp_obj_type_t *type, mp_obj_t obj) {
+ if (!mp_obj_is_type(obj, type)) {
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("Expected a %q"), type->name);
+ }
+ return ((cp_enum_obj_t *)MP_OBJ_TO_PTR(obj))->value;
+}
+
+// Shared print helper for enum members: emits "module.Type.NAME".
+// `module` is the enclosing module's qstr (passed as uint16_t; assumes qstr
+// values fit in 16 bits -- see cp_enum_obj_t).  `kind` is ignored: repr and
+// str print identically.
+void cp_enum_obj_print_helper(uint16_t module, const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ cp_enum_obj_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "%q.%q.%q", module, self->base.type->name, self->name);
+}
diff --git a/circuitpython/py/enum.h b/circuitpython/py/enum.h
new file mode 100644
index 0000000..d0b8529
--- /dev/null
+++ b/circuitpython/py/enum.h
@@ -0,0 +1,65 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2020 Jeff Epler for Adafruit Industries
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "py/obj.h"
+
+// One member of a CircuitPython-style enum.  Members are ROM singletons
+// created with MAKE_ENUM_VALUE, so identity comparison is valid.
+typedef struct {
+ mp_obj_base_t base;
+ int16_t value; // integer payload of this member
+ int16_t name; // member name as a qstr (assumes qstrs fit in 16 bits -- TODO confirm)
+} cp_enum_obj_t;
+
+// Define the singleton object for one enum member.
+#define MAKE_ENUM_VALUE(type, prefix, name, value) \
+ const cp_enum_obj_t prefix##_##name##_obj = { \
+ { &type }, value, MP_QSTR_##name, \
+ }
+
+// Open the locals table that lists all members; populate it with
+// MAKE_ENUM_MAP_ENTRY rows.
+#define MAKE_ENUM_MAP(name) \
+ const mp_rom_map_elem_t name##_locals_table[] =
+
+// One locals-table row mapping MP_QSTR_<name> to the member object.
+#define MAKE_ENUM_MAP_ENTRY(prefix, name) \
+ { MP_ROM_QSTR(MP_QSTR_##name), MP_ROM_PTR(&prefix##_##name##_obj) }
+
+// Emit a static <typename>_print function that forwards to
+// cp_enum_obj_print_helper with the module's qstr.
+#define MAKE_PRINTER(module, typename) \
+ STATIC void typename##_##print(const mp_print_t * print, mp_obj_t self_in, mp_print_kind_t kind) { \
+ cp_enum_obj_print_helper(MP_QSTR_##module, print, self_in, kind); \
+ }
+
+// Define the mp_obj_type_t for the enum.  Requires <typename>_print (from
+// MAKE_PRINTER) and <typename>_locals_dict to already be defined.
+#define MAKE_ENUM_TYPE(module, type, typename) \
+ const mp_obj_type_t typename##_type = { \
+ { &mp_type_type }, \
+ .name = MP_QSTR_##type, \
+ .print = typename##_print, \
+ .locals_dict = (mp_obj_dict_t *)&typename##_locals_dict, \
+ }
+
+
+
+// Find the member of `type` with integer `value`, or mp_const_none.
+mp_obj_t cp_enum_find(const mp_obj_type_t *type, int value);
+// Value of member `obj`; raises TypeError if obj is not a `type` instance.
+int cp_enum_value(const mp_obj_type_t *type, mp_obj_t obj);
+// Print helper emitting "module.Type.NAME"; `kind` ignored.
+void cp_enum_obj_print_helper(uint16_t module, const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind);
diff --git a/circuitpython/py/formatfloat.c b/circuitpython/py/formatfloat.c
new file mode 100644
index 0000000..0677516
--- /dev/null
+++ b/circuitpython/py/formatfloat.c
@@ -0,0 +1,438 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#if MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <math.h>
+#include "py/formatfloat.h"
+
+/***********************************************************************
+
+ Routine for converting an arbitrary floating
+ point number into a string.
+
+ The code in this function was inspired by Fred Bayer's pdouble.c.
+ Since pdouble.c was released as Public Domain, I'm releasing this
+ code as public domain as well.
+
+ The original code can be found in https://github.com/dhylands/format-float
+
+ Dave Hylands
+
+***********************************************************************/
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+// 1 sign bit, 8 exponent bits, and 23 mantissa bits.
+// exponent values 0 and 255 are reserved, exponent can be 1 to 254.
+// exponent is stored with a bias of 127.
+// The min and max floats are on the order of 1x10^37 and 1x10^-37
+
+#define FPTYPE float
+#define FPCONST(x) x##F
+#define FPROUND_TO_ONE 0.9999995F
+#define FPDECEXP 32
+#define FPMIN_BUF_SIZE 6 // +9e+99
+
+#define FLT_SIGN_MASK 0x80000000
+#define FLT_EXP_MASK 0x7F800000
+#define FLT_MAN_MASK 0x007FFFFF
+
+union floatbits {
+ float f;
+ uint32_t u;
+};
+static inline int fp_signbit(float x) {
+ union floatbits fb = {x};
+ return fb.u & FLT_SIGN_MASK;
+}
+#define fp_isnan(x) isnan(x)
+#define fp_isinf(x) isinf(x)
+static inline int fp_iszero(float x) {
+ union floatbits fb = {x};
+ return fb.u == 0;
+}
+static inline int fp_isless1(float x) {
+ union floatbits fb = {x};
+ return fb.u < 0x3f800000;
+}
+
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+
+#define FPTYPE double
+#define FPCONST(x) x
+#define FPROUND_TO_ONE 0.999999999995
+#define FPDECEXP 256
+#define FPMIN_BUF_SIZE 7 // +9e+199
+#define fp_signbit(x) signbit(x)
+#define fp_isnan(x) isnan(x)
+#define fp_isinf(x) isinf(x)
+#define fp_iszero(x) (x == 0)
+#define fp_isless1(x) (x < 1.0)
+
+#endif
+
+static const FPTYPE g_pos_pow[] = {
+ #if FPDECEXP > 32
+ MICROPY_FLOAT_CONST(1e256), MICROPY_FLOAT_CONST(1e128), MICROPY_FLOAT_CONST(1e64),
+ #endif
+ MICROPY_FLOAT_CONST(1e32), MICROPY_FLOAT_CONST(1e16), MICROPY_FLOAT_CONST(1e8), MICROPY_FLOAT_CONST(1e4), MICROPY_FLOAT_CONST(1e2), MICROPY_FLOAT_CONST(1e1)
+};
+static const FPTYPE g_neg_pow[] = {
+ #if FPDECEXP > 32
+ MICROPY_FLOAT_CONST(1e-256), MICROPY_FLOAT_CONST(1e-128), MICROPY_FLOAT_CONST(1e-64),
+ #endif
+ MICROPY_FLOAT_CONST(1e-32), MICROPY_FLOAT_CONST(1e-16), MICROPY_FLOAT_CONST(1e-8), MICROPY_FLOAT_CONST(1e-4), MICROPY_FLOAT_CONST(1e-2), MICROPY_FLOAT_CONST(1e-1)
+};
+
+// Format the float `f` as a decimal string into `buf` (capacity `buf_size`,
+// including the terminating NUL), in the style of printf.
+//   fmt:  'e'/'E', 'f'/'F' or 'g'/'G'
+//   prec: digits after the decimal point for 'e'/'f', significant digits
+//         for 'g'; a negative value selects the default of 6
+//   sign: '+' or ' ' to force a sign on non-negative values, '\0' for none
+// Returns the number of characters written, excluding the NUL.  If buf is
+// too small for any float (FPMIN_BUF_SIZE) it writes "?" when possible and
+// returns 0 or 1.  Precision is silently reduced as needed so the result
+// never overruns buf.
+int mp_format_float(FPTYPE f, char *buf, size_t buf_size, char fmt, int prec, char sign) {
+
+ char *s = buf;
+
+ if (buf_size <= FPMIN_BUF_SIZE) {
+ // FPMIN_BUF_SIZE is the minimum size needed to store any FP number.
+ // If the buffer does not have enough room for this (plus null terminator)
+ // then don't try to format the float.
+
+ if (buf_size >= 2) {
+ *s++ = '?';
+ }
+ if (buf_size >= 1) {
+ *s = '\0';
+ }
+ return buf_size >= 2;
+ }
+ if (fp_signbit(f) && !fp_isnan(f)) {
+ *s++ = '-';
+ f = -f;
+ } else {
+ if (sign) {
+ *s++ = sign;
+ }
+ }
+
+ // buf_remaining contains bytes available for digits and exponent.
+ // It is buf_size minus room for the sign and null byte.
+ int buf_remaining = buf_size - 1 - (s - buf);
+
+ {
+ // In ASCII, bit 0x20 selects lowercase; XOR-ing it into the letters
+ // below makes INF/NAN match the case of the requested format char.
+ char uc = fmt & 0x20;
+ if (fp_isinf(f)) {
+ *s++ = 'I' ^ uc;
+ *s++ = 'N' ^ uc;
+ *s++ = 'F' ^ uc;
+ goto ret;
+ } else if (fp_isnan(f)) {
+ *s++ = 'N' ^ uc;
+ *s++ = 'A' ^ uc;
+ *s++ = 'N' ^ uc;
+ ret:
+ *s = '\0';
+ return s - buf;
+ }
+ }
+
+ if (prec < 0) {
+ prec = 6;
+ }
+ char e_char = 'E' | (fmt & 0x20); // e_char will match case of fmt
+ fmt |= 0x20; // Force fmt to be lowercase
+ char org_fmt = fmt;
+ if (fmt == 'g' && prec == 0) {
+ prec = 1;
+ }
+ int e, e1;
+ int dec = 0;
+ char e_sign = '\0';
+ int num_digits = 0;
+ const FPTYPE *pos_pow = g_pos_pow;
+ const FPTYPE *neg_pow = g_neg_pow;
+
+ if (fp_iszero(f)) {
+ e = 0;
+ if (fmt == 'f') {
+ // Truncate precision to prevent buffer overflow
+ if (prec + 2 > buf_remaining) {
+ prec = buf_remaining - 2;
+ }
+ num_digits = prec + 1;
+ } else {
+ // Truncate precision to prevent buffer overflow
+ if (prec + 6 > buf_remaining) {
+ prec = buf_remaining - 6;
+ }
+ if (fmt == 'e') {
+ e_sign = '+';
+ }
+ }
+ } else if (fp_isless1(f)) {
+ // We need to figure out what an integer digit will be used
+ // in case 'f' is used (or we revert other format to it below).
+ // As we just tested number to be <1, this is obviously 0,
+ // but we can round it up to 1 below.
+ char first_dig = '0';
+ if (f >= FPROUND_TO_ONE) {
+ first_dig = '1';
+ }
+
+ // Build negative exponent
+ for (e = 0, e1 = FPDECEXP; e1; e1 >>= 1, pos_pow++, neg_pow++) {
+ if (*neg_pow > f) {
+ e += e1;
+ f *= *pos_pow;
+ }
+ }
+ char e_sign_char = '-';
+ if (fp_isless1(f) && f >= FPROUND_TO_ONE) {
+ f = FPCONST(1.0);
+ if (e == 0) {
+ e_sign_char = '+';
+ }
+ } else if (fp_isless1(f)) {
+ e++;
+ f *= FPCONST(10.0);
+ }
+
+ // If the user specified 'g' format, and e is <= 4, then we'll switch
+ // to the fixed format ('f')
+
+ if (fmt == 'f' || (fmt == 'g' && e <= 4)) {
+ fmt = 'f';
+ dec = -1;
+ *s++ = first_dig;
+
+ if (org_fmt == 'g') {
+ prec += (e - 1);
+ }
+
+ // truncate precision to prevent buffer overflow
+ if (prec + 2 > buf_remaining) {
+ prec = buf_remaining - 2;
+ }
+
+ num_digits = prec;
+ if (num_digits) {
+ *s++ = '.';
+ while (--e && num_digits) {
+ *s++ = '0';
+ num_digits--;
+ }
+ }
+ } else {
+ // For e & g formats, we'll be printing the exponent, so set the
+ // sign.
+ e_sign = e_sign_char;
+ dec = 0;
+
+ if (prec > (buf_remaining - FPMIN_BUF_SIZE)) {
+ prec = buf_remaining - FPMIN_BUF_SIZE;
+ if (fmt == 'g') {
+ prec++;
+ }
+ }
+ }
+ } else {
+ // Build positive exponent
+ for (e = 0, e1 = FPDECEXP; e1; e1 >>= 1, pos_pow++, neg_pow++) {
+ if (*pos_pow <= f) {
+ e += e1;
+ f *= *neg_pow;
+ }
+ }
+
+ // It can be that f was right on the edge of an entry in pos_pow needs to be reduced
+ if ((int)f >= 10) {
+ e += 1;
+ f *= FPCONST(0.1);
+ }
+
+ // If the user specified fixed format (fmt == 'f') and e makes the
+ // number too big to fit into the available buffer, then we'll
+ // switch to the 'e' format.
+
+ if (fmt == 'f') {
+ if (e >= buf_remaining) {
+ fmt = 'e';
+ } else if ((e + prec + 2) > buf_remaining) {
+ prec = buf_remaining - e - 2;
+ if (prec < 0) {
+ // This means no decimal point, so we can add one back
+ // for the decimal.
+ prec++;
+ }
+ }
+ }
+ if (fmt == 'e' && prec > (buf_remaining - FPMIN_BUF_SIZE)) {
+ prec = buf_remaining - FPMIN_BUF_SIZE;
+ }
+ if (fmt == 'g') {
+ // Truncate precision to prevent buffer overflow
+ if (prec + (FPMIN_BUF_SIZE - 1) > buf_remaining) {
+ prec = buf_remaining - (FPMIN_BUF_SIZE - 1);
+ }
+ }
+ // If the user specified 'g' format, and e is < prec, then we'll switch
+ // to the fixed format.
+
+ if (fmt == 'g' && e < prec) {
+ fmt = 'f';
+ prec -= (e + 1);
+ }
+ if (fmt == 'f') {
+ dec = e;
+ num_digits = prec + e + 1;
+ } else {
+ e_sign = '+';
+ }
+ }
+ if (prec < 0) {
+ // This can happen when the prec is trimmed to prevent buffer overflow
+ prec = 0;
+ }
+
+ // We now have num.f as a floating point number between >= 1 and < 10
+ // (or equal to zero), and e contains the absolute value of the power of
+ // 10 exponent. and (dec + 1) == the number of digits before the decimal.
+
+ // For e, prec is # digits after the decimal
+ // For f, prec is # digits after the decimal
+ // For g, prec is the max number of significant digits
+ //
+ // For e & g there will be a single digit before the decimal
+ // for f there will be e digits before the decimal
+
+ if (fmt == 'e') {
+ num_digits = prec + 1;
+ } else if (fmt == 'g') {
+ if (prec == 0) {
+ prec = 1;
+ }
+ num_digits = prec;
+ }
+
+ // Print the digits of the mantissa
+ for (int i = 0; i < num_digits; ++i, --dec) {
+ int32_t d = (int32_t)f;
+ if (d < 0) {
+ *s++ = '0';
+ } else {
+ *s++ = '0' + d;
+ }
+ if (dec == 0 && prec > 0) {
+ *s++ = '.';
+ }
+ f -= (FPTYPE)d;
+ f *= FPCONST(10.0);
+ }
+
+ // Round
+ // If we print non-exponential format (i.e. 'f'), but a digit we're going
+ // to round by (e) is too far away, then there's nothing to round.
+ if ((org_fmt != 'f' || e <= num_digits) && f >= FPCONST(5.0)) {
+ char *rs = s;
+ rs--;
+ while (1) {
+ if (*rs == '.') {
+ rs--;
+ continue;
+ }
+ if (*rs < '0' || *rs > '9') {
+ // + or -
+ rs++; // So we sit on the digit to the right of the sign
+ break;
+ }
+ if (*rs < '9') {
+ (*rs)++;
+ break;
+ }
+ *rs = '0';
+ if (rs == buf) {
+ break;
+ }
+ rs--;
+ }
+ if (*rs == '0') {
+ // We need to insert a 1
+ if (rs[1] == '.' && fmt != 'f') {
+ // We're going to round 9.99 to 10.00
+ // Move the decimal point
+ rs[0] = '.';
+ rs[1] = '0';
+ if (e_sign == '-') {
+ e--;
+ if (e == 0) {
+ e_sign = '+';
+ }
+ } else {
+ e++;
+ }
+ } else {
+ // Need an extra digit at the end to make room for the leading '1'
+ s++;
+ }
+ char *ss = s;
+ while (ss > rs) {
+ *ss = ss[-1];
+ ss--;
+ }
+ *rs = '1';
+ }
+ }
+
+ // verify that we did not overrun the input buffer so far
+ assert((size_t)(s + 1 - buf) <= buf_size);
+
+ if (org_fmt == 'g' && prec > 0) {
+ // Remove trailing zeros and a trailing decimal point
+ while (s[-1] == '0') {
+ s--;
+ }
+ if (s[-1] == '.') {
+ s--;
+ }
+ }
+ // Append the exponent
+ if (e_sign) {
+ *s++ = e_char;
+ *s++ = e_sign;
+ if (FPMIN_BUF_SIZE == 7 && e >= 100) {
+ *s++ = '0' + (e / 100);
+ }
+ *s++ = '0' + ((e / 10) % 10);
+ *s++ = '0' + (e % 10);
+ }
+ *s = '\0';
+
+ // verify that we did not overrun the input buffer
+ assert((size_t)(s + 1 - buf) <= buf_size);
+
+ return s - buf;
+}
+
+#endif // MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_NONE
diff --git a/circuitpython/py/formatfloat.h b/circuitpython/py/formatfloat.h
new file mode 100644
index 0000000..c433cb8
--- /dev/null
+++ b/circuitpython/py/formatfloat.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_FORMATFLOAT_H
+#define MICROPY_INCLUDED_PY_FORMATFLOAT_H
+
+#include "py/mpconfig.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+int mp_format_float(mp_float_t f, char *buf, size_t bufSize, char fmt, int prec, char sign);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_FORMATFLOAT_H
diff --git a/circuitpython/py/frozenmod.c b/circuitpython/py/frozenmod.c
new file mode 100644
index 0000000..57e9d5d
--- /dev/null
+++ b/circuitpython/py/frozenmod.c
@@ -0,0 +1,135 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2021 Jim Mussared
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdint.h>
+
+#include "py/lexer.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_MODULE_FROZEN
+
+// Null-separated frozen file names. All string-type entries are listed first,
+// followed by mpy-type entries. Use mp_frozen_str_sizes to determine how
+// many string entries.
+extern const char mp_frozen_names[];
+
+#if MICROPY_MODULE_FROZEN_STR
+
+#ifndef MICROPY_MODULE_FROZEN_LEXER
+#define MICROPY_MODULE_FROZEN_LEXER mp_lexer_new_from_str_len
+#else
+mp_lexer_t *MICROPY_MODULE_FROZEN_LEXER(qstr src_name, const char *str, mp_uint_t len, mp_uint_t free_len);
+#endif
+
+// Size in bytes of each string entry, followed by a zero (terminator).
+extern const uint32_t mp_frozen_str_sizes[];
+// Null-separated string content.
+extern const char mp_frozen_str_content[];
+#endif // MICROPY_MODULE_FROZEN_STR
+
+#if MICROPY_MODULE_FROZEN_MPY
+
+#include "py/emitglue.h"
+
+extern const mp_raw_code_t *const mp_frozen_mpy_content[];
+
+#endif // MICROPY_MODULE_FROZEN_MPY
+
+// Search for "str" as a frozen entry, returning the stat result
+// (no-exist/file/dir), as well as the type (none/str/mpy) and data.
+// frozen_type can be NULL if its value isn't needed (and then data is assumed to be NULL).
+mp_import_stat_t mp_find_frozen_module(const char *str, int *frozen_type, void **data) {
+ size_t len = strlen(str);
+ const char *name = mp_frozen_names;
+
+ // Default to "not frozen"; overwritten below on an exact match.
+ if (frozen_type != NULL) {
+ *frozen_type = MP_FROZEN_NONE;
+ }
+
+ // Count the number of str lengths we have to find how many str entries.
+ // NOTE(review): the count is only compiled in when BOTH str and mpy
+ // frozen modules are enabled; otherwise num_str stays 0 -- verify the
+ // single-type configurations behave as intended.
+ size_t num_str = 0;
+ #if MICROPY_MODULE_FROZEN_STR && MICROPY_MODULE_FROZEN_MPY
+ for (const uint32_t *s = mp_frozen_str_sizes; *s != 0; ++s) {
+ ++num_str;
+ }
+ #endif
+
+ // Walk the NUL-separated name list; str entries come first, then mpy.
+ for (size_t i = 0; *name != 0; i++) {
+ size_t entry_len = strlen(name);
+ if (entry_len >= len && memcmp(str, name, len) == 0) {
+ // Query is a prefix of the current entry.
+ if (entry_len == len) {
+ // Exact match --> file.
+
+ if (frozen_type != NULL) {
+ #if MICROPY_MODULE_FROZEN_STR
+ if (i < num_str) {
+ *frozen_type = MP_FROZEN_STR;
+ // Use the size table to figure out where this index starts.
+ size_t offset = 0;
+ for (size_t j = 0; j < i; ++j) {
+ offset += mp_frozen_str_sizes[j] + 1;
+ }
+ size_t content_len = mp_frozen_str_sizes[i];
+ const char *content = &mp_frozen_str_content[offset];
+
+ // Note: str & len have been updated by find_frozen_entry to strip
+ // the ".frozen/" prefix (to avoid this being a distinct qstr to
+ // the original path QSTR in frozen_content.c).
+ qstr source = qstr_from_strn(str, len);
+ mp_lexer_t *lex = MICROPY_MODULE_FROZEN_LEXER(source, content, content_len, 0);
+ *data = lex;
+ }
+ #endif
+
+ #if MICROPY_MODULE_FROZEN_MPY
+ if (i >= num_str) {
+ *frozen_type = MP_FROZEN_MPY;
+ // Load the corresponding index as a raw_code, taking
+ // into account any string entries to offset by.
+ *data = (void *)mp_frozen_mpy_content[i - num_str];
+ }
+ #endif
+ }
+
+ return MP_IMPORT_STAT_FILE;
+ } else if (name[len] == '/') {
+ // Matches up to directory separator, this is a valid
+ // directory path.
+ return MP_IMPORT_STAT_DIR;
+ }
+ }
+ // Skip null separator.
+ name += entry_len + 1;
+ }
+
+ return MP_IMPORT_STAT_NO_EXIST;
+}
+
+#endif // MICROPY_MODULE_FROZEN
diff --git a/circuitpython/py/frozenmod.h b/circuitpython/py/frozenmod.h
new file mode 100644
index 0000000..0a907b8
--- /dev/null
+++ b/circuitpython/py/frozenmod.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2015 Paul Sokolovsky
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_FROZENMOD_H
+#define MICROPY_INCLUDED_PY_FROZENMOD_H
+
+#include "py/lexer.h"
+
+// Kinds reported through mp_find_frozen_module's *frozen_type output.
+enum {
+ MP_FROZEN_NONE, // no frozen entry matched
+ MP_FROZEN_STR, // frozen source text; *data is an mp_lexer_t *
+ MP_FROZEN_MPY, // precompiled bytecode; *data is an mp_raw_code_t *
+};
+
+mp_import_stat_t mp_find_frozen_module(const char *str, int *frozen_type, void **data);
+
+#endif // MICROPY_INCLUDED_PY_FROZENMOD_H
diff --git a/circuitpython/py/gc.c b/circuitpython/py/gc.c
new file mode 100644
index 0000000..826540d
--- /dev/null
+++ b/circuitpython/py/gc.c
@@ -0,0 +1,1217 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/gc.h"
+#include "py/runtime.h"
+
+#if MICROPY_DEBUG_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
+#include "supervisor/shared/safe_mode.h"
+
+#if CIRCUITPY_MEMORYMONITOR
+#include "shared-module/memorymonitor/__init__.h"
+#endif
+
+#if MICROPY_ENABLE_GC
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// Uncomment this if you want to use a debugger to capture state at every allocation and free.
+// #define LOG_HEAP_ACTIVITY 1
+
+// make this 1 to dump the heap each time it changes
+#define EXTENSIVE_HEAP_PROFILING (0)
+
+// make this 1 to zero out swept memory to more eagerly
+// detect untraced object still in use
+#define CLEAR_ON_SWEEP (0)
+
+// ATB = allocation table byte
+// 0b00 = FREE -- free block
+// 0b01 = HEAD -- head of a chain of blocks
+// 0b10 = TAIL -- in the tail of a chain of blocks
+// 0b11 = MARK -- marked head block
+
+#define AT_FREE (0)
+#define AT_HEAD (1)
+#define AT_TAIL (2)
+#define AT_MARK (3)
+
+#define BLOCKS_PER_ATB (4)
+
+#define BLOCK_SHIFT(block) (2 * ((block) & (BLOCKS_PER_ATB - 1)))
+#define ATB_GET_KIND(block) ((MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] >> BLOCK_SHIFT(block)) & 3)
+#define ATB_ANY_TO_FREE(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_MARK << BLOCK_SHIFT(block))); } while (0)
+#define ATB_FREE_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_HEAD << BLOCK_SHIFT(block)); } while (0)
+#define ATB_FREE_TO_TAIL(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_TAIL << BLOCK_SHIFT(block)); } while (0)
+#define ATB_HEAD_TO_MARK(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] |= (AT_MARK << BLOCK_SHIFT(block)); } while (0)
+#define ATB_MARK_TO_HEAD(block) do { MP_STATE_MEM(gc_alloc_table_start)[(block) / BLOCKS_PER_ATB] &= (~(AT_TAIL << BLOCK_SHIFT(block))); } while (0)
+
+#define BLOCK_FROM_PTR(ptr) (((byte *)(ptr) - MP_STATE_MEM(gc_pool_start)) / BYTES_PER_BLOCK)
+#define PTR_FROM_BLOCK(block) (((block) * BYTES_PER_BLOCK + (uintptr_t)MP_STATE_MEM(gc_pool_start)))
+#define ATB_FROM_BLOCK(bl) ((bl) / BLOCKS_PER_ATB)
+
+#if MICROPY_ENABLE_FINALISER
+// FTB = finaliser table byte
+// if set, then the corresponding block may have a finaliser
+
+#define BLOCKS_PER_FTB (8)
+
+#define FTB_GET(block) ((MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] >> ((block) & 7)) & 1)
+#define FTB_SET(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] |= (1 << ((block) & 7)); } while (0)
+#define FTB_CLEAR(block) do { MP_STATE_MEM(gc_finaliser_table_start)[(block) / BLOCKS_PER_FTB] &= (~(1 << ((block) & 7))); } while (0)
+#endif
+
+#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+#define GC_ENTER() mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1)
+#define GC_EXIT() mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex))
+#else
+#define GC_ENTER()
+#define GC_EXIT()
+#endif
+
+#ifdef LOG_HEAP_ACTIVITY
+// Debugger hook: called on every allocation/free so a breakpoint can capture
+// heap activity. The volatile global plus -O0 and noinline keep the calls and
+// the stores from being optimised away.
+volatile uint32_t change_me;
+#pragma GCC push_options
+#pragma GCC optimize ("O0")
+void __attribute__ ((noinline)) gc_log_change(uint32_t start_block, uint32_t length) {
+ change_me += start_block;
+ change_me += length; // Break on this line.
+}
+#pragma GCC pop_options
+#endif
+
+// TODO waste less memory; currently requires that all entries in alloc_table have a corresponding block in pool
+// Initialise the GC to manage the memory region [start, end): carve it into
+// the allocation table (ATB), the optional finaliser table (FTB) and the
+// block pool, zero both tables, and reset allocator state (free-ATB indices,
+// long-lived watermark, lock depth, auto-collect flag, threshold, mutex).
+void gc_init(void *start, void *end) {
+ // align end pointer on block boundary
+ end = (void *)((uintptr_t)end & (~(BYTES_PER_BLOCK - 1)));
+ DEBUG_printf("Initializing GC heap: %p..%p = " UINT_FMT " bytes\n", start, end, (byte *)end - (byte *)start);
+
+ // calculate parameters for GC (T=total, A=alloc table, F=finaliser table, P=pool; all in bytes):
+ // T = A + F + P
+ // F = A * BLOCKS_PER_ATB / BLOCKS_PER_FTB
+ // P = A * BLOCKS_PER_ATB * BYTES_PER_BLOCK
+ // => T = A * (1 + BLOCKS_PER_ATB / BLOCKS_PER_FTB + BLOCKS_PER_ATB * BYTES_PER_BLOCK)
+ size_t total_byte_len = (byte *)end - (byte *)start;
+ #if MICROPY_ENABLE_FINALISER
+ MP_STATE_MEM(gc_alloc_table_byte_len) = (total_byte_len - 1) * MP_BITS_PER_BYTE / (MP_BITS_PER_BYTE + MP_BITS_PER_BYTE * BLOCKS_PER_ATB / BLOCKS_PER_FTB + MP_BITS_PER_BYTE * BLOCKS_PER_ATB * BYTES_PER_BLOCK);
+ #else
+ MP_STATE_MEM(gc_alloc_table_byte_len) = total_byte_len / (1 + MP_BITS_PER_BYTE / 2 * BYTES_PER_BLOCK);
+ #endif
+
+ MP_STATE_MEM(gc_alloc_table_start) = (byte *)start;
+
+ #if MICROPY_ENABLE_FINALISER
+ size_t gc_finaliser_table_byte_len = (MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB + BLOCKS_PER_FTB - 1) / BLOCKS_PER_FTB;
+ // The "+ 1" leaves one gap byte after the ATB; presumably this is the extra
+ // byte that the memset below clears to terminate tail scans — confirm.
+ MP_STATE_MEM(gc_finaliser_table_start) = MP_STATE_MEM(gc_alloc_table_start) + MP_STATE_MEM(gc_alloc_table_byte_len) + 1;
+ #endif
+
+ size_t gc_pool_block_len = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
+ MP_STATE_MEM(gc_pool_start) = (byte *)end - gc_pool_block_len * BYTES_PER_BLOCK;
+ MP_STATE_MEM(gc_pool_end) = end;
+
+ #if MICROPY_ENABLE_FINALISER
+ assert(MP_STATE_MEM(gc_pool_start) >= MP_STATE_MEM(gc_finaliser_table_start) + gc_finaliser_table_byte_len);
+ #endif
+
+ // Clear ATBs plus one more byte. The extra byte might be read when we read the final ATB and
+ // then try to count its tail. Clearing the byte ensures it is 0 and ends the chain. Without an
+ // FTB, it'll just clear the pool byte early.
+ memset(MP_STATE_MEM(gc_alloc_table_start), 0, MP_STATE_MEM(gc_alloc_table_byte_len) + 1);
+
+ #if MICROPY_ENABLE_FINALISER
+ // clear FTBs
+ memset(MP_STATE_MEM(gc_finaliser_table_start), 0, gc_finaliser_table_byte_len);
+ #endif
+
+ // Set first free ATB index to the start of the heap.
+ for (size_t i = 0; i < MICROPY_ATB_INDICES; i++) {
+ MP_STATE_MEM(gc_first_free_atb_index)[i] = 0;
+ }
+
+ // Set last free ATB index to the end of the heap.
+ MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
+
+ // Set the lowest long lived ptr to the end of the heap to start. This will be lowered as long
+ // lived objects are allocated.
+ // NOTE(review): the multiplication is inside the macro argument; this only
+ // expands correctly if MP_STATE_MEM is a plain member-access macro.
+ // MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB would be clearer.
+ MP_STATE_MEM(gc_lowest_long_lived_ptr) = (void *)PTR_FROM_BLOCK(MP_STATE_MEM(gc_alloc_table_byte_len * BLOCKS_PER_ATB));
+
+ // unlock the GC
+ MP_STATE_THREAD(gc_lock_depth) = 0;
+
+ // allow auto collection
+ MP_STATE_MEM(gc_auto_collect_enabled) = true;
+
+ #if MICROPY_GC_ALLOC_THRESHOLD
+ // by default, maxuint for gc threshold, effectively turning gc-by-threshold off
+ MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
+ MP_STATE_MEM(gc_alloc_amount) = 0;
+ #endif
+
+ #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+ mp_thread_mutex_init(&MP_STATE_MEM(gc_mutex));
+ #endif
+
+ MP_STATE_MEM(permanent_pointers) = NULL;
+
+ DEBUG_printf("GC layout:\n");
+ DEBUG_printf(" alloc table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_alloc_table_start), MP_STATE_MEM(gc_alloc_table_byte_len), MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
+ #if MICROPY_ENABLE_FINALISER
+ DEBUG_printf(" finaliser table at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_finaliser_table_start), gc_finaliser_table_byte_len, gc_finaliser_table_byte_len * BLOCKS_PER_FTB);
+ #endif
+ DEBUG_printf(" pool at %p, length " UINT_FMT " bytes, " UINT_FMT " blocks\n", MP_STATE_MEM(gc_pool_start), gc_pool_block_len * BYTES_PER_BLOCK, gc_pool_block_len);
+}
+
+// Shut the GC down: run all outstanding finalisers via gc_sweep_all(), then
+// clear gc_pool_start so gc_alloc_possible() reports false and further
+// allocation attempts trip the safe-mode check in gc_alloc().
+void gc_deinit(void) {
+ // Run any finalisers before we stop using the heap.
+ gc_sweep_all();
+
+ MP_STATE_MEM(gc_pool_start) = 0;
+}
+
+// Increment this thread's GC lock depth; while non-zero, gc_alloc/gc_free/
+// gc_realloc on this thread refuse to run.
+void gc_lock(void) {
+ // This does not need to be atomic or have the GC mutex because:
+ // - each thread has its own gc_lock_depth so there are no races between threads;
+ // - a hard interrupt will only change gc_lock_depth during its execution, and
+ // upon return will restore the value of gc_lock_depth.
+ MP_STATE_THREAD(gc_lock_depth)++;
+}
+
+// Decrement this thread's GC lock depth; pairs with gc_lock().
+void gc_unlock(void) {
+ // This does not need to be atomic, See comment above in gc_lock.
+ MP_STATE_THREAD(gc_lock_depth)--;
+}
+
+// Return true if the GC is currently locked on this thread.
+bool gc_is_locked(void) {
+ return MP_STATE_THREAD(gc_lock_depth) != 0;
+}
+
+#ifndef TRACE_MARK
+#if DEBUG_PRINT
+#define TRACE_MARK(block, ptr) DEBUG_printf("gc_mark(%p)\n", ptr)
+#else
+#define TRACE_MARK(block, ptr)
+#endif
+#endif
+
+// Take the given block as the topmost block on the stack. Check all it's
+// children: mark the unmarked child blocks and put those newly marked
+// blocks on the stack. When all children have been checked, pop off the
+// topmost block on the stack and repeat with that one.
+// Uses the fixed-size gc_stack (MICROPY_ALLOC_GC_STACK_SIZE entries) instead
+// of recursion; on overflow it sets gc_stack_overflow and relies on
+// gc_deal_with_stack_overflow() to rescan. The block passed in must already
+// be marked (AT_MARK).
+STATIC void gc_mark_subtree(size_t block) {
+ // Start with the block passed in the argument.
+ size_t sp = 0;
+ for (;;) {
+ MICROPY_GC_HOOK_LOOP
+ // work out number of consecutive blocks in the chain starting with this one
+ size_t n_blocks = 0;
+ do {
+ n_blocks += 1;
+ } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
+
+ // check this block's children
+ // Conservative scan: every word of the allocation is treated as a
+ // potential pointer into the heap.
+ void **ptrs = (void **)PTR_FROM_BLOCK(block);
+ for (size_t i = n_blocks * BYTES_PER_BLOCK / sizeof(void *); i > 0; i--, ptrs++) {
+ MICROPY_GC_HOOK_LOOP
+ void *ptr = *ptrs;
+ if (VERIFY_PTR(ptr)) {
+ // Mark and push this pointer
+ size_t childblock = BLOCK_FROM_PTR(ptr);
+ if (ATB_GET_KIND(childblock) == AT_HEAD) {
+ // an unmarked head, mark it, and push it on gc stack
+ TRACE_MARK(childblock, ptr);
+ ATB_HEAD_TO_MARK(childblock);
+ if (sp < MICROPY_ALLOC_GC_STACK_SIZE) {
+ MP_STATE_MEM(gc_stack)[sp++] = childblock;
+ } else {
+ MP_STATE_MEM(gc_stack_overflow) = 1;
+ }
+ }
+ }
+ }
+
+ // Are there any blocks on the stack?
+ if (sp == 0) {
+ break; // No, stack is empty, we're done.
+ }
+
+ // pop the next block off the stack
+ block = MP_STATE_MEM(gc_stack)[--sp];
+ }
+}
+
+// Recover from a mark-stack overflow: rescan the whole heap, re-tracing every
+// marked block so children missed by the overflowed gc_mark_subtree() get
+// marked. Repeats until a full pass completes without overflowing again.
+STATIC void gc_deal_with_stack_overflow(void) {
+ while (MP_STATE_MEM(gc_stack_overflow)) {
+ MP_STATE_MEM(gc_stack_overflow) = 0;
+
+ // scan entire memory looking for blocks which have been marked but not their children
+ for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
+ MICROPY_GC_HOOK_LOOP
+ // trace (again) if mark bit set
+ if (ATB_GET_KIND(block) == AT_MARK) {
+ gc_mark_subtree(block);
+ }
+ }
+ }
+}
+
+// Sweep phase: walk every block; unmarked HEAD blocks (and their TAILs) are
+// freed — running __del__ finalisers first where the FTB bit is set — and
+// MARK blocks are reverted to HEAD for the next cycle.
+STATIC void gc_sweep(void) {
+ #if MICROPY_PY_GC_COLLECT_RETVAL
+ MP_STATE_MEM(gc_collected) = 0;
+ #endif
+ // free unmarked heads and their tails
+ // free_tail tracks whether the TAIL blocks that follow belong to a freed
+ // (unmarked) head, and so must be freed too.
+ int free_tail = 0;
+ for (size_t block = 0; block < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; block++) {
+ MICROPY_GC_HOOK_LOOP
+ switch (ATB_GET_KIND(block)) {
+ case AT_HEAD:
+ #if MICROPY_ENABLE_FINALISER
+ if (FTB_GET(block)) {
+ mp_obj_base_t *obj = (mp_obj_base_t *)PTR_FROM_BLOCK(block);
+ if (obj->type != NULL) {
+ // if the object has a type then see if it has a __del__ method
+ mp_obj_t dest[2];
+ mp_load_method_maybe(MP_OBJ_FROM_PTR(obj), MP_QSTR___del__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ // load_method returned a method, execute it in a protected environment
+ #if MICROPY_ENABLE_SCHEDULER
+ mp_sched_lock();
+ #endif
+ mp_call_function_1_protected(dest[0], dest[1]);
+ #if MICROPY_ENABLE_SCHEDULER
+ mp_sched_unlock();
+ #endif
+ }
+ }
+ // clear finaliser flag
+ FTB_CLEAR(block);
+ }
+ #endif
+ free_tail = 1;
+ DEBUG_printf("gc_sweep(%p)\n", (void *)PTR_FROM_BLOCK(block));
+
+ #ifdef LOG_HEAP_ACTIVITY
+ gc_log_change(block, 0);
+ #endif
+ #if MICROPY_PY_GC_COLLECT_RETVAL
+ MP_STATE_MEM(gc_collected)++;
+ #endif
+ // fall through to free the head
+ MP_FALLTHROUGH
+
+ case AT_TAIL:
+ if (free_tail) {
+ ATB_ANY_TO_FREE(block);
+ #if CLEAR_ON_SWEEP
+ memset((void *)PTR_FROM_BLOCK(block), 0, BYTES_PER_BLOCK);
+ #endif
+ }
+ break;
+
+ case AT_MARK:
+ // Survivor: revert MARK back to HEAD and keep its tails.
+ ATB_MARK_TO_HEAD(block);
+ free_tail = 0;
+ break;
+ }
+ }
+}
+
+// Mark can handle NULL pointers because it verifies the pointer is within the heap bounds.
+// Marks the block containing ptr (if it is an unmarked HEAD) and traces all
+// of its children via gc_mark_subtree().
+STATIC void gc_mark(void *ptr) {
+ if (VERIFY_PTR(ptr)) {
+ size_t block = BLOCK_FROM_PTR(ptr);
+ if (ATB_GET_KIND(block) == AT_HEAD) {
+ // An unmarked head: mark it, and mark all its children
+ TRACE_MARK(block, ptr);
+ ATB_HEAD_TO_MARK(block);
+ gc_mark_subtree(block);
+ }
+ }
+}
+
+// Begin a collection: take the GC mutex and lock allocation (both held until
+// gc_collect_end()), then mark all root pointers — the root section of
+// mp_state_ctx, the permanent_pointers list, and (if enabled) the pystack.
+// The port is expected to mark further roots (registers, C stack) before
+// calling gc_collect_end().
+void gc_collect_start(void) {
+ GC_ENTER();
+ MP_STATE_THREAD(gc_lock_depth)++;
+ #if MICROPY_GC_ALLOC_THRESHOLD
+ MP_STATE_MEM(gc_alloc_amount) = 0;
+ #endif
+ MP_STATE_MEM(gc_stack_overflow) = 0;
+
+ // Trace root pointers. This relies on the root pointers being organised
+ // correctly in the mp_state_ctx structure. We scan nlr_top, dict_locals,
+ // dict_globals, then the root pointer section of mp_state_vm.
+ void **ptrs = (void **)(void *)&mp_state_ctx;
+ size_t root_start = offsetof(mp_state_ctx_t, thread.dict_locals);
+ size_t root_end = offsetof(mp_state_ctx_t, vm.qstr_last_chunk);
+ gc_collect_root(ptrs + root_start / sizeof(void *), (root_end - root_start) / sizeof(void *));
+
+ gc_mark(MP_STATE_MEM(permanent_pointers));
+
+ #if MICROPY_ENABLE_PYSTACK
+ // Trace root pointers from the Python stack.
+ ptrs = (void **)(void *)MP_STATE_THREAD(pystack_start);
+ gc_collect_root(ptrs, (MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start)) / sizeof(void *));
+ #endif
+}
+
+// Mark a single root pointer (for use between gc_collect_start and
+// gc_collect_end); safe to call with non-heap or NULL pointers.
+void gc_collect_ptr(void *ptr) {
+ gc_mark(ptr);
+}
+
+// Address sanitizer needs to know that the access to ptrs[i] must always be
+// considered OK, even if it's a load from an address that would normally be
+// prohibited (due to being undefined, in a red zone, etc).
+// Returns ptrs[i], or NULL under Valgrind when the slot is not addressable.
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
+__attribute__((no_sanitize_address))
+#endif
+static void *gc_get_ptr(void **ptrs, int i) {
+ #if MICROPY_DEBUG_VALGRIND
+ if (!VALGRIND_CHECK_MEM_IS_ADDRESSABLE(&ptrs[i], sizeof(*ptrs))) {
+ return NULL;
+ }
+ #endif
+ return ptrs[i];
+}
+
+// Conservatively mark an array of len potential root pointers; each word is
+// treated as a candidate heap pointer and marked via gc_mark().
+void gc_collect_root(void **ptrs, size_t len) {
+ for (size_t i = 0; i < len; i++) {
+ MICROPY_GC_HOOK_LOOP
+ void *ptr = gc_get_ptr(ptrs, i);
+ gc_mark(ptr);
+ }
+}
+
+// Finish a collection started by gc_collect_start() (or gc_sweep_all()):
+// handle any mark-stack overflow, sweep unreachable blocks, reset the
+// first/last free ATB indices, then release the allocation lock and mutex.
+void gc_collect_end(void) {
+ gc_deal_with_stack_overflow();
+ gc_sweep();
+ for (size_t i = 0; i < MICROPY_ATB_INDICES; i++) {
+ MP_STATE_MEM(gc_first_free_atb_index)[i] = 0;
+ }
+ MP_STATE_MEM(gc_last_free_atb_index) = MP_STATE_MEM(gc_alloc_table_byte_len) - 1;
+ MP_STATE_THREAD(gc_lock_depth)--;
+ GC_EXIT();
+}
+
+// Free every allocated block on the heap (no marking is done, so nothing
+// survives), running finalisers along the way; used e.g. by gc_deinit().
+// Takes the mutex/lock itself and lets gc_collect_end() release them.
+void gc_sweep_all(void) {
+ GC_ENTER();
+ MP_STATE_THREAD(gc_lock_depth)++;
+ MP_STATE_MEM(gc_stack_overflow) = 0;
+ gc_collect_end();
+}
+
+// Fill *info with heap statistics: total/used/free byte counts, the number of
+// 1-block and 2-block allocations, the largest allocation and the largest
+// contiguous free run (the latter two in blocks, not bytes).
+void gc_info(gc_info_t *info) {
+ GC_ENTER();
+ info->total = MP_STATE_MEM(gc_pool_end) - MP_STATE_MEM(gc_pool_start);
+ info->used = 0;
+ info->free = 0;
+ info->max_free = 0;
+ info->num_1block = 0;
+ info->num_2block = 0;
+ info->max_block = 0;
+ bool finish = false;
+ // len tracks the current allocation's run length, len_free the current
+ // free run's length; both in blocks.
+ for (size_t block = 0, len = 0, len_free = 0; !finish;) {
+ size_t kind = ATB_GET_KIND(block);
+ switch (kind) {
+ case AT_FREE:
+ info->free += 1;
+ len_free += 1;
+ len = 0;
+ break;
+
+ case AT_HEAD:
+ info->used += 1;
+ len = 1;
+ break;
+
+ case AT_TAIL:
+ info->used += 1;
+ len += 1;
+ break;
+
+ case AT_MARK:
+ // shouldn't happen
+ break;
+ }
+
+ block++;
+ finish = (block == MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB);
+ // Get next block type if possible
+ // (kind is reused as a one-block lookahead to detect run boundaries)
+ if (!finish) {
+ kind = ATB_GET_KIND(block);
+ }
+
+ if (finish || kind == AT_FREE || kind == AT_HEAD) {
+ if (len == 1) {
+ info->num_1block += 1;
+ } else if (len == 2) {
+ info->num_2block += 1;
+ }
+ if (len > info->max_block) {
+ info->max_block = len;
+ }
+ if (finish || kind == AT_HEAD) {
+ if (len_free > info->max_free) {
+ info->max_free = len_free;
+ }
+ len_free = 0;
+ }
+ }
+ }
+
+ info->used *= BYTES_PER_BLOCK;
+ info->free *= BYTES_PER_BLOCK;
+ GC_EXIT();
+}
+
+// Return true if the heap has been initialised (and not torn down by
+// gc_deinit(), which zeroes gc_pool_start).
+bool gc_alloc_possible(void) {
+ return MP_STATE_MEM(gc_pool_start) != 0;
+}
+
+// We place long lived objects at the end of the heap rather than the start. This reduces
+// fragmentation by localizing the heap churn to one portion of memory (the start of the heap.)
+// Allocate n_bytes from the GC heap (rounded up to whole blocks). alloc_flags
+// may include GC_ALLOC_FLAG_HAS_FINALISER to set the block's FTB bit;
+// long_lived selects a top-down search from the end of the heap. Returns NULL
+// for zero-size requests, when the GC is locked, or when no space is found
+// even after a collection. Resets into safe mode if the heap is gone.
+void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived) {
+ bool has_finaliser = alloc_flags & GC_ALLOC_FLAG_HAS_FINALISER;
+ size_t n_blocks = ((n_bytes + BYTES_PER_BLOCK - 1) & (~(BYTES_PER_BLOCK - 1))) / BYTES_PER_BLOCK;
+ DEBUG_printf("gc_alloc(" UINT_FMT " bytes -> " UINT_FMT " blocks)\n", n_bytes, n_blocks);
+
+ // check for 0 allocation
+ if (n_blocks == 0) {
+ return NULL;
+ }
+
+ // check if GC is locked
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+ return NULL;
+ }
+
+ if (MP_STATE_MEM(gc_pool_start) == 0) {
+ reset_into_safe_mode(GC_ALLOC_OUTSIDE_VM);
+ }
+
+ GC_ENTER();
+
+ size_t found_block = 0xffffffff;
+ size_t end_block;
+ size_t start_block;
+ size_t n_free;
+ // If auto-collect is disabled, behave as if a collection already happened
+ // so the search failure path returns NULL instead of collecting.
+ bool collected = !MP_STATE_MEM(gc_auto_collect_enabled);
+
+ #if MICROPY_GC_ALLOC_THRESHOLD
+ if (!collected && MP_STATE_MEM(gc_alloc_amount) >= MP_STATE_MEM(gc_alloc_threshold)) {
+ GC_EXIT();
+ gc_collect();
+ collected = 1;
+ GC_ENTER();
+ }
+ #endif
+
+ bool keep_looking = true;
+
+ // When we start searching on the other side of the crossover block we make sure to
+ // perform a collect. That way we'll get the closest free block in our section.
+ size_t crossover_block = BLOCK_FROM_PTR(MP_STATE_MEM(gc_lowest_long_lived_ptr));
+ while (keep_looking) {
+ // Short-lived allocations scan upward from the first-free hint for this
+ // size bucket; long-lived allocations scan downward from the end.
+ int8_t direction = 1;
+ size_t bucket = MIN(n_blocks, MICROPY_ATB_INDICES) - 1;
+ size_t first_free = MP_STATE_MEM(gc_first_free_atb_index)[bucket];
+ size_t start = first_free;
+ if (long_lived) {
+ direction = -1;
+ start = MP_STATE_MEM(gc_last_free_atb_index);
+ }
+ n_free = 0;
+ // look for a run of n_blocks available blocks
+ for (size_t i = start; keep_looking && first_free <= i && i <= MP_STATE_MEM(gc_last_free_atb_index); i += direction) {
+ byte a = MP_STATE_MEM(gc_alloc_table_start)[i];
+ // Four ATB states are packed into a single byte.
+ int j = 0;
+ if (direction == -1) {
+ j = 3;
+ }
+ for (; keep_looking && 0 <= j && j <= 3; j += direction) {
+ if ((a & (0x3 << (j * 2))) == 0) {
+ if (++n_free >= n_blocks) {
+ found_block = i * BLOCKS_PER_ATB + j;
+ keep_looking = false;
+ }
+ } else {
+ if (!collected) {
+ size_t block = i * BLOCKS_PER_ATB + j;
+ // Crossed into the other section without having
+ // collected yet: stop and collect first.
+ if ((direction == 1 && block >= crossover_block) ||
+ (direction == -1 && block < crossover_block)) {
+ keep_looking = false;
+ }
+ }
+ n_free = 0;
+ }
+ }
+ }
+ if (n_free >= n_blocks) {
+ break;
+ }
+
+ GC_EXIT();
+ // nothing found!
+ if (collected) {
+ return NULL;
+ }
+ DEBUG_printf("gc_alloc(" UINT_FMT "): no free mem, triggering GC\n", n_bytes);
+ gc_collect();
+ collected = true;
+ // Try again since we've hopefully freed up space.
+ keep_looking = true;
+ GC_ENTER();
+ }
+ assert(found_block != 0xffffffff);
+
+ // Found free space ending at found_block inclusive.
+ // Also, set last free ATB index to block after last block we found, for start of
+ // next scan. Also, whenever we free or shrink a block we must check if this index needs
+ // adjusting (see gc_realloc and gc_free).
+ if (!long_lived) {
+ end_block = found_block;
+ start_block = found_block - n_free + 1;
+ if (n_blocks < MICROPY_ATB_INDICES) {
+ size_t next_free_atb = (found_block + n_blocks) / BLOCKS_PER_ATB;
+ // Update all atb indices for larger blocks too.
+ for (size_t i = n_blocks - 1; i < MICROPY_ATB_INDICES; i++) {
+ MP_STATE_MEM(gc_first_free_atb_index)[i] = next_free_atb;
+ }
+ }
+ } else {
+ start_block = found_block;
+ end_block = found_block + n_free - 1;
+ // Always update the bounds of the long lived area because we assume it is contiguous. (It
+ // can still be reset by a sweep.)
+ MP_STATE_MEM(gc_last_free_atb_index) = (found_block - 1) / BLOCKS_PER_ATB;
+ }
+
+ #ifdef LOG_HEAP_ACTIVITY
+ gc_log_change(start_block, end_block - start_block + 1);
+ #endif
+
+ // mark first block as used head
+ ATB_FREE_TO_HEAD(start_block);
+
+ // mark rest of blocks as used tail
+ // TODO for a run of many blocks can make this more efficient
+ for (size_t bl = start_block + 1; bl <= end_block; bl++) {
+ ATB_FREE_TO_TAIL(bl);
+ }
+
+ // get pointer to first block
+ // we must create this pointer before unlocking the GC so a collection can find it
+ void *ret_ptr = (void *)(MP_STATE_MEM(gc_pool_start) + start_block * BYTES_PER_BLOCK);
+ DEBUG_printf("gc_alloc(%p)\n", ret_ptr);
+
+ // If the allocation was long live then update the lowest value. Its used to trigger early
+ // collects when allocations fail in their respective section. Its also used to ignore calls to
+ // gc_make_long_lived where the pointer is already in the long lived section.
+ if (long_lived && ret_ptr < MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
+ MP_STATE_MEM(gc_lowest_long_lived_ptr) = ret_ptr;
+ }
+
+ #if MICROPY_GC_ALLOC_THRESHOLD
+ MP_STATE_MEM(gc_alloc_amount) += n_blocks;
+ #endif
+
+ GC_EXIT();
+
+ #if MICROPY_GC_CONSERVATIVE_CLEAR
+ // be conservative and zero out all the newly allocated blocks
+ memset((byte *)ret_ptr, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK);
+ #else
+ // zero out the additional bytes of the newly allocated blocks
+ // This is needed because the blocks may have previously held pointers
+ // to the heap and will not be set to something else if the caller
+ // doesn't actually use the entire block. As such they will continue
+ // to point to the heap and may prevent other blocks from being reclaimed.
+ memset((byte *)ret_ptr + n_bytes, 0, (end_block - start_block + 1) * BYTES_PER_BLOCK - n_bytes);
+ #endif
+
+ #if MICROPY_ENABLE_FINALISER
+ if (has_finaliser) {
+ // clear type pointer in case it is never set
+ ((mp_obj_base_t *)ret_ptr)->type = NULL;
+ // set mp_obj flag only if it has a finaliser
+ GC_ENTER();
+ FTB_SET(start_block);
+ GC_EXIT();
+ }
+ #else
+ (void)has_finaliser;
+ #endif
+
+ #if EXTENSIVE_HEAP_PROFILING
+ gc_dump_alloc_table();
+ #endif
+
+ #if CIRCUITPY_MEMORYMONITOR
+ memorymonitor_track_allocation(end_block - start_block + 1);
+ #endif
+
+ return ret_ptr;
+}
+
+/*
+void *gc_alloc(mp_uint_t n_bytes) {
+ return _gc_alloc(n_bytes, false);
+}
+
+void *gc_alloc_with_finaliser(mp_uint_t n_bytes) {
+ return _gc_alloc(n_bytes, true);
+}
+*/
+
+// force the freeing of a piece of memory
+// TODO: freeing here does not call finaliser
+// ptr must be NULL or the HEAD of an allocation (asserted). Frees the head
+// and all of its tail blocks and updates the first/last free ATB hints.
+// Silently does nothing while the GC is locked.
+void gc_free(void *ptr) {
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+ // TODO how to deal with this error?
+ return;
+ }
+
+ GC_ENTER();
+
+ DEBUG_printf("gc_free(%p)\n", ptr);
+
+ if (ptr == NULL) {
+ GC_EXIT();
+ } else {
+ if (MP_STATE_MEM(gc_pool_start) == 0) {
+ reset_into_safe_mode(GC_ALLOC_OUTSIDE_VM);
+ }
+ // get the GC block number corresponding to this pointer
+ assert(VERIFY_PTR(ptr));
+ size_t start_block = BLOCK_FROM_PTR(ptr);
+ assert(ATB_GET_KIND(start_block) == AT_HEAD);
+
+ #if MICROPY_ENABLE_FINALISER
+ // Drop the finaliser flag without running it (see TODO above).
+ FTB_CLEAR(start_block);
+ #endif
+
+ // free head and all of its tail blocks
+ #ifdef LOG_HEAP_ACTIVITY
+ gc_log_change(start_block, 0);
+ #endif
+ size_t block = start_block;
+ do {
+ ATB_ANY_TO_FREE(block);
+ block += 1;
+ } while (ATB_GET_KIND(block) == AT_TAIL);
+
+ // Update the first free pointer for our size only. Not much calls gc_free directly so there
+ // is decent chance we'll want to allocate this size again. By only updating the specific
+ // size we don't risk something smaller fitting in.
+ size_t n_blocks = block - start_block;
+ size_t bucket = MIN(n_blocks, MICROPY_ATB_INDICES) - 1;
+ size_t new_free_atb = start_block / BLOCKS_PER_ATB;
+ if (new_free_atb < MP_STATE_MEM(gc_first_free_atb_index)[bucket]) {
+ MP_STATE_MEM(gc_first_free_atb_index)[bucket] = new_free_atb;
+ }
+ // set the last_free pointer to this block if it's earlier in the heap
+ if (new_free_atb > MP_STATE_MEM(gc_last_free_atb_index)) {
+ MP_STATE_MEM(gc_last_free_atb_index) = new_free_atb;
+ }
+
+ GC_EXIT();
+
+ #if EXTENSIVE_HEAP_PROFILING
+ gc_dump_alloc_table();
+ #endif
+ }
+}
+
+// Return the allocated size of ptr in bytes (whole blocks, so >= the size
+// originally requested), or 0 if ptr is not the HEAD of a heap allocation.
+size_t gc_nbytes(const void *ptr) {
+ GC_ENTER();
+ if (VERIFY_PTR(ptr)) {
+ size_t block = BLOCK_FROM_PTR(ptr);
+ if (ATB_GET_KIND(block) == AT_HEAD) {
+ // work out number of consecutive blocks in the chain starting with this one
+ size_t n_blocks = 0;
+ do {
+ n_blocks += 1;
+ } while (ATB_GET_KIND(block + n_blocks) == AT_TAIL);
+ GC_EXIT();
+ return n_blocks * BYTES_PER_BLOCK;
+ }
+ }
+
+ // invalid pointer
+ GC_EXIT();
+ return 0;
+}
+
+// Return true if ptr is a heap pointer whose block has its FTB finaliser bit
+// set; always false when finalisers are compiled out.
+bool gc_has_finaliser(const void *ptr) {
+ #if MICROPY_ENABLE_FINALISER
+ GC_ENTER();
+ if (VERIFY_PTR(ptr)) {
+ bool has_finaliser = FTB_GET(BLOCK_FROM_PTR(ptr));
+ GC_EXIT();
+ return has_finaliser;
+ }
+
+ // invalid pointer
+ GC_EXIT();
+ #else
+ (void)ptr;
+ #endif
+ return false;
+}
+
+// Copy the allocation at old_ptr into the long-lived (top) section of the
+// heap and return the new pointer. Returns old_ptr unchanged when the move is
+// unnecessary (already long-lived, not a heap HEAD) or not beneficial (alloc
+// failed, or the new location is lower in the heap). The old copy is left for
+// the next collection to reclaim, so callers must update their references.
+void *gc_make_long_lived(void *old_ptr) {
+ // If its already in the long lived section then don't bother moving it.
+ if (old_ptr >= MP_STATE_MEM(gc_lowest_long_lived_ptr)) {
+ return old_ptr;
+ }
+ size_t n_bytes = gc_nbytes(old_ptr);
+ if (n_bytes == 0) {
+ return old_ptr;
+ }
+ bool has_finaliser = gc_has_finaliser(old_ptr);
+
+ // Try and find a new area in the long lived section to copy the memory to.
+ void *new_ptr = gc_alloc(n_bytes, has_finaliser, true);
+ if (new_ptr == NULL) {
+ return old_ptr;
+ } else if (old_ptr > new_ptr) {
+ // Return the old pointer if the new one is lower in the heap and free the new space.
+ gc_free(new_ptr);
+ return old_ptr;
+ }
+ // We copy everything over and let the garbage collection process delete the old copy. That way
+ // we ensure we don't delete memory that has a second reference. (Though if there is we may
+ // confuse things when its mutable.)
+ memcpy(new_ptr, old_ptr, n_bytes);
+ return new_ptr;
+}
+
+#if 0
+// old, simple realloc that didn't expand memory in place
+// DEAD CODE: compiled out by the #if 0; the active implementation follows in
+// the #else branch below. Kept only for reference.
+void *gc_realloc(void *ptr, mp_uint_t n_bytes) {
+ mp_uint_t n_existing = gc_nbytes(ptr);
+ if (n_bytes <= n_existing) {
+ return ptr;
+ } else {
+ bool has_finaliser;
+ if (ptr == NULL) {
+ has_finaliser = false;
+ } else {
+ #if MICROPY_ENABLE_FINALISER
+ has_finaliser = FTB_GET(BLOCK_FROM_PTR((mp_uint_t)ptr));
+ #else
+ has_finaliser = false;
+ #endif
+ }
+ void *ptr2 = gc_alloc(n_bytes, has_finaliser);
+ if (ptr2 == NULL) {
+ return ptr2;
+ }
+ memcpy(ptr2, ptr, n_existing);
+ gc_free(ptr);
+ return ptr2;
+ }
+}
+
+#else // Alternative gc_realloc impl
+
+// Resize the allocation at ptr_in to n_bytes. NULL ptr_in acts as gc_alloc;
+// n_bytes == 0 acts as gc_free. Shrinks and grows in place when possible;
+// otherwise, if allow_move, allocates a new chunk, copies the data across and
+// frees the old one. Returns the (possibly moved) pointer, or NULL when the
+// GC is locked, the in-place grow fails with allow_move false, or the moving
+// allocation fails (in which case ptr_in remains valid).
+void *gc_realloc(void *ptr_in, size_t n_bytes, bool allow_move) {
+ // check for pure allocation
+ if (ptr_in == NULL) {
+ return gc_alloc(n_bytes, false, false);
+ }
+
+ // check for pure free
+ if (n_bytes == 0) {
+ gc_free(ptr_in);
+ return NULL;
+ }
+
+ if (MP_STATE_THREAD(gc_lock_depth) > 0) {
+ return NULL;
+ }
+
+ void *ptr = ptr_in;
+
+ GC_ENTER();
+
+ // get the GC block number corresponding to this pointer
+ assert(VERIFY_PTR(ptr));
+ size_t block = BLOCK_FROM_PTR(ptr);
+ assert(ATB_GET_KIND(block) == AT_HEAD);
+
+ // compute number of new blocks that are requested
+ size_t new_blocks = (n_bytes + BYTES_PER_BLOCK - 1) / BYTES_PER_BLOCK;
+
+ // Get the total number of consecutive blocks that are already allocated to
+ // this chunk of memory, and then count the number of free blocks following
+ // it. Stop if we reach the end of the heap, or if we find enough extra
+ // free blocks to satisfy the realloc. Note that we need to compute the
+ // total size of the existing memory chunk so we can correctly and
+ // efficiently shrink it (see below for shrinking code).
+ size_t n_free = 0;
+ size_t n_blocks = 1; // counting HEAD block
+ size_t max_block = MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB;
+ for (size_t bl = block + n_blocks; bl < max_block; bl++) {
+ byte block_type = ATB_GET_KIND(bl);
+ if (block_type == AT_TAIL) {
+ n_blocks++;
+ continue;
+ }
+ if (block_type == AT_FREE) {
+ n_free++;
+ if (n_blocks + n_free >= new_blocks) {
+ // stop as soon as we find enough blocks for n_bytes
+ break;
+ }
+ continue;
+ }
+ break;
+ }
+
+ // return original ptr if it already has the requested number of blocks
+ if (new_blocks == n_blocks) {
+ GC_EXIT();
+ return ptr_in;
+ }
+
+ // check if we can shrink the allocated area
+ if (new_blocks < n_blocks) {
+ // free unneeded tail blocks
+ for (size_t bl = block + new_blocks, count = n_blocks - new_blocks; count > 0; bl++, count--) {
+ ATB_ANY_TO_FREE(bl);
+ }
+
+ // set the last_free pointer to end of this block if it's earlier in the heap
+ size_t new_free_atb = (block + new_blocks) / BLOCKS_PER_ATB;
+ size_t bucket = MIN(n_blocks - new_blocks, MICROPY_ATB_INDICES) - 1;
+ if (new_free_atb < MP_STATE_MEM(gc_first_free_atb_index)[bucket]) {
+ MP_STATE_MEM(gc_first_free_atb_index)[bucket] = new_free_atb;
+ }
+ if (new_free_atb > MP_STATE_MEM(gc_last_free_atb_index)) {
+ MP_STATE_MEM(gc_last_free_atb_index) = new_free_atb;
+ }
+
+ GC_EXIT();
+
+ #if EXTENSIVE_HEAP_PROFILING
+ gc_dump_alloc_table();
+ #endif
+
+ #ifdef LOG_HEAP_ACTIVITY
+ gc_log_change(block, new_blocks);
+ #endif
+
+ #if CIRCUITPY_MEMORYMONITOR
+ memorymonitor_track_allocation(new_blocks);
+ #endif
+
+ return ptr_in;
+ }
+
+ // check if we can expand in place
+ if (new_blocks <= n_blocks + n_free) {
+ // mark few more blocks as used tail
+ for (size_t bl = block + n_blocks; bl < block + new_blocks; bl++) {
+ assert(ATB_GET_KIND(bl) == AT_FREE);
+ ATB_FREE_TO_TAIL(bl);
+ }
+
+ GC_EXIT();
+
+ #if MICROPY_GC_CONSERVATIVE_CLEAR
+ // be conservative and zero out all the newly allocated blocks
+ memset((byte *)ptr_in + n_blocks * BYTES_PER_BLOCK, 0, (new_blocks - n_blocks) * BYTES_PER_BLOCK);
+ #else
+ // zero out the additional bytes of the newly allocated blocks (see comment above in gc_alloc)
+ memset((byte *)ptr_in + n_bytes, 0, new_blocks * BYTES_PER_BLOCK - n_bytes);
+ #endif
+
+ #if EXTENSIVE_HEAP_PROFILING
+ gc_dump_alloc_table();
+ #endif
+
+ #ifdef LOG_HEAP_ACTIVITY
+ gc_log_change(block, new_blocks);
+ #endif
+
+ #if CIRCUITPY_MEMORYMONITOR
+ memorymonitor_track_allocation(new_blocks);
+ #endif
+
+ return ptr_in;
+ }
+
+ // Remember the finaliser flag so a moved allocation keeps it.
+ #if MICROPY_ENABLE_FINALISER
+ bool ftb_state = FTB_GET(block);
+ #else
+ bool ftb_state = false;
+ #endif
+
+ GC_EXIT();
+
+ if (!allow_move) {
+ // not allowed to move memory block so return failure
+ return NULL;
+ }
+
+ // can't resize inplace; try to find a new contiguous chain
+ void *ptr_out = gc_alloc(n_bytes, ftb_state, false);
+
+ // check that the alloc succeeded
+ if (ptr_out == NULL) {
+ return NULL;
+ }
+
+ DEBUG_printf("gc_realloc(%p -> %p)\n", ptr_in, ptr_out);
+ memcpy(ptr_out, ptr_in, n_blocks * BYTES_PER_BLOCK);
+ gc_free(ptr_in);
+ return ptr_out;
+}
+#endif // Alternative gc_realloc impl
+
+// Record ptr so the GC never frees it: store it in the permanent_pointers
+// linked list, which gc_collect_start() marks as a root. Each list node is
+// one GC block; slot [0] links to the next node and slots [1..] hold pinned
+// pointers. Returns false if ptr is not a live heap HEAD pointer or a new
+// list node cannot be allocated.
+bool gc_never_free(void *ptr) {
+ // Check to make sure the pointer is on the heap in the first place.
+ if (gc_nbytes(ptr) == 0) {
+ return false;
+ }
+ // Pointers are stored in a linked list where each block is BYTES_PER_BLOCK long and the first
+ // pointer is the next block of pointers.
+ void **current_reference_block = MP_STATE_MEM(permanent_pointers);
+ void **last_reference_block = NULL;
+ while (current_reference_block != NULL) {
+ for (size_t i = 1; i < BYTES_PER_BLOCK / sizeof(void *); i++) {
+ if (current_reference_block[i] == NULL) {
+ current_reference_block[i] = ptr;
+ return true;
+ }
+ }
+ // Remember the tail node: the walk below ends with
+ // current_reference_block == NULL, so the new node must be linked onto
+ // the last node instead (previously this dereferenced NULL when every
+ // existing node was full).
+ last_reference_block = current_reference_block;
+ current_reference_block = current_reference_block[0];
+ }
+ // No free slot found (or no list yet): allocate a fresh long-lived node.
+ void **next_block = gc_alloc(BYTES_PER_BLOCK, false, true);
+ if (next_block == NULL) {
+ return false;
+ }
+ if (MP_STATE_MEM(permanent_pointers) == NULL) {
+ MP_STATE_MEM(permanent_pointers) = next_block;
+ } else {
+ last_reference_block[0] = next_block;
+ }
+ next_block[1] = ptr;
+ return true;
+}
+
+void gc_dump_info(void) {
+ gc_info_t info;
+ gc_info(&info);
+ mp_printf(&mp_plat_print, "GC: total: %u, used: %u, free: %u\n",
+ (uint)info.total, (uint)info.used, (uint)info.free);
+ mp_printf(&mp_plat_print, " No. of 1-blocks: %u, 2-blocks: %u, max blk sz: %u, max free sz: %u\n",
+ (uint)info.num_1block, (uint)info.num_2block, (uint)info.max_block, (uint)info.max_free);
+}
+
// Print an ASCII map of the heap allocation table to the platform output:
// one character per GC block ('.' free, '=' tail, 'm' marked, or a letter
// identifying the type of the object whose head is in that block). Runs of
// two or more all-free lines are abbreviated.
void gc_dump_alloc_table(void) {
    GC_ENTER();
    static const size_t DUMP_BYTES_PER_LINE = 64;
    #if !EXTENSIVE_HEAP_PROFILING
    // When comparing heap output we don't want to print the starting
    // pointer of the heap because it changes from run to run.
    mp_printf(&mp_plat_print, "GC memory layout; from %p:", MP_STATE_MEM(gc_pool_start));
    #endif
    for (size_t bl = 0; bl < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB; bl++) {
        if (bl % DUMP_BYTES_PER_LINE == 0) {
            // a new line of blocks
            {
                // check if this line contains only free blocks
                size_t bl2 = bl;
                while (bl2 < MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB && ATB_GET_KIND(bl2) == AT_FREE) {
                    bl2++;
                }
                if (bl2 - bl >= 2 * DUMP_BYTES_PER_LINE) {
                    // there are at least 2 lines containing only free blocks, so abbreviate their printing
                    mp_printf(&mp_plat_print, "\n (%u lines all free)", (uint)(bl2 - bl) / DUMP_BYTES_PER_LINE);
                    // round bl2 down to a line boundary and resume printing there
                    bl = bl2 & (~(DUMP_BYTES_PER_LINE - 1));
                    if (bl >= MP_STATE_MEM(gc_alloc_table_byte_len) * BLOCKS_PER_ATB) {
                        // got to end of heap
                        break;
                    }
                }
            }
            // print header for new line of blocks
            // (the cast to uint32_t is for 16-bit ports)
            // mp_printf(&mp_plat_print, "\n%05x: ", (uint)(PTR_FROM_BLOCK(bl) & (uint32_t)0xfffff));
            mp_printf(&mp_plat_print, "\n%05x: ", (uint)((bl * BYTES_PER_BLOCK) & (uint32_t)0xfffff));
        }
        int c = ' ';
        switch (ATB_GET_KIND(bl)) {
            case AT_FREE:
                c = '.';
                break;
            /* this prints out if the object is reachable from BSS or STACK (for unix only)
            case AT_HEAD: {
                c = 'h';
                void **ptrs = (void**)(void*)&mp_state_ctx;
                mp_uint_t len = offsetof(mp_state_ctx_t, vm.stack_top) / sizeof(mp_uint_t);
                for (mp_uint_t i = 0; i < len; i++) {
                    mp_uint_t ptr = (mp_uint_t)ptrs[i];
                    if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                        c = 'B';
                        break;
                    }
                }
                if (c == 'h') {
                    ptrs = (void**)&c;
                    len = ((mp_uint_t)MP_STATE_THREAD(stack_top) - (mp_uint_t)&c) / sizeof(mp_uint_t);
                    for (mp_uint_t i = 0; i < len; i++) {
                        mp_uint_t ptr = (mp_uint_t)ptrs[i];
                        if (VERIFY_PTR(ptr) && BLOCK_FROM_PTR(ptr) == bl) {
                            c = 'S';
                            break;
                        }
                    }
                }
                break;
            }
            */
            /* this prints the uPy object type of the head block */
            case AT_HEAD: {
                // The first machine word of a head block is the object's type
                // pointer; compare it against well-known types to pick a letter.
                #pragma GCC diagnostic push
                #pragma GCC diagnostic ignored "-Wcast-align"
                void **ptr = (void **)(MP_STATE_MEM(gc_pool_start) + bl * BYTES_PER_BLOCK);
                #pragma GCC diagnostic pop
                if (*ptr == &mp_type_tuple) {
                    c = 'T';
                } else if (*ptr == &mp_type_list) {
                    c = 'L';
                } else if (*ptr == &mp_type_dict) {
                    c = 'D';
                } else if (*ptr == &mp_type_str || *ptr == &mp_type_bytes) {
                    c = 'S';
                }
                #if MICROPY_PY_BUILTINS_BYTEARRAY
                else if (*ptr == &mp_type_bytearray) {
                    c = 'A';
                }
                #endif
                #if MICROPY_PY_ARRAY
                else if (*ptr == &mp_type_array) {
                    c = 'A';
                }
                #endif
                #if MICROPY_PY_BUILTINS_FLOAT
                else if (*ptr == &mp_type_float) {
                    c = 'F';
                }
                #endif
                else if (*ptr == &mp_type_fun_bc) {
                    c = 'B';
                } else if (*ptr == &mp_type_module) {
                    c = 'M';
                } else {
                    c = 'h';
                    #if 0
                    // This code prints "Q" for qstr-pool data, and "q" for qstr-str
                    // data. It can be useful to see how qstrs are being allocated,
                    // but is disabled by default because it is very slow.
                    for (qstr_pool_t *pool = MP_STATE_VM(last_pool); c == 'h' && pool != NULL; pool = pool->prev) {
                        if ((qstr_pool_t *)ptr == pool) {
                            c = 'Q';
                            break;
                        }
                        for (const byte **q = pool->qstrs, **q_top = pool->qstrs + pool->len; q < q_top; q++) {
                            if ((const byte *)ptr == *q) {
                                c = 'q';
                                break;
                            }
                        }
                    }
                    #endif
                }
                break;
            }
            case AT_TAIL:
                c = '=';
                break;
            case AT_MARK:
                c = 'm';
                break;
        }
        mp_printf(&mp_plat_print, "%c", c);
    }
    mp_print_str(&mp_plat_print, "\n");
    GC_EXIT();
}
+
#if 0
// For testing the GC functions
void gc_test(void) {
    mp_uint_t len = 500;
    mp_uint_t *heap = malloc(len);
    // len is in bytes; the end pointer is computed in mp_uint_t units so the
    // heap spans exactly len bytes.
    gc_init(heap, heap + len / sizeof(mp_uint_t));
    void *ptrs[100];
    {
        // Build a small object graph that is reachable from ptrs[0].
        // NOTE(review): these gc_alloc calls use a 2-argument form; gc_alloc
        // in this file takes (n_bytes, alloc_flags, long_lived) -- update the
        // calls before enabling this test code.
        mp_uint_t **p = gc_alloc(16, false);
        p[0] = gc_alloc(64, false);
        p[1] = gc_alloc(1, false);
        p[2] = gc_alloc(1, false);
        p[3] = gc_alloc(1, false);
        mp_uint_t ***p2 = gc_alloc(16, false);
        p2[0] = p;
        p2[1] = p;
        ptrs[0] = p2;
    }
    // Allocate objects of assorted sizes that are NOT kept reachable, so the
    // collection below should reclaim them.
    for (int i = 0; i < 25; i += 2) {
        mp_uint_t *p = gc_alloc(i, false);
        printf("p=%p\n", p);
        if (i & 3) {
            // ptrs[i] = p;
        }
    }

    printf("Before GC:\n");
    gc_dump_alloc_table();
    printf("Starting GC...\n");
    gc_collect_start();
    gc_collect_root(ptrs, sizeof(ptrs) / sizeof(void *));
    gc_collect_end();
    printf("After GC:\n");
    gc_dump_alloc_table();
}
#endif
+
+#endif // MICROPY_ENABLE_GC
diff --git a/circuitpython/py/gc.h b/circuitpython/py/gc.h
new file mode 100644
index 0000000..b9036ff
--- /dev/null
+++ b/circuitpython/py/gc.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef MICROPY_INCLUDED_PY_GC_H
#define MICROPY_INCLUDED_PY_GC_H

#include <stdint.h>

#include "py/mpconfig.h"
#include "py/mpstate.h"
#include "py/misc.h"

// Size of one GC block, in machine words and in bytes.
#define WORDS_PER_BLOCK ((MICROPY_BYTES_PER_GC_BLOCK) / MP_BYTES_PER_OBJ_WORD)
#define BYTES_PER_BLOCK (MICROPY_BYTES_PER_GC_BLOCK)

// True if ptr lies within the GC pool (and the heap has been initialised).
#define HEAP_PTR(ptr) ( \
    MP_STATE_MEM(gc_pool_start) != 0 /* Not on the heap if it isn't inited */ \
    && ptr >= (void *)MP_STATE_MEM(gc_pool_start) /* must be above start of pool */ \
    && ptr < (void *)MP_STATE_MEM(gc_pool_end) /* must be below end of pool */ \
    )

// ptr should be of type void*
// True if ptr is block-aligned and inside the GC pool, i.e. could be the
// start of a GC-allocated object.
#define VERIFY_PTR(ptr) ( \
    ((uintptr_t)(ptr) & (BYTES_PER_BLOCK - 1)) == 0 /* must be aligned on a block */ \
    && HEAP_PTR(ptr) \
    )

// Set up / tear down the GC heap over the memory region [start, end).
void gc_init(void *start, void *end);
void gc_deinit(void);

// These lock/unlock functions can be nested.
// They can be used to prevent the GC from allocating/freeing.
void gc_lock(void);
void gc_unlock(void);
bool gc_is_locked(void);

// A given port must implement gc_collect by using the other collect functions.
void gc_collect(void);
void gc_collect_start(void);
void gc_collect_ptr(void *ptr);
void gc_collect_root(void **ptrs, size_t len);
void gc_collect_end(void);

// Is the gc heap available?
bool gc_alloc_possible(void);

// Use this function to sweep the whole heap and run all finalisers
void gc_sweep_all(void);

enum {
    GC_ALLOC_FLAG_HAS_FINALISER = 1,
};

// Allocate n_bytes from the GC heap. alloc_flags is a bitwise-or of the
// GC_ALLOC_FLAG_* values above; long_lived hints that the allocation will
// live a long time (see gc_long_lived.c). Returns NULL on failure.
void *gc_alloc(size_t n_bytes, unsigned int alloc_flags, bool long_lived);
void gc_free(void *ptr); // does not call finaliser
size_t gc_nbytes(const void *ptr); // returns 0 if ptr is not on the GC heap
bool gc_has_finaliser(const void *ptr);
void *gc_make_long_lived(void *old_ptr);
void *gc_realloc(void *ptr, size_t n_bytes, bool allow_move);

// Prevents a pointer from ever being freed because it establishes a permanent reference to it. Use
// very sparingly because it can leak memory.
bool gc_never_free(void *ptr);

// Heap statistics, as filled in by gc_info().
typedef struct _gc_info_t {
    size_t total;
    size_t used;
    size_t free;
    size_t max_free;
    size_t num_1block;
    size_t num_2block;
    size_t max_block;
} gc_info_t;

void gc_info(gc_info_t *info);
void gc_dump_info(void);
void gc_dump_alloc_table(void);

#endif // MICROPY_INCLUDED_PY_GC_H
diff --git a/circuitpython/py/gc_long_lived.c b/circuitpython/py/gc_long_lived.c
new file mode 100644
index 0000000..647c4f7
--- /dev/null
+++ b/circuitpython/py/gc_long_lived.c
@@ -0,0 +1,149 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/emitglue.h"
+#include "py/gc_long_lived.h"
+#include "py/gc.h"
+#include "py/mpstate.h"
+
+mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth) {
+ #ifndef MICROPY_ENABLE_GC
+ return fun_bc;
+ #endif
+ if (fun_bc == NULL || MP_OBJ_FROM_PTR(fun_bc) == mp_const_none || max_depth == 0) {
+ return fun_bc;
+ }
+ fun_bc->bytecode = gc_make_long_lived((byte *)fun_bc->bytecode);
+ fun_bc->globals = make_dict_long_lived(fun_bc->globals, max_depth - 1);
+ for (uint32_t i = 0; i < gc_nbytes(fun_bc->const_table) / sizeof(mp_obj_t); i++) {
+ // Skip things that aren't allocated on the heap (and hence have zero bytes.)
+ if (gc_nbytes(MP_OBJ_TO_PTR(fun_bc->const_table[i])) == 0) {
+ continue;
+ }
+ // Try to detect raw code.
+ mp_raw_code_t *raw_code = MP_OBJ_TO_PTR(fun_bc->const_table[i]);
+ if (raw_code->kind == MP_CODE_BYTECODE) {
+ raw_code->fun_data = gc_make_long_lived((byte *)raw_code->fun_data);
+ raw_code->const_table = gc_make_long_lived((byte *)raw_code->const_table);
+ }
+ ((mp_uint_t *)fun_bc->const_table)[i] = (mp_uint_t)make_obj_long_lived(
+ (mp_obj_t)fun_bc->const_table[i], max_depth - 1);
+
+ }
+ fun_bc->const_table = gc_make_long_lived((mp_uint_t *)fun_bc->const_table);
+ // extra_args stores keyword only argument default values.
+ size_t words = gc_nbytes(fun_bc) / sizeof(mp_uint_t *);
+ // Functions (mp_obj_fun_bc_t) have four pointers (base, globals, bytecode and const_table)
+ // before the variable length extra_args so remove them from the length.
+ for (size_t i = 0; i < words - 4; i++) {
+ if (MP_OBJ_TO_PTR(fun_bc->extra_args[i]) == NULL) {
+ continue;
+ }
+ if (mp_obj_is_type(fun_bc->extra_args[i], &mp_type_dict)) {
+ fun_bc->extra_args[i] = MP_OBJ_FROM_PTR(make_dict_long_lived(MP_OBJ_TO_PTR(fun_bc->extra_args[i]), max_depth - 1));
+ } else {
+ fun_bc->extra_args[i] = make_obj_long_lived(fun_bc->extra_args[i], max_depth - 1);
+ }
+
+ }
+ return gc_make_long_lived(fun_bc);
+}
+
+mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth) {
+ #ifndef MICROPY_ENABLE_GC
+ return prop;
+ #endif
+ if (max_depth == 0) {
+ return prop;
+ }
+ prop->proxy[0] = make_obj_long_lived(prop->proxy[0], max_depth - 1);
+ prop->proxy[1] = make_obj_long_lived(prop->proxy[1], max_depth - 1);
+ prop->proxy[2] = make_obj_long_lived(prop->proxy[2], max_depth - 1);
+ return gc_make_long_lived(prop);
+}
+
+mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth) {
+ #ifndef MICROPY_ENABLE_GC
+ return dict;
+ #endif
+ if (dict == NULL || max_depth == 0 || dict == &MP_STATE_VM(dict_main) || dict->map.is_fixed) {
+ return dict;
+ }
+ // Don't recurse unnecessarily. Return immediately if we've already seen this dict.
+ if (dict->map.scanning) {
+ return dict;
+ }
+ // Mark that we're processing this dict.
+ dict->map.scanning = 1;
+
+ // Update all of the references first so that we reduce the chance of references to the old
+ // copies.
+ dict->map.table = gc_make_long_lived(dict->map.table);
+ for (size_t i = 0; i < dict->map.alloc; i++) {
+ if (mp_map_slot_is_filled(&dict->map, i)) {
+ mp_obj_t value = dict->map.table[i].value;
+ dict->map.table[i].value = make_obj_long_lived(value, max_depth - 1);
+ }
+ }
+ dict = gc_make_long_lived(dict);
+ // Done recursing through this dict.
+ dict->map.scanning = 0;
+ return dict;
+}
+
+mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str) {
+ str->data = gc_make_long_lived((byte *)str->data);
+ return gc_make_long_lived(str);
+}
+
+mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth) {
+ #ifndef MICROPY_ENABLE_GC
+ return obj;
+ #endif
+ if (MP_OBJ_TO_PTR(obj) == NULL) {
+ return obj;
+ }
+ // If not in the GC pool, do nothing. This can happen (at least) when
+ // there are frozen mp_type_bytes objects in ROM.
+ if (!VERIFY_PTR((void *)obj)) {
+ return obj;
+ }
+ if (mp_obj_is_type(obj, &mp_type_fun_bc)) {
+ mp_obj_fun_bc_t *fun_bc = MP_OBJ_TO_PTR(obj);
+ return MP_OBJ_FROM_PTR(make_fun_bc_long_lived(fun_bc, max_depth));
+ } else if (mp_obj_is_type(obj, &mp_type_property)) {
+ mp_obj_property_t *prop = MP_OBJ_TO_PTR(obj);
+ return MP_OBJ_FROM_PTR(make_property_long_lived(prop, max_depth));
+ } else if (mp_obj_is_type(obj, &mp_type_str) || mp_obj_is_type(obj, &mp_type_bytes)) {
+ mp_obj_str_t *str = MP_OBJ_TO_PTR(obj);
+ return MP_OBJ_FROM_PTR(make_str_long_lived(str));
+ } else if (mp_obj_is_type(obj, &mp_type_type)) {
+ // Types are already long lived during creation.
+ return obj;
+ } else {
+ return MP_OBJ_FROM_PTR(gc_make_long_lived(MP_OBJ_TO_PTR(obj)));
+ }
+}
diff --git a/circuitpython/py/gc_long_lived.h b/circuitpython/py/gc_long_lived.h
new file mode 100644
index 0000000..229bc73
--- /dev/null
+++ b/circuitpython/py/gc_long_lived.h
@@ -0,0 +1,43 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2018 Scott Shawcroft for Adafruit Industries LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// These helpers move MicroPython objects and their sub-objects to the long lived portion of the
+// heap.
+
#ifndef MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
#define MICROPY_INCLUDED_PY_GC_LONG_LIVED_H

#include "py/objfun.h"
#include "py/objproperty.h"
#include "py/objstr.h"

// Each function moves the given object -- and, recursively, the objects it
// references -- to the long lived portion of the heap and returns the new
// location. max_depth bounds the recursion through contained objects.
mp_obj_fun_bc_t *make_fun_bc_long_lived(mp_obj_fun_bc_t *fun_bc, uint8_t max_depth);
mp_obj_property_t *make_property_long_lived(mp_obj_property_t *prop, uint8_t max_depth);
mp_obj_dict_t *make_dict_long_lived(mp_obj_dict_t *dict, uint8_t max_depth);
mp_obj_str_t *make_str_long_lived(mp_obj_str_t *str);
mp_obj_t make_obj_long_lived(mp_obj_t obj, uint8_t max_depth);

#endif // MICROPY_INCLUDED_PY_GC_LONG_LIVED_H
diff --git a/circuitpython/py/genlast.py b/circuitpython/py/genlast.py
new file mode 100644
index 0000000..ad44745
--- /dev/null
+++ b/circuitpython/py/genlast.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+
+import sys
+import re
+import os
+import itertools
+from concurrent.futures import ProcessPoolExecutor
+import multiprocessing
+import threading
+import subprocess
+
+from makeqstrdefs import qstr_unescape, QSTRING_BLOCK_LIST
+
+re_line = re.compile(r"#[line]*\s(\d+)\s\"([^\"]+)\"", re.DOTALL)
+re_qstr = re.compile(r"MP_QSTR_[_a-zA-Z0-9]+", re.DOTALL)
+re_translate = re.compile(r"translate\(\"((?:(?=(\\?))\2.)*?)\"\)", re.DOTALL)
+
+
def write_out(fname, output_dir, output):
    """Write the collected qstr/translate lines to <output_dir>/<mangled>.qstr.

    Does nothing when output is empty. Characters that are unsafe in a flat
    filename (path separators, colons, '..') are mangled so each source file
    maps to a unique output file.
    """
    if not output:
        return
    mangled = fname
    for old, new in (("/", "__"), ("\\", "__"), (":", "@"), ("..", "@@")):
        mangled = mangled.replace(old, new)
    with open(output_dir + "/" + mangled + ".qstr", "w") as f:
        f.write("\n".join(output) + "\n")
+
+
def process_file(fname, output_dir, content):
    """Scan preprocessed source bytes for MP_QSTR_* identifiers and
    translate("...") calls, then write the Q()/TRANSLATE() lines out."""
    text = content.decode("utf-8", errors="ignore")
    lines = []
    for qstr in re_qstr.findall(text):
        ident = qstr.replace("MP_QSTR_", "")
        if ident not in QSTRING_BLOCK_LIST:
            lines.append("Q(" + qstr_unescape(ident) + ")")
    for groups in re_translate.findall(text):
        lines.append('TRANSLATE("' + groups[0] + '")')
    write_out(fname, output_dir, lines)
+
+
def checkoutput1(args):
    """Run args with empty stdin and return its captured stdout as bytes.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    return subprocess.run(args, check=True, stdout=subprocess.PIPE, input="").stdout
+
+
def preprocess(command, output_dir, fn):
    """Preprocess fn with the given command and extract its qstrs.

    Any failure is reported on stderr and re-raised so the caller (or the
    process pool) still sees the error.
    """
    try:
        process_file(fn, output_dir, checkoutput1(command + [fn]))
    except Exception as exc:
        print(exc, file=sys.stderr)
        raise
+
+
def maybe_preprocess(command, output_dir, fn):
    """Preprocess fn only if it can contain qstrs: i.e. it mentions MP_QSTR,
    translate, or enum.h (which generates "MP_QSTR" strings)."""
    grep_args = ["grep", "-lqE", r"(MP_QSTR|translate|enum\.h)", fn]
    if subprocess.call(grep_args) == 0:
        preprocess(command, output_dir, fn)
+
+
if __name__ == "__main__":

    # Command line layout:
    #   genlast.py <output_dir> <check files...> -- <always files...> -- <preprocess command...>
    idx1 = sys.argv.index("--")
    idx2 = sys.argv.index("--", idx1 + 1)
    output_dir = sys.argv[1]
    check = sys.argv[2:idx1]  # preprocessed only if grep finds qstr markers
    always = sys.argv[idx1 + 1 : idx2]  # always preprocessed
    command = sys.argv[idx2 + 1 :]  # preprocessor command prefix; filename is appended

    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # Mac and Windows use 'spawn'. Uncomment this during testing to catch spawn-specific problems on Linux.
    # multiprocessing.set_start_method("spawn")
    executor = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count() + 1)
    results = []
    try:
        # executor.map returns a lazy iterator; extending into a list forces
        # evaluation here and re-raises any exception from a worker.
        results.extend(
            executor.map(
                maybe_preprocess, itertools.repeat(command), itertools.repeat(output_dir), check
            )
        )
        results.extend(
            executor.map(
                preprocess, itertools.repeat(command), itertools.repeat(output_dir), always
            )
        )
    except subprocess.CalledProcessError:
        raise SystemExit(1)
    executor.shutdown()
diff --git a/circuitpython/py/grammar.h b/circuitpython/py/grammar.h
new file mode 100644
index 0000000..3b4ceb8
--- /dev/null
+++ b/circuitpython/py/grammar.h
@@ -0,0 +1,372 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// *FORMAT-OFF*
+
+// rules for writing rules:
+// - zero_or_more is implemented using opt_rule around a one_or_more rule
+// - don't put opt_rule in arguments of or rule; instead, wrap the call to this or rule in opt_rule
+
+// Generic sub-rules used by multiple rules below.
+
+DEF_RULE_NC(generic_colon_test, and_ident(2), tok(DEL_COLON), rule(test))
+DEF_RULE_NC(generic_equal_test, and_ident(2), tok(DEL_EQUAL), rule(test))
+
+// # Start symbols for the grammar:
+// # single_input is a single interactive statement;
+// # file_input is a module or sequence of commands read from an input file;
+// # eval_input is the input for the eval() functions.
+// # NB: compound_stmt in single_input is followed by extra NEWLINE! --> not in MicroPython
+// single_input: NEWLINE | simple_stmt | compound_stmt
+// file_input: (NEWLINE | stmt)* ENDMARKER
+// eval_input: testlist NEWLINE* ENDMARKER
+
+DEF_RULE_NC(single_input, or(3), tok(NEWLINE), rule(simple_stmt), rule(compound_stmt))
+DEF_RULE(file_input, c(generic_all_nodes), and_ident(1), opt_rule(file_input_2))
+DEF_RULE(file_input_2, c(generic_all_nodes), one_or_more, rule(file_input_3))
+DEF_RULE_NC(file_input_3, or(2), tok(NEWLINE), rule(stmt))
+DEF_RULE_NC(eval_input, and_ident(2), rule(testlist), opt_rule(eval_input_2))
+DEF_RULE_NC(eval_input_2, and(1), tok(NEWLINE))
+
+// decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+// decorators: decorator+
+// decorated: decorators (classdef | funcdef | async_funcdef)
+// funcdef: 'def' NAME parameters ['->' test] ':' suite
+// async_funcdef: 'async' funcdef
+// parameters: '(' [typedargslist] ')'
+// typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* [',' ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef
+// tfpdef: NAME [':' test]
+// varargslist: vfpdef ['=' test] (',' vfpdef ['=' test])* [',' ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef
+// vfpdef: NAME
+
+DEF_RULE_NC(decorator, and(4), tok(OP_AT), rule(dotted_name), opt_rule(trailer_paren), tok(NEWLINE))
+DEF_RULE_NC(decorators, one_or_more, rule(decorator))
+DEF_RULE(decorated, c(decorated), and_ident(2), rule(decorators), rule(decorated_body))
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(decorated_body, or(3), rule(classdef), rule(funcdef), rule(async_funcdef))
+DEF_RULE_NC(async_funcdef, and(2), tok(KW_ASYNC), rule(funcdef))
+#else
+DEF_RULE_NC(decorated_body, or(2), rule(classdef), rule(funcdef))
+#endif
+DEF_RULE(funcdef, c(funcdef), and_blank(8), tok(KW_DEF), tok(NAME), tok(DEL_PAREN_OPEN), opt_rule(typedargslist), tok(DEL_PAREN_CLOSE), opt_rule(funcdefrettype), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(funcdefrettype, and_ident(2), tok(DEL_MINUS_MORE), rule(test))
+// note: typedargslist lets through more than is allowed, compiler does further checks
+DEF_RULE_NC(typedargslist, list_with_end, rule(typedargslist_item), tok(DEL_COMMA))
+DEF_RULE_NC(typedargslist_item, or(3), rule(typedargslist_name), rule(typedargslist_star), rule(typedargslist_dbl_star))
+DEF_RULE_NC(typedargslist_name, and_ident(3), tok(NAME), opt_rule(generic_colon_test), opt_rule(generic_equal_test))
+DEF_RULE_NC(typedargslist_star, and(2), tok(OP_STAR), opt_rule(tfpdef))
+DEF_RULE_NC(typedargslist_dbl_star, and(3), tok(OP_DBL_STAR), tok(NAME), opt_rule(generic_colon_test))
+DEF_RULE_NC(tfpdef, and(2), tok(NAME), opt_rule(generic_colon_test))
+// note: varargslist lets through more than is allowed, compiler does further checks
+DEF_RULE_NC(varargslist, list_with_end, rule(varargslist_item), tok(DEL_COMMA))
+DEF_RULE_NC(varargslist_item, or(3), rule(varargslist_name), rule(varargslist_star), rule(varargslist_dbl_star))
+DEF_RULE_NC(varargslist_name, and_ident(2), tok(NAME), opt_rule(generic_equal_test))
+DEF_RULE_NC(varargslist_star, and(2), tok(OP_STAR), opt_rule(vfpdef))
+DEF_RULE_NC(varargslist_dbl_star, and(2), tok(OP_DBL_STAR), tok(NAME))
+DEF_RULE_NC(vfpdef, and_ident(1), tok(NAME))
+
+// stmt: compound_stmt | simple_stmt
+
+DEF_RULE_NC(stmt, or(2), rule(compound_stmt), rule(simple_stmt))
+
+// simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+
+DEF_RULE_NC(simple_stmt, and_ident(2), rule(simple_stmt_2), tok(NEWLINE))
+DEF_RULE(simple_stmt_2, c(generic_all_nodes), list_with_end, rule(small_stmt), tok(DEL_SEMICOLON))
+
+// small_stmt: expr_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt
+// expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | ('=' (yield_expr|testlist_star_expr))*)
+// testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+// annassign: ':' test ['=' (yield_expr|testlist_star_expr)]
+// augassign: '+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | '>>=' | '**=' | '//='
+// # For normal and annotated assignments, additional restrictions enforced by the interpreter
+
+DEF_RULE_NC(small_stmt, or(8), rule(del_stmt), rule(pass_stmt), rule(flow_stmt), rule(import_stmt), rule(global_stmt), rule(nonlocal_stmt), rule(assert_stmt), rule(expr_stmt))
+DEF_RULE(expr_stmt, c(expr_stmt), and(2), rule(testlist_star_expr), opt_rule(expr_stmt_2))
+DEF_RULE_NC(expr_stmt_2, or(3), rule(annassign), rule(expr_stmt_augassign), rule(expr_stmt_assign_list))
+DEF_RULE_NC(expr_stmt_augassign, and_ident(2), rule(augassign), rule(expr_stmt_6))
+DEF_RULE_NC(expr_stmt_assign_list, one_or_more, rule(expr_stmt_assign))
+DEF_RULE_NC(expr_stmt_assign, and_ident(2), tok(DEL_EQUAL), rule(expr_stmt_6))
+DEF_RULE_NC(expr_stmt_6, or(2), rule(yield_expr), rule(testlist_star_expr))
+DEF_RULE(testlist_star_expr, c(generic_tuple), list_with_end, rule(testlist_star_expr_2), tok(DEL_COMMA))
+DEF_RULE_NC(testlist_star_expr_2, or(2), rule(star_expr), rule(test))
+DEF_RULE_NC(annassign, and(3), tok(DEL_COLON), rule(test), opt_rule(expr_stmt_assign))
+DEF_RULE_NC(augassign, or(13), tok(DEL_PLUS_EQUAL), tok(DEL_MINUS_EQUAL), tok(DEL_STAR_EQUAL), tok(DEL_AT_EQUAL), tok(DEL_SLASH_EQUAL), tok(DEL_PERCENT_EQUAL), tok(DEL_AMPERSAND_EQUAL), tok(DEL_PIPE_EQUAL), tok(DEL_CARET_EQUAL), tok(DEL_DBL_LESS_EQUAL), tok(DEL_DBL_MORE_EQUAL), tok(DEL_DBL_STAR_EQUAL), tok(DEL_DBL_SLASH_EQUAL))
+
+// del_stmt: 'del' exprlist
+// pass_stmt: 'pass'
+// flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+// break_stmt: 'break'
+// continue_stmt: 'continue'
+// return_stmt: 'return' [testlist]
+// yield_stmt: yield_expr
+// raise_stmt: 'raise' [test ['from' test]]
+
+DEF_RULE(del_stmt, c(del_stmt), and(2), tok(KW_DEL), rule(exprlist))
+DEF_RULE(pass_stmt, c(generic_all_nodes), and(1), tok(KW_PASS))
+DEF_RULE_NC(flow_stmt, or(5), rule(break_stmt), rule(continue_stmt), rule(return_stmt), rule(raise_stmt), rule(yield_stmt))
+DEF_RULE(break_stmt, c(break_cont_stmt), and(1), tok(KW_BREAK))
+DEF_RULE(continue_stmt, c(break_cont_stmt), and(1), tok(KW_CONTINUE))
+DEF_RULE(return_stmt, c(return_stmt), and(2), tok(KW_RETURN), opt_rule(testlist))
+DEF_RULE(yield_stmt, c(yield_stmt), and(1), rule(yield_expr))
+DEF_RULE(raise_stmt, c(raise_stmt), and(2), tok(KW_RAISE), opt_rule(raise_stmt_arg))
+DEF_RULE_NC(raise_stmt_arg, and_ident(2), rule(test), opt_rule(raise_stmt_from))
+DEF_RULE_NC(raise_stmt_from, and_ident(2), tok(KW_FROM), rule(test))
+
+// import_stmt: import_name | import_from
+// import_name: 'import' dotted_as_names
+// import_from: 'from' (('.' | '...')* dotted_name | ('.' | '...')+) 'import' ('*' | '(' import_as_names ')' | import_as_names)
+// import_as_name: NAME ['as' NAME]
+// dotted_as_name: dotted_name ['as' NAME]
+// import_as_names: import_as_name (',' import_as_name)* [',']
+// dotted_as_names: dotted_as_name (',' dotted_as_name)*
+// dotted_name: NAME ('.' NAME)*
+// global_stmt: 'global' NAME (',' NAME)*
+// nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
+// assert_stmt: 'assert' test [',' test]
+
+DEF_RULE_NC(import_stmt, or(2), rule(import_name), rule(import_from))
+DEF_RULE(import_name, c(import_name), and(2), tok(KW_IMPORT), rule(dotted_as_names))
+DEF_RULE(import_from, c(import_from), and(4), tok(KW_FROM), rule(import_from_2), tok(KW_IMPORT), rule(import_from_3))
+DEF_RULE_NC(import_from_2, or(2), rule(dotted_name), rule(import_from_2b))
+DEF_RULE_NC(import_from_2b, and_ident(2), rule(one_or_more_period_or_ellipsis), opt_rule(dotted_name))
+DEF_RULE_NC(import_from_3, or(3), tok(OP_STAR), rule(import_as_names_paren), rule(import_as_names))
+DEF_RULE_NC(import_as_names_paren, and_ident(3), tok(DEL_PAREN_OPEN), rule(import_as_names), tok(DEL_PAREN_CLOSE))
+DEF_RULE_NC(one_or_more_period_or_ellipsis, one_or_more, rule(period_or_ellipsis))
+DEF_RULE_NC(period_or_ellipsis, or(2), tok(DEL_PERIOD), tok(ELLIPSIS))
+DEF_RULE_NC(import_as_name, and(2), tok(NAME), opt_rule(as_name))
+DEF_RULE_NC(dotted_as_name, and_ident(2), rule(dotted_name), opt_rule(as_name))
+DEF_RULE_NC(as_name, and_ident(2), tok(KW_AS), tok(NAME))
+DEF_RULE_NC(import_as_names, list_with_end, rule(import_as_name), tok(DEL_COMMA))
+DEF_RULE_NC(dotted_as_names, list, rule(dotted_as_name), tok(DEL_COMMA))
+DEF_RULE_NC(dotted_name, list, tok(NAME), tok(DEL_PERIOD))
+DEF_RULE(global_stmt, c(global_nonlocal_stmt), and(2), tok(KW_GLOBAL), rule(name_list))
+DEF_RULE(nonlocal_stmt, c(global_nonlocal_stmt), and(2), tok(KW_NONLOCAL), rule(name_list))
+DEF_RULE_NC(name_list, list, tok(NAME), tok(DEL_COMMA))
+DEF_RULE(assert_stmt, c(assert_stmt), and(3), tok(KW_ASSERT), rule(test), opt_rule(assert_stmt_extra))
+DEF_RULE_NC(assert_stmt_extra, and_ident(2), tok(DEL_COMMA), rule(test))
+
+// compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
+// if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+// while_stmt: 'while' test ':' suite ['else' ':' suite]
+// for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+// try_stmt: 'try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite)
+// # NB compile.c makes sure that the default except clause is last
+// except_clause: 'except' [test ['as' NAME]]
+// with_stmt: 'with' with_item (',' with_item)* ':' suite
+// with_item: test ['as' expr]
+// suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+// async_stmt: 'async' (funcdef | with_stmt | for_stmt)
+
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(compound_stmt, or(9), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated), rule(async_stmt))
+DEF_RULE(async_stmt, c(async_stmt), and(2), tok(KW_ASYNC), rule(async_stmt_2))
+DEF_RULE_NC(async_stmt_2, or(3), rule(funcdef), rule(with_stmt), rule(for_stmt))
+#else
+DEF_RULE_NC(compound_stmt, or(8), rule(if_stmt), rule(while_stmt), rule(for_stmt), rule(try_stmt), rule(with_stmt), rule(funcdef), rule(classdef), rule(decorated))
+#endif
+DEF_RULE(if_stmt, c(if_stmt), and(6), tok(KW_IF), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(if_stmt_elif_list), opt_rule(else_stmt))
+DEF_RULE_NC(if_stmt_elif_list, one_or_more, rule(if_stmt_elif))
+DEF_RULE_NC(if_stmt_elif, and(4), tok(KW_ELIF), rule(namedexpr_test), tok(DEL_COLON), rule(suite))
+DEF_RULE(while_stmt, c(while_stmt), and(5), tok(KW_WHILE), rule(namedexpr_test), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(for_stmt, c(for_stmt), and(7), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(testlist), tok(DEL_COLON), rule(suite), opt_rule(else_stmt))
+DEF_RULE(try_stmt, c(try_stmt), and(4), tok(KW_TRY), tok(DEL_COLON), rule(suite), rule(try_stmt_2))
+DEF_RULE_NC(try_stmt_2, or(2), rule(try_stmt_except_and_more), rule(try_stmt_finally))
+DEF_RULE_NC(try_stmt_except_and_more, and_ident(3), rule(try_stmt_except_list), opt_rule(else_stmt), opt_rule(try_stmt_finally))
+DEF_RULE_NC(try_stmt_except, and(4), tok(KW_EXCEPT), opt_rule(try_stmt_as_name), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(try_stmt_as_name, and_ident(2), rule(test), opt_rule(as_name))
+DEF_RULE_NC(try_stmt_except_list, one_or_more, rule(try_stmt_except))
+DEF_RULE_NC(try_stmt_finally, and(3), tok(KW_FINALLY), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(else_stmt, and_ident(3), tok(KW_ELSE), tok(DEL_COLON), rule(suite))
+DEF_RULE(with_stmt, c(with_stmt), and(4), tok(KW_WITH), rule(with_stmt_list), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(with_stmt_list, list, rule(with_item), tok(DEL_COMMA))
+DEF_RULE_NC(with_item, and_ident(2), rule(test), opt_rule(with_item_as))
+DEF_RULE_NC(with_item_as, and_ident(2), tok(KW_AS), rule(expr))
+DEF_RULE_NC(suite, or(2), rule(suite_block), rule(simple_stmt))
+DEF_RULE_NC(suite_block, and_ident(4), tok(NEWLINE), tok(INDENT), rule(suite_block_stmts), tok(DEDENT))
+DEF_RULE(suite_block_stmts, c(generic_all_nodes), one_or_more, rule(stmt))
+
+// test: or_test ['if' or_test 'else' test] | lambdef
+// test_nocond: or_test | lambdef_nocond
+// lambdef: 'lambda' [varargslist] ':' test
+// lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
+
+#if MICROPY_PY_ASSIGN_EXPR
+DEF_RULE(namedexpr_test, c(namedexpr), and_ident(2), rule(test), opt_rule(namedexpr_test_2))
+DEF_RULE_NC(namedexpr_test_2, and_ident(2), tok(OP_ASSIGN), rule(test))
+#else
+DEF_RULE_NC(namedexpr_test, or(1), rule(test))
+#endif
+DEF_RULE_NC(test, or(2), rule(lambdef), rule(test_if_expr))
+DEF_RULE(test_if_expr, c(test_if_expr), and_ident(2), rule(or_test), opt_rule(test_if_else))
+DEF_RULE_NC(test_if_else, and(4), tok(KW_IF), rule(or_test), tok(KW_ELSE), rule(test))
+DEF_RULE_NC(test_nocond, or(2), rule(lambdef_nocond), rule(or_test))
+DEF_RULE(lambdef, c(lambdef), and_blank(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test))
+DEF_RULE(lambdef_nocond, c(lambdef), and_blank(4), tok(KW_LAMBDA), opt_rule(varargslist), tok(DEL_COLON), rule(test_nocond))
+
+// or_test: and_test ('or' and_test)*
+// and_test: not_test ('and' not_test)*
+// not_test: 'not' not_test | comparison
+// comparison: expr (comp_op expr)*
+// comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+// star_expr: '*' expr
+// expr: xor_expr ('|' xor_expr)*
+// xor_expr: and_expr ('^' and_expr)*
+// and_expr: shift_expr ('&' shift_expr)*
+// shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+// arith_expr: term (('+'|'-') term)*
+// term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+// factor: ('+'|'-'|'~') factor | power
+// power: atom_expr ['**' factor]
+// atom_expr: 'await' atom trailer* | atom trailer*
+
+DEF_RULE(or_test, c(or_and_test), list, rule(and_test), tok(KW_OR))
+DEF_RULE(and_test, c(or_and_test), list, rule(not_test), tok(KW_AND))
+DEF_RULE_NC(not_test, or(2), rule(not_test_2), rule(comparison))
+DEF_RULE(not_test_2, c(not_test_2), and(2), tok(KW_NOT), rule(not_test))
+DEF_RULE(comparison, c(comparison), list, rule(expr), rule(comp_op))
+DEF_RULE_NC(comp_op, or(9), tok(OP_LESS), tok(OP_MORE), tok(OP_DBL_EQUAL), tok(OP_LESS_EQUAL), tok(OP_MORE_EQUAL), tok(OP_NOT_EQUAL), tok(KW_IN), rule(comp_op_not_in), rule(comp_op_is))
+DEF_RULE_NC(comp_op_not_in, and(2), tok(KW_NOT), tok(KW_IN))
+DEF_RULE_NC(comp_op_is, and(2), tok(KW_IS), opt_rule(comp_op_is_not))
+DEF_RULE_NC(comp_op_is_not, and(1), tok(KW_NOT))
+DEF_RULE(star_expr, c(star_expr), and(2), tok(OP_STAR), rule(expr))
+DEF_RULE(expr, c(binary_op), list, rule(xor_expr), tok(OP_PIPE))
+DEF_RULE(xor_expr, c(binary_op), list, rule(and_expr), tok(OP_CARET))
+DEF_RULE(and_expr, c(binary_op), list, rule(shift_expr), tok(OP_AMPERSAND))
+DEF_RULE(shift_expr, c(term), list, rule(arith_expr), rule(shift_op))
+DEF_RULE_NC(shift_op, or(2), tok(OP_DBL_LESS), tok(OP_DBL_MORE))
+DEF_RULE(arith_expr, c(term), list, rule(term), rule(arith_op))
+DEF_RULE_NC(arith_op, or(2), tok(OP_PLUS), tok(OP_MINUS))
+DEF_RULE(term, c(term), list, rule(factor), rule(term_op))
+DEF_RULE_NC(term_op, or(5), tok(OP_STAR), tok(OP_AT), tok(OP_SLASH), tok(OP_PERCENT), tok(OP_DBL_SLASH))
+DEF_RULE_NC(factor, or(2), rule(factor_2), rule(power))
+DEF_RULE(factor_2, c(factor_2), and_ident(2), rule(factor_op), rule(factor))
+DEF_RULE_NC(factor_op, or(3), tok(OP_PLUS), tok(OP_MINUS), tok(OP_TILDE))
+DEF_RULE(power, c(power), and_ident(2), rule(atom_expr), opt_rule(power_dbl_star))
+#if MICROPY_PY_ASYNC_AWAIT
+DEF_RULE_NC(atom_expr, or(2), rule(atom_expr_await), rule(atom_expr_normal))
+DEF_RULE(atom_expr_await, c(atom_expr_await), and(3), tok(KW_AWAIT), rule(atom), opt_rule(atom_expr_trailers))
+#else
+DEF_RULE_NC(atom_expr, or(1), rule(atom_expr_normal))
+#endif
+DEF_RULE(atom_expr_normal, c(atom_expr_normal), and_ident(2), rule(atom), opt_rule(atom_expr_trailers))
+DEF_RULE_NC(atom_expr_trailers, one_or_more, rule(trailer))
+DEF_RULE_NC(power_dbl_star, and_ident(2), tok(OP_DBL_STAR), rule(factor))
+
+// atom: '(' [yield_expr|testlist_comp] ')' | '[' [testlist_comp] ']' | '{' [dictorsetmaker] '}' | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False'
+// testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+// trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+
+DEF_RULE_NC(atom, or(12), tok(NAME), tok(INTEGER), tok(FLOAT_OR_IMAG), tok(STRING), tok(BYTES), tok(ELLIPSIS), tok(KW_NONE), tok(KW_TRUE), tok(KW_FALSE), rule(atom_paren), rule(atom_bracket), rule(atom_brace))
+DEF_RULE(atom_paren, c(atom_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(atom_2b), tok(DEL_PAREN_CLOSE))
+DEF_RULE_NC(atom_2b, or(2), rule(yield_expr), rule(testlist_comp))
+DEF_RULE(atom_bracket, c(atom_bracket), and(3), tok(DEL_BRACKET_OPEN), opt_rule(testlist_comp), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(atom_brace, c(atom_brace), and(3), tok(DEL_BRACE_OPEN), opt_rule(dictorsetmaker), tok(DEL_BRACE_CLOSE))
+DEF_RULE_NC(testlist_comp, and_ident(2), rule(testlist_comp_2), opt_rule(testlist_comp_3))
+DEF_RULE_NC(testlist_comp_2, or(2), rule(star_expr), rule(namedexpr_test))
+DEF_RULE_NC(testlist_comp_3, or(2), rule(comp_for), rule(testlist_comp_3b))
+DEF_RULE_NC(testlist_comp_3b, and_ident(2), tok(DEL_COMMA), opt_rule(testlist_comp_3c))
+DEF_RULE_NC(testlist_comp_3c, list_with_end, rule(testlist_comp_2), tok(DEL_COMMA))
+DEF_RULE_NC(trailer, or(3), rule(trailer_paren), rule(trailer_bracket), rule(trailer_period))
+DEF_RULE(trailer_paren, c(trailer_paren), and(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+DEF_RULE(trailer_bracket, c(trailer_bracket), and(3), tok(DEL_BRACKET_OPEN), rule(subscriptlist), tok(DEL_BRACKET_CLOSE))
+DEF_RULE(trailer_period, c(trailer_period), and(2), tok(DEL_PERIOD), tok(NAME))
+
+// subscriptlist: subscript (',' subscript)* [',']
+// subscript: test | [test] ':' [test] [sliceop]
+// sliceop: ':' [test]
+
+#if MICROPY_PY_BUILTINS_SLICE
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(subscript), tok(DEL_COMMA))
+DEF_RULE_NC(subscript, or(2), rule(subscript_3), rule(subscript_2))
+DEF_RULE(subscript_2, c(subscript), and_ident(2), rule(test), opt_rule(subscript_3))
+DEF_RULE(subscript_3, c(subscript), and(2), tok(DEL_COLON), opt_rule(subscript_3b))
+DEF_RULE_NC(subscript_3b, or(2), rule(subscript_3c), rule(subscript_3d))
+DEF_RULE_NC(subscript_3c, and(2), tok(DEL_COLON), opt_rule(test))
+DEF_RULE_NC(subscript_3d, and_ident(2), rule(test), opt_rule(sliceop))
+DEF_RULE_NC(sliceop, and(2), tok(DEL_COLON), opt_rule(test))
+#else
+DEF_RULE(subscriptlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+#endif
+
+// exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+// testlist: test (',' test)* [',']
+// dictorsetmaker: (test ':' test (comp_for | (',' test ':' test)* [','])) | (test (comp_for | (',' test)* [',']))
+
+DEF_RULE_NC(exprlist, list_with_end, rule(exprlist_2), tok(DEL_COMMA))
+DEF_RULE_NC(exprlist_2, or(2), rule(star_expr), rule(expr))
+DEF_RULE(testlist, c(generic_tuple), list_with_end, rule(test), tok(DEL_COMMA))
+// TODO dictorsetmaker lets through more than is allowed
+DEF_RULE_NC(dictorsetmaker, and_ident(2), rule(dictorsetmaker_item), opt_rule(dictorsetmaker_tail))
+#if MICROPY_PY_BUILTINS_SET
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and_ident(2), rule(test), opt_rule(generic_colon_test))
+#else
+DEF_RULE(dictorsetmaker_item, c(dictorsetmaker_item), and(3), rule(test), tok(DEL_COLON), rule(test))
+#endif
+DEF_RULE_NC(dictorsetmaker_tail, or(2), rule(comp_for), rule(dictorsetmaker_list))
+DEF_RULE_NC(dictorsetmaker_list, and(2), tok(DEL_COMMA), opt_rule(dictorsetmaker_list2))
+DEF_RULE_NC(dictorsetmaker_list2, list_with_end, rule(dictorsetmaker_item), tok(DEL_COMMA))
+
+// classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+DEF_RULE(classdef, c(classdef), and_blank(5), tok(KW_CLASS), tok(NAME), opt_rule(classdef_2), tok(DEL_COLON), rule(suite))
+DEF_RULE_NC(classdef_2, and_ident(3), tok(DEL_PAREN_OPEN), opt_rule(arglist), tok(DEL_PAREN_CLOSE))
+
+// arglist: (argument ',')* (argument [','] | '*' test (',' argument)* [',' '**' test] | '**' test)
+
+// TODO arglist lets through more than is allowed, compiler needs to do further verification
+DEF_RULE_NC(arglist, list_with_end, rule(arglist_2), tok(DEL_COMMA))
+DEF_RULE_NC(arglist_2, or(3), rule(arglist_star), rule(arglist_dbl_star), rule(argument))
+DEF_RULE_NC(arglist_star, and(2), tok(OP_STAR), rule(test))
+DEF_RULE_NC(arglist_dbl_star, and(2), tok(OP_DBL_STAR), rule(test))
+
+// # The reason that keywords are test nodes instead of NAME is that using NAME
+// # results in an ambiguity. ast.c makes sure it's a NAME.
+// argument: test [comp_for] | test '=' test # Really [keyword '='] test
+// comp_iter: comp_for | comp_if
+// comp_for: 'for' exprlist 'in' or_test [comp_iter]
+// comp_if: 'if' test_nocond [comp_iter]
+
+DEF_RULE_NC(argument, and_ident(2), rule(test), opt_rule(argument_2))
+#if MICROPY_PY_ASSIGN_EXPR
+DEF_RULE_NC(argument_2, or(3), rule(comp_for), rule(generic_equal_test), rule(argument_3))
+DEF_RULE_NC(argument_3, and(2), tok(OP_ASSIGN), rule(test))
+#else
+DEF_RULE_NC(argument_2, or(2), rule(comp_for), rule(generic_equal_test))
+#endif
+DEF_RULE_NC(comp_iter, or(2), rule(comp_for), rule(comp_if))
+DEF_RULE_NC(comp_for, and_blank(5), tok(KW_FOR), rule(exprlist), tok(KW_IN), rule(or_test), opt_rule(comp_iter))
+DEF_RULE_NC(comp_if, and(3), tok(KW_IF), rule(test_nocond), opt_rule(comp_iter))
+
+// # not used in grammar, but may appear in "node" passed from Parser to Compiler
+// encoding_decl: NAME
+
+// yield_expr: 'yield' [yield_arg]
+// yield_arg: 'from' test | testlist
+
+DEF_RULE(yield_expr, c(yield_expr), and(2), tok(KW_YIELD), opt_rule(yield_arg))
+DEF_RULE_NC(yield_arg, or(2), rule(yield_arg_from), rule(testlist))
+DEF_RULE_NC(yield_arg_from, and(2), tok(KW_FROM), rule(test))
diff --git a/circuitpython/py/ioctl.h b/circuitpython/py/ioctl.h
new file mode 100644
index 0000000..8c84835
--- /dev/null
+++ b/circuitpython/py/ioctl.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
#ifndef MICROPY_INCLUDED_PY_IOCTL_H
#define MICROPY_INCLUDED_PY_IOCTL_H

// ioctl request code used to poll a stream object for readiness.
#define MP_IOCTL_POLL (0x100 | 1)

// These values are compatible with Linux, which are in turn
// compatible with iBCS2 spec.
#define MP_IOCTL_POLL_RD (0x0001) // readable
#define MP_IOCTL_POLL_WR (0x0004) // writable
#define MP_IOCTL_POLL_ERR (0x0008) // error condition
#define MP_IOCTL_POLL_HUP (0x0010) // hung up / disconnected

#endif // MICROPY_INCLUDED_PY_IOCTL_H
diff --git a/circuitpython/py/lexer.c b/circuitpython/py/lexer.c
new file mode 100644
index 0000000..196f9a2
--- /dev/null
+++ b/circuitpython/py/lexer.c
@@ -0,0 +1,921 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/reader.h"
+#include "py/lexer.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define TAB_SIZE (8)
+
+// TODO seems that CPython allows NULL byte in the input stream
+// don't know if that's intentional or not, but we don't allow it
+
+#define MP_LEXER_EOF ((unichar)MP_READER_EOF)
+#define CUR_CHAR(lex) ((lex)->chr0)
+
// Predicates over the lexer's 3-character lookahead window:
// chr0 is the current character, chr1 and chr2 are lookahead.

STATIC bool is_end(mp_lexer_t *lex) {
    // true once the input (reader) is exhausted
    return lex->chr0 == MP_LEXER_EOF;
}

STATIC bool is_physical_newline(mp_lexer_t *lex) {
    // next_char() has already normalised CR/CRLF to a single '\n'
    return lex->chr0 == '\n';
}

STATIC bool is_char(mp_lexer_t *lex, byte c) {
    return lex->chr0 == c;
}

STATIC bool is_char_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr0 == c1 || lex->chr0 == c2;
}

STATIC bool is_char_or3(mp_lexer_t *lex, byte c1, byte c2, byte c3) {
    return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3;
}

#if MICROPY_PY_FSTRINGS
STATIC bool is_char_or4(mp_lexer_t *lex, byte c1, byte c2, byte c3, byte c4) {
    return lex->chr0 == c1 || lex->chr0 == c2 || lex->chr0 == c3 || lex->chr0 == c4;
}
#endif

// Lookahead variants: test chr1 (next char) and chr2 (char after next).
STATIC bool is_char_following(mp_lexer_t *lex, byte c) {
    return lex->chr1 == c;
}

STATIC bool is_char_following_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr1 == c1 || lex->chr1 == c2;
}

STATIC bool is_char_following_following_or(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr2 == c1 || lex->chr2 == c2;
}

// True when the current two characters are exactly c1 then c2.
STATIC bool is_char_and(mp_lexer_t *lex, byte c1, byte c2) {
    return lex->chr0 == c1 && lex->chr1 == c2;
}
+
// Character-class predicates on the current character (chr0) / lookahead (chr1).

STATIC bool is_whitespace(mp_lexer_t *lex) {
    return unichar_isspace(lex->chr0);
}

STATIC bool is_letter(mp_lexer_t *lex) {
    return unichar_isalpha(lex->chr0);
}

STATIC bool is_digit(mp_lexer_t *lex) {
    return unichar_isdigit(lex->chr0);
}

STATIC bool is_following_digit(mp_lexer_t *lex) {
    return unichar_isdigit(lex->chr1);
}

// True when the next char is a numeric base marker (b/o/x, either case);
// the | 0x20 folds ASCII letters to lower case.
STATIC bool is_following_base_char(mp_lexer_t *lex) {
    const unichar chr1 = lex->chr1 | 0x20;
    return chr1 == 'b' || chr1 == 'o' || chr1 == 'x';
}

STATIC bool is_following_odigit(mp_lexer_t *lex) {
    return lex->chr1 >= '0' && lex->chr1 <= '7';
}

// Detect the start of a string or bytes literal: a bare quote, a single
// r/u/b (plus f when fstrings are enabled) prefix followed by a quote, or a
// two-letter rb/br (plus rf/fr) prefix followed by a quote.
STATIC bool is_string_or_bytes(mp_lexer_t *lex) {
    return is_char_or(lex, '\'', '\"')
           #if MICROPY_PY_FSTRINGS
           || (is_char_or4(lex, 'r', 'u', 'b', 'f') && is_char_following_or(lex, '\'', '\"'))
           || (((is_char_and(lex, 'r', 'f') || is_char_and(lex, 'f', 'r'))
                && is_char_following_following_or(lex, '\'', '\"')))
           #else
           || (is_char_or3(lex, 'r', 'u', 'b') && is_char_following_or(lex, '\'', '\"'))
           #endif
           || ((is_char_and(lex, 'r', 'b') || is_char_and(lex, 'b', 'r'))
               && is_char_following_following_or(lex, '\'', '\"'));
}

// to easily parse utf-8 identifiers we allow any raw byte with high bit set
STATIC bool is_head_of_identifier(mp_lexer_t *lex) {
    return is_letter(lex) || lex->chr0 == '_' || lex->chr0 >= 0x80;
}

STATIC bool is_tail_of_identifier(mp_lexer_t *lex) {
    return is_head_of_identifier(lex) || is_digit(lex);
}
+
// Advance the lexer by one character: account for the consumed character in
// the line/column bookkeeping, shift the 3-char lookahead window, and pull
// the next raw byte — from the injected fstring-args buffer when active,
// otherwise from the reader. Also normalises CR and CRLF to a single LF and
// guarantees the input ends with a newline at EOF.
STATIC void next_char(mp_lexer_t *lex) {
    if (lex->chr0 == '\n') {
        // a new line
        ++lex->line;
        lex->column = 1;
    } else if (lex->chr0 == '\t') {
        // a tab: advance to the next TAB_SIZE column boundary (1-based columns)
        lex->column = (((lex->column - 1 + TAB_SIZE) / TAB_SIZE) * TAB_SIZE) + 1;
    } else {
        // a character worth one column
        ++lex->column;
    }

    // shift the input queue forward
    lex->chr0 = lex->chr1;
    lex->chr1 = lex->chr2;

    // and add the next byte from either the fstring args or the reader
    #if MICROPY_PY_FSTRINGS
    if (lex->fstring_args_idx) {
        // if there are saved chars, then we're currently injecting fstring args
        if (lex->fstring_args_idx < lex->fstring_args.len) {
            lex->chr2 = lex->fstring_args.buf[lex->fstring_args_idx++];
        } else {
            // no more fstring arg bytes; '\0' marks the end of injected data
            lex->chr2 = '\0';
        }

        if (lex->chr0 == '\0') {
            // consumed all fstring data, restore saved input queue
            lex->chr0 = lex->chr0_saved;
            lex->chr1 = lex->chr1_saved;
            lex->chr2 = lex->chr2_saved;
            // stop consuming fstring arg data
            vstr_reset(&lex->fstring_args);
            lex->fstring_args_idx = 0;
        }
    } else
    #endif
    {
        lex->chr2 = lex->reader.readbyte(lex->reader.data);
    }

    if (lex->chr1 == '\r') {
        // CR is a new line, converted to LF
        lex->chr1 = '\n';
        if (lex->chr2 == '\n') {
            // CR LF is a single new line, throw out the extra LF
            // NOTE(review): this refill always comes from the reader, even while
            // fstring args are being injected — presumably CRLF cannot occur in
            // the generated args buffer; confirm.
            lex->chr2 = lex->reader.readbyte(lex->reader.data);
        }
    }

    // check if we need to insert a newline at end of file
    if (lex->chr2 == MP_LEXER_EOF && lex->chr1 != MP_LEXER_EOF && lex->chr1 != '\n') {
        lex->chr2 = '\n';
    }
}
+
// Push a new indentation level onto the lexer's indent stack, growing the
// backing array in MICROPY_ALLOC_LEXEL_INDENT_INC steps when full.
STATIC void indent_push(mp_lexer_t *lex, size_t indent) {
    if (lex->num_indent_level >= lex->alloc_indent_level) {
        lex->indent_level = m_renew(uint16_t, lex->indent_level, lex->alloc_indent_level, lex->alloc_indent_level + MICROPY_ALLOC_LEXEL_INDENT_INC);
        lex->alloc_indent_level += MICROPY_ALLOC_LEXEL_INDENT_INC;
    }
    // NOTE(review): entries are uint16_t, so an indent wider than 65535 columns
    // would be silently truncated here — presumably never reached in practice.
    lex->indent_level[lex->num_indent_level++] = indent;
}

// Current (innermost) indentation level.
STATIC size_t indent_top(mp_lexer_t *lex) {
    return lex->indent_level[lex->num_indent_level - 1];
}

// Drop the innermost indentation level (when emitting a DEDENT).
STATIC void indent_pop(mp_lexer_t *lex) {
    lex->num_indent_level -= 1;
}
+
// some tricky operator encoding:
// <op> = begin with <op>, if this opchar matches then begin here
// e<op> = end with <op>, if this opchar matches then end
// c<op> = continue with <op>, if this opchar matches then continue matching
// this means if the start of two ops are the same then they are equal til the last char
// Each encoded (sub-)operator corresponds positionally to an entry in
// tok_enc_kind below (mp_lexer_to_next walks both in lockstep), so the two
// tables must be kept in sync.

STATIC const char *const tok_enc =
    "()[]{},;~" // singles
    ":e=" // : :=
    "<e=c<e=" // < <= << <<=
    ">e=c>e=" // > >= >> >>=
    "*e=c*e=" // * *= ** **=
    "+e=" // + +=
    "-e=e>" // - -= ->
    "&e=" // & &=
    "|e=" // | |=
    "/e=c/e=" // / /= // //=
    "%e=" // % %=
    "^e=" // ^ ^=
    "@e=" // @ @=
    "=e=" // = ==
    "!."; // start of special cases: != . ...
+
// One token kind per operator encoded in tok_enc above, in the same order.
// TODO static assert that number of tokens is less than 256 so we can safely make this table with byte sized entries
STATIC const uint8_t tok_enc_kind[] = {
    MP_TOKEN_DEL_PAREN_OPEN, MP_TOKEN_DEL_PAREN_CLOSE,
    MP_TOKEN_DEL_BRACKET_OPEN, MP_TOKEN_DEL_BRACKET_CLOSE,
    MP_TOKEN_DEL_BRACE_OPEN, MP_TOKEN_DEL_BRACE_CLOSE,
    MP_TOKEN_DEL_COMMA, MP_TOKEN_DEL_SEMICOLON, MP_TOKEN_OP_TILDE,

    MP_TOKEN_DEL_COLON, MP_TOKEN_OP_ASSIGN,
    MP_TOKEN_OP_LESS, MP_TOKEN_OP_LESS_EQUAL, MP_TOKEN_OP_DBL_LESS, MP_TOKEN_DEL_DBL_LESS_EQUAL,
    MP_TOKEN_OP_MORE, MP_TOKEN_OP_MORE_EQUAL, MP_TOKEN_OP_DBL_MORE, MP_TOKEN_DEL_DBL_MORE_EQUAL,
    MP_TOKEN_OP_STAR, MP_TOKEN_DEL_STAR_EQUAL, MP_TOKEN_OP_DBL_STAR, MP_TOKEN_DEL_DBL_STAR_EQUAL,
    MP_TOKEN_OP_PLUS, MP_TOKEN_DEL_PLUS_EQUAL,
    MP_TOKEN_OP_MINUS, MP_TOKEN_DEL_MINUS_EQUAL, MP_TOKEN_DEL_MINUS_MORE,
    MP_TOKEN_OP_AMPERSAND, MP_TOKEN_DEL_AMPERSAND_EQUAL,
    MP_TOKEN_OP_PIPE, MP_TOKEN_DEL_PIPE_EQUAL,
    MP_TOKEN_OP_SLASH, MP_TOKEN_DEL_SLASH_EQUAL, MP_TOKEN_OP_DBL_SLASH, MP_TOKEN_DEL_DBL_SLASH_EQUAL,
    MP_TOKEN_OP_PERCENT, MP_TOKEN_DEL_PERCENT_EQUAL,
    MP_TOKEN_OP_CARET, MP_TOKEN_DEL_CARET_EQUAL,
    MP_TOKEN_OP_AT, MP_TOKEN_DEL_AT_EQUAL,
    MP_TOKEN_DEL_EQUAL, MP_TOKEN_OP_DBL_EQUAL,
};
+
// must have the same order as enum in lexer.h
// must be sorted according to strcmp
// (index i of this table maps to token kind MP_TOKEN_KW_FALSE + i; the sort
// order lets mp_lexer_to_next stop searching early on a less-than compare)
STATIC const char *const tok_kw[] = {
    "False",
    "None",
    "True",
    "__debug__",
    "and",
    "as",
    "assert",
    #if MICROPY_PY_ASYNC_AWAIT
    "async",
    "await",
    #endif
    "break",
    "class",
    "continue",
    "def",
    "del",
    "elif",
    "else",
    "except",
    "finally",
    "for",
    "from",
    "global",
    "if",
    "import",
    "in",
    "is",
    "lambda",
    "nonlocal",
    "not",
    "or",
    "pass",
    "raise",
    "return",
    "try",
    "while",
    "with",
    "yield",
};
+
+// This is called with CUR_CHAR() before first hex digit, and should return with
+// it pointing to last hex digit
+// num_digits must be greater than zero
+STATIC bool get_hex(mp_lexer_t *lex, size_t num_digits, mp_uint_t *result) {
+ mp_uint_t num = 0;
+ while (num_digits-- != 0) {
+ next_char(lex);
+ unichar c = CUR_CHAR(lex);
+ if (!unichar_isxdigit(c)) {
+ return false;
+ }
+ num = (num << 4) + unichar_xdigit_value(c);
+ }
+ *result = num;
+ return true;
+}
+
// Tokenise a string or bytes literal (possibly raw, f-string and/or
// triple-quoted).  On entry CUR_CHAR(lex) is the opening quote character;
// on exit the closing quotes have been consumed and the literal body has
// been accumulated in lex->vstr (8-bit clean, so embedded utf-8 passes
// through unchanged).  Sets tok_kind to MP_TOKEN_LONELY_STRING_OPEN if the
// closing quotes are missing, or MP_TOKEN_INVALID on a bad escape sequence.
STATIC void parse_string_literal(mp_lexer_t *lex, bool is_raw, bool is_fstring) {
    // get first quoting character
    char quote_char = '\'';
    if (is_char(lex, '\"')) {
        quote_char = '\"';
    }
    next_char(lex);

    // work out if it's a single or triple quoted literal
    size_t num_quotes;
    if (is_char_and(lex, quote_char, quote_char)) {
        // triple quotes
        next_char(lex);
        next_char(lex);
        num_quotes = 3;
    } else {
        // single quotes
        num_quotes = 1;
    }

    size_t n_closing = 0;
    #if MICROPY_PY_FSTRINGS
    if (is_fstring) {
        // assume there's going to be interpolation, so prep the injection data
        // fstring_args_idx==0 && len(fstring_args)>0 means we're extracting the args.
        // only when fstring_args_idx>0 will we consume the arg data
        // note: lex->fstring_args will be empty already (it's reset when finished)
        vstr_add_str(&lex->fstring_args, ".format(");
    }
    #endif

    // accumulate characters until we've seen num_quotes closing quotes in a row
    while (!is_end(lex) && (num_quotes > 1 || !is_char(lex, '\n')) && n_closing < num_quotes) {
        if (is_char(lex, quote_char)) {
            n_closing += 1;
            vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        } else {
            n_closing = 0;

            #if MICROPY_PY_FSTRINGS
            while (is_fstring && is_char(lex, '{')) {
                next_char(lex);
                if (is_char(lex, '{')) {
                    // "{{" is passed through unchanged to be handled by str.format
                    vstr_add_byte(&lex->vstr, '{');
                    next_char(lex);
                } else {
                    // remember the start of this argument (if we need it for f'{a=}').
                    size_t i = lex->fstring_args.len;
                    // extract characters inside the { until we reach the
                    // format specifier or closing }.
                    // (MicroPython limitation) note: this is completely unaware of
                    // Python syntax and will not handle any expression containing '}' or ':'.
                    // e.g. f'{"}"}' or f'{foo({})}'.
                    unsigned int nested_bracket_level = 0;
                    while (!is_end(lex) && (nested_bracket_level != 0 || !is_char_or(lex, ':', '}'))) {
                        unichar c = CUR_CHAR(lex);
                        if (c == '[' || c == '{') {
                            nested_bracket_level += 1;
                        } else if (c == ']' || c == '}') {
                            nested_bracket_level -= 1;
                        }
                        // like the default case at the end of this function, stay 8-bit clean
                        vstr_add_byte(&lex->fstring_args, c);
                        next_char(lex);
                    }
                    if (lex->fstring_args.buf[lex->fstring_args.len - 1] == '=') {
                        // if the last character of the arg was '=', then inject "arg=" before the '{'.
                        // f'{a=}' --> 'a={}'.format(a)
                        vstr_add_strn(&lex->vstr, lex->fstring_args.buf + i, lex->fstring_args.len - i);
                        // remove the trailing '='
                        lex->fstring_args.len--;
                    }
                    // comma-separate args
                    vstr_add_byte(&lex->fstring_args, ',');
                }
                vstr_add_byte(&lex->vstr, '{');
            }
            #endif

            if (is_char(lex, '\\')) {
                next_char(lex);
                unichar c = CUR_CHAR(lex);

                if (is_raw) {
                    // raw strings allow escaping of quotes, but the backslash is also emitted
                    vstr_add_char(&lex->vstr, '\\');
                } else {
                    switch (c) {
                        // note: "c" can never be MP_LEXER_EOF because next_char
                        // always inserts a newline at the end of the input stream
                        case '\n':
                            c = MP_LEXER_EOF;
                            break; // backslash escape the newline, just ignore it
                        case '\\':
                            break;
                        case '\'':
                            break;
                        case '"':
                            break;
                        case 'a':
                            c = 0x07;
                            break;
                        case 'b':
                            c = 0x08;
                            break;
                        case 't':
                            c = 0x09;
                            break;
                        case 'n':
                            c = 0x0a;
                            break;
                        case 'v':
                            c = 0x0b;
                            break;
                        case 'f':
                            c = 0x0c;
                            break;
                        case 'r':
                            c = 0x0d;
                            break;
                        case 'u':
                        case 'U':
                            if (lex->tok_kind == MP_TOKEN_BYTES) {
                                // b'\u1234' == b'\\u1234'
                                vstr_add_char(&lex->vstr, '\\');
                                break;
                            }
                            // Otherwise fall through.
                            MP_FALLTHROUGH
                        case 'x': {
                            // \xHH, \uHHHH or \UHHHHHHHH
                            mp_uint_t num = 0;
                            if (!get_hex(lex, (c == 'x' ? 2 : c == 'u' ? 4 : 8), &num)) {
                                // not enough hex chars for escape sequence
                                lex->tok_kind = MP_TOKEN_INVALID;
                            }
                            c = num;
                            break;
                        }
                        case 'N':
                            // Supporting '\N{LATIN SMALL LETTER A}' == 'a' would require keeping the
                            // entire Unicode name table in the core. As of Unicode 6.3.0, that's nearly
                            // 3MB of text; even gzip-compressed and with minimal structure, it'll take
                            // roughly half a meg of storage. This form of Unicode escape may be added
                            // later on, but it's definitely not a priority right now. -- CJA 20140607
                            mp_raise_NotImplementedError(MP_ERROR_TEXT("unicode name escapes"));
                            break;
                        default:
                            if (c >= '0' && c <= '7') {
                                // Octal sequence, 1-3 chars
                                size_t digits = 3;
                                mp_uint_t num = c - '0';
                                while (is_following_odigit(lex) && --digits != 0) {
                                    next_char(lex);
                                    num = num * 8 + (CUR_CHAR(lex) - '0');
                                }
                                c = num;
                            } else {
                                // unrecognised escape character; CPython lets this through verbatim as '\' and then the character
                                vstr_add_char(&lex->vstr, '\\');
                            }
                            break;
                    }
                }
                if (c != MP_LEXER_EOF) {
                    if (MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) {
                        if (c < 0x110000 && lex->tok_kind == MP_TOKEN_STRING) {
                            vstr_add_char(&lex->vstr, c);
                        } else if (c < 0x100 && lex->tok_kind == MP_TOKEN_BYTES) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // unicode character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    } else {
                        // without unicode everything is just added as an 8-bit byte
                        if (c < 0x100) {
                            vstr_add_byte(&lex->vstr, c);
                        } else {
                            // 8-bit character out of range
                            // this raises a generic SyntaxError; could provide more info
                            lex->tok_kind = MP_TOKEN_INVALID;
                        }
                    }
                }
            } else {
                // Add the "character" as a byte so that we remain 8-bit clean.
                // This way, strings are parsed correctly whether or not they contain utf-8 chars.
                vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            }
        }
        next_char(lex);
    }

    // check we got the required end quotes
    if (n_closing < num_quotes) {
        lex->tok_kind = MP_TOKEN_LONELY_STRING_OPEN;
    }

    // cut off the end quotes from the token text
    vstr_cut_tail_bytes(&lex->vstr, n_closing);
}
+
// Skip over whitespace, comments and backslash line-continuations.
// Returns true if a physical newline was crossed (the caller uses this for
// NEWLINE/INDENT/DEDENT handling).  When stop_at_newline is true and we are
// not inside brackets, stop at the first newline instead of consuming it.
STATIC bool skip_whitespace(mp_lexer_t *lex, bool stop_at_newline) {
    bool had_physical_newline = false;
    while (!is_end(lex)) {
        if (is_physical_newline(lex)) {
            if (stop_at_newline && lex->nested_bracket_level == 0) {
                break;
            }
            had_physical_newline = true;
            next_char(lex);
        } else if (is_whitespace(lex)) {
            next_char(lex);
        } else if (is_char(lex, '#')) {
            // comment: consume to end of line
            next_char(lex);
            while (!is_end(lex) && !is_physical_newline(lex)) {
                next_char(lex);
            }
            // had_physical_newline will be set on next loop
        } else if (is_char_and(lex, '\\', '\n')) {
            // line-continuation, so don't set had_physical_newline
            next_char(lex);
            next_char(lex);
        } else {
            break;
        }
    }
    return had_physical_newline;
}
+
// Advance the lexer to the next token, storing it in lex->tok_kind,
// tok_line, tok_column and vstr.  Handles pending INDENT/DEDENT emission,
// adjacent string/bytes literal concatenation, identifier/keyword lookup,
// number literals, and operator/delimiter matching via the tok_enc tables.
void mp_lexer_to_next(mp_lexer_t *lex) {
    #if MICROPY_PY_FSTRINGS
    if (lex->fstring_args.len && lex->fstring_args_idx == 0) {
        // moving onto the next token means the literal string is complete.
        // switch into injecting the format args.
        vstr_add_byte(&lex->fstring_args, ')');
        lex->chr0_saved = lex->chr0;
        lex->chr1_saved = lex->chr1;
        lex->chr2_saved = lex->chr2;
        lex->chr0 = lex->fstring_args.buf[0];
        lex->chr1 = lex->fstring_args.buf[1];
        lex->chr2 = lex->fstring_args.buf[2];
        // we've already extracted 3 chars, but setting this non-zero also
        // means we'll start consuming the fstring data
        lex->fstring_args_idx = 3;
    }
    #endif

    // start new token text
    vstr_reset(&lex->vstr);

    // skip white space and comments
    bool had_physical_newline = skip_whitespace(lex, false);

    // set token source information
    lex->tok_line = lex->line;
    lex->tok_column = lex->column;

    if (lex->emit_dent < 0) {
        // emit a pending DEDENT before looking at any new input
        lex->tok_kind = MP_TOKEN_DEDENT;
        lex->emit_dent += 1;

    } else if (lex->emit_dent > 0) {
        // emit a pending INDENT before looking at any new input
        lex->tok_kind = MP_TOKEN_INDENT;
        lex->emit_dent -= 1;

    } else if (had_physical_newline && lex->nested_bracket_level == 0) {
        lex->tok_kind = MP_TOKEN_NEWLINE;

        // compare this line's indentation against the indent stack and
        // queue up INDENT/DEDENT tokens in emit_dent as needed
        size_t num_spaces = lex->column - 1;
        if (num_spaces == indent_top(lex)) {
        } else if (num_spaces > indent_top(lex)) {
            indent_push(lex, num_spaces);
            lex->emit_dent += 1;
        } else {
            while (num_spaces < indent_top(lex)) {
                indent_pop(lex);
                lex->emit_dent -= 1;
            }
            if (num_spaces != indent_top(lex)) {
                lex->tok_kind = MP_TOKEN_DEDENT_MISMATCH;
            }
        }

    } else if (is_end(lex)) {
        lex->tok_kind = MP_TOKEN_END;

    } else if (is_string_or_bytes(lex)) {
        // a string or bytes literal

        // Python requires adjacent string/bytes literals to be automatically
        // concatenated. We do it here in the tokeniser to make efficient use of RAM,
        // because then the lexer's vstr can be used to accumulate the string literal,
        // in contrast to creating a parse tree of strings and then joining them later
        // in the compiler. It's also more compact in code size to do it here.

        // MP_TOKEN_END is used to indicate that this is the first string token
        lex->tok_kind = MP_TOKEN_END;

        // Loop to accumulate string/bytes literals
        do {
            // parse type codes
            bool is_raw = false;
            bool is_fstring = false;
            mp_token_kind_t kind = MP_TOKEN_STRING;
            int n_char = 0;
            if (is_char(lex, 'u')) {
                n_char = 1;
            } else if (is_char(lex, 'b')) {
                kind = MP_TOKEN_BYTES;
                n_char = 1;
                if (is_char_following(lex, 'r')) {
                    is_raw = true;
                    n_char = 2;
                }
            } else if (is_char(lex, 'r')) {
                is_raw = true;
                n_char = 1;
                if (is_char_following(lex, 'b')) {
                    kind = MP_TOKEN_BYTES;
                    n_char = 2;
                }
                #if MICROPY_PY_FSTRINGS
                if (is_char_following(lex, 'f')) {
                    // raw-f-strings unsupported, immediately return (invalid) token.
                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
                    break;
                }
                #endif
            }
            #if MICROPY_PY_FSTRINGS
            else if (is_char(lex, 'f')) {
                if (is_char_following(lex, 'r')) {
                    // raw-f-strings unsupported, immediately return (invalid) token.
                    lex->tok_kind = MP_TOKEN_FSTRING_RAW;
                    break;
                }
                n_char = 1;
                is_fstring = true;
            }
            #endif

            // Set or check token kind
            if (lex->tok_kind == MP_TOKEN_END) {
                lex->tok_kind = kind;
            } else if (lex->tok_kind != kind) {
                // Can't concatenate string with bytes
                break;
            }

            // Skip any type code characters
            if (n_char != 0) {
                next_char(lex);
                if (n_char == 2) {
                    next_char(lex);
                }
            }

            // Parse the literal
            parse_string_literal(lex, is_raw, is_fstring);

            // Skip whitespace so we can check if there's another string following
            skip_whitespace(lex, true);

        } while (is_string_or_bytes(lex));
    } else if (is_head_of_identifier(lex)) {
        lex->tok_kind = MP_TOKEN_NAME;

        // get first char (add as byte to remain 8-bit clean and support utf-8)
        vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex) && is_tail_of_identifier(lex)) {
            vstr_add_byte(&lex->vstr, CUR_CHAR(lex));
            next_char(lex);
        }

        // Check if the name is a keyword.
        // We also check for __debug__ here and convert it to its value. This is
        // so the parser gives a syntax error on, eg, x.__debug__. Otherwise, we
        // need to check for this special token in many places in the compiler.
        const char *s = vstr_null_terminated_str(&lex->vstr);
        for (size_t i = 0; i < MP_ARRAY_SIZE(tok_kw); i++) {
            int cmp = strcmp(s, tok_kw[i]);
            if (cmp == 0) {
                lex->tok_kind = MP_TOKEN_KW_FALSE + i;
                if (lex->tok_kind == MP_TOKEN_KW___DEBUG__) {
                    lex->tok_kind = (MP_STATE_VM(mp_optimise_value) == 0 ? MP_TOKEN_KW_TRUE : MP_TOKEN_KW_FALSE);
                }
                break;
            } else if (cmp < 0) {
                // Table is sorted and comparison was less-than, so stop searching
                break;
            }
        }

    } else if (is_digit(lex) || (is_char(lex, '.') && is_following_digit(lex))) {
        // a number literal; underscores are digit separators and are dropped
        bool forced_integer = false;
        if (is_char(lex, '.')) {
            lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
        } else {
            lex->tok_kind = MP_TOKEN_INTEGER;
            if (is_char(lex, '0') && is_following_base_char(lex)) {
                forced_integer = true;
            }
        }

        // get first char
        vstr_add_char(&lex->vstr, CUR_CHAR(lex));
        next_char(lex);

        // get tail chars
        while (!is_end(lex)) {
            if (!forced_integer && is_char_or(lex, 'e', 'E')) {
                lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                vstr_add_char(&lex->vstr, 'e');
                next_char(lex);
                if (is_char(lex, '+') || is_char(lex, '-')) {
                    vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                    next_char(lex);
                }
            } else if (is_letter(lex) || is_digit(lex) || is_char(lex, '.')) {
                if (is_char_or3(lex, '.', 'j', 'J')) {
                    lex->tok_kind = MP_TOKEN_FLOAT_OR_IMAG;
                }
                vstr_add_char(&lex->vstr, CUR_CHAR(lex));
                next_char(lex);
            } else if (is_char(lex, '_')) {
                next_char(lex);
            } else {
                break;
            }
        }

    } else {
        // search for encoded delimiter or operator

        const char *t = tok_enc;
        size_t tok_enc_index = 0;
        for (; *t != 0 && !is_char(lex, *t); t += 1) {
            if (*t == 'e' || *t == 'c') {
                t += 1;
            }
            tok_enc_index += 1;
        }

        next_char(lex);

        if (*t == 0) {
            // didn't match any delimiter or operator characters
            lex->tok_kind = MP_TOKEN_INVALID;

        } else if (*t == '!') {
            // "!=" is a special case because "!" is not a valid operator
            if (is_char(lex, '=')) {
                next_char(lex);
                lex->tok_kind = MP_TOKEN_OP_NOT_EQUAL;
            } else {
                lex->tok_kind = MP_TOKEN_INVALID;
            }

        } else if (*t == '.') {
            // "." and "..." are special cases because ".." is not a valid operator
            if (is_char_and(lex, '.', '.')) {
                next_char(lex);
                next_char(lex);
                lex->tok_kind = MP_TOKEN_ELLIPSIS;
            } else {
                lex->tok_kind = MP_TOKEN_DEL_PERIOD;
            }

        } else {
            // matched a delimiter or operator character

            // get the maximum characters for a valid token
            t += 1;
            size_t t_index = tok_enc_index;
            while (*t == 'c' || *t == 'e') {
                t_index += 1;
                if (is_char(lex, t[1])) {
                    next_char(lex);
                    tok_enc_index = t_index;
                    if (*t == 'e') {
                        break;
                    }
                } else if (*t == 'c') {
                    break;
                }
                t += 2;
            }

            // set token kind
            lex->tok_kind = tok_enc_kind[tok_enc_index];

            // compute bracket level for implicit line joining
            if (lex->tok_kind == MP_TOKEN_DEL_PAREN_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACKET_OPEN || lex->tok_kind == MP_TOKEN_DEL_BRACE_OPEN) {
                lex->nested_bracket_level += 1;
            } else if (lex->tok_kind == MP_TOKEN_DEL_PAREN_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACKET_CLOSE || lex->tok_kind == MP_TOKEN_DEL_BRACE_CLOSE) {
                lex->nested_bracket_level -= 1;
            }
        }
    }
}
+
// Construct a lexer over the given reader and preload the first token.
// Ownership of the reader passes to the lexer (it is closed by mp_lexer_free).
mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader) {
    mp_lexer_t *lex = m_new_obj(mp_lexer_t);

    lex->source_name = src_name;
    lex->reader = reader;
    lex->line = 1;
    lex->column = (size_t)-2; // account for 3 dummy bytes
    lex->emit_dent = 0;
    lex->nested_bracket_level = 0;
    lex->alloc_indent_level = MICROPY_ALLOC_LEXER_INDENT_INIT;
    lex->num_indent_level = 1;
    lex->indent_level = m_new(uint16_t, lex->alloc_indent_level);
    vstr_init(&lex->vstr, 32);
    #if MICROPY_PY_FSTRINGS
    vstr_init(&lex->fstring_args, 0);
    #endif

    // store sentinel for first indentation level
    lex->indent_level[0] = 0;

    // load lexer with start of file, advancing lex->column to 1
    // start with dummy bytes and use next_char() for proper EOL/EOF handling
    // (the three dummy bytes are shifted out of the chr0..chr2 queue by the
    // three next_char() calls, which also wrap column from -2 up to 1)
    lex->chr0 = lex->chr1 = lex->chr2 = 0;
    next_char(lex);
    next_char(lex);
    next_char(lex);

    // preload first token
    mp_lexer_to_next(lex);

    // Check that the first token is in the first column. If it's not then we
    // convert the token kind to INDENT so that the parser gives a syntax error.
    if (lex->tok_column != 1) {
        lex->tok_kind = MP_TOKEN_INDENT;
    }

    return lex;
}
+
+mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len) {
+ mp_reader_t reader;
+ mp_reader_new_mem(&reader, (const byte *)str, len, free_len);
+ return mp_lexer_new(src_name, reader);
+}
+
+#if MICROPY_READER_POSIX || MICROPY_READER_VFS
+
+mp_lexer_t *mp_lexer_new_from_file(const char *filename) {
+ mp_reader_t reader;
+ mp_reader_new_file(&reader, filename);
+ return mp_lexer_new(qstr_from_str(filename), reader);
+}
+
+#if MICROPY_HELPER_LEXER_UNIX
+
+mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd) {
+ mp_reader_t reader;
+ mp_reader_new_file_from_fd(&reader, fd, close_fd);
+ return mp_lexer_new(filename, reader);
+}
+
+#endif
+
+#endif
+
+void mp_lexer_free(mp_lexer_t *lex) {
+ if (lex) {
+ lex->reader.close(lex->reader.data);
+ vstr_clear(&lex->vstr);
+ #if MICROPY_PY_FSTRINGS
+ vstr_clear(&lex->fstring_args);
+ #endif
+ m_del(uint16_t, lex->indent_level, lex->alloc_indent_level);
+ m_del_obj(mp_lexer_t, lex);
+ }
+}
+
#if 0
// This function is used to print the current token and should only be
// needed to debug the lexer, so it's not available via a config option.
// Prints "(line:column) kind:... str:... len:..." followed by the token
// text decoded as utf-8 (non-printable characters shown as '?').
void mp_lexer_show_token(const mp_lexer_t *lex) {
    printf("(" UINT_FMT ":" UINT_FMT ") kind:%u str:%p len:%zu", lex->tok_line, lex->tok_column, lex->tok_kind, lex->vstr.buf, lex->vstr.len);
    if (lex->vstr.len > 0) {
        const byte *i = (const byte *)lex->vstr.buf;
        const byte *j = (const byte *)i + lex->vstr.len;
        printf(" ");
        while (i < j) {
            unichar c = utf8_get_char(i);
            i = utf8_next_char(i);
            if (unichar_isprint(c)) {
                printf("%c", (int)c);
            } else {
                printf("?");
            }
        }
    }
    printf("\n");
}
#endif
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/lexer.h b/circuitpython/py/lexer.h
new file mode 100644
index 0000000..bcc1c04
--- /dev/null
+++ b/circuitpython/py/lexer.h
@@ -0,0 +1,212 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_LEXER_H
+#define MICROPY_INCLUDED_PY_LEXER_H
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/qstr.h"
+#include "py/reader.h"
+
+/* lexer.h -- simple tokeniser for MicroPython
+ *
+ * Uses (byte) length instead of null termination.
+ * Tokens are the same - UTF-8 with (byte) length.
+ */
+
typedef enum _mp_token_kind_t {
    MP_TOKEN_END, // end of input (also used internally by the tokeniser as a sentinel)

    // error tokens
    MP_TOKEN_INVALID,
    MP_TOKEN_DEDENT_MISMATCH,
    MP_TOKEN_LONELY_STRING_OPEN, // unterminated string literal
    #if MICROPY_PY_FSTRINGS
    MP_TOKEN_MALFORMED_FSTRING,
    MP_TOKEN_FSTRING_RAW, // raw f-strings are not supported
    #endif

    MP_TOKEN_NEWLINE,
    MP_TOKEN_INDENT,
    MP_TOKEN_DEDENT,

    MP_TOKEN_NAME,
    MP_TOKEN_INTEGER,
    MP_TOKEN_FLOAT_OR_IMAG,
    MP_TOKEN_STRING,
    MP_TOKEN_BYTES,

    MP_TOKEN_ELLIPSIS,

    // keywords: this run must stay in the same (strcmp-sorted) order as the
    // tok_kw table in lexer.c, which maps entry i to MP_TOKEN_KW_FALSE + i
    MP_TOKEN_KW_FALSE,
    MP_TOKEN_KW_NONE,
    MP_TOKEN_KW_TRUE,
    MP_TOKEN_KW___DEBUG__,
    MP_TOKEN_KW_AND,
    MP_TOKEN_KW_AS,
    MP_TOKEN_KW_ASSERT,
    #if MICROPY_PY_ASYNC_AWAIT
    MP_TOKEN_KW_ASYNC,
    MP_TOKEN_KW_AWAIT,
    #endif
    MP_TOKEN_KW_BREAK,
    MP_TOKEN_KW_CLASS,
    MP_TOKEN_KW_CONTINUE,
    MP_TOKEN_KW_DEF,
    MP_TOKEN_KW_DEL,
    MP_TOKEN_KW_ELIF,
    MP_TOKEN_KW_ELSE,
    MP_TOKEN_KW_EXCEPT,
    MP_TOKEN_KW_FINALLY,
    MP_TOKEN_KW_FOR,
    MP_TOKEN_KW_FROM,
    MP_TOKEN_KW_GLOBAL,
    MP_TOKEN_KW_IF,
    MP_TOKEN_KW_IMPORT,
    MP_TOKEN_KW_IN,
    MP_TOKEN_KW_IS,
    MP_TOKEN_KW_LAMBDA,
    MP_TOKEN_KW_NONLOCAL,
    MP_TOKEN_KW_NOT,
    MP_TOKEN_KW_OR,
    MP_TOKEN_KW_PASS,
    MP_TOKEN_KW_RAISE,
    MP_TOKEN_KW_RETURN,
    MP_TOKEN_KW_TRY,
    MP_TOKEN_KW_WHILE,
    MP_TOKEN_KW_WITH,
    MP_TOKEN_KW_YIELD,

    MP_TOKEN_OP_ASSIGN,
    MP_TOKEN_OP_TILDE,

    // Order of these 6 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_LESS,
    MP_TOKEN_OP_MORE,
    MP_TOKEN_OP_DBL_EQUAL,
    MP_TOKEN_OP_LESS_EQUAL,
    MP_TOKEN_OP_MORE_EQUAL,
    MP_TOKEN_OP_NOT_EQUAL,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_OP_PIPE,
    MP_TOKEN_OP_CARET,
    MP_TOKEN_OP_AMPERSAND,
    MP_TOKEN_OP_DBL_LESS,
    MP_TOKEN_OP_DBL_MORE,
    MP_TOKEN_OP_PLUS,
    MP_TOKEN_OP_MINUS,
    MP_TOKEN_OP_STAR,
    MP_TOKEN_OP_AT,
    MP_TOKEN_OP_DBL_SLASH,
    MP_TOKEN_OP_SLASH,
    MP_TOKEN_OP_PERCENT,
    MP_TOKEN_OP_DBL_STAR,

    // Order of these 13 matches corresponding mp_binary_op_t operator
    MP_TOKEN_DEL_PIPE_EQUAL,
    MP_TOKEN_DEL_CARET_EQUAL,
    MP_TOKEN_DEL_AMPERSAND_EQUAL,
    MP_TOKEN_DEL_DBL_LESS_EQUAL,
    MP_TOKEN_DEL_DBL_MORE_EQUAL,
    MP_TOKEN_DEL_PLUS_EQUAL,
    MP_TOKEN_DEL_MINUS_EQUAL,
    MP_TOKEN_DEL_STAR_EQUAL,
    MP_TOKEN_DEL_AT_EQUAL,
    MP_TOKEN_DEL_DBL_SLASH_EQUAL,
    MP_TOKEN_DEL_SLASH_EQUAL,
    MP_TOKEN_DEL_PERCENT_EQUAL,
    MP_TOKEN_DEL_DBL_STAR_EQUAL,

    MP_TOKEN_DEL_PAREN_OPEN,
    MP_TOKEN_DEL_PAREN_CLOSE,
    MP_TOKEN_DEL_BRACKET_OPEN,
    MP_TOKEN_DEL_BRACKET_CLOSE,
    MP_TOKEN_DEL_BRACE_OPEN,
    MP_TOKEN_DEL_BRACE_CLOSE,
    MP_TOKEN_DEL_COMMA,
    MP_TOKEN_DEL_COLON,
    MP_TOKEN_DEL_PERIOD,
    MP_TOKEN_DEL_SEMICOLON,
    MP_TOKEN_DEL_EQUAL,
    MP_TOKEN_DEL_MINUS_MORE,
} mp_token_kind_t;
+
// this data structure is exposed for efficiency
// public members are: source_name, tok_line, tok_column, tok_kind, vstr
typedef struct _mp_lexer_t {
    qstr source_name; // name of source
    mp_reader_t reader; // stream source (owned; closed by mp_lexer_free)

    unichar chr0, chr1, chr2; // current cached characters from source (3-char lookahead)
    #if MICROPY_PY_FSTRINGS
    unichar chr0_saved, chr1_saved, chr2_saved; // lookahead saved while f-string args are being injected
    #endif

    size_t line; // current source line
    size_t column; // current source column

    mp_int_t emit_dent; // non-zero when there are INDENT (>0) / DEDENT (<0) tokens to emit
    mp_int_t nested_bracket_level; // >0 when there are nested brackets over multiple lines

    size_t alloc_indent_level; // allocated length of indent_level array
    size_t num_indent_level; // number of entries used in indent_level
    uint16_t *indent_level; // stack of indentation amounts, one per indent level

    size_t tok_line; // token source line
    size_t tok_column; // token source column
    mp_token_kind_t tok_kind; // token kind
    vstr_t vstr; // token data
    #if MICROPY_PY_FSTRINGS
    vstr_t fstring_args; // extracted arguments to pass to .format()
    size_t fstring_args_idx; // how many bytes of fstring_args have been read
    #endif
} mp_lexer_t;
+
+mp_lexer_t *mp_lexer_new(qstr src_name, mp_reader_t reader);
+mp_lexer_t *mp_lexer_new_from_str_len(qstr src_name, const char *str, size_t len, size_t free_len);
+
+void mp_lexer_free(mp_lexer_t *lex);
+void mp_lexer_to_next(mp_lexer_t *lex);
+
+/******************************************************************/
+// platform specific import function; must be implemented for a specific port
+// TODO tidy up, rename, or put elsewhere
+
// Result of mp_import_stat(): whether an import path does not exist, or
// names a directory (package) or a file (module).
typedef enum {
    MP_IMPORT_STAT_NO_EXIST,
    MP_IMPORT_STAT_DIR,
    MP_IMPORT_STAT_FILE,
} mp_import_stat_t;
+
+mp_import_stat_t mp_import_stat(const char *path);
+mp_lexer_t *mp_lexer_new_from_file(const char *filename);
+
+#if MICROPY_HELPER_LEXER_UNIX
+mp_lexer_t *mp_lexer_new_from_fd(qstr filename, int fd, bool close_fd);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_LEXER_H
diff --git a/circuitpython/py/makecompresseddata.py b/circuitpython/py/makecompresseddata.py
new file mode 100644
index 0000000..9603de8
--- /dev/null
+++ b/circuitpython/py/makecompresseddata.py
@@ -0,0 +1,205 @@
+from __future__ import print_function
+
+import collections
+import re
+import sys
+
+import gzip
+import zlib
+
+
+_COMPRESSED_MARKER = 0xFF
+
+
def check_non_ascii(msg):
    """Abort the build if msg contains a non-ASCII character.

    The compression schemes use bytes >= 0x80 as markers, so every input
    message must be plain ASCII.  Prints an error to stderr and exits with
    status 1 on the first violation; returns None otherwise.
    """
    for ch in msg:
        if ord(ch) < 0x80:
            continue
        print(
            'Unable to generate compressed data: message "{}" contains a non-ascii character "{}".'.format(
                msg, ch
            ),
            file=sys.stderr,
        )
        sys.exit(1)
+
+
# Replace <char><space> with an octal escape of <char>.
# Trivial scheme to demo/test.
# NOTE(review): despite the original description, the escaped byte is not
# OR'd with 0x80 here — confirm against the matching decompressor.
def space_compression(error_strings):
    """Rewrite each value of error_strings in place; returns None."""
    for line in error_strings:
        check_non_ascii(line)
        out = ""
        for idx, ch in enumerate(line):
            if idx > 0 and ch == " ":
                # drop the previous output character and emit it escaped
                out = out[:-1] + "\\{:03o}".format(ord(line[idx - 1]))
            else:
                out += ch
        error_strings[line] = out
    return None
+
+
# Replace common words with <0x80 | index>.
# Index is into a table of words stored as aaaaa<0x80|a>bbb<0x80|b>...
# Replaced words are assumed to have spaces either side to avoid having to store the spaces in the compressed strings.
def word_compression(error_strings):
    """Rewrite error_strings values in place; return the encoded word table."""
    counts = collections.Counter()
    for line in error_strings.keys():
        check_non_ascii(line)
        for word in line.split(" "):
            counts[word] += 1

    # Order not just by frequency, but by expected saving. i.e. prefer a longer
    # string that is used less frequently.  Ties are broken by the word itself
    # so that compression is deterministic.
    def saving_key(item):
        word, freq = item
        return -((len(word) + 1) * (freq - 1)), word

    table = [word for word, _ in sorted(counts.items(), key=saving_key)[:128]]
    lookup = {word: pos for pos, word in enumerate(table)}

    for line in error_strings.keys():
        pieces = []
        pending_space = False
        for word in line.split(" "):
            if word in lookup:
                pieces.append("\\{:03o}".format(0b10000000 | lookup[word]))
                pending_space = False
            else:
                if pending_space:
                    pieces.append(" ")
                pending_space = True
                pieces.append(word)
        error_strings[line] = "".join(pieces).strip()

    # Encode the table: each word's last character gets its high bit set.
    return "".join(w[:-1] + "\\{:03o}".format(0b10000000 | ord(w[-1])) for w in table)
+
+
# Replace chars in text with variable length bit sequence.
# For comparison only (the table is not emitted).
def huffman_compression(error_strings):
    """Rewrite error_strings values in place with bit-packed octal escapes;
    return a placeholder string approximating the codebook cost."""
    # https://github.com/tannewt/huffman
    import huffman

    codebook = huffman.codebook(collections.Counter("".join(error_strings)).items())

    for line in error_strings:
        # leading "1" marks the start of the bit stream
        bits = "1" + "".join(codebook[ch] for ch in line)
        nbits = len(bits)
        if nbits % 8:
            # round up to a whole number of bytes
            nbits += 8 - nbits % 8
        encoded = "".join(
            "\\{:03o}".format(int(bits[i:i + 8], 2)) for i in range(0, nbits, 8)
        )
        if len(encoded) > len(line) * 4:
            # escaped form is longer than the original; keep the original
            encoded = line
        error_strings[line] = encoded

    # TODO: This would be the prefix lengths and the table ordering.
    return "_" * (10 + len(codebook))
+
+
# Replace common N-letter sequences with <0x80 | index>, where
# the common sequences are stored in a separate table.
# This isn't very useful, need a smarter way to find top-ngrams.
def ngram_compression(error_strings):
    """Rewrite error_strings values in place; return the n-gram table."""
    N = 2
    counts = collections.Counter()
    for line in error_strings.keys():
        check_non_ascii(line)
        if len(line) < N:
            continue
        for i in range(0, len(line) - N, N):
            counts[line[i:i + N]] += 1

    def saving_key(item):
        gram, freq = item
        return -(len(gram) * (freq - 1))

    table = [gram for gram, _ in sorted(counts.items(), key=saving_key)[:128]]
    lookup = {gram: pos for pos, gram in enumerate(table)}

    for line in error_strings.keys():
        pieces = []
        for i in range(0, len(line) - N + 1, N):
            gram = line[i:i + N]
            if gram in lookup:
                pieces.append("\\{:03o}".format(0b10000000 | lookup[gram]))
            else:
                pieces.append(gram)
        tail = len(line) % N
        if tail:
            # keep any trailing characters that don't fill a whole n-gram
            pieces.append(line[len(line) - tail:])
        error_strings[line] = "".join(pieces).strip()

    return "".join(table)
+
+
def main(collected_path, fn):
    # Read the collected MP_ERROR_TEXT strings from collected_path, compress
    # them with scheme `fn`, and print the resulting C macro lines
    # (MP_MAX_UNCOMPRESSED_TEXT_LEN / MP_COMPRESSED_DATA / MP_MATCH_COMPRESSED)
    # plus size statistics as // comments.  `fn` must rewrite the dict values
    # in place and return the shared data-table string (falsy if none).
    error_strings = collections.OrderedDict()
    max_uncompressed_len = 0
    num_uses = 0

    # Read in all MP_ERROR_TEXT strings.
    with open(collected_path, "r") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            num_uses += 1
            error_strings[line] = None
            max_uncompressed_len = max(max_uncompressed_len, len(line))

    # So that objexcept.c can figure out how big the buffer needs to be.
    print("#define MP_MAX_UNCOMPRESSED_TEXT_LEN ({})".format(max_uncompressed_len))

    # Run the compression.
    compressed_data = fn(error_strings)

    # Print the data table.
    print('MP_COMPRESSED_DATA("{}")'.format(compressed_data))

    # Print the replacements.
    for uncomp, comp in error_strings.items():
        if uncomp == comp:
            # string not compressed: no marker prefix, stored verbatim
            prefix = ""
        else:
            prefix = "\\{:03o}".format(_COMPRESSED_MARKER)
        print('MP_MATCH_COMPRESSED("{}", "{}{}")'.format(uncomp, prefix, comp))

    # Used to calculate the "true" length of the (escaped) compressed strings.
    def unescape(s):
        return re.sub(r"\\\d\d\d", "!", s)

    # Stats. Note this doesn't include the cost of the decompressor code.
    uncomp_len = sum(len(s) + 1 for s in error_strings.keys())
    comp_len = sum(1 + len(unescape(s)) + 1 for s in error_strings.values())
    data_len = len(compressed_data) + 1 if compressed_data else 0
    print("// Total input length:      {}".format(uncomp_len))
    print("// Total compressed length: {}".format(comp_len))
    print("// Total data length:       {}".format(data_len))
    print("// Predicted saving:        {}".format(uncomp_len - comp_len - data_len))

    # Somewhat meaningless comparison to zlib/gzip.
    all_input_bytes = "\\0".join(error_strings.keys()).encode()
    print()
    if hasattr(gzip, "compress"):
        gzip_len = len(gzip.compress(all_input_bytes)) + num_uses * 4
        print("// gzip length:             {}".format(gzip_len))
        print("// Percentage of gzip:      {:.1f}%".format(100 * (comp_len + data_len) / gzip_len))
    if hasattr(zlib, "compress"):
        zlib_len = len(zlib.compress(all_input_bytes)) + num_uses * 4
        print("// zlib length:             {}".format(zlib_len))
        print("// Percentage of zlib:      {:.1f}%".format(100 * (comp_len + data_len) / zlib_len))
+
+
+if __name__ == "__main__":
+ main(sys.argv[1], word_compression)
diff --git a/circuitpython/py/makemoduledefs.py b/circuitpython/py/makemoduledefs.py
new file mode 100644
index 0000000..aa2e507
--- /dev/null
+++ b/circuitpython/py/makemoduledefs.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+# This pre-processor parses provided objects' c files for
+# MP_REGISTER_MODULE(module_name, obj_module, enabled_define)
+# These are used to generate a header with the required entries for
+# "mp_rom_map_elem_t mp_builtin_module_table[]" in py/objmodule.c
+
+from __future__ import print_function
+
+import re
+import io
+import os
+import argparse
+
+
+pattern = re.compile(r"[\n;]\s*MP_REGISTER_MODULE\((.*?),\s*(.*?),\s*(.*?)\);", flags=re.DOTALL)
+
+
def find_c_file(obj_file, vpath):
    """Search vpaths for the c file that matches the provided object_file.

    :param str obj_file: object file to find the matching c file for
    :param List[str] vpath: List of base paths, similar to gcc vpath
    :return: str path to c file or None
    """
    # Derive the expected relative source name: same stem, ".c" extension,
    # with any leading path separators stripped.
    relative = os.path.splitext(obj_file)[0] + ".c"
    relative = relative.lstrip("/\\")
    for base in vpath:
        candidate = os.path.join(base, relative)
        if os.path.exists(candidate):
            return candidate
    # No vpath entry contained the source file.
    return None
+
+
def find_module_registrations(c_file):
    """Find any MP_REGISTER_MODULE definitions in the provided c file.

    :param str c_file: path to c file to check, or None when no source matched
    :return: set of (module_name, obj_module, enabled_define) tuples
    """
    # NOTE: the original body declared ``global pattern`` here; that statement
    # is a no-op (the function only reads the module-level compiled regex and
    # never rebinds it), so it has been removed.
    if c_file is None:
        # No c file to match the object file, skip
        return set()

    with io.open(c_file, encoding="utf-8") as c_file_obj:
        # Use the precompiled module-level regex directly.
        return set(pattern.findall(c_file_obj.read()))
+
+
def generate_module_table_header(modules):
    """Print (to stdout) the generated header with module table entries.

    :param modules: iterable of (module_name, obj_module, enabled_define)
    :return: None
    """
    print("// Automatically generated by makemoduledefs.py.\n")
    print('#include "py/mpconfig.h"')

    # One MODULE_DEF_* macro per module: expands to a table entry when the
    # module's enable define is set, otherwise to nothing.
    entry_template = (
        "#if ({enabled_define})\n"
        "    extern const struct _mp_obj_module_t {obj_module};\n"
        "    #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
        "#else\n"
        "    #define {mod_def}\n"
        "#endif\n"
    )
    entry_macros = []
    for module_name, obj_module, enabled_define in modules:
        macro = "MODULE_DEF_{}".format(module_name.upper())
        entry_macros.append(macro)
        print(
            entry_template.format(
                module_name=module_name,
                obj_module=obj_module,
                enabled_define=enabled_define,
                mod_def=macro,
            )
        )

    # Collect every entry macro under one umbrella define used by objmodule.c.
    print("\n#define MICROPY_REGISTERED_MODULES \\")
    for macro in entry_macros:
        print("    {mod_def} \\".format(mod_def=macro))
    print("// MICROPY_REGISTERED_MODULES")
+
+
def main():
    """Parse arguments, scan the object files' sources, print the header."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--vpath", default=".", help="comma separated list of folders to search for c files in"
    )
    parser.add_argument("files", nargs="*", help="list of c files to search")
    args = parser.parse_args()

    search_dirs = [p.strip() for p in args.vpath.split(",")]

    registrations = set()
    for obj_file in args.files:
        registrations |= find_module_registrations(find_c_file(obj_file, search_dirs))

    # Sort for deterministic output across runs.
    generate_module_table_header(sorted(registrations))


if __name__ == "__main__":
    main()
diff --git a/circuitpython/py/makeqstrdata.py b/circuitpython/py/makeqstrdata.py
new file mode 100644
index 0000000..74ad78c
--- /dev/null
+++ b/circuitpython/py/makeqstrdata.py
@@ -0,0 +1,786 @@
+"""
+Process raw qstr file and output qstr data with length, hash and data bytes.
+
+This script works with Python 2.7, 3.3 and 3.4.
+
+For documentation about the format of compressed translated strings, see
+supervisor/shared/translate.h
+"""
+
+from __future__ import print_function
+
+import bisect
+import re
+import sys
+
+import collections
+import gettext
+import os.path
+
+if hasattr(sys.stdout, "reconfigure"):
+ sys.stdout.reconfigure(encoding="utf-8")
+ sys.stderr.reconfigure(errors="backslashreplace")
+
+py = os.path.dirname(sys.argv[0])
+top = os.path.dirname(py)
+
+sys.path.append(os.path.join(top, "tools/huffman"))
+
+import huffman
+
+# Python 2/3 compatibility:
+# - iterating through bytes is different
+# - codepoint2name lives in a different module
+import platform
+
+if platform.python_version_tuple()[0] == "2":
+ bytes_cons = lambda val, enc=None: bytearray(val)
+ from htmlentitydefs import codepoint2name
+elif platform.python_version_tuple()[0] == "3":
+ bytes_cons = bytes
+ from html.entities import codepoint2name
+# end compatibility code
+
+codepoint2name[ord("-")] = "hyphen"
+
+# add some custom names to map characters that aren't in HTML
+codepoint2name[ord(" ")] = "space"
+codepoint2name[ord("'")] = "squot"
+codepoint2name[ord(",")] = "comma"
+codepoint2name[ord(".")] = "dot"
+codepoint2name[ord(":")] = "colon"
+codepoint2name[ord(";")] = "semicolon"
+codepoint2name[ord("/")] = "slash"
+codepoint2name[ord("%")] = "percent"
+codepoint2name[ord("#")] = "hash"
+codepoint2name[ord("(")] = "paren_open"
+codepoint2name[ord(")")] = "paren_close"
+codepoint2name[ord("[")] = "bracket_open"
+codepoint2name[ord("]")] = "bracket_close"
+codepoint2name[ord("{")] = "brace_open"
+codepoint2name[ord("}")] = "brace_close"
+codepoint2name[ord("*")] = "star"
+codepoint2name[ord("!")] = "bang"
+codepoint2name[ord("\\")] = "backslash"
+codepoint2name[ord("+")] = "plus"
+codepoint2name[ord("$")] = "dollar"
+codepoint2name[ord("=")] = "equals"
+codepoint2name[ord("?")] = "question"
+codepoint2name[ord("@")] = "at_sign"
+codepoint2name[ord("^")] = "caret"
+codepoint2name[ord("|")] = "pipe"
+codepoint2name[ord("~")] = "tilde"
+
+C_ESCAPES = {
+ "\a": "\\a",
+ "\b": "\\b",
+ "\f": "\\f",
+ "\n": "\\n",
+ "\r": "\\r",
+ "\t": "\\t",
+ "\v": "\\v",
+ "'": "\\'",
+ '"': '\\"',
+}
+
+# static qstrs, should be sorted
+# These are qstrs that are always included and always have the same number. It allows mpy files to omit them.
+static_qstr_list = [
+ "",
+ "__dir__", # Put __dir__ after empty qstr for builtin dir() to work
+ "\n",
+ " ",
+ "*",
+ "/",
+ "<module>",
+ "_",
+ "__call__",
+ "__class__",
+ "__delitem__",
+ "__enter__",
+ "__exit__",
+ "__getattr__",
+ "__getitem__",
+ "__hash__",
+ "__init__",
+ "__int__",
+ "__iter__",
+ "__len__",
+ "__main__",
+ "__module__",
+ "__name__",
+ "__new__",
+ "__next__",
+ "__qualname__",
+ "__repr__",
+ "__setitem__",
+ "__str__",
+ "ArithmeticError",
+ "AssertionError",
+ "AttributeError",
+ "BaseException",
+ "EOFError",
+ "Ellipsis",
+ "Exception",
+ "GeneratorExit",
+ "ImportError",
+ "IndentationError",
+ "IndexError",
+ "KeyError",
+ "KeyboardInterrupt",
+ "LookupError",
+ "MemoryError",
+ "NameError",
+ "NoneType",
+ "NotImplementedError",
+ "OSError",
+ "OverflowError",
+ "RuntimeError",
+ "StopIteration",
+ "SyntaxError",
+ "SystemExit",
+ "TypeError",
+ "ValueError",
+ "ZeroDivisionError",
+ "abs",
+ "all",
+ "any",
+ "append",
+ "args",
+ "bool",
+ "builtins",
+ "bytearray",
+ "bytecode",
+ "bytes",
+ "callable",
+ "chr",
+ "classmethod",
+ "clear",
+ "close",
+ "const",
+ "copy",
+ "count",
+ "dict",
+ "dir",
+ "divmod",
+ "end",
+ "endswith",
+ "eval",
+ "exec",
+ "extend",
+ "find",
+ "format",
+ "from_bytes",
+ "get",
+ "getattr",
+ "globals",
+ "hasattr",
+ "hash",
+ "id",
+ "index",
+ "insert",
+ "int",
+ "isalpha",
+ "isdigit",
+ "isinstance",
+ "islower",
+ "isspace",
+ "issubclass",
+ "isupper",
+ "items",
+ "iter",
+ "join",
+ "key",
+ "keys",
+ "len",
+ "list",
+ "little",
+ "locals",
+ "lower",
+ "lstrip",
+ "main",
+ "map",
+ "micropython",
+ "next",
+ "object",
+ "open",
+ "ord",
+ "pop",
+ "popitem",
+ "pow",
+ "print",
+ "range",
+ "read",
+ "readinto",
+ "readline",
+ "remove",
+ "replace",
+ "repr",
+ "reverse",
+ "rfind",
+ "rindex",
+ "round",
+ "rsplit",
+ "rstrip",
+ "self",
+ "send",
+ "sep",
+ "set",
+ "setattr",
+ "setdefault",
+ "sort",
+ "sorted",
+ "split",
+ "start",
+ "startswith",
+ "staticmethod",
+ "step",
+ "stop",
+ "str",
+ "strip",
+ "sum",
+ "super",
+ "throw",
+ "to_bytes",
+ "tuple",
+ "type",
+ "update",
+ "upper",
+ "utf-8",
+ "value",
+ "values",
+ "write",
+ "zip",
+]
+
# this must match the equivalent function in qstr.c
def compute_hash(qstr, bytes_hash):
    """djb2 hash of a byte sequence, truncated to bytes_hash bytes; never 0."""
    h = 5381
    for byte in qstr:
        h = (h * 33) ^ byte
    mask = (1 << (8 * bytes_hash)) - 1
    # Zero is reserved to mean "hash not computed", so remap it to 1.
    return (h & mask) or 1
+
+
def translate(translation_file, i18ns):
    """Translate the (escaped) source strings via a gettext .mo catalog.

    :param translation_file: path to a compiled .mo file
    :param i18ns: iterable of original strings (with C escape sequences)
    :return: list of (original, translation) pairs; newlines in translations
             are expanded to CRLF so they render correctly in terminals
    """
    with open(translation_file, "rb") as catalog:
        table = gettext.GNUTranslations(catalog)

    results = []
    for original in i18ns:
        # The inputs carry C escape sequences; undo them before catalog lookup.
        raw = original
        for plain, escaped in C_ESCAPES.items():
            raw = raw.replace(escaped, plain)
        translated = table.gettext(raw).replace("\n", "\r\n")
        results.append((original, translated))
    return results
+
+
class TextSplitter:
    """Tokenize text into known dictionary words and residual characters."""

    def __init__(self, words):
        # Longest-first alternation so longer words win over their prefixes.
        ordered = sorted(words, key=len, reverse=True)
        self.words = set(ordered)
        if ordered:
            regex = "|".join(re.escape(w) for w in ordered) + "|."
        else:
            regex = "."
        self.pat = re.compile(regex, flags=re.DOTALL)

    def iter_words(self, text):
        """Yield (True, word) for dictionary hits and (False, run) for the
        maximal runs of text between them."""
        pending = []
        known = self.words
        for m in self.pat.finditer(text):
            token = m.group(0)
            if token in known:
                if pending:
                    yield (False, "".join(pending))
                    pending = []
                yield (True, token)
            else:
                pending.append(token)
        if pending:
            yield (False, "".join(pending))

    def iter(self, text):
        """Yield every token (dictionary word or single character) in order."""
        for m in self.pat.finditer(text):
            yield m.group(0)
+
+
def iter_substrings(s, minlen, maxlen):
    """Yield every substring of s whose length lies in
    [minlen, min(maxlen, len(s))], shortest lengths first."""
    total = len(s)
    for size in range(minlen, min(total, maxlen) + 1):
        for start in range(total - size + 1):
            yield s[start:start + size]
+
+
def compute_huffman_coding(translations, compression_filename):
    """Build the word dictionary and canonical Huffman code for the translated
    strings, write the C tables into ``compression_filename``, and return the
    encoding table ``(values, lengths, words, canonical, extractor)`` consumed
    by compress()/decompress() below.

    :param translations: list of (original, translation) pairs
    :param compression_filename: path of the generated C header
    """
    texts = [t[1] for t in translations]
    words = []

    # Dictionary words are encoded as codepoints 0x80..end_unused-1, so any
    # codepoint in that range actually used by the text shrinks the space
    # available for dictionary entries.
    start_unused = 0x80
    end_unused = 0xFF
    max_ord = 0
    for text in texts:
        for c in text:
            ord_c = ord(c)
            max_ord = max(ord_c, max_ord)
            if 0x80 <= ord_c < 0xFF:
                end_unused = min(ord_c, end_unused)
    max_words = end_unused - 0x80

    # 16-bit table entries are only needed when the text goes beyond Latin-1.
    bits_per_codepoint = 16 if max_ord > 255 else 8
    values_type = "uint16_t" if max_ord > 255 else "uint8_t"
    while len(words) < max_words:
        # Until the dictionary is filled to capacity, use a heuristic to find
        # the best "word" (2- to 11-gram) to add to it.
        #
        # The TextSplitter allows us to avoid considering parts of the text
        # that are already covered by a previously chosen word, for example
        # if "the" is in words then not only will "the" not be considered
        # again, neither will "there" or "wither", since they have "the"
        # as substrings.
        extractor = TextSplitter(words)
        counter = collections.Counter()
        for t in texts:
            for atom in extractor.iter(t):
                counter[atom] += 1
        cb = huffman.codebook(counter.items())
        # (occurrence count, codeword length) pairs, sorted by count, used to
        # estimate the codeword length a new word with `occ` uses would get.
        lengths = sorted(dict((v, len(cb[k])) for k, v in counter.items()).items())

        def bit_length(s):
            return sum(len(cb[c]) for c in s)

        def est_len(occ):
            idx = bisect.bisect_left(lengths, (occ, 0))
            return lengths[idx][1] + 1

        # The cost of adding a dictionary word is just its storage size
        # while its savings is close to the difference between the original
        # huffman bit-length of the string and the estimated bit-length
        # of the dictionary word, times the number of times the word appears.
        #
        # The savings is not strictly accurate because including a word into
        # the Huffman tree bumps up the encoding lengths of all words in the
        # same subtree. In the extreme case when the new word is so frequent
        # that it gets a one-bit encoding, all other words will cost an extra
        # bit each. This is empirically modeled by the constant factor added to
        # cost, but the specific value used isn't "proven" to be correct.
        #
        # Another source of inaccuracy is that compressed strings end up
        # on byte boundaries, not bit boundaries, so saving 1 bit somewhere
        # might not save a byte.
        #
        # In fact, when this change was first made, some translations (luckily,
        # ones on boards not at all close to full) wasted up to 40 bytes,
        # while the most constrained boards typically gained 100 bytes or
        # more.
        #
        # The difference between the two is the estimated net savings, in bits.
        def est_net_savings(s, occ):
            savings = occ * (bit_length(s) - est_len(occ))
            cost = len(s) * bits_per_codepoint + 24
            return savings - cost

        counter = collections.Counter()
        for t in texts:
            for (found, word) in extractor.iter_words(t):
                if not found:
                    for substr in iter_substrings(word, minlen=2, maxlen=11):
                        counter[substr] += 1

        # Score the candidates we found. This is a semi-empirical formula that
        # attempts to model the number of bits saved as closely as possible.
        #
        # It attempts to compute the codeword lengths of the original word
        # to the codeword length the dictionary entry would get, times
        # the number of occurrences, less the ovehead of the entries in the
        # words[] array.

        scores = sorted(
            ((s, -est_net_savings(s, occ)) for (s, occ) in counter.items()), key=lambda x: x[1]
        )

        # Pick the one with the highest score. The score must be negative.
        if not scores or scores[0][-1] >= 0:
            break

        word = scores[0][0]
        words.append(word)

    # Final pass: rebuild the codebook with the chosen dictionary in place.
    words.sort(key=len)
    extractor = TextSplitter(words)
    counter = collections.Counter()
    for t in texts:
        for atom in extractor.iter(t):
            counter[atom] += 1
    cb = huffman.codebook(counter.items())

    word_start = start_unused
    word_end = word_start + len(words) - 1
    print("// # words", len(words))
    print("// words", words)

    # Convert to a canonical Huffman code: atoms sorted by (length, atom),
    # codewords assigned as consecutive binary numbers per length.
    values = []
    length_count = {}
    renumbered = 0
    last_length = None
    canonical = {}
    for atom, code in sorted(cb.items(), key=lambda x: (len(x[1]), x[0])):
        values.append(atom)
        length = len(code)
        if length not in length_count:
            length_count[length] = 0
        length_count[length] += 1
        if last_length:
            renumbered <<= length - last_length
        canonical[atom] = "{0:0{width}b}".format(renumbered, width=length)
        # print(f"atom={repr(atom)} code={code}", file=sys.stderr)
        if len(atom) > 1:
            o = words.index(atom) + 0x80
            s = "".join(C_ESCAPES.get(ch1, ch1) for ch1 in atom)
        else:
            s = C_ESCAPES.get(atom, atom)
            o = ord(atom)
        print("//", o, s, counter[atom], canonical[atom], renumbered)
        renumbered += 1
        last_length = length
    lengths = bytearray()
    print("// length count", length_count)

    # lengths[i] = number of codewords of bit-length i (index 0 unused).
    for i in range(1, max(length_count) + 2):
        lengths.append(length_count.get(i, 0))
    print("// values", values, "lengths", len(lengths), lengths)

    print("//", values, lengths)
    # Dictionary atoms are represented by their 0x80-based codepoint.
    values = [(atom if len(atom) == 1 else chr(0x80 + words.index(atom))) for atom in values]
    max_translation_encoded_length = max(
        len(translation.encode("utf-8")) for (original, translation) in translations
    )

    maxlen = len(words[-1])
    minlen = len(words[0])
    wlencount = [len([None for w in words if len(w) == l]) for l in range(minlen, maxlen + 1)]

    with open(compression_filename, "w") as f:
        # NOTE(review): missing "\n" after the typedef, so it and the
        # lengths[] declaration share one line in the generated header
        # (still valid C, but untidy) — confirm whether intentional.
        f.write("typedef {} mchar_t;".format(values_type))
        f.write("const uint8_t lengths[] = {{ {} }};\n".format(", ".join(map(str, lengths))))
        f.write(
            "const mchar_t values[] = {{ {} }};\n".format(", ".join(str(ord(u)) for u in values))
        )
        f.write(
            "#define compress_max_length_bits ({})\n".format(
                max_translation_encoded_length.bit_length()
            )
        )
        f.write(
            "const mchar_t words[] = {{ {} }};\n".format(
                ", ".join(str(ord(c)) for w in words for c in w)
            )
        )
        f.write(
            "const uint8_t wlencount[] = {{ {} }};\n".format(", ".join(str(p) for p in wlencount))
        )
        f.write("#define word_start {}\n".format(word_start))
        f.write("#define word_end {}\n".format(word_end))
        f.write("#define minlen {}\n".format(minlen))
        f.write("#define maxlen {}\n".format(maxlen))

    return (values, lengths, words, canonical, extractor)
+
+
def decompress(encoding_table, encoded, encoded_length_bits):
    """Python model of the C decompressor (supervisor/shared/translate):
    read a length prefix of ``encoded_length_bits`` bits, then decode
    canonical-Huffman codewords until that many UTF-8 bytes are produced.

    :param encoding_table: tuple from compute_huffman_coding()
    :param encoded: bytes/bytearray of compressed data
    :return: the decoded str
    """
    (values, lengths, words, _, _) = encoding_table
    dec = []
    # Bit reader state: current byte index, current bit position (MSB first),
    # and the current byte shifted so the next bit is at 0x80.
    this_byte = 0
    this_bit = 7
    b = encoded[this_byte]
    bits = 0
    # First, read the length prefix (decoded length in UTF-8 bytes).
    for i in range(encoded_length_bits):
        bits <<= 1
        if 0x80 & b:
            bits |= 1

        b <<= 1
        if this_bit == 0:
            this_bit = 7
            this_byte += 1
            if this_byte < len(encoded):
                b = encoded[this_byte]
        else:
            this_bit -= 1
    length = bits

    i = 0
    while i < length:
        # Decode one canonical-Huffman codeword: extend bit by bit until the
        # accumulated value falls below the max code of the current length.
        bits = 0
        bit_length = 0
        max_code = lengths[0]
        searched_length = lengths[0]
        while True:
            bits <<= 1
            if 0x80 & b:
                bits |= 1

            b <<= 1
            bit_length += 1
            if this_bit == 0:
                this_bit = 7
                this_byte += 1
                if this_byte < len(encoded):
                    b = encoded[this_byte]
            else:
                this_bit -= 1
            if max_code > 0 and bits < max_code:
                # print('{0:0{width}b}'.format(bits, width=bit_length))
                break
            max_code = (max_code << 1) + lengths[bit_length]
            searched_length += lengths[bit_length]

        v = values[searched_length + bits - max_code]
        # Codepoints 0x80.. index into the word dictionary.
        if v >= chr(0x80) and v < chr(0x80 + len(words)):
            v = words[ord(v) - 0x80]
        i += len(v.encode("utf-8"))
        dec.append(v)
    return "".join(dec)
+
+
def compress(encoding_table, decompressed, encoded_length_bits, len_translation_encoded):
    """Compress a translation string: write ``len_translation_encoded`` as an
    ``encoded_length_bits``-wide prefix, then the canonical-Huffman codeword
    of each atom produced by the extractor. Returns a bytearray slice.

    :raises TypeError: if ``decompressed`` is not a str
    """
    if not isinstance(decompressed, str):
        raise TypeError()
    (_, _, _, canonical, extractor) = encoding_table

    # Worst-case sized output buffer; trimmed to current_byte at the end.
    enc = bytearray(len(decompressed) * 3)
    current_bit = 7
    current_byte = 0

    # Emit the length prefix, most significant bit first.
    bits = encoded_length_bits + 1
    for i in range(bits - 1, 0, -1):
        if len_translation_encoded & (1 << (i - 1)):
            enc[current_byte] |= 1 << current_bit
        if current_bit == 0:
            current_bit = 7
            current_byte += 1
        else:
            current_bit -= 1

    # Emit each atom's canonical codeword (a string of '0'/'1' characters).
    for atom in extractor.iter(decompressed):
        for b in canonical[atom]:
            if b == "1":
                enc[current_byte] |= 1 << current_bit
            if current_bit == 0:
                current_bit = 7
                current_byte += 1
            else:
                current_bit -= 1

    # Account for a partially filled final byte.
    if current_bit != 7:
        current_byte += 1
    return enc[:current_byte]
+
+
def qstr_escape(qst):
    """Turn a qstr into a valid C identifier fragment: characters outside
    [A-Za-z0-9_] become _name_ (via codepoint2name) or _0xXX_ as fallback."""

    def esc_char(m):
        cp = ord(m.group(0))
        name = codepoint2name.get(cp, "0x%02x" % cp)
        return "_" + name + "_"

    return re.sub(r"[^A-Za-z0-9_]", esc_char, qst)
+
+
def parse_input_headers(infiles):
    """Parse preprocessed qstr files into config values, qstrs and i18n text.

    :param infiles: paths of files containing QCFG(...), Q(...) and
        TRANSLATE("...") lines
    :return: (qcfgs dict, qstrs dict of ident -> (order, ident, qstr),
        i18ns set of original strings)
    """
    qcfgs = {}
    qstrs = {}
    i18ns = set()

    # add static qstrs
    for qstr in static_qstr_list:
        # work out the corresponding qstr name
        ident = qstr_escape(qstr)

        # don't add duplicates
        assert ident not in qstrs

        # add the qstr to the list, with order number to retain original order in file
        # (offset far below zero so static qstrs always sort first)
        order = len(qstrs) - 300000
        qstrs[ident] = (order, ident, qstr)

    # read the qstrs in from the input files
    for infile in infiles:
        with open(infile, "rt") as f:
            for line in f:
                line = line.strip()

                # is this a config line?
                match = re.match(r"^QCFG\((.+), (.+)\)", line)
                if match:
                    value = match.group(2)
                    if value[0] == "(" and value[-1] == ")":
                        # strip parenthesis from config value
                        value = value[1:-1]
                    qcfgs[match.group(1)] = value
                    continue

                # translated-string line?
                match = re.match(r'^TRANSLATE\("(.*)"\)$', line)
                if match:
                    i18ns.add(match.group(1))
                    continue

                # is this a QSTR line?
                match = re.match(r"^Q\((.*)\)$", line)
                if not match:
                    continue

                # get the qstr value
                qstr = match.group(1)

                # special cases to specify control characters
                if qstr == "\\n":
                    qstr = "\n"
                elif qstr == "\\r\\n":
                    qstr = "\r\n"

                # work out the corresponding qstr name
                ident = qstr_escape(qstr)

                # don't add duplicates
                if ident in qstrs:
                    continue

                # add the qstr to the list, with order number to retain original order in file
                order = len(qstrs)
                # but put special method names like __add__ at the top of list, so
                # that their id's fit into a byte
                if ident == "":
                    # Sort empty qstr above all still
                    order = -200000
                elif ident == "__dir__":
                    # Put __dir__ after empty qstr for builtin dir() to work
                    order = -190000
                elif ident.startswith("__"):
                    order -= 100000
                qstrs[ident] = (order, ident, qstr)

    # Since static qstrs always populate qstrs, this effectively fires when
    # the preprocessor produced no QCFG lines at all.
    if not qcfgs and qstrs:
        sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n")
        sys.exit(1)

    return qcfgs, qstrs, i18ns
+
+
def escape_bytes(qstr):
    """Render a qstr for a C string literal: unchanged when it is plain
    printable ASCII (easier debugging), otherwise as \\xNN hex pairs."""
    printable = all(32 <= ord(c) <= 126 and c not in "\\\"" for c in qstr)
    if printable:
        return qstr
    # Non-printable content: emit every byte of the UTF-8 encoding as hex.
    return "".join("\\x%02x" % b for b in bytes_cons(qstr, "utf8"))
+
+
def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr):
    """Format one qstr as the 'hash, len, "data"' triple used inside QDEF()."""
    encoded = bytes_cons(qstr, "utf8")
    qlen = len(encoded)
    qhash = compute_hash(encoded, cfg_bytes_hash)
    # The length must fit in cfg_bytes_len bytes of the qstr header.
    if qlen >= (1 << (8 * cfg_bytes_len)):
        print("qstr is too long:", qstr)
        assert False
    return '%d, %d, "%s"' % (qhash, qlen, escape_bytes(qstr))
+
+
def print_qstr_data(encoding_table, qcfgs, qstrs, i18ns):
    """Print the generated qstr/translation C header to stdout.

    :param encoding_table: tuple from compute_huffman_coding()
    :param qcfgs: config dict with BYTES_IN_LEN / BYTES_IN_HASH
    :param qstrs: dict of ident -> (order, ident, qstr)
    :param i18ns: list of (original, translation) pairs
    """
    # get config variables
    cfg_bytes_len = int(qcfgs["BYTES_IN_LEN"])
    cfg_bytes_hash = int(qcfgs["BYTES_IN_HASH"])

    # print out the starter of the generated C header file
    print("// This file was automatically generated by makeqstrdata.py")
    print("")

    # add NULL qstr with no hash or data
    print('QDEF(MP_QSTRnull, 0, 0, "")')

    total_qstr_size = 0
    # NOTE(review): total_qstr_compressed_size is never updated or printed
    # below — looks like dead code left from an earlier revision.
    total_qstr_compressed_size = 0
    # go through each qstr and print it out
    for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]):
        qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr)
        print("QDEF(MP_QSTR_%s, %s)" % (ident, qbytes))

        total_qstr_size += len(qstr)

    total_text_size = 0
    total_text_compressed_size = 0
    max_translation_encoded_length = max(
        len(translation.encode("utf-8")) for original, translation in i18ns
    )
    encoded_length_bits = max_translation_encoded_length.bit_length()
    for original, translation in i18ns:
        translation_encoded = translation.encode("utf-8")
        compressed = compress(
            encoding_table, translation, encoded_length_bits, len(translation_encoded)
        )
        total_text_compressed_size += len(compressed)
        # Round-trip check: every compressed string must decompress exactly.
        decompressed = decompress(encoding_table, compressed, encoded_length_bits)
        assert decompressed == translation
        for c in C_ESCAPES:
            decompressed = decompressed.replace(c, C_ESCAPES[c])
        print(
            'TRANSLATION("{}", {}) // {}'.format(
                original, ", ".join(["{:d}".format(x) for x in compressed]), decompressed
            )
        )
        total_text_size += len(translation.encode("utf-8"))

    print()
    print("// {} bytes worth of qstr".format(total_qstr_size))
    print("// {} bytes worth of translations".format(total_text_size))
    print("// {} bytes worth of translations compressed".format(total_text_compressed_size))
    print("// {} bytes saved".format(total_text_size - total_text_compressed_size))
+
+
def print_qstr_enums(qstrs):
    """Print a QENUM() line for every qstr, in recorded order, to stdout."""
    # starter of the generated C header file
    print("// This file was automatically generated by makeqstrdata.py")
    print("")

    # the NULL qstr always comes first and has no hash or data
    print("QENUM(MP_QSTRnull)")

    for _order, ident, _qstr in sorted(qstrs.values(), key=lambda entry: entry[0]):
        print("QENUM(MP_QSTR_%s)" % (ident,))
+
+
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Process QSTR definitions into headers for compilation"
    )
    # NOTE(review): this help text looks copy-pasted from the argparse docs
    # example; it describes an accumulator, not the input files. It is a
    # runtime string, so it is left unchanged here.
    parser.add_argument(
        "infiles", metavar="N", type=str, nargs="+", help="an integer for the accumulator"
    )
    parser.add_argument(
        "--translation", default=None, type=str, help="translations for i18n() items"
    )
    parser.add_argument(
        "--compression_filename", default=None, type=str, help="header for compression info"
    )

    args = parser.parse_args()

    qcfgs, qstrs, i18ns = parse_input_headers(args.infiles)
    if args.translation:
        # With a translation catalog: build the compression tables and emit
        # the full qstr + translation data header.
        i18ns = sorted(i18ns)
        translations = translate(args.translation, i18ns)
        encoding_table = compute_huffman_coding(translations, args.compression_filename)
        print_qstr_data(encoding_table, qcfgs, qstrs, translations)
    else:
        # Without translations only the enum header is needed.
        print_qstr_enums(qstrs)
diff --git a/circuitpython/py/makeqstrdefs.py b/circuitpython/py/makeqstrdefs.py
new file mode 100644
index 0000000..f035ed5
--- /dev/null
+++ b/circuitpython/py/makeqstrdefs.py
@@ -0,0 +1,226 @@
+"""
+This script processes the output from the C preprocessor and extracts all
+qstr. Each qstr is transformed into a qstr definition of the form 'Q(...)'.
+
+This script works with Python 2.6, 2.7, 3.3 and 3.4.
+"""
+
+from __future__ import print_function
+
+import io
+import os
+import re
+import subprocess
+import sys
+import multiprocessing, multiprocessing.dummy
+
+# Python 2/3 compatibility:
+# - iterating through bytes is different
+# - codepoint2name lives in a different module
+import platform
+
+if platform.python_version_tuple()[0] == "2":
+ bytes_cons = lambda val, enc=None: bytearray(val)
+ from htmlentitydefs import name2codepoint
+elif platform.python_version_tuple()[0] == "3":
+ bytes_cons = bytes
+ from html.entities import name2codepoint
+
+ unichr = chr
+# end compatibility code
+
+# Blocklist of qstrings that are specially handled in further
+# processing and should be ignored
+QSTRING_BLOCK_LIST = set(["NULL", "number_of"])
+
+# add some custom names to map characters that aren't in HTML
+name2codepoint["hyphen"] = ord("-")
+name2codepoint["space"] = ord(" ")
+name2codepoint["squot"] = ord("'")
+name2codepoint["comma"] = ord(",")
+name2codepoint["dot"] = ord(".")
+name2codepoint["colon"] = ord(":")
+name2codepoint["semicolon"] = ord(";")
+name2codepoint["slash"] = ord("/")
+name2codepoint["percent"] = ord("%")
+name2codepoint["hash"] = ord("#")
+name2codepoint["paren_open"] = ord("(")
+name2codepoint["paren_close"] = ord(")")
+name2codepoint["bracket_open"] = ord("[")
+name2codepoint["bracket_close"] = ord("]")
+name2codepoint["brace_open"] = ord("{")
+name2codepoint["brace_close"] = ord("}")
+name2codepoint["star"] = ord("*")
+name2codepoint["bang"] = ord("!")
+name2codepoint["backslash"] = ord("\\")
+name2codepoint["plus"] = ord("+")
+name2codepoint["dollar"] = ord("$")
+name2codepoint["equals"] = ord("=")
+name2codepoint["question"] = ord("?")
+name2codepoint["at_sign"] = ord("@")
+name2codepoint["caret"] = ord("^")
+name2codepoint["pipe"] = ord("|")
+name2codepoint["tilde"] = ord("~")
+
+# These are just vexing!
+del name2codepoint["and"]
+del name2codepoint["or"]
+
+
def preprocess():
    """Run the C preprocessor over the configured sources (in parallel) and
    concatenate all output into args.output[0].

    NOTE(review): relies on a module-global ``args`` object providing
    ``dependencies``, ``changed_sources``, ``sources``, ``pp``, ``cflags``,
    ``cxxflags`` and ``output``; the ``__main__`` block in this file does not
    set those, so this is presumably driven by an importing caller — verify.
    """
    # If a dependency (e.g. a header) changed, everything must be redone;
    # otherwise only the changed sources (or all, if none recorded).
    if any(src in args.dependencies for src in args.changed_sources):
        sources = args.sources
    elif any(args.changed_sources):
        sources = args.changed_sources
    else:
        sources = args.sources
    csources = []
    cxxsources = []
    for source in sources:
        if source.endswith(".cpp"):
            cxxsources.append(source)
        elif source.endswith(".c"):
            csources.append(source)
    try:
        os.makedirs(os.path.dirname(args.output[0]))
    except OSError:
        pass

    # Returns a worker that preprocesses a batch of files with given flags.
    def pp(flags):
        def run(files):
            return subprocess.check_output(args.pp + flags + files)

        return run

    try:
        cpus = multiprocessing.cpu_count()
    except NotImplementedError:
        cpus = 1
    # Thread pool (dummy) — the work is in the subprocesses, not Python.
    p = multiprocessing.dummy.Pool(cpus)
    with open(args.output[0], "wb") as out_file:
        for flags, sources in (
            (args.cflags, csources),
            (args.cxxflags, cxxsources),
        ):
            batch_size = (len(sources) + cpus - 1) // cpus
            chunks = [sources[i : i + batch_size] for i in range(0, len(sources), batch_size or 1)]
            for output in p.imap(pp(flags), chunks):
                out_file.write(output)
+
+
def write_out(fname, output):
    """Write the collected qstr lines for one source file into args.output_dir.

    The source path is flattened into a file name (separators and dots
    replaced). Nothing is written when ``output`` is empty.
    """
    if not output:
        return
    for needle, replacement in (("/", "__"), ("\\", "__"), (":", "@"), ("..", "@@")):
        fname = fname.replace(needle, replacement)
    with open(args.output_dir + "/" + fname + ".qstr", "w") as f:
        f.write("\n".join(output) + "\n")
+
+
def qstr_unescape(qstr):
    """Reverse the _name_ escaping applied by makeqstrdata's qstr_escape."""
    for name in name2codepoint:
        # Leave dunder-style identifiers (e.g. __and__) untouched.
        if "__" + name + "__" in qstr:
            continue
        token = "_" + name + "_"
        if token in qstr:
            qstr = qstr.replace(token, str(unichr(name2codepoint[name])))
    return qstr
+
+
def process_file(f):
    """Scan preprocessor output, collecting MP_QSTR_* and translate() uses
    per originating source file, and write each file's results via write_out().

    :param f: iterable of preprocessed source lines
    :return: "" (callers ignore the value)
    """
    re_line = re.compile(r"#[line]*\s(\d+)\s\"([^\"]+)\"")
    re_qstr = re.compile(r"MP_QSTR_[_a-zA-Z0-9]+")
    re_translate = re.compile(r"translate\(\"((?:(?=(\\?))\2.)*?)\"\)")
    output = []
    last_fname = None
    # NOTE(review): lineno is tracked but never used after parsing — dead?
    lineno = 0
    for line in f:
        if line.isspace():
            continue
        # match gcc-like output (# n "file") and msvc-like output (#line n "file")
        if line.startswith(("# ", "#line")):
            m = re_line.match(line)
            assert m is not None
            lineno = int(m.group(1))
            fname = m.group(2)
            # Only C/C++ sources contribute qstrs; skip headers and others.
            if os.path.splitext(fname)[1] not in [".c", ".cpp"]:
                continue
            if fname != last_fname:
                # Flush what we collected for the previous source file.
                write_out(last_fname, output)
                output = []
                last_fname = fname
            continue
        for match in re_qstr.findall(line):
            name = match.replace("MP_QSTR_", "")
            if name not in QSTRING_BLOCK_LIST:
                output.append("Q(" + qstr_unescape(name) + ")")
        for match in re_translate.findall(line):
            output.append('TRANSLATE("' + match[0] + '")')
        lineno += 1

    # Flush the final file's collected entries.
    if last_fname:
        write_out(last_fname, output)
    return ""
+
+
def cat_together():
    """Concatenate all per-source .qstr files (sorted) into args.output_file.

    The output file is only replaced when its md5 changes, so make rules
    that depend on it don't trigger needless rebuilds.
    """
    import glob
    import hashlib

    hasher = hashlib.md5()
    all_lines = []
    for fname in glob.glob(args.output_dir + "/*.qstr"):
        with open(fname, "rb") as f:
            all_lines += f.readlines()
    all_lines.sort()
    all_lines = b"\n".join(all_lines)
    # Write to a scratch name first; only renamed over the real output below.
    # (Using `with` so the handle is closed even if the write fails.)
    with open(args.output_dir + "/out", "wb") as outf:
        outf.write(all_lines)
    hasher.update(all_lines)
    new_hash = hasher.hexdigest()
    # print(new_hash)
    old_hash = None
    try:
        with open(args.output_file + ".hash") as f:
            old_hash = f.read()
    except IOError:
        pass
    if old_hash != new_hash:
        print("QSTR updated")
        try:
            # rename below might fail if file exists
            os.remove(args.output_file)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; only filesystem errors are expected here.
        except OSError:
            pass
        os.rename(args.output_dir + "/out", args.output_file)
        with open(args.output_file + ".hash", "w") as f:
            f.write(new_hash)
    else:
        print("QSTR not updated")
+
+
if __name__ == "__main__":
    if len(sys.argv) != 5:
        print("usage: %s command input_filename output_dir output_file" % sys.argv[0])
        sys.exit(2)

    # Lightweight namespace standing in for an argparse result.
    class Args:
        pass

    args = Args()
    args.command = sys.argv[1]
    args.input_filename = sys.argv[2]
    args.output_dir = sys.argv[3]
    args.output_file = sys.argv[4]

    # Ensure the output directory exists (ignore "already exists").
    try:
        os.makedirs(args.output_dir)
    except OSError:
        pass

    # "split": scan preprocessor output into per-source .qstr files.
    if args.command == "split":
        with io.open(args.input_filename, encoding="utf-8") as infile:
            process_file(infile)

    # "cat": merge the per-source files into the final output.
    if args.command == "cat":
        cat_together()
diff --git a/circuitpython/py/makeversionhdr.py b/circuitpython/py/makeversionhdr.py
new file mode 100644
index 0000000..29abcd9
--- /dev/null
+++ b/circuitpython/py/makeversionhdr.py
@@ -0,0 +1,129 @@
+"""
+Generate header file with macros defining MicroPython version info.
+
+This script works with Python 3.7 and newer
+"""
+
+from __future__ import print_function
+
+import sys
+import os
+import pathlib
+import datetime
+import subprocess
+
+tools_describe = str(pathlib.Path(__file__).parent.parent / "tools/describe")
+
+
def get_version_info_from_git():
    """Return (git_tag, git_hash, ver_components) from git, or None when git
    is unavailable (OSError) or reports no repository (exit code 128)."""
    # Note: git describe doesn't work if no tag is available
    try:
        # NOTE(review): shell=True combined with a *list* argv means only the
        # first element is used as the shell command on POSIX; it works here
        # because there are no extra arguments, but confirm it's intentional.
        git_tag = subprocess.check_output(
            [tools_describe], stderr=subprocess.STDOUT, universal_newlines=True, shell=True
        ).strip()
    except subprocess.CalledProcessError as er:
        if er.returncode == 128:
            # git exit code of 128 means no repository found
            return None
        git_tag = ""
    except OSError as e:  # NOTE(review): `e` is unused; `except OSError:` would do
        return None
    try:
        git_hash = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"],
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        ).strip()
    except subprocess.CalledProcessError:
        git_hash = "unknown"
    except OSError:
        return None

    try:
        # Check if there are any modified files.
        subprocess.check_call(
            ["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT
        )
        # Check if there are any staged files.
        subprocess.check_call(
            ["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT
        )
    except subprocess.CalledProcessError:
        git_hash += "-dirty"
    except OSError:
        return None

    # Try to extract MicroPython version from git tag
    ver = git_tag.split("-")[0].split(".")

    return git_tag, git_hash, ver
+
+
def get_version_info_from_docs_conf():
    """Fallback version source: parse ../conf.py (the Sphinx config).

    :return: (git_tag, "<no hash>", [major, minor, micro]) or None when no
        version line is found
    """
    conf_path = os.path.join(os.path.dirname(sys.argv[0]), "..", "conf.py")
    with open(conf_path) as f:
        for line in f:
            if not line.startswith("version = release = '"):
                continue
            ver_str = line.strip().split(" = ")[2].strip("'")
            parts = ver_str.split(".")
            # Normalize two-component versions (X.Y) to X.Y.0.
            if len(parts) == 2:
                parts.append("0")
            return "v" + ver_str, "<no hash>", parts
    return None
+
+
def make_version_header(filename):
    """Generate the MicroPython version header at `filename`, rewriting the
    file only when its contents change (to avoid spurious rebuilds)."""
    # Get version info using git, with fallback to docs/conf.py
    info = get_version_info_from_git()
    if info is None:
        info = get_version_info_from_docs_conf()

    # NOTE(review): the values below deliberately override whatever was
    # detected above, pinning the vendored CircuitPython version.
    git_tag = "7.3.0"
    git_hash = "d0953e8f845a7544a27bea92bbcd533f0a84d493"
    # BUG FIX: `ver` was assigned the plain string "7.3.0", which made
    # ver[2] == "3" (wrong micro) and ".".join(ver) == "7...3...0"
    # (corrupt MICROPY_VERSION_STRING). Split into components instead.
    ver = "7.3.0".split(".")
    if len(ver) < 3:
        ver = ("0", "0", "0")
        version_string = git_hash
    else:
        version_string = ".".join(ver)

    # Honor SOURCE_DATE_EPOCH for reproducible builds.
    build_date = datetime.date.today()
    if "SOURCE_DATE_EPOCH" in os.environ:
        build_date = datetime.datetime.utcfromtimestamp(
            int(os.environ["SOURCE_DATE_EPOCH"])
        ).date()

    # Generate the file with the git and version info
    file_data = """\
// This file was generated by py/makeversionhdr.py
#define MICROPY_GIT_TAG "%s"
#define MICROPY_GIT_HASH "%s"
#define MICROPY_BUILD_DATE "%s"
#define MICROPY_VERSION_MAJOR (%s)
#define MICROPY_VERSION_MINOR (3)
#define MICROPY_VERSION_MICRO (%s)
#define MICROPY_VERSION_STRING "%s"
#define MICROPY_FULL_VERSION_INFO "Adafruit CircuitPython " MICROPY_GIT_TAG " on " MICROPY_BUILD_DATE "; " MICROPY_HW_BOARD_NAME " with " MICROPY_HW_MCU_NAME
""" % (
        git_tag,
        git_hash,
        # BUG FIX: was datetime.date.today(), which ignored the
        # SOURCE_DATE_EPOCH-aware build_date computed above.
        build_date.strftime("%Y-%m-%d"),
        ver[0].replace("v", ""),
        ver[2],
        version_string,
    )

    # Check if the file contents changed from last time
    write_file = True
    if os.path.isfile(filename):
        with open(filename, "r") as f:
            if f.read() == file_data:
                write_file = False

    # Only write the file if we need to
    if write_file:
        with open(filename, "w") as f:
            f.write(file_data)
+
+
if __name__ == "__main__":
    # CLI: makeversionhdr.py <path-of-header-to-generate>
    make_version_header(sys.argv[1])
diff --git a/circuitpython/py/malloc.c b/circuitpython/py/malloc.c
new file mode 100644
index 0000000..5c09a2d
--- /dev/null
+++ b/circuitpython/py/malloc.c
@@ -0,0 +1,222 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/mpstate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_MEM_STATS
+#if !MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#error MICROPY_MEM_STATS requires MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#endif
+#define UPDATE_PEAK() { if (MP_STATE_MEM(current_bytes_allocated) > MP_STATE_MEM(peak_bytes_allocated)) MP_STATE_MEM(peak_bytes_allocated) = MP_STATE_MEM(current_bytes_allocated); }
+#endif
+
+#if MICROPY_ENABLE_GC
+#include "py/gc.h"
+
+// We redirect standard alloc functions to GC heap - just for the rest of
+// this module. In the rest of MicroPython source, system malloc can be
+// freely accessed - for interfacing with system and 3rd-party libs for
+// example. On the other hand, some (e.g. bare-metal) ports may use GC
+// heap as system heap, so, to avoid warnings, we do undef's first.
+#undef malloc
+#undef free
+#undef realloc
+#define malloc_ll(b, ll) gc_alloc((b), false, (ll))
+#define malloc_with_finaliser(b, ll) gc_alloc((b), true, (ll))
+#define free gc_free
+#define realloc(ptr, n) gc_realloc(ptr, n, true)
+#define realloc_ext(ptr, n, mv) gc_realloc(ptr, n, mv)
+#else
+
+// GC is disabled. Use system malloc/realloc/free.
+
+#if MICROPY_ENABLE_FINALISER
+#error MICROPY_ENABLE_FINALISER requires MICROPY_ENABLE_GC
+#endif
+
+#define malloc_ll(b, ll) malloc(b)
+#define malloc_with_finaliser(b) malloc((b))
+
// GC is disabled: emulate gc_realloc's "allow_move" contract on top of the
// C library allocator, which offers no way to resize a block in place.
static void *realloc_ext(void *ptr, size_t n_bytes, bool allow_move) {
    if (!allow_move) {
        // The caller forbids moving the region. Unless the underlying memory
        // manager has special provision for resizing in place there is
        // nothing we can do except fail the resize.
        return NULL;
    }
    return realloc(ptr, n_bytes);
}
+
+#endif // MICROPY_ENABLE_GC
+
// Allocate num_bytes (from the GC heap when MICROPY_ENABLE_GC, else the
// system heap). A failed non-zero-sized request does not return: it goes
// through m_malloc_fail(), which is NORETURN.
void *m_malloc(size_t num_bytes, bool long_lived) {
    void *result = malloc_ll(num_bytes, long_lived);
    if (num_bytes != 0 && result == NULL) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, result);
    return result;
}
+
// Like m_malloc(), but returns NULL on allocation failure instead of calling
// m_malloc_fail(), so the caller can recover from out-of-memory.
void *m_malloc_maybe(size_t num_bytes, bool long_lived) {
    void *ptr = malloc_ll(num_bytes, long_lived);
    #if MICROPY_MEM_STATS
    // Note: stats are updated even when ptr is NULL (no success check here).
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
+
#if MICROPY_ENABLE_FINALISER
// Like m_malloc(), but allocates via the malloc_with_finaliser macro
// (gc_alloc with has_finaliser=true). Raises via m_malloc_fail() on failure
// of a non-zero-sized request.
void *m_malloc_with_finaliser(size_t num_bytes, bool long_lived) {
    void *ptr = malloc_with_finaliser(num_bytes, long_lived);
    if (ptr == NULL && num_bytes != 0) {
        m_malloc_fail(num_bytes);
    }
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(total_bytes_allocated) += num_bytes;
    MP_STATE_MEM(current_bytes_allocated) += num_bytes;
    UPDATE_PEAK();
    #endif
    DEBUG_printf("malloc %d : %p\n", num_bytes, ptr);
    return ptr;
}
#endif
+
// Allocate num_bytes of zeroed memory. As with m_malloc(), a failed
// non-zero-sized request raises via m_malloc_fail() and does not return.
void *m_malloc0(size_t num_bytes, bool long_lived) {
    void *ptr = m_malloc(num_bytes, long_lived);
    // If this config is set then the GC clears all memory, so we don't need to.
    #if !MICROPY_GC_CONSERVATIVE_CLEAR
    // m_malloc() may return NULL for a 0-byte request without raising;
    // passing NULL to memset is undefined behaviour, so guard against it.
    if (ptr != NULL) {
        memset(ptr, 0, num_bytes);
    }
    #endif
    return ptr;
}
+
// Resize a region to new_num_bytes. A failed non-zero-sized request raises
// via m_malloc_fail(). When MICROPY_MALLOC_USES_ALLOCATED_SIZE is enabled
// the caller must also pass the region's previous size (used for stats).
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes) {
#else
void *m_realloc(void *ptr, size_t new_num_bytes) {
    #endif
    void *new_ptr = realloc(ptr, new_num_bytes);
    if (new_ptr == NULL && new_num_bytes != 0) {
        m_malloc_fail(new_num_bytes);
    }
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // (diff wraps modulo SIZE_MAX when shrinking, which cancels out in the
    // unsigned additions below.)
    size_t diff = new_num_bytes - old_num_bytes;
    MP_STATE_MEM(total_bytes_allocated) += diff;
    MP_STATE_MEM(current_bytes_allocated) += diff;
    UPDATE_PEAK();
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
    DEBUG_printf("realloc %p, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
+
// Like m_realloc(), but returns NULL on failure instead of raising, and
// honours allow_move: when false the region must not be moved, so the
// resize fails (returns NULL) unless the allocator can grow in place.
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move) {
#else
void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move) {
    #endif
    void *new_ptr = realloc_ext(ptr, new_num_bytes, allow_move);
    #if MICROPY_MEM_STATS
    // At first thought, "Total bytes allocated" should only grow,
    // after all, it's *total*. But consider for example 2K block
    // shrunk to 1K and then grown to 2K again. It's still 2K
    // allocated total. If we process only positive increments,
    // we'll count 3K.
    // Also, don't count failed reallocs.
    if (!(new_ptr == NULL && new_num_bytes != 0)) {
        size_t diff = new_num_bytes - old_num_bytes;
        MP_STATE_MEM(total_bytes_allocated) += diff;
        MP_STATE_MEM(current_bytes_allocated) += diff;
        UPDATE_PEAK();
    }
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("realloc %p, %d, %d : %p\n", ptr, old_num_bytes, new_num_bytes, new_ptr);
    #else
    // Bug fix: this format string had three conversions ("%p, %d, %d : %p")
    // for only three arguments, which is undefined behaviour; match the
    // two-value form used by m_realloc() above.
    DEBUG_printf("realloc %p, %d : %p\n", ptr, new_num_bytes, new_ptr);
    #endif
    return new_ptr;
}
+
// Free a region allocated by the m_malloc family. With
// MICROPY_MALLOC_USES_ALLOCATED_SIZE enabled the caller must pass the
// region's allocated size, used here only for statistics and debug output.
#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
void m_free(void *ptr, size_t num_bytes) {
#else
void m_free(void *ptr) {
    #endif
    free(ptr);
    #if MICROPY_MEM_STATS
    MP_STATE_MEM(current_bytes_allocated) -= num_bytes;
    #endif
    #if MICROPY_MALLOC_USES_ALLOCATED_SIZE
    DEBUG_printf("free %p, %d\n", ptr, num_bytes);
    #else
    DEBUG_printf("free %p\n", ptr);
    #endif
}
+
#if MICROPY_MEM_STATS
// Running total of bytes ever allocated (shrinking reallocs subtract; see
// the comment in m_realloc()).
size_t m_get_total_bytes_allocated(void) {
    return MP_STATE_MEM(total_bytes_allocated);
}

// Bytes currently allocated (allocations minus frees).
size_t m_get_current_bytes_allocated(void) {
    return MP_STATE_MEM(current_bytes_allocated);
}

// High-water mark of current_bytes_allocated, maintained by UPDATE_PEAK().
size_t m_get_peak_bytes_allocated(void) {
    return MP_STATE_MEM(peak_bytes_allocated);
}
#endif
diff --git a/circuitpython/py/map.c b/circuitpython/py/map.c
new file mode 100644
index 0000000..092adf9
--- /dev/null
+++ b/circuitpython/py/map.c
@@ -0,0 +1,462 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/runtime.h"
+
+#include "supervisor/linker.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_OPT_MAP_LOOKUP_CACHE
+// MP_STATE_VM(map_lookup_cache) provides a cache of index to the last known
+// position of that index in any map. On a cache hit, this allows
+// short-circuiting the full linear search in the case of an ordered map
+// (i.e. all builtin modules and objects' locals dicts), and computation of
+// the hash (and potentially some linear probing) in the case of a regular
+// map. Note the same cache is shared across all maps.
+
+// Gets the index into the cache for this index. Shift down by two to remove
+// mp_obj_t tag bits.
+#define MAP_CACHE_OFFSET(index) ((((uintptr_t)(index)) >> 2) % MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE)
+// Gets the map cache entry for the corresponding index.
+#define MAP_CACHE_ENTRY(index) (MP_STATE_VM(map_lookup_cache)[MAP_CACHE_OFFSET(index)])
+// Retrieve the mp_obj_t at the location suggested by the cache.
+#define MAP_CACHE_GET(map, index) (&(map)->table[MAP_CACHE_ENTRY(index) % (map)->alloc])
+// Update the cache for this index.
+#define MAP_CACHE_SET(index, pos) MAP_CACHE_ENTRY(index) = (pos) & 0xff;
+#else
+#define MAP_CACHE_SET(index, pos)
+#endif
+
+// This table of sizes is used to control the growth of hash tables.
+// The first set of sizes are chosen so the allocation fits exactly in a
+// 4-word GC block, and it's not so important for these small values to be
+// prime. The latter sizes are prime and increase at an increasing rate.
// Table controlling hash-table growth. The first entries step by +2 so the
// allocation fits exactly in a 4-word GC block (primality matters little at
// these sizes); the later entries are primes growing at an increasing rate.
static const uint16_t hash_allocation_sizes[] = {
    0, 2, 4, 6, 8, 10, 12, // +2
    17, 23, 29, 37, 47, 59, 73, // *1.25
    97, 127, 167, 223, 293, 389, 521, 691, 919, 1223, 1627, 2161, // *1.33
    3229, 4831, 7243, 10861, 16273, 24407, 36607, 54907, // *1.5
};

// Return the smallest table entry >= x; when x exceeds every entry, fall
// back to roughly 1.5*x, forced odd as a sensible hash-table size.
static size_t get_hash_alloc_greater_or_equal_to(size_t x) {
    size_t count = sizeof(hash_allocation_sizes) / sizeof(hash_allocation_sizes[0]);
    for (const uint16_t *p = hash_allocation_sizes; p < hash_allocation_sizes + count; p++) {
        if (*p >= x) {
            return *p;
        }
    }
    // Ran out of primes in the table; at least make the result odd.
    return (x + x / 2) | 1;
}
+
+/******************************************************************************/
+/* map */
+
+void mp_map_init(mp_map_t *map, size_t n) {
+ if (n == 0) {
+ map->alloc = 0;
+ map->table = NULL;
+ } else {
+ map->alloc = n;
+ map->table = m_new0(mp_map_elem_t, map->alloc);
+ }
+ map->used = 0;
+ map->all_keys_are_qstrs = 1;
+ map->is_fixed = 0;
+ map->is_ordered = 0;
+}
+
// Wrap a fixed (typically ROM) table of n elements as a fully-populated,
// ordered map. The table is aliased in place (const is cast away), not
// copied; is_fixed=1 makes mp_map_deinit()/mp_map_clear() skip freeing it.
void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table) {
    map->alloc = n;
    map->used = n;
    map->all_keys_are_qstrs = 1;
    map->is_fixed = 1;
    map->is_ordered = 1;
    map->table = (mp_map_elem_t *)table;
}
+
+// Differentiate from mp_map_clear() - semantics is different
+void mp_map_deinit(mp_map_t *map) {
+ if (!map->is_fixed) {
+ m_del(mp_map_elem_t, map->table, map->alloc);
+ }
+ map->used = map->alloc = 0;
+}
+
// Empty the map and return it to the freshly-initialised (non-fixed) state:
// table freed (unless fixed), pointer NULLed, flags reset.
void mp_map_clear(mp_map_t *map) {
    if (!map->is_fixed) {
        m_del(mp_map_elem_t, map->table, map->alloc);
    }
    map->alloc = 0;
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->is_fixed = 0;
    map->table = NULL;
}
+
// Grow the hash table to the next size in hash_allocation_sizes and
// re-insert every live entry (NULL and SENTINEL slots are dropped).
STATIC void mp_map_rehash(mp_map_t *map) {
    size_t old_alloc = map->alloc;
    size_t new_alloc = get_hash_alloc_greater_or_equal_to(map->alloc + 1);
    DEBUG_printf("mp_map_rehash(%p): " UINT_FMT " -> " UINT_FMT "\n", map, old_alloc, new_alloc);
    mp_map_elem_t *old_table = map->table;
    // Allocate before mutating the map, so a failed allocation (which raises)
    // leaves the map intact.
    mp_map_elem_t *new_table = m_new0(mp_map_elem_t, new_alloc);
    // If we reach this point, table resizing succeeded, now we can edit the old map.
    map->alloc = new_alloc;
    map->used = 0;
    map->all_keys_are_qstrs = 1;
    map->table = new_table;
    for (size_t i = 0; i < old_alloc; i++) {
        if (old_table[i].key != MP_OBJ_NULL && old_table[i].key != MP_OBJ_SENTINEL) {
            mp_map_lookup(map, old_table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = old_table[i].value;
        }
    }
    m_del(mp_map_elem_t, old_table, old_alloc);
}
+
// MP_MAP_LOOKUP behaviour:
//  - returns NULL if not found, else the slot it was found in with key,value non-null
// MP_MAP_LOOKUP_ADD_IF_NOT_FOUND behaviour:
//  - returns slot, with key non-null and value=MP_OBJ_NULL if it was added
// MP_MAP_LOOKUP_REMOVE_IF_FOUND behaviour:
//  - returns NULL if not found, else the slot if was found in with key null and value non-null
mp_map_elem_t *MICROPY_WRAP_MP_MAP_LOOKUP(mp_map_lookup)(mp_map_t * map, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {
    // If the map is a fixed array then we must only be called for a lookup
    assert(!map->is_fixed || lookup_kind == MP_MAP_LOOKUP);

    #if MICROPY_OPT_MAP_LOOKUP_CACHE
    // Try the cache for lookup or add-if-not-found.
    if (lookup_kind != MP_MAP_LOOKUP_REMOVE_IF_FOUND && map->alloc) {
        mp_map_elem_t *slot = MAP_CACHE_GET(map, index);
        // Note: Just comparing key for value equality will have false negatives, but
        // these will be handled by the regular path below.
        if (slot->key == index) {
            return slot;
        }
    }
    #endif

    // Work out if we can compare just pointers
    // (qstr keys are interned, so pointer equality suffices for them).
    bool compare_only_ptrs = map->all_keys_are_qstrs;
    if (compare_only_ptrs) {
        if (mp_obj_is_qstr(index)) {
            // Index is a qstr, so can just do ptr comparison.
        } else if (mp_obj_is_type(index, &mp_type_str)) {
            // Index is a non-interned string.
            // We can either intern the string, or force a full equality comparison.
            // We chose the latter, since interning costs time and potentially RAM,
            // and it won't necessarily benefit subsequent calls because these calls
            // most likely won't pass the newly-interned string.
            compare_only_ptrs = false;
        } else if (lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            // If we are not adding, then we can return straight away a failed
            // lookup because we know that the index will never be found.
            return NULL;
        }
    }

    // if the map is an ordered array then we must do a brute force linear search
    if (map->is_ordered) {
        for (mp_map_elem_t *elem = &map->table[0], *top = &map->table[map->used]; elem < top; elem++) {
            if (elem->key == index || (!compare_only_ptrs && mp_obj_equal(elem->key, index))) {
                #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
                if (MP_UNLIKELY(lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND)) {
                    // remove the found element by moving the rest of the array down
                    mp_obj_t value = elem->value;
                    --map->used;
                    memmove(elem, elem + 1, (top - elem - 1) * sizeof(*elem));
                    // put the found element after the end so the caller can access it if needed
                    // note: caller must NULL the value so the GC can clean up (e.g. see dict_get_helper).
                    elem = &map->table[map->used];
                    elem->key = MP_OBJ_NULL;
                    elem->value = value;
                }
                #endif
                MAP_CACHE_SET(index, elem - map->table);
                return elem;
            }
        }
        #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
        if (MP_LIKELY(lookup_kind != MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)) {
            return NULL;
        }
        // Adding to an ordered map: append at the end, growing by 4 slots
        // at a time.
        if (map->used == map->alloc) {
            // TODO: Alloc policy
            map->alloc += 4;
            map->table = m_renew(mp_map_elem_t, map->table, map->used, map->alloc);
            mp_seq_clear(map->table, map->used, map->alloc, sizeof(*map->table));
        }
        mp_map_elem_t *elem = map->table + map->used++;
        elem->key = index;
        if (!mp_obj_is_qstr(index)) {
            map->all_keys_are_qstrs = 0;
        }
        return elem;
        #else
        return NULL;
        #endif
    }

    // map is a hash table (not an ordered array), so do a hash lookup

    if (map->alloc == 0) {
        if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_map_rehash(map);
        } else {
            return NULL;
        }
    }

    // get hash of index, with fast path for common case of qstr
    mp_uint_t hash;
    if (mp_obj_is_qstr(index)) {
        hash = qstr_hash(MP_OBJ_QSTR_VALUE(index));
    } else {
        hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    }

    // Open addressing with linear probing; remember the first deleted
    // (SENTINEL) slot so an insert can reuse it.
    size_t pos = hash % map->alloc;
    size_t start_pos = pos;
    mp_map_elem_t *avail_slot = NULL;
    for (;;) {
        mp_map_elem_t *slot = &map->table[pos];
        if (slot->key == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                map->used += 1;
                if (avail_slot == NULL) {
                    avail_slot = slot;
                }
                avail_slot->key = index;
                avail_slot->value = MP_OBJ_NULL;
                if (!mp_obj_is_qstr(index)) {
                    map->all_keys_are_qstrs = 0;
                }
                return avail_slot;
            } else {
                return NULL;
            }
        } else if (slot->key == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = slot;
            }
        } else if (slot->key == index || (!compare_only_ptrs && mp_obj_equal(slot->key, index))) {
            // found index
            // Note: CPython does not replace the index; try x={True:'true'};x[1]='one';x
            if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element in this slot
                map->used--;
                if (map->table[(pos + 1) % map->alloc].key == MP_OBJ_NULL) {
                    // optimisation if next slot is empty
                    slot->key = MP_OBJ_NULL;
                } else {
                    slot->key = MP_OBJ_SENTINEL;
                }
                // keep slot->value so that caller can access it if needed
            }
            MAP_CACHE_SET(index, pos);
            return slot;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % map->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    map->used++;
                    avail_slot->key = index;
                    avail_slot->value = MP_OBJ_NULL;
                    if (!mp_obj_is_qstr(index)) {
                        map->all_keys_are_qstrs = 0;
                    }
                    return avail_slot;
                } else {
                    // not enough room in table, rehash it
                    mp_map_rehash(map);
                    // restart the search for the new element
                    start_pos = pos = hash % map->alloc;
                }
            } else {
                return NULL;
            }
        }
    }
}
+
+/******************************************************************************/
+/* set */
+
+#if MICROPY_PY_BUILTINS_SET
+
// Initialise a set with a hash table of n slots (n may be 0; the first
// ADD_IF_NOT_FOUND lookup then allocates via mp_set_rehash).
void mp_set_init(mp_set_t *set, size_t n) {
    set->alloc = n;
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);
}
+
// Grow the set's table to the next size in hash_allocation_sizes and
// re-insert every live element (NULL and SENTINEL slots are dropped).
STATIC void mp_set_rehash(mp_set_t *set) {
    size_t old_alloc = set->alloc;
    mp_obj_t *old_table = set->table;
    set->alloc = get_hash_alloc_greater_or_equal_to(set->alloc + 1);
    set->used = 0;
    set->table = m_new0(mp_obj_t, set->alloc);
    for (size_t i = 0; i < old_alloc; i++) {
        if (old_table[i] != MP_OBJ_NULL && old_table[i] != MP_OBJ_SENTINEL) {
            mp_set_lookup(set, old_table[i], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
        }
    }
    m_del(mp_obj_t, old_table, old_alloc);
}
+
// Look up (and optionally add or remove) index in the set. Returns the
// stored element on success, MP_OBJ_NULL when not found. Equality uses
// mp_obj_equal(); hashing goes through MP_UNARY_OP_HASH.
mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind) {
    // Note: lookup_kind can be MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND which
    // is handled by using bitwise operations.

    if (set->alloc == 0) {
        if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
            mp_set_rehash(set);
        } else {
            return MP_OBJ_NULL;
        }
    }
    mp_uint_t hash = MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, index));
    // Open addressing with linear probing, mirroring mp_map_lookup();
    // remember the first deleted (SENTINEL) slot so an insert can reuse it.
    size_t pos = hash % set->alloc;
    size_t start_pos = pos;
    mp_obj_t *avail_slot = NULL;
    for (;;) {
        mp_obj_t elem = set->table[pos];
        if (elem == MP_OBJ_NULL) {
            // found NULL slot, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot == NULL) {
                    avail_slot = &set->table[pos];
                }
                set->used++;
                *avail_slot = index;
                return index;
            } else {
                return MP_OBJ_NULL;
            }
        } else if (elem == MP_OBJ_SENTINEL) {
            // found deleted slot, remember for later
            if (avail_slot == NULL) {
                avail_slot = &set->table[pos];
            }
        } else if (mp_obj_equal(elem, index)) {
            // found index
            if (lookup_kind & MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
                // delete element
                set->used--;
                if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
                    // optimisation if next slot is empty
                    set->table[pos] = MP_OBJ_NULL;
                } else {
                    set->table[pos] = MP_OBJ_SENTINEL;
                }
            }
            return elem;
        }

        // not yet found, keep searching in this table
        pos = (pos + 1) % set->alloc;

        if (pos == start_pos) {
            // search got back to starting position, so index is not in table
            if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
                if (avail_slot != NULL) {
                    // there was an available slot, so use that
                    set->used++;
                    *avail_slot = index;
                    return index;
                } else {
                    // not enough room in table, rehash it
                    mp_set_rehash(set);
                    // restart the search for the new element
                    start_pos = pos = hash % set->alloc;
                }
            } else {
                return MP_OBJ_NULL;
            }
        }
    }
}
+
// Remove and return an arbitrary element (the first filled slot in table
// order), or MP_OBJ_NULL if the set is empty.
mp_obj_t mp_set_remove_first(mp_set_t *set) {
    for (size_t pos = 0; pos < set->alloc; pos++) {
        if (mp_set_slot_is_filled(set, pos)) {
            mp_obj_t elem = set->table[pos];
            // delete element
            set->used--;
            if (set->table[(pos + 1) % set->alloc] == MP_OBJ_NULL) {
                // optimisation if next slot is empty
                set->table[pos] = MP_OBJ_NULL;
            } else {
                set->table[pos] = MP_OBJ_SENTINEL;
            }
            return elem;
        }
    }
    return MP_OBJ_NULL;
}
+
+void mp_set_clear(mp_set_t *set) {
+ m_del(mp_obj_t, set->table, set->alloc);
+ set->alloc = 0;
+ set->used = 0;
+ set->table = NULL;
+}
+
+#endif // MICROPY_PY_BUILTINS_SET
+
#if defined(DEBUG_PRINT) && DEBUG_PRINT
// Debug helper: print every slot of the map's table (key repr and raw value
// pointer), including empty slots as "(nil)".
void mp_map_dump(mp_map_t *map) {
    for (size_t i = 0; i < map->alloc; i++) {
        if (map->table[i].key != MP_OBJ_NULL) {
            mp_obj_print(map->table[i].key, PRINT_REPR);
        } else {
            DEBUG_printf("(nil)");
        }
        DEBUG_printf(": %p\n", map->table[i].value);
    }
    DEBUG_printf("---\n");
}
#endif
diff --git a/circuitpython/py/misc.h b/circuitpython/py/misc.h
new file mode 100644
index 0000000..8f2d3a1
--- /dev/null
+++ b/circuitpython/py/misc.h
@@ -0,0 +1,275 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MISC_H
+#define MICROPY_INCLUDED_PY_MISC_H
+
+// a mini library of useful types and functions
+
+/** types *******************************************************/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+
+#include "mpconfig.h"
+
+typedef unsigned char byte;
+typedef unsigned int uint;
+
+/** generic ops *************************************************/
+
+#ifndef MIN
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#endif
+#ifndef MAX
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#endif
+
+// Classical double-indirection stringification of preprocessor macro's value
+#define MP_STRINGIFY_HELPER(x) #x
+#define MP_STRINGIFY(x) MP_STRINGIFY_HELPER(x)
+
+// Static assertion macro
+#define MP_STATIC_ASSERT(cond) ((void)sizeof(char[1 - 2 * !(cond)]))
+
+// Round-up integer division
+#define MP_CEIL_DIVIDE(a, b) (((a) + (b) - 1) / (b))
+#define MP_ROUND_DIVIDE(a, b) (((a) + (b) / 2) / (b))
+
+/** memory allocation ******************************************/
+
+// TODO make a lazy m_renew that can increase by a smaller amount than requested (but by at least 1 more element)
+
+#define m_new(type, num) ((type *)(m_malloc(sizeof(type) * (num), false)))
+#define m_new_ll(type, num) ((type *)(m_malloc(sizeof(type) * (num), true)))
+#define m_new_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num), false)))
+#define m_new_ll_maybe(type, num) ((type *)(m_malloc_maybe(sizeof(type) * (num), true)))
+#define m_new0(type, num) ((type *)(m_malloc0(sizeof(type) * (num), false)))
+#define m_new0_ll(type, num) ((type *)(m_malloc0(sizeof(type) * (num), true)))
+#define m_new_obj(type) (m_new(type, 1))
+#define m_new_ll_obj(type) (m_new_ll(type, 1))
+#define m_new_obj_maybe(type) (m_new_maybe(type, 1))
+#define m_new_obj_var(obj_type, var_type, var_num) ((obj_type *)m_malloc(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
+#define m_new_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), false))
+#define m_new_ll_obj_var_maybe(obj_type, var_type, var_num) ((obj_type *)m_malloc_maybe(sizeof(obj_type) + sizeof(var_type) * (var_num), true))
+#if MICROPY_ENABLE_FINALISER
+#define m_new_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type), false)))
+#define m_new_obj_var_with_finaliser(type, var_type, var_num) ((type *)m_malloc_with_finaliser(sizeof(type) + sizeof(var_type) * (var_num), false))
+#define m_new_ll_obj_with_finaliser(type) ((type *)(m_malloc_with_finaliser(sizeof(type), true)))
+#else
+#define m_new_obj_with_finaliser(type) m_new_obj(type)
+#define m_new_obj_var_with_finaliser(type, var_type, var_num) m_new_obj_var(type, var_type, var_num)
+#define m_new_ll_obj_with_finaliser(type) m_new_ll_obj(type)
+#endif
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (old_num), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) m_free(ptr, sizeof(type) * (num))
+#define m_del_var(obj_type, var_type, var_num, ptr) (m_free(ptr, sizeof(obj_type) + sizeof(var_type) * (var_num)))
+#else
+#define m_renew(type, ptr, old_num, new_num) ((type *)(m_realloc((ptr), sizeof(type) * (new_num))))
+#define m_renew_maybe(type, ptr, old_num, new_num, allow_move) ((type *)(m_realloc_maybe((ptr), sizeof(type) * (new_num), (allow_move))))
+#define m_del(type, ptr, num) ((void)(num), m_free(ptr))
+#define m_del_var(obj_type, var_type, var_num, ptr) ((void)(var_num), m_free(ptr))
+#endif
+#define m_del_obj(type, ptr) (m_del(type, ptr, 1))
+
+void *m_malloc(size_t num_bytes, bool long_lived);
+void *m_malloc_maybe(size_t num_bytes, bool long_lived);
+void *m_malloc_with_finaliser(size_t num_bytes, bool long_lived);
+void *m_malloc0(size_t num_bytes, bool long_lived);
+#if MICROPY_MALLOC_USES_ALLOCATED_SIZE
+void *m_realloc(void *ptr, size_t old_num_bytes, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t old_num_bytes, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr, size_t num_bytes);
+#else
+void *m_realloc(void *ptr, size_t new_num_bytes);
+void *m_realloc_maybe(void *ptr, size_t new_num_bytes, bool allow_move);
+void m_free(void *ptr);
+#endif
+NORETURN void m_malloc_fail(size_t num_bytes);
+
+#if MICROPY_MEM_STATS
+size_t m_get_total_bytes_allocated(void);
+size_t m_get_current_bytes_allocated(void);
+size_t m_get_peak_bytes_allocated(void);
+#endif
+
+/** array helpers ***********************************************/
+
+// get the number of elements in a fixed-size array
+#define MP_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+// align ptr to the nearest multiple of "alignment"
+#define MP_ALIGN(ptr, alignment) (void *)(((uintptr_t)(ptr) + ((alignment) - 1)) & ~((alignment) - 1))
+
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+
+/** unichar / UTF-8 *********************************************/
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+// with unicode enabled we need a type which can fit chars up to 0x10ffff
+typedef uint32_t unichar;
+#else
+// without unicode enabled we can only need to fit chars up to 0xff
+// (on 16-bit archs uint is 16-bits and more efficient than uint32_t)
+typedef uint unichar;
+#endif
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+unichar utf8_get_char(const byte *s);
+const byte *utf8_next_char(const byte *s);
+size_t utf8_charlen(const byte *str, size_t len);
+#else
// Unicode disabled: strings are plain byte arrays, so these helpers
// degenerate to single-byte access.
static inline unichar utf8_get_char(const byte *s) {
    return *s;
}
static inline const byte *utf8_next_char(const byte *s) {
    return s + 1;
}
static inline size_t utf8_charlen(const byte *str, size_t len) {
    (void)str;
    return len;
}
+#endif
+
+bool unichar_isspace(unichar c);
+bool unichar_isalpha(unichar c);
+bool unichar_isprint(unichar c);
+bool unichar_isdigit(unichar c);
+bool unichar_isxdigit(unichar c);
+bool unichar_isident(unichar c);
+bool unichar_isalnum(unichar c);
+bool unichar_isupper(unichar c);
+bool unichar_islower(unichar c);
+unichar unichar_tolower(unichar c);
+unichar unichar_toupper(unichar c);
+mp_uint_t unichar_xdigit_value(unichar c);
+#define UTF8_IS_NONASCII(ch) ((ch) & 0x80)
+#define UTF8_IS_CONT(ch) (((ch) & 0xC0) == 0x80)
+
+/** variable string *********************************************/
+
+typedef struct _vstr_t {
+ size_t alloc;
+ size_t len;
+ char *buf;
+ bool fixed_buf : 1;
+} vstr_t;
+
+// convenience macro to declare a vstr with a fixed size buffer on the stack
+#define VSTR_FIXED(vstr, alloc) vstr_t vstr; char vstr##_buf[(alloc)]; vstr_init_fixed_buf(&vstr, (alloc), vstr##_buf);
+
+void vstr_init(vstr_t *vstr, size_t alloc);
+void vstr_init_len(vstr_t *vstr, size_t len);
+void vstr_init_fixed_buf(vstr_t *vstr, size_t alloc, char *buf);
+struct _mp_print_t;
+void vstr_init_print(vstr_t *vstr, size_t alloc, struct _mp_print_t *print);
+void vstr_clear(vstr_t *vstr);
+vstr_t *vstr_new(size_t alloc);
+void vstr_free(vstr_t *vstr);
// Truncate the vstr to empty without freeing or shrinking its buffer.
static inline void vstr_reset(vstr_t *vstr) {
    vstr->len = 0;
}
// Raw buffer pointer; use vstr_null_terminated_str() when a C string with a
// guaranteed terminator is required.
static inline char *vstr_str(vstr_t *vstr) {
    return vstr->buf;
}
// Current content length in bytes.
static inline size_t vstr_len(vstr_t *vstr) {
    return vstr->len;
}
+void vstr_hint_size(vstr_t *vstr, size_t size);
+char *vstr_extend(vstr_t *vstr, size_t size);
+char *vstr_add_len(vstr_t *vstr, size_t len);
+char *vstr_null_terminated_str(vstr_t *vstr);
+void vstr_add_byte(vstr_t *vstr, byte v);
+void vstr_add_char(vstr_t *vstr, unichar chr);
+void vstr_add_str(vstr_t *vstr, const char *str);
+void vstr_add_strn(vstr_t *vstr, const char *str, size_t len);
+void vstr_ins_byte(vstr_t *vstr, size_t byte_pos, byte b);
+void vstr_ins_char(vstr_t *vstr, size_t char_pos, unichar chr);
+void vstr_cut_head_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_tail_bytes(vstr_t *vstr, size_t bytes_to_cut);
+void vstr_cut_out_bytes(vstr_t *vstr, size_t byte_pos, size_t bytes_to_cut);
+void vstr_printf(vstr_t *vstr, const char *fmt, ...);
+
+/** non-dynamic size-bounded variable buffer/string *************/
+
+#define CHECKBUF(buf, max_size) char buf[max_size + 1]; size_t buf##_len = max_size; char *buf##_p = buf;
+#define CHECKBUF_RESET(buf, max_size) buf##_len = max_size; buf##_p = buf;
+#define CHECKBUF_APPEND(buf, src, src_len) \
+ { size_t l = MIN(src_len, buf##_len); \
+ memcpy(buf##_p, src, l); \
+ buf##_len -= l; \
+ buf##_p += l; }
+#define CHECKBUF_APPEND_0(buf) { *buf##_p = 0; }
+#define CHECKBUF_LEN(buf) (buf##_p - buf)
+
+#ifdef va_start
+void vstr_vprintf(vstr_t *vstr, const char *fmt, va_list ap);
+#endif
+
+// Debugging helpers
+int DEBUG_printf(const char *fmt, ...);
+
+extern mp_uint_t mp_verbose_flag;
+
+/** float internals *************/
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MP_FLOAT_EXP_BITS (11)
+#define MP_FLOAT_FRAC_BITS (52)
+typedef uint64_t mp_float_uint_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MP_FLOAT_EXP_BITS (8)
+#define MP_FLOAT_FRAC_BITS (23)
+typedef uint32_t mp_float_uint_t;
+#endif
+
+#define MP_FLOAT_EXP_BIAS ((1 << (MP_FLOAT_EXP_BITS - 1)) - 1)
+
+typedef union _mp_float_union_t {
+ mp_float_t f;
+ #if MP_ENDIANNESS_LITTLE
+ struct {
+ mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
+ mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
+ mp_float_uint_t sgn : 1;
+ } p;
+ #else
+ struct {
+ mp_float_uint_t sgn : 1;
+ mp_float_uint_t exp : MP_FLOAT_EXP_BITS;
+ mp_float_uint_t frc : MP_FLOAT_FRAC_BITS;
+ } p;
+ #endif
+ mp_float_uint_t i;
+} mp_float_union_t;
+
+#endif // MICROPY_PY_BUILTINS_FLOAT
+
+#endif // MICROPY_INCLUDED_PY_MISC_H
diff --git a/circuitpython/py/mkenv.mk b/circuitpython/py/mkenv.mk
new file mode 100644
index 0000000..e2cdb2d
--- /dev/null
+++ b/circuitpython/py/mkenv.mk
@@ -0,0 +1,84 @@
+ifneq ($(lastword a b),b)
+$(error These Makefiles require make 3.81 or newer)
+endif
+
+# Set TOP to be the path to get from the current directory (where make was
+# invoked) to the top of the tree. $(lastword $(MAKEFILE_LIST)) returns
+# the name of this makefile relative to where make was invoked.
+#
+# We assume that this file is in the py directory so we use $(dir ) twice
+# to get to the top of the tree.
+
+THIS_MAKEFILE := $(lastword $(MAKEFILE_LIST))
+TOP := $(patsubst %/py/mkenv.mk,%,$(THIS_MAKEFILE))
+
+# Turn on increased build verbosity by defining BUILD_VERBOSE in your main
+# Makefile or in your environment. You can also use V=1 on the make command
+# line.
+
# A command-line V=... overrides any BUILD_VERBOSE from the environment.
ifeq ("$(origin V)", "command line")
BUILD_VERBOSE=$(V)
endif
ifndef BUILD_VERBOSE
$(info Use make V=1, make V=2 or set BUILD_VERBOSE similarly in your environment to increase build verbosity.)
BUILD_VERBOSE = 0
endif
# Q prefixes recipe commands: '@' silences them unless V=2.
# STEPECHO prints a one-line description of each build step: '@:' is a
# no-op at V=0, '@echo' shows the step at V>=1.
ifeq ($(BUILD_VERBOSE),0)
Q = @
STEPECHO = @:
else ifeq ($(BUILD_VERBOSE),1)
Q = @
STEPECHO = @echo
else
Q =
STEPECHO = @echo
endif
+
+# default settings; can be overridden in main Makefile
+
+PY_SRC ?= $(TOP)/py
+BUILD ?= build
+
+ECHO = @echo
+
+CAT = cat
+CD = cd
+CP = cp
+FIND = find
+MKDIR = mkdir
+PYTHON = python3
+RM = rm
+RSYNC = rsync
+SED = sed
+TOUCH = touch
+# Linux has 'nproc', macOS has 'sysctl -n hw.logicalcpu', this is cross-platform
+NPROC = $(PYTHON) -c 'import multiprocessing as mp; print(mp.cpu_count())'
+
+AS = $(CROSS_COMPILE)as
+CC = $(CROSS_COMPILE)gcc
+CXX = $(CROSS_COMPILE)g++
+GDB = $(CROSS_COMPILE)gdb
+LD = $(CROSS_COMPILE)ld
+OBJCOPY = $(CROSS_COMPILE)objcopy
+SIZE = $(CROSS_COMPILE)size
+STRIP = $(CROSS_COMPILE)strip
+AR = $(CROSS_COMPILE)ar
+
+MAKE_MANIFEST = $(PYTHON) $(TOP)/tools/makemanifest.py
+MAKE_FROZEN = $(PYTHON) $(TOP)/tools/make-frozen.py
+MPY_TOOL = $(PYTHON) $(TOP)/tools/mpy-tool.py
+PREPROCESS_FROZEN_MODULES = PYTHONPATH=$(TOP)/tools/python-semver $(TOP)/tools/preprocess_frozen_modules.py
+
+MPY_LIB_DIR = $(TOP)/../micropython-lib
+
+ifeq ($(MICROPY_MPYCROSS),)
+MICROPY_MPYCROSS = $(TOP)/mpy-cross/mpy-cross
+MICROPY_MPYCROSS_DEPENDENCY = $(MICROPY_MPYCROSS)
+endif
+
+all:
+.PHONY: all
+
+.DELETE_ON_ERROR:
+
+MKENV_INCLUDED = 1
diff --git a/circuitpython/py/mkrules.cmake b/circuitpython/py/mkrules.cmake
new file mode 100644
index 0000000..cb5fdab
--- /dev/null
+++ b/circuitpython/py/mkrules.cmake
@@ -0,0 +1,161 @@
+# CMake fragment for MicroPython rules
+
+set(MICROPY_GENHDR_DIR "${CMAKE_BINARY_DIR}/genhdr")
+set(MICROPY_MPVERSION "${MICROPY_GENHDR_DIR}/mpversion.h")
+set(MICROPY_MODULEDEFS "${MICROPY_GENHDR_DIR}/moduledefs.h")
+set(MICROPY_QSTRDEFS_PY "${MICROPY_PY_DIR}/qstrdefs.h")
+set(MICROPY_QSTRDEFS_LAST "${MICROPY_GENHDR_DIR}/qstr.i.last")
+set(MICROPY_QSTRDEFS_SPLIT "${MICROPY_GENHDR_DIR}/qstr.split")
+set(MICROPY_QSTRDEFS_COLLECTED "${MICROPY_GENHDR_DIR}/qstrdefs.collected.h")
+set(MICROPY_QSTRDEFS_PREPROCESSED "${MICROPY_GENHDR_DIR}/qstrdefs.preprocessed.h")
+set(MICROPY_QSTRDEFS_GENERATED "${MICROPY_GENHDR_DIR}/qstrdefs.generated.h")
+
+# Need to do this before extracting MICROPY_CPP_DEF below. Rest of frozen
+# manifest handling is at the end of this file.
+if(MICROPY_FROZEN_MANIFEST)
+ target_compile_definitions(${MICROPY_TARGET} PUBLIC
+ MICROPY_QSTR_EXTRA_POOL=mp_qstr_frozen_const_pool
+ MICROPY_MODULE_FROZEN_MPY=\(1\)
+ )
+endif()
+
+# Provide defaults for preprocessor flags if not already defined
+if(NOT MICROPY_CPP_FLAGS)
+ get_target_property(MICROPY_CPP_INC ${MICROPY_TARGET} INCLUDE_DIRECTORIES)
+ get_target_property(MICROPY_CPP_DEF ${MICROPY_TARGET} COMPILE_DEFINITIONS)
+endif()
+
+# Compute MICROPY_CPP_FLAGS for preprocessor
+list(APPEND MICROPY_CPP_INC ${MICROPY_CPP_INC_EXTRA})
+list(APPEND MICROPY_CPP_DEF ${MICROPY_CPP_DEF_EXTRA})
+set(_prefix "-I")
+foreach(_arg ${MICROPY_CPP_INC})
+ list(APPEND MICROPY_CPP_FLAGS ${_prefix}${_arg})
+endforeach()
+set(_prefix "-D")
+foreach(_arg ${MICROPY_CPP_DEF})
+ list(APPEND MICROPY_CPP_FLAGS ${_prefix}${_arg})
+endforeach()
+list(APPEND MICROPY_CPP_FLAGS ${MICROPY_CPP_FLAGS_EXTRA})
+
+find_package(Python3 REQUIRED COMPONENTS Interpreter)
+
+target_sources(${MICROPY_TARGET} PRIVATE
+ ${MICROPY_MPVERSION}
+ ${MICROPY_QSTRDEFS_GENERATED}
+)
+
# Command to force the build of another command.
# The OUTPUT file "MICROPY_FORCE_BUILD" is never actually created (the
# command is a no-op echo), so any rule that lists it in DEPENDS is
# considered out of date and reruns on every build.
add_custom_command(
    OUTPUT MICROPY_FORCE_BUILD
    COMMENT ""
    COMMAND echo -n
)
+
+# Generate mpversion.h
+
+add_custom_command(
+ OUTPUT ${MICROPY_MPVERSION}
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${MICROPY_GENHDR_DIR}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_DIR}/py/makeversionhdr.py ${MICROPY_MPVERSION}
+ DEPENDS MICROPY_FORCE_BUILD
+)
+
+# Generate moduledefs.h
+
+add_custom_command(
+ OUTPUT ${MICROPY_MODULEDEFS}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makemoduledefs.py --vpath="/" ${MICROPY_SOURCE_QSTR} > ${MICROPY_MODULEDEFS}
+ DEPENDS ${MICROPY_MPVERSION}
+ ${MICROPY_SOURCE_QSTR}
+)
+
+# Generate qstrs
+
+# If any of the dependencies in this rule change then the C-preprocessor step must be run.
+# It only needs to be passed the list of MICROPY_SOURCE_QSTR files that have changed since
+# it was last run, but it looks like it's not possible to specify that with cmake.
+add_custom_command(
+ OUTPUT ${MICROPY_QSTRDEFS_LAST}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py pp ${CMAKE_C_COMPILER} -E output ${MICROPY_GENHDR_DIR}/qstr.i.last cflags ${MICROPY_CPP_FLAGS} -DNO_QSTR cxxflags ${MICROPY_CPP_FLAGS} -DNO_QSTR sources ${MICROPY_SOURCE_QSTR}
+ DEPENDS ${MICROPY_MODULEDEFS}
+ ${MICROPY_SOURCE_QSTR}
+ VERBATIM
+ COMMAND_EXPAND_LISTS
+)
+
+add_custom_command(
+ OUTPUT ${MICROPY_QSTRDEFS_SPLIT}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py split qstr ${MICROPY_GENHDR_DIR}/qstr.i.last ${MICROPY_GENHDR_DIR}/qstr _
+ COMMAND touch ${MICROPY_QSTRDEFS_SPLIT}
+ DEPENDS ${MICROPY_QSTRDEFS_LAST}
+ VERBATIM
+ COMMAND_EXPAND_LISTS
+)
+
+add_custom_command(
+ OUTPUT ${MICROPY_QSTRDEFS_COLLECTED}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdefs.py cat qstr _ ${MICROPY_GENHDR_DIR}/qstr ${MICROPY_QSTRDEFS_COLLECTED}
+ DEPENDS ${MICROPY_QSTRDEFS_SPLIT}
+ VERBATIM
+ COMMAND_EXPAND_LISTS
+)
+
+add_custom_command(
+ OUTPUT ${MICROPY_QSTRDEFS_PREPROCESSED}
+ COMMAND cat ${MICROPY_QSTRDEFS_PY} ${MICROPY_QSTRDEFS_PORT} ${MICROPY_QSTRDEFS_COLLECTED} | sed "s/^Q(.*)/\"&\"/" | ${CMAKE_C_COMPILER} -E ${MICROPY_CPP_FLAGS} - | sed "s/^\\\"\\(Q(.*)\\)\\\"/\\1/" > ${MICROPY_QSTRDEFS_PREPROCESSED}
+ DEPENDS ${MICROPY_QSTRDEFS_PY}
+ ${MICROPY_QSTRDEFS_PORT}
+ ${MICROPY_QSTRDEFS_COLLECTED}
+ VERBATIM
+ COMMAND_EXPAND_LISTS
+)
+
+add_custom_command(
+ OUTPUT ${MICROPY_QSTRDEFS_GENERATED}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_PY_DIR}/makeqstrdata.py ${MICROPY_QSTRDEFS_PREPROCESSED} > ${MICROPY_QSTRDEFS_GENERATED}
+ DEPENDS ${MICROPY_QSTRDEFS_PREPROCESSED}
+ VERBATIM
+ COMMAND_EXPAND_LISTS
+)
+
+# Build frozen code if enabled
+
+if(MICROPY_FROZEN_MANIFEST)
+ set(MICROPY_FROZEN_CONTENT "${CMAKE_BINARY_DIR}/frozen_content.c")
+
+ target_sources(${MICROPY_TARGET} PRIVATE
+ ${MICROPY_FROZEN_CONTENT}
+ )
+
+ # Note: target_compile_definitions already added earlier.
+
+ if(NOT MICROPY_LIB_DIR)
+ set(MICROPY_LIB_DIR ${MICROPY_DIR}/../micropython-lib)
+ endif()
+
+ # If MICROPY_MPYCROSS is not explicitly defined in the environment (which
+ # is what makemanifest.py will use) then create an mpy-cross dependency
+ # to automatically build mpy-cross if needed.
+ set(MICROPY_MPYCROSS $ENV{MICROPY_MPYCROSS})
+ if(NOT MICROPY_MPYCROSS)
+ set(MICROPY_MPYCROSS_DEPENDENCY ${MICROPY_DIR}/mpy-cross/mpy-cross)
+ if(NOT MICROPY_MAKE_EXECUTABLE)
+ set(MICROPY_MAKE_EXECUTABLE make)
+ endif()
+ add_custom_command(
+ OUTPUT ${MICROPY_MPYCROSS_DEPENDENCY}
+ COMMAND ${MICROPY_MAKE_EXECUTABLE} -C ${MICROPY_DIR}/mpy-cross
+ )
+ endif()
+
+ add_custom_command(
+ OUTPUT ${MICROPY_FROZEN_CONTENT}
+ COMMAND ${Python3_EXECUTABLE} ${MICROPY_DIR}/tools/makemanifest.py -o ${MICROPY_FROZEN_CONTENT} -v "MPY_DIR=${MICROPY_DIR}" -v "MPY_LIB_DIR=${MICROPY_LIB_DIR}" -v "PORT_DIR=${MICROPY_PORT_DIR}" -v "BOARD_DIR=${MICROPY_BOARD_DIR}" -b "${CMAKE_BINARY_DIR}" -f${MICROPY_CROSS_FLAGS} ${MICROPY_FROZEN_MANIFEST}
+ DEPENDS MICROPY_FORCE_BUILD
+ ${MICROPY_QSTRDEFS_GENERATED}
+ ${MICROPY_MPYCROSS_DEPENDENCY}
+ VERBATIM
+ )
+endif()
diff --git a/circuitpython/py/mkrules.mk b/circuitpython/py/mkrules.mk
new file mode 100644
index 0000000..6bef64f
--- /dev/null
+++ b/circuitpython/py/mkrules.mk
@@ -0,0 +1,210 @@
+ifneq ($(MKENV_INCLUDED),1)
+# We assume that mkenv is in the same directory as this file.
+THIS_MAKEFILE = $(lastword $(MAKEFILE_LIST))
+include $(dir $(THIS_MAKEFILE))mkenv.mk
+endif
+
+# Extra deps that need to happen before object compilation.
+OBJ_EXTRA_ORDER_DEPS =
+
+# This file expects that OBJ contains a list of all of the object files.
+# The directory portion of each object file is used to locate the source
+# and should not contain any ..'s but rather be relative to the top of the
+# tree.
+#
+# So for example, py/map.c would have an object file name py/map.o
+# The object files will go into the build directory and maintain the same
+# directory structure as the source tree. So the final dependency will look
+# like this:
+#
+# build/py/map.o: py/map.c
+#
+# We set vpath to point to the top of the tree so that the source files
+# can be located. By following this scheme, it allows a single build rule
+# to be used to compile all .c files.
+
+vpath %.S . $(TOP) $(USER_C_MODULES)
+$(BUILD)/%.o: %.S
+ $(STEPECHO) "CC $<"
+ $(Q)$(CC) $(CFLAGS) -c -o $@ $<
+
+vpath %.s . $(TOP) $(USER_C_MODULES)
+$(BUILD)/%.o: %.s
+ $(STEPECHO) "AS $<"
+ $(Q)$(AS) -o $@ $<
+
+define compile_c
+$(STEPECHO) "CC $<"
+$(Q)$(CC) $(CFLAGS) -c -MD -o $@ $<
+@# The following fixes the dependency file.
+@# See http://make.paulandlesley.org/autodep.html for details.
+@# Regex adjusted from the above to play better with Windows paths, etc.
+@$(CP) $(@:.o=.d) $(@:.o=.P); \
+ $(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
+ -e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
+ $(RM) -f $(@:.o=.d)
+endef
+
+define compile_cxx
+$(ECHO) "CXX $<"
+$(Q)$(CXX) $(CXXFLAGS) -c -MD -o $@ $<
+@# The following fixes the dependency file.
+@# See http://make.paulandlesley.org/autodep.html for details.
+@# Regex adjusted from the above to play better with Windows paths, etc.
+@$(CP) $(@:.o=.d) $(@:.o=.P); \
+ $(SED) -e 's/#.*//' -e 's/^.*: *//' -e 's/ *\\$$//' \
+ -e '/^$$/ d' -e 's/$$/ :/' < $(@:.o=.d) >> $(@:.o=.P); \
+ $(RM) -f $(@:.o=.d)
+endef
+
+vpath %.c . $(TOP) $(USER_C_MODULES) $(DEVICES_MODULES)
+$(BUILD)/%.o: %.c
+ $(call compile_c)
+
+vpath %.cpp . $(TOP) $(USER_C_MODULES)
+$(BUILD)/%.o: %.cpp
+ $(call compile_cxx)
+
+QSTR_GEN_EXTRA_CFLAGS += -DNO_QSTR -x c
+
+# frozen.c and frozen_mpy.c are created in $(BUILD), so use our rule
+# for those as well.
+vpath %.c . $(BUILD)
+$(BUILD)/%.o: %.c
+ $(call compile_c)
+
+QSTR_GEN_EXTRA_CFLAGS += -I$(BUILD)/tmp
+
+vpath %.c . $(TOP) $(USER_C_MODULES) $(DEVICES_MODULES)
+$(BUILD)/%.pp: %.c
+ $(STEPECHO) "PreProcess $<"
+ $(Q)$(CPP) $(CFLAGS) -E -Wp,-C,-dD,-dI -o $@ $<
+
+# The following rule uses | to create an order only prerequisite. Order only
+# prerequisites only get built if they don't exist. They don't cause timestamp
+# checking to be performed.
+#
+# We don't know which source files actually need the generated.h (since
+# it is #included from str.h). The compiler generated dependencies will cause
+# the right .o's to get recompiled if the generated.h file changes. Adding
+# an order-only dependency to all of the .o's will cause the generated .h
+# to get built before we try to compile any of them.
+$(OBJ): | $(HEADER_BUILD)/qstrdefs.enum.h $(HEADER_BUILD)/mpversion.h
+
+# The logic for qstr regeneration (applied by makeqstrdefs.py) is:
+# - if anything in QSTR_GLOBAL_DEPENDENCIES is newer, then process all source files ($^)
+# - else, if list of newer prerequisites ($?) is not empty, then process just these ($?)
+# - else, process all source files ($^) [this covers "make -B" which can set $? to empty]
+$(HEADER_BUILD)/qstr.split: $(SRC_QSTR) $(SRC_QSTR_PREPROCESSOR) $(QSTR_GLOBAL_DEPENDENCIES) $(HEADER_BUILD)/moduledefs.h | $(HEADER_BUILD)/mpversion.h $(PY_SRC)/genlast.py
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/genlast.py $(HEADER_BUILD)/qstr $(if $(filter $?,$(QSTR_GLOBAL_DEPENDENCIES)),$^,$(if $?,$?,$^)) -- $(SRC_QSTR_PREPROCESSOR) -- $(CPP) $(QSTR_GEN_EXTRA_CFLAGS) $(CFLAGS)
+ $(Q)$(TOUCH) $@
+
+$(QSTR_DEFS_COLLECTED): $(HEADER_BUILD)/qstr.split $(PY_SRC)/makeqstrdefs.py
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/makeqstrdefs.py cat - $(HEADER_BUILD)/qstr $(QSTR_DEFS_COLLECTED)
+
+# $(sort $(var)) removes duplicates
+#
+# The net effect of this, is it causes the objects to depend on the
+# object directories (but only for existence), and the object directories
+# will be created if they don't exist.
+OBJ_DIRS = $(sort $(dir $(OBJ)))
+$(OBJ): | $(OBJ_DIRS)
+$(OBJ_DIRS):
+ $(Q)$(MKDIR) -p $@
+
# Create the generated-header directory if it does not exist.
# Fix: removed a stray duplicate `$(MKDIR) -p $@` recipe line that followed
# a blank line — blank lines inside a recipe are ignored by make, so the
# duplicate ran mkdir a second time and, lacking the $(Q) prefix, echoed
# the command even in quiet builds.
$(HEADER_BUILD):
	$(Q)$(MKDIR) -p $@
+
+ifneq ($(MICROPY_MPYCROSS_DEPENDENCY),)
+# to automatically build mpy-cross, if needed
+$(MICROPY_MPYCROSS_DEPENDENCY):
+ $(MAKE) -C $(dir $@)
+endif
+
+ifneq ($(FROZEN_DIR),)
+$(error Support for FROZEN_DIR was removed. Please use manifest.py instead, see https://docs.micropython.org/en/latest/reference/manifest.html)
+endif
+
+ifneq ($(FROZEN_MPY_DIR),)
+$(error Support for FROZEN_MPY_DIR was removed. Please use manifest.py instead, see https://docs.micropython.org/en/latest/reference/manifest.html)
+endif
+
+ifneq ($(FROZEN_MANIFEST),)
+# to build frozen_content.c from a manifest
+$(BUILD)/frozen_content.c: FORCE $(FROZEN_MANIFEST) $(BUILD)/genhdr/qstrdefs.generated.h | $(MICROPY_MPYCROSS_DEPENDENCY) $(TOP)/tools/makemanifest.py
+ $(Q)$(MAKE_MANIFEST) -o $@ -v "MPY_DIR=$(TOP)" -v "MPY_LIB_DIR=$(MPY_LIB_DIR)" -v "PORT_DIR=$(shell pwd)" -v "BOARD_DIR=$(BOARD_DIR)" -b "$(BUILD)" $(if $(MPY_CROSS_FLAGS),-f"$(MPY_CROSS_FLAGS)",) --mpy-tool-flags="$(MPY_TOOL_FLAGS)" $(FROZEN_MANIFEST)
+endif
+
+ifneq ($(PROG),)
+# Build a standalone executable (unix does this)
+
+# The executable should have an .exe extension for builds targeting 'pure'
+# Windows, i.e. msvc or mingw builds, but not when using msys or cygwin's gcc.
+COMPILER_TARGET := $(shell $(CC) -dumpmachine)
+ifneq (,$(findstring mingw,$(COMPILER_TARGET)))
+PROG := $(PROG).exe
+endif
+
+all: $(PROG)
+
+$(PROG): $(OBJ)
+ $(STEPECHO) "LINK $@"
+# Do not pass COPT here - it's *C* compiler optimizations. For example,
+# we may want to compile using Thumb, but link with non-Thumb libc.
+ $(Q)$(CC) -o $@ $^ $(LIB) $(LDFLAGS)
+ifdef STRIP_CIRCUITPYTHON
+ $(Q)$(STRIP) $(STRIPFLAGS_EXTRA) $@
+endif
+ $(Q)$(SIZE) $$(find $(BUILD) -path "$(BUILD)/build/frozen*.o") $@
+
+clean: clean-prog
+clean-prog:
+ $(RM) -f $(PROG)
+ $(RM) -f $(PROG).map
+
+.PHONY: clean-prog
+endif
+
+submodules:
+ $(ECHO) "Updating submodules: $(GIT_SUBMODULES)"
+ifneq ($(GIT_SUBMODULES),)
+ $(Q)git submodule sync $(addprefix $(TOP)/,$(GIT_SUBMODULES))
+ $(Q)git submodule update --init $(addprefix $(TOP)/,$(GIT_SUBMODULES))
+endif
+.PHONY: submodules
+
+LIBMICROPYTHON = libmicropython.a
+
+# We can execute extra commands after library creation using
+# LIBMICROPYTHON_EXTRA_CMD. This may be needed e.g. to integrate
+# with 3rd-party projects which don't have proper dependency
+# tracking. Then LIBMICROPYTHON_EXTRA_CMD can e.g. touch some
+# other file to cause needed effect, e.g. relinking with new lib.
+lib $(LIBMICROPYTHON): $(OBJ)
+ $(Q)$(AR) rcs $(LIBMICROPYTHON) $^
+ $(LIBMICROPYTHON_EXTRA_CMD)
+
+clean:
+ $(RM) -rf $(BUILD) $(CLEAN_EXTRA)
+.PHONY: clean
+
+print-cfg:
+ $(ECHO) "PY_SRC = $(PY_SRC)"
+ $(ECHO) "BUILD = $(BUILD)"
+ $(ECHO) "OBJ = $(OBJ)"
+.PHONY: print-cfg
+
+print-def:
+ @$(ECHO) "The following defines are built into the $(CC) compiler"
+ $(TOUCH) __empty__.c
+ @$(CC) -E -Wp,-dM __empty__.c
+ @$(RM) -f __empty__.c
+
+tags:
+ ctags -e -R $(TOP)
+
+-include $(OBJ:.o=.P)
diff --git a/circuitpython/py/modarray.c b/circuitpython/py/modarray.c
new file mode 100644
index 0000000..daae346
--- /dev/null
+++ b/circuitpython/py/modarray.c
@@ -0,0 +1,45 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_ARRAY
+
// Globals table for the 'array' module: just __name__ and the array type.
STATIC const mp_rom_map_elem_t mp_module_array_globals_table[] = {
    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_array) },
    { MP_ROM_QSTR(MP_QSTR_array), MP_ROM_PTR(&mp_type_array) },
};

STATIC MP_DEFINE_CONST_DICT(mp_module_array_globals, mp_module_array_globals_table);

// The module object itself, placed in ROM (globals dict is const).
const mp_obj_module_t mp_module_array = {
    .base = { &mp_type_module },
    .globals = (mp_obj_dict_t *)&mp_module_array_globals,
};

MP_REGISTER_MODULE(MP_QSTR_array, mp_module_array, MICROPY_PY_ARRAY);
+#endif
diff --git a/circuitpython/py/modbuiltins.c b/circuitpython/py/modbuiltins.c
new file mode 100644
index 0000000..ebdbd52
--- /dev/null
+++ b/circuitpython/py/modbuiltins.c
@@ -0,0 +1,797 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_PY_IO
+extern struct _mp_dummy_t mp_sys_stdout_obj; // type is irrelevant, just need pointer
+#endif
+
// Implements the __build_class__ builtin, invoked for a `class` statement.
// args[0] is function from class body
// args[1] is class name
// args[2:] are base objects
STATIC mp_obj_t mp_builtin___build_class__(size_t n_args, const mp_obj_t *args) {
    assert(2 <= n_args);

    // set the new class's __locals__ object: run the class body with a
    // fresh dict as its local namespace so its assignments become members
    // NOTE(review): if the class body raises, old_locals is not restored
    // here — presumably the outer execution machinery resets it; confirm.
    mp_obj_dict_t *old_locals = mp_locals_get();
    mp_obj_t class_locals = mp_obj_new_dict(0);
    mp_locals_set(MP_OBJ_TO_PTR(class_locals));

    // call the class code
    mp_obj_t cell = mp_call_function_0(args[0]);

    // restore old __locals__ object
    mp_locals_set(old_locals);

    // get the class type (meta object) from the base objects
    mp_obj_t meta;
    if (n_args == 2) {
        // no explicit bases, so use 'type'
        meta = MP_OBJ_FROM_PTR(&mp_type_type);
    } else {
        // use type of first base object
        meta = MP_OBJ_FROM_PTR(mp_obj_get_type(args[2]));
    }

    // TODO do proper metaclass resolution for multiple base objects

    // create the new class using a call to the meta object:
    // meta(name, bases, namespace)
    mp_obj_t meta_args[3];
    meta_args[0] = args[1]; // class name
    meta_args[1] = mp_obj_new_tuple(n_args - 2, args + 2); // tuple of bases
    meta_args[2] = class_locals; // dict of members
    mp_obj_t new_class = mp_call_function_n_kw(meta, 3, 0, meta_args);

    // store into cell if needed (cell is non-none when the class body
    // references the class name itself, e.g. via zero-arg super())
    if (cell != mp_const_none) {
        mp_obj_cell_set(cell, new_class);
    }

    return new_class;
}
MP_DEFINE_CONST_FUN_OBJ_VAR(mp_builtin___build_class___obj, 2, mp_builtin___build_class__);
+
// abs(x): delegate to the argument's unary ABS operator.
STATIC mp_obj_t mp_builtin_abs(mp_obj_t o_in) {
    return mp_unary_op(MP_UNARY_OP_ABS, o_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_abs_obj, mp_builtin_abs);
+
+STATIC mp_obj_t mp_builtin_all(mp_obj_t o_in) {
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(o_in, &iter_buf);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ if (!mp_obj_is_true(item)) {
+ return mp_const_false;
+ }
+ }
+ return mp_const_true;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_all_obj, mp_builtin_all);
+
+STATIC mp_obj_t mp_builtin_any(mp_obj_t o_in) {
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(o_in, &iter_buf);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ if (mp_obj_is_true(item)) {
+ return mp_const_true;
+ }
+ }
+ return mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_any_obj, mp_builtin_any);
+
// bin(x): format as "0b..."; the qstr is the escaped encoding of "{:#b}"
// and the value is rendered via str.format.
STATIC mp_obj_t mp_builtin_bin(mp_obj_t o_in) {
    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_b_brace_close_), o_in };
    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_bin_obj, mp_builtin_bin);
+
+STATIC mp_obj_t mp_builtin_callable(mp_obj_t o_in) {
+ if (mp_obj_is_callable(o_in)) {
+ return mp_const_true;
+ } else {
+ return mp_const_false;
+ }
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_callable_obj, mp_builtin_callable);
+
// chr(i): return the one-character string for code point i.
// With unicode support the code point is UTF-8 encoded (1-4 bytes);
// otherwise only 0..255 is accepted and stored as a single byte.
STATIC mp_obj_t mp_builtin_chr(mp_obj_t o_in) {
    #if MICROPY_PY_BUILTINS_STR_UNICODE
    mp_uint_t c = mp_obj_get_int(o_in);
    uint8_t str[4];
    int len = 0;
    if (c < 0x80) {
        // 1-byte sequence: ASCII as-is
        *str = c;
        len = 1;
    } else if (c < 0x800) {
        // 2-byte sequence: 110xxxxx 10xxxxxx
        str[0] = (c >> 6) | 0xC0;
        str[1] = (c & 0x3F) | 0x80;
        len = 2;
    } else if (c < 0x10000) {
        // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
        str[0] = (c >> 12) | 0xE0;
        str[1] = ((c >> 6) & 0x3F) | 0x80;
        str[2] = (c & 0x3F) | 0x80;
        len = 3;
    } else if (c < 0x110000) {
        // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        str[0] = (c >> 18) | 0xF0;
        str[1] = ((c >> 12) & 0x3F) | 0x80;
        str[2] = ((c >> 6) & 0x3F) | 0x80;
        str[3] = (c & 0x3F) | 0x80;
        len = 4;
    } else {
        // beyond the Unicode code-point range
        mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(0x110000)"));
    }
    return mp_obj_new_str_via_qstr((char *)str, len);
    #else
    mp_int_t ord = mp_obj_get_int(o_in);
    if (0 <= ord && ord <= 0xff) {
        uint8_t str[1] = {ord};
        return mp_obj_new_str_via_qstr((char *)str, 1);
    } else {
        mp_raise_ValueError(MP_ERROR_TEXT("chr() arg not in range(256)"));
    }
    #endif
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_chr_obj, mp_builtin_chr);
+
// dir() / dir(obj): list names in the local namespace, or the attributes
// of obj discovered by probing every interned qstr with attribute lookup.
STATIC mp_obj_t mp_builtin_dir(size_t n_args, const mp_obj_t *args) {
    mp_obj_t dir = mp_obj_new_list(0, NULL);
    if (n_args == 0) {
        // Make a list of names in the local namespace
        mp_obj_dict_t *dict = mp_locals_get();
        for (size_t i = 0; i < dict->map.alloc; i++) {
            if (mp_map_slot_is_filled(&dict->map, i)) {
                mp_obj_list_append(dir, dict->map.table[i].key);
            }
        }
    } else { // n_args == 1
        // Make a list of names in the given object
        // Implemented by probing all possible qstrs with mp_load_method_maybe
        // (protected variant: lookups that raise are swallowed)
        size_t nqstr = QSTR_TOTAL();
        for (size_t i = MP_QSTR_ + 1; i < nqstr; ++i) {
            mp_obj_t dest[2];
            mp_load_method_protected(args[0], i, dest, false);
            if (dest[0] != MP_OBJ_NULL) {
                #if MICROPY_PY_ALL_SPECIAL_METHODS
                // Support for __dir__: see if we can dispatch to this special method
                // This relies on MP_QSTR__dir__ being first after MP_QSTR_
                if (i == MP_QSTR___dir__ && dest[1] != MP_OBJ_NULL) {
                    // object defines __dir__: let it produce the list instead
                    return mp_call_method_n_kw(0, 0, dest);
                }
                #endif
                mp_obj_list_append(dir, MP_OBJ_NEW_QSTR(i));
            }
        }
    }
    return dir;
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_dir_obj, 0, 1, mp_builtin_dir);
+
// divmod(a, b): delegate to the binary DIVMOD operator.
STATIC mp_obj_t mp_builtin_divmod(mp_obj_t o1_in, mp_obj_t o2_in) {
    return mp_binary_op(MP_BINARY_OP_DIVMOD, o1_in, o2_in);
}
MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_divmod_obj, mp_builtin_divmod);
+
// hash(obj): delegate to the unary HASH operator.
STATIC mp_obj_t mp_builtin_hash(mp_obj_t o_in) {
    // result is guaranteed to be a (small) int
    return mp_unary_op(MP_UNARY_OP_HASH, o_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hash_obj, mp_builtin_hash);
+
// hex(x): format as "0x...". Uses "%#x" via the str modulo operator when
// available, otherwise "{:#x}" via str.format (qstrs are escaped encodings
// of those literals).
STATIC mp_obj_t mp_builtin_hex(mp_obj_t o_in) {
    #if MICROPY_PY_BUILTINS_STR_OP_MODULO
    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_x), o_in);
    #else
    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_x_brace_close_), o_in };
    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
    #endif
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_hex_obj, mp_builtin_hex);
+
+#if MICROPY_PY_BUILTINS_INPUT
+
+#include "py/mphal.h"
+#include "shared/readline/readline.h"
+
+// A port can define mp_hal_readline if they want to use a custom function here
+#ifndef mp_hal_readline
+#define mp_hal_readline readline
+#endif
+
// input([prompt]): print the optional prompt, then read one line.
// Ctrl-C raises KeyboardInterrupt; Ctrl-D on an empty line raises EOFError.
STATIC mp_obj_t mp_builtin_input(size_t n_args, const mp_obj_t *args) {
    if (n_args == 1) {
        mp_obj_print(args[0], PRINT_STR);
    }
    vstr_t line;
    vstr_init(&line, 16);
    int ret = mp_hal_readline(&line, "");
    if (ret == CHAR_CTRL_C) {
        mp_raise_type(&mp_type_KeyboardInterrupt);
    }
    // Ctrl-D with text already entered just ends the line, not the stream
    if (line.len == 0 && ret == CHAR_CTRL_D) {
        mp_raise_type(&mp_type_EOFError);
    }
    // takes ownership of the vstr's buffer
    return mp_obj_new_str_from_vstr(&mp_type_str, &line);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_input_obj, 0, 1, mp_builtin_input);
+
+#endif
+
// iter(obj): return an iterator (NULL iter_buf forces a heap-allocated one,
// since the iterator outlives this call).
STATIC mp_obj_t mp_builtin_iter(mp_obj_t o_in) {
    return mp_getiter(o_in, NULL);
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_iter_obj, mp_builtin_iter);
+
+#if MICROPY_PY_BUILTINS_MIN_MAX
+
// Shared implementation of min() and max(). `op` is the comparison that
// makes a candidate "better" (MP_BINARY_OP_LESS for min, _MORE for max).
// Supports the `key=` kwarg always, and `default=` for the single-iterable
// form when the iterable is empty.
STATIC mp_obj_t mp_builtin_min_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs, mp_uint_t op) {
    mp_map_elem_t *key_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_key), MP_MAP_LOOKUP);
    mp_map_elem_t *default_elem;
    mp_obj_t key_fn = key_elem == NULL ? MP_OBJ_NULL : key_elem->value;
    if (n_args == 1) {
        // given an iterable
        mp_obj_iter_buf_t iter_buf;
        mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
        // best_key caches key_fn(best_obj) so each element's key is
        // computed exactly once
        mp_obj_t best_key = MP_OBJ_NULL;
        mp_obj_t best_obj = MP_OBJ_NULL;
        mp_obj_t item;
        while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
            mp_obj_t key = key_fn == MP_OBJ_NULL ? item : mp_call_function_1(key_fn, item);
            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
                best_key = key;
                best_obj = item;
            }
        }
        if (best_obj == MP_OBJ_NULL) {
            // empty iterable: fall back to default= if given, else error
            default_elem = mp_map_lookup(kwargs, MP_OBJ_NEW_QSTR(MP_QSTR_default), MP_MAP_LOOKUP);
            if (default_elem != NULL) {
                best_obj = default_elem->value;
            } else {
                mp_raise_ValueError(MP_ERROR_TEXT("arg is an empty sequence"));
            }
        }
        return best_obj;
    } else {
        // given many args: same scan over the argument array directly
        mp_obj_t best_key = MP_OBJ_NULL;
        mp_obj_t best_obj = MP_OBJ_NULL;
        for (size_t i = 0; i < n_args; i++) {
            mp_obj_t key = key_fn == MP_OBJ_NULL ? args[i] : mp_call_function_1(key_fn, args[i]);
            if (best_obj == MP_OBJ_NULL || (mp_binary_op(op, key, best_key) == mp_const_true)) {
                best_key = key;
                best_obj = args[i];
            }
        }
        return best_obj;
    }
}
+
// max(...): select with the MORE comparison.
STATIC mp_obj_t mp_builtin_max(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_MORE);
}
MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_max_obj, 1, mp_builtin_max);

// min(...): select with the LESS comparison.
STATIC mp_obj_t mp_builtin_min(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
    return mp_builtin_min_max(n_args, args, kwargs, MP_BINARY_OP_LESS);
}
MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_min_obj, 1, mp_builtin_min);
+
+#endif
+
#if MICROPY_PY_BUILTINS_NEXT2
// next(it[, default]): advance the iterator; with a default, exhaustion
// returns the default instead of raising StopIteration.
STATIC mp_obj_t mp_builtin_next(size_t n_args, const mp_obj_t *args) {
    if (n_args == 1) {
        // allow_raise: propagate exceptions raised inside the iterator
        mp_obj_t ret = mp_iternext_allow_raise(args[0]);
        if (ret == MP_OBJ_STOP_ITERATION) {
            mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
        } else {
            return ret;
        }
    } else {
        mp_obj_t ret = mp_iternext(args[0]);
        return ret == MP_OBJ_STOP_ITERATION ? args[1] : ret;
    }
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_next_obj, 1, 2, mp_builtin_next);
#else
// next(it): one-argument form only; exhaustion raises StopIteration.
STATIC mp_obj_t mp_builtin_next(mp_obj_t o) {
    mp_obj_t ret = mp_iternext_allow_raise(o);
    if (ret == MP_OBJ_STOP_ITERATION) {
        mp_raise_StopIteration(MP_STATE_THREAD(stop_iteration_arg));
    } else {
        return ret;
    }
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_next_obj, mp_builtin_next);
#endif
+
// oct(x): format as "0o...". Uses "%#o" via the str modulo operator when
// available, otherwise "{:#o}" via str.format.
STATIC mp_obj_t mp_builtin_oct(mp_obj_t o_in) {
    #if MICROPY_PY_BUILTINS_STR_OP_MODULO
    return mp_binary_op(MP_BINARY_OP_MODULO, MP_OBJ_NEW_QSTR(MP_QSTR__percent__hash_o), o_in);
    #else
    mp_obj_t args[] = { MP_OBJ_NEW_QSTR(MP_QSTR__brace_open__colon__hash_o_brace_close_), o_in };
    return mp_obj_str_format(MP_ARRAY_SIZE(args), args, NULL);
    #endif
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_oct_obj, mp_builtin_oct);
+
// ord(s): return the code point of a one-character string (or the byte
// value of a one-byte bytes object). Raises TypeError otherwise.
STATIC mp_obj_t mp_builtin_ord(mp_obj_t o_in) {
    size_t len;
    const byte *str = (const byte *)mp_obj_str_get_data(o_in, &len);
    #if MICROPY_PY_BUILTINS_STR_UNICODE
    if (mp_obj_is_str(o_in)) {
        // measure in code points, not bytes
        len = utf8_charlen(str, len);
        if (len == 1) {
            return mp_obj_new_int(utf8_get_char(str));
        }
    } else
    #endif
    {
        // a bytes object, or a str without unicode support (don't sign extend the char)
        if (len == 1) {
            return MP_OBJ_NEW_SMALL_INT(str[0]);
        }
    }

    // fall through: wrong length
    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
    mp_raise_TypeError(MP_ERROR_TEXT("ord expects a character"));
    #else
    mp_raise_TypeError_varg(
        MP_ERROR_TEXT("ord() expected a character, but string of length %d found"), (int)len);
    #endif
}
MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_ord_obj, mp_builtin_ord);
+
// pow(x, y[, z]): two-arg form delegates to the POWER operator; the
// three-arg modular form needs MICROPY_PY_BUILTINS_POW3, and uses an
// efficient mpz implementation when long ints are mpz-backed.
STATIC mp_obj_t mp_builtin_pow(size_t n_args, const mp_obj_t *args) {
    switch (n_args) {
        case 2:
            return mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]);
        default:
            #if !MICROPY_PY_BUILTINS_POW3
            mp_raise_NotImplementedError(MP_ERROR_TEXT("3-arg pow() not supported"));
            #elif MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_MPZ
            // naive (x ** y) % z — fine for small ints
            return mp_binary_op(MP_BINARY_OP_MODULO, mp_binary_op(MP_BINARY_OP_POWER, args[0], args[1]), args[2]);
            #else
            return mp_obj_int_pow3(args[0], args[1], args[2]);
            #endif
    }
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_pow_obj, 2, 3, mp_builtin_pow);
+
// print(*objects, sep=' ', end='\n', flush=False[, file=sys.stdout]):
// write each object's str() to the output, separated by `sep`, finishing
// with `end`; `file=` is only available with MICROPY_PY_IO + SYS_STDFILES.
STATIC mp_obj_t mp_builtin_print(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    enum { ARG_sep, ARG_end, ARG_flush, ARG_file };
    static const mp_arg_t allowed_args[] = {
        { MP_QSTR_sep, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_QSTR(MP_QSTR__space_)} },
        { MP_QSTR_end, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_QSTR(MP_QSTR__0x0a_)} },
        { MP_QSTR_flush, MP_ARG_KW_ONLY | MP_ARG_BOOL, {.u_bool = false} },
        #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
        { MP_QSTR_file, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_PTR(&mp_sys_stdout_obj)} },
        #endif
    };

    // parse args (a union is used to reduce the amount of C stack that is needed)
    union {
        mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
        size_t len[2];
    } u;
    mp_arg_parse_all(0, NULL, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, u.args);

    #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
    mp_get_stream_raise(u.args[ARG_file].u_obj, MP_STREAM_OP_WRITE);
    mp_print_t print = {MP_OBJ_TO_PTR(u.args[ARG_file].u_obj), mp_stream_write_adaptor};
    #endif

    // extract the objects first because we are going to use the other part of the union
    // (u.len[0..1] overwrites u.args[ARG_sep]/u.args[ARG_end] only; the
    // flush flag at index 2 is still read safely below)
    mp_obj_t sep = u.args[ARG_sep].u_obj;
    mp_obj_t end = u.args[ARG_end].u_obj;
    const char *sep_data = mp_obj_str_get_data(sep, &u.len[0]);
    const char *end_data = mp_obj_str_get_data(end, &u.len[1]);

    for (size_t i = 0; i < n_args; i++) {
        // separator goes between items, not before the first one
        if (i > 0) {
            #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
            mp_stream_write_adaptor(print.data, sep_data, u.len[0]);
            #else
            mp_print_strn(&mp_plat_print, sep_data, u.len[0], 0, 0, 0);
            #endif
        }
        #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
        mp_obj_print_helper(&print, pos_args[i], PRINT_STR);
        #else
        mp_obj_print_helper(&mp_plat_print, pos_args[i], PRINT_STR);
        #endif
    }
    #if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
    mp_stream_write_adaptor(print.data, end_data, u.len[1]);
    if (u.args[ARG_flush].u_bool) {
        mp_stream_flush(MP_OBJ_FROM_PTR(print.data));
    }
    #else
    mp_print_strn(&mp_plat_print, end_data, u.len[1], 0, 0, 0);
    #endif
    return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_print_obj, 0, mp_builtin_print);
+
+// __repl_print__(o): echo an expression result at the REPL using repr();
+// None is suppressed, matching CPython's interactive behaviour.
+STATIC mp_obj_t mp_builtin___repl_print__(mp_obj_t o) {
+ if (o != mp_const_none) {
+ mp_obj_print_helper(MP_PYTHON_PRINTER, o, PRINT_REPR);
+ mp_print_str(MP_PYTHON_PRINTER, "\n");
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ // Set "_" special variable
+ mp_obj_t dest[2] = {MP_OBJ_SENTINEL, o};
+ mp_type_module.attr(MP_OBJ_FROM_PTR(&mp_module_builtins), MP_QSTR__, dest);
+ #endif
+ }
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin___repl_print___obj, mp_builtin___repl_print__);
+
+// repr(o): render o with PRINT_REPR into a fresh str object.
+// The vstr starts at 16 bytes and grows on demand via the print adaptor.
+STATIC mp_obj_t mp_builtin_repr(mp_obj_t o_in) {
+ vstr_t vstr;
+ mp_print_t print;
+ vstr_init_print(&vstr, 16, &print);
+ mp_obj_print_helper(&print, o_in, PRINT_REPR);
+ return mp_obj_new_str_from_vstr(&mp_type_str, &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_repr_obj, mp_builtin_repr);
+
+// round(x[, ndigits]): built-in round().
+// Integers: only negative ndigits change the value; ties are resolved to the
+// even multiple (banker's rounding), implemented with big-int-safe binary ops.
+// Floats: delegates to nearbyint(), which rounds halfway cases to even under
+// the default FP rounding mode.
+STATIC mp_obj_t mp_builtin_round(size_t n_args, const mp_obj_t *args) {
+ mp_obj_t o_in = args[0];
+ if (mp_obj_is_int(o_in)) {
+ if (n_args <= 1) {
+ return o_in;
+ }
+
+ mp_int_t num_dig = mp_obj_get_int(args[1]);
+ if (num_dig >= 0) {
+ // rounding an int to >= 0 digits is the identity
+ return o_in;
+ }
+ #if !MICROPY_PY_BUILTINS_ROUND_INT
+ mp_raise_NotImplementedError(NULL);
+ #else
+
+ // mult = 10**(-ndigits); work with object-level ops so big ints are handled
+ mp_obj_t mult = mp_binary_op(MP_BINARY_OP_POWER, MP_OBJ_NEW_SMALL_INT(10), MP_OBJ_NEW_SMALL_INT(-num_dig));
+ mp_obj_t half_mult = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, mult, MP_OBJ_NEW_SMALL_INT(2));
+ mp_obj_t modulo = mp_binary_op(MP_BINARY_OP_MODULO, o_in, mult);
+ mp_obj_t rounded = mp_binary_op(MP_BINARY_OP_SUBTRACT, o_in, modulo);
+ if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, half_mult, modulo))) {
+ return rounded;
+ } else if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_MORE, modulo, half_mult))) {
+ return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult);
+ } else {
+ // round to even number
+ mp_obj_t floor = mp_binary_op(MP_BINARY_OP_FLOOR_DIVIDE, o_in, mult);
+ if (mp_obj_is_true(mp_binary_op(MP_BINARY_OP_AND, floor, MP_OBJ_NEW_SMALL_INT(1)))) {
+ return mp_binary_op(MP_BINARY_OP_ADD, rounded, mult);
+ } else {
+ return rounded;
+ }
+ }
+ #endif
+ }
+ #if MICROPY_PY_BUILTINS_FLOAT
+ mp_float_t val = mp_obj_get_float(o_in);
+ if (n_args > 1) {
+ mp_int_t num_dig = mp_obj_get_int(args[1]);
+ mp_float_t mult = MICROPY_FLOAT_C_FUN(pow)(10, (mp_float_t)num_dig);
+ // TODO may lead to overflow
+ mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val * mult) / mult;
+ return mp_obj_new_float(rounded);
+ }
+ // 1-arg float round() returns an int, per CPython
+ mp_float_t rounded = MICROPY_FLOAT_C_FUN(nearbyint)(val);
+ return mp_obj_new_int_from_float(rounded);
+ #else
+ // no float support: coerce to int and return it unchanged
+ mp_int_t r = mp_obj_get_int(o_in);
+ return mp_obj_new_int(r);
+ #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_round_obj, 1, 2, mp_builtin_round);
+
+// sum(iterable[, start]): accumulate items with the + operator.
+// start defaults to integer 0 when not supplied.
+STATIC mp_obj_t mp_builtin_sum(size_t n_args, const mp_obj_t *args) {
+ mp_obj_t value;
+ switch (n_args) {
+ case 1:
+ value = MP_OBJ_NEW_SMALL_INT(0);
+ break;
+ default:
+ value = args[1];
+ break;
+ }
+ // stack-allocated iterator buffer avoids a heap allocation where possible
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ value = mp_binary_op(MP_BINARY_OP_ADD, value, item);
+ }
+ return value;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_sum_obj, 1, 2, mp_builtin_sum);
+
+// sorted(iterable, *, key=..., reverse=...): copy the input into a new list
+// and sort it in place; options must be keywords (positional ones rejected).
+STATIC mp_obj_t mp_builtin_sorted(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+ if (n_args > 1) {
+ mp_raise_TypeError(MP_ERROR_TEXT("must use keyword argument for key function"))
;
+ }
+ // list(iterable) makes the working copy; kwargs forwarded to list.sort()
+ mp_obj_t self = mp_type_list.make_new(&mp_type_list, 1, 0, args);
+ mp_obj_list_sort(1, &self, kwargs);
+
+ return self;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_builtin_sorted_obj, 1, mp_builtin_sorted);
+
+// See mp_load_attr() if making any changes
+// Look up `attr` on `base`. If defval is MP_OBJ_NULL the lookup raises on a
+// missing attribute; otherwise defval is returned. A (method, self) pair from
+// load_method is turned into a bound-method object on demand.
+static inline mp_obj_t mp_load_attr_default(mp_obj_t base, qstr attr, mp_obj_t defval) {
+ mp_obj_t dest[2];
+ // use load_method, raising or not raising exception
+ if (defval == MP_OBJ_NULL) {
+ mp_load_method(base, attr, dest);
+ } else {
+ mp_load_method_protected(base, attr, dest, false);
+ }
+ if (dest[0] == MP_OBJ_NULL) {
+ return defval;
+ } else if (dest[1] == MP_OBJ_NULL) {
+ // load_method returned just a normal attribute
+ return dest[0];
+ } else {
+ // load_method returned a method, so build a bound method object
+ return mp_obj_new_bound_meth(dest[0], dest[1]);
+ }
+}
+
+// getattr(obj, name[, default]): attribute lookup with optional fallback.
+// Without a default (MP_OBJ_NULL sentinel), a missing attribute raises.
+STATIC mp_obj_t mp_builtin_getattr(size_t n_args, const mp_obj_t *args) {
+ mp_obj_t defval = MP_OBJ_NULL;
+ if (n_args > 2) {
+ defval = args[2];
+ }
+ return mp_load_attr_default(args[0], mp_obj_str_get_qstr(args[1]), defval);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_builtin_getattr_obj, 2, 3, mp_builtin_getattr);
+
+// setattr(obj, name, value): store an attribute; name is interned as a qstr.
+STATIC mp_obj_t mp_builtin_setattr(mp_obj_t base, mp_obj_t attr, mp_obj_t value) {
+ mp_store_attr(base, mp_obj_str_get_qstr(attr), value);
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_3(mp_builtin_setattr_obj, mp_builtin_setattr);
+
+#if MICROPY_CPYTHON_COMPAT
+// delattr(obj, name): deletion is setattr with the MP_OBJ_NULL sentinel value.
+STATIC mp_obj_t mp_builtin_delattr(mp_obj_t base, mp_obj_t attr) {
+ return mp_builtin_setattr(base, attr, MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_delattr_obj, mp_builtin_delattr);
+#endif
+
+// hasattr(obj, name): non-raising attribute probe via the protected loader.
+STATIC mp_obj_t mp_builtin_hasattr(mp_obj_t object_in, mp_obj_t attr_in) {
+ qstr attr = mp_obj_str_get_qstr(attr_in);
+ mp_obj_t dest[2];
+ mp_load_method_protected(object_in, attr, dest, false);
+ return mp_obj_new_bool(dest[0] != MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_hasattr_obj, mp_builtin_hasattr);
+
+// globals(): return the current global namespace dict.
+STATIC mp_obj_t mp_builtin_globals(void) {
+ return MP_OBJ_FROM_PTR(mp_globals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_globals_obj, mp_builtin_globals);
+
+// locals(): return the current local namespace dict.
+STATIC mp_obj_t mp_builtin_locals(void) {
+ return MP_OBJ_FROM_PTR(mp_locals_get());
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_builtin_locals_obj, mp_builtin_locals);
+
+// These are defined in terms of MicroPython API functions right away
+// (id() and len() map directly onto mp_obj_id and mp_obj_len).
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_id_obj, mp_obj_id);
+MP_DEFINE_CONST_FUN_OBJ_1(mp_builtin_len_obj, mp_obj_len);
+
+// Global symbol table for the builtins module: core hooks, types, singletons,
+// user functions and exception classes, each behind its feature-config guard.
+STATIC const mp_rom_map_elem_t mp_module_builtins_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_builtins) },
+
+ // built-in core functions
+ { MP_ROM_QSTR(MP_QSTR___build_class__), MP_ROM_PTR(&mp_builtin___build_class___obj) },
+ { MP_ROM_QSTR(MP_QSTR___import__), MP_ROM_PTR(&mp_builtin___import___obj) },
+ { MP_ROM_QSTR(MP_QSTR___repl_print__), MP_ROM_PTR(&mp_builtin___repl_print___obj) },
+
+ // built-in types
+ { MP_ROM_QSTR(MP_QSTR_bool), MP_ROM_PTR(&mp_type_bool) },
+ { MP_ROM_QSTR(MP_QSTR_bytes), MP_ROM_PTR(&mp_type_bytes) },
+ #if MICROPY_PY_BUILTINS_BYTEARRAY
+ { MP_ROM_QSTR(MP_QSTR_bytearray), MP_ROM_PTR(&mp_type_bytearray) },
+ #endif
+ #if MICROPY_PY_BUILTINS_COMPLEX
+ { MP_ROM_QSTR(MP_QSTR_complex), MP_ROM_PTR(&mp_type_complex) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_dict), MP_ROM_PTR(&mp_type_dict) },
+ #if MICROPY_PY_BUILTINS_ENUMERATE
+ { MP_ROM_QSTR(MP_QSTR_enumerate), MP_ROM_PTR(&mp_type_enumerate) },
+ #endif
+ #if MICROPY_PY_BUILTINS_FILTER
+ { MP_ROM_QSTR(MP_QSTR_filter), MP_ROM_PTR(&mp_type_filter) },
+ #endif
+ #if MICROPY_PY_BUILTINS_FLOAT
+ { MP_ROM_QSTR(MP_QSTR_float), MP_ROM_PTR(&mp_type_float) },
+ #endif
+ #if MICROPY_PY_BUILTINS_SET && MICROPY_PY_BUILTINS_FROZENSET
+ { MP_ROM_QSTR(MP_QSTR_frozenset), MP_ROM_PTR(&mp_type_frozenset) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_int), MP_ROM_PTR(&mp_type_int) },
+ { MP_ROM_QSTR(MP_QSTR_list), MP_ROM_PTR(&mp_type_list) },
+ { MP_ROM_QSTR(MP_QSTR_map), MP_ROM_PTR(&mp_type_map) },
+ #if MICROPY_PY_BUILTINS_MEMORYVIEW
+ { MP_ROM_QSTR(MP_QSTR_memoryview), MP_ROM_PTR(&mp_type_memoryview) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_object), MP_ROM_PTR(&mp_type_object) },
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ { MP_ROM_QSTR(MP_QSTR_property), MP_ROM_PTR(&mp_type_property) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_range), MP_ROM_PTR(&mp_type_range) },
+ #if MICROPY_PY_BUILTINS_REVERSED
+ { MP_ROM_QSTR(MP_QSTR_reversed), MP_ROM_PTR(&mp_type_reversed) },
+ #endif
+ #if MICROPY_PY_BUILTINS_SET
+ { MP_ROM_QSTR(MP_QSTR_set), MP_ROM_PTR(&mp_type_set) },
+ #endif
+ #if MICROPY_PY_BUILTINS_SLICE
+ { MP_ROM_QSTR(MP_QSTR_slice), MP_ROM_PTR(&mp_type_slice) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_str), MP_ROM_PTR(&mp_type_str) },
+ { MP_ROM_QSTR(MP_QSTR_super), MP_ROM_PTR(&mp_type_super) },
+ { MP_ROM_QSTR(MP_QSTR_tuple), MP_ROM_PTR(&mp_type_tuple) },
+ { MP_ROM_QSTR(MP_QSTR_type), MP_ROM_PTR(&mp_type_type) },
+ { MP_ROM_QSTR(MP_QSTR_zip), MP_ROM_PTR(&mp_type_zip) },
+
+ { MP_ROM_QSTR(MP_QSTR_classmethod), MP_ROM_PTR(&mp_type_classmethod) },
+ { MP_ROM_QSTR(MP_QSTR_staticmethod), MP_ROM_PTR(&mp_type_staticmethod) },
+
+ // built-in objects
+ { MP_ROM_QSTR(MP_QSTR_Ellipsis), MP_ROM_PTR(&mp_const_ellipsis_obj) },
+ #if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+ { MP_ROM_QSTR(MP_QSTR_NotImplemented), MP_ROM_PTR(&mp_const_notimplemented_obj) },
+ #endif
+
+ // built-in user functions
+ { MP_ROM_QSTR(MP_QSTR_abs), MP_ROM_PTR(&mp_builtin_abs_obj) },
+ { MP_ROM_QSTR(MP_QSTR_all), MP_ROM_PTR(&mp_builtin_all_obj) },
+ { MP_ROM_QSTR(MP_QSTR_any), MP_ROM_PTR(&mp_builtin_any_obj) },
+ { MP_ROM_QSTR(MP_QSTR_bin), MP_ROM_PTR(&mp_builtin_bin_obj) },
+ { MP_ROM_QSTR(MP_QSTR_callable), MP_ROM_PTR(&mp_builtin_callable_obj) },
+ #if MICROPY_PY_BUILTINS_COMPILE
+ { MP_ROM_QSTR(MP_QSTR_compile), MP_ROM_PTR(&mp_builtin_compile_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_chr), MP_ROM_PTR(&mp_builtin_chr_obj) },
+ #if MICROPY_CPYTHON_COMPAT
+ { MP_ROM_QSTR(MP_QSTR_delattr), MP_ROM_PTR(&mp_builtin_delattr_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_dir), MP_ROM_PTR(&mp_builtin_dir_obj) },
+ { MP_ROM_QSTR(MP_QSTR_divmod), MP_ROM_PTR(&mp_builtin_divmod_obj) },
+ #if MICROPY_PY_BUILTINS_EVAL_EXEC
+ { MP_ROM_QSTR(MP_QSTR_eval), MP_ROM_PTR(&mp_builtin_eval_obj) },
+ { MP_ROM_QSTR(MP_QSTR_exec), MP_ROM_PTR(&mp_builtin_exec_obj) },
+ #endif
+ #if MICROPY_PY_BUILTINS_EXECFILE
+ { MP_ROM_QSTR(MP_QSTR_execfile), MP_ROM_PTR(&mp_builtin_execfile_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_getattr), MP_ROM_PTR(&mp_builtin_getattr_obj) },
+ { MP_ROM_QSTR(MP_QSTR_setattr), MP_ROM_PTR(&mp_builtin_setattr_obj) },
+ { MP_ROM_QSTR(MP_QSTR_globals), MP_ROM_PTR(&mp_builtin_globals_obj) },
+ { MP_ROM_QSTR(MP_QSTR_hasattr), MP_ROM_PTR(&mp_builtin_hasattr_obj) },
+ { MP_ROM_QSTR(MP_QSTR_hash), MP_ROM_PTR(&mp_builtin_hash_obj) },
+ #if MICROPY_PY_BUILTINS_HELP
+ { MP_ROM_QSTR(MP_QSTR_help), MP_ROM_PTR(&mp_builtin_help_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_hex), MP_ROM_PTR(&mp_builtin_hex_obj) },
+ { MP_ROM_QSTR(MP_QSTR_id), MP_ROM_PTR(&mp_builtin_id_obj) },
+ #if MICROPY_PY_BUILTINS_INPUT
+ { MP_ROM_QSTR(MP_QSTR_input), MP_ROM_PTR(&mp_builtin_input_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_isinstance), MP_ROM_PTR(&mp_builtin_isinstance_obj) },
+ { MP_ROM_QSTR(MP_QSTR_issubclass), MP_ROM_PTR(&mp_builtin_issubclass_obj) },
+ { MP_ROM_QSTR(MP_QSTR_iter), MP_ROM_PTR(&mp_builtin_iter_obj) },
+ { MP_ROM_QSTR(MP_QSTR_len), MP_ROM_PTR(&mp_builtin_len_obj) },
+ { MP_ROM_QSTR(MP_QSTR_locals), MP_ROM_PTR(&mp_builtin_locals_obj) },
+ #if MICROPY_PY_BUILTINS_MIN_MAX
+ { MP_ROM_QSTR(MP_QSTR_max), MP_ROM_PTR(&mp_builtin_max_obj) },
+ { MP_ROM_QSTR(MP_QSTR_min), MP_ROM_PTR(&mp_builtin_min_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_next), MP_ROM_PTR(&mp_builtin_next_obj) },
+ { MP_ROM_QSTR(MP_QSTR_oct), MP_ROM_PTR(&mp_builtin_oct_obj) },
+ { MP_ROM_QSTR(MP_QSTR_ord), MP_ROM_PTR(&mp_builtin_ord_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_builtin_pow_obj) },
+ { MP_ROM_QSTR(MP_QSTR_print), MP_ROM_PTR(&mp_builtin_print_obj) },
+ { MP_ROM_QSTR(MP_QSTR_repr), MP_ROM_PTR(&mp_builtin_repr_obj) },
+ { MP_ROM_QSTR(MP_QSTR_round), MP_ROM_PTR(&mp_builtin_round_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sorted), MP_ROM_PTR(&mp_builtin_sorted_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sum), MP_ROM_PTR(&mp_builtin_sum_obj) },
+
+ // built-in exceptions
+ { MP_ROM_QSTR(MP_QSTR_BaseException), MP_ROM_PTR(&mp_type_BaseException) },
+ { MP_ROM_QSTR(MP_QSTR_ArithmeticError), MP_ROM_PTR(&mp_type_ArithmeticError) },
+ { MP_ROM_QSTR(MP_QSTR_AssertionError), MP_ROM_PTR(&mp_type_AssertionError) },
+ { MP_ROM_QSTR(MP_QSTR_AttributeError), MP_ROM_PTR(&mp_type_AttributeError) },
+ { MP_ROM_QSTR(MP_QSTR_EOFError), MP_ROM_PTR(&mp_type_EOFError) },
+ { MP_ROM_QSTR(MP_QSTR_Exception), MP_ROM_PTR(&mp_type_Exception) },
+ { MP_ROM_QSTR(MP_QSTR_GeneratorExit), MP_ROM_PTR(&mp_type_GeneratorExit) },
+ { MP_ROM_QSTR(MP_QSTR_ImportError), MP_ROM_PTR(&mp_type_ImportError) },
+ { MP_ROM_QSTR(MP_QSTR_IndentationError), MP_ROM_PTR(&mp_type_IndentationError) },
+ { MP_ROM_QSTR(MP_QSTR_IndexError), MP_ROM_PTR(&mp_type_IndexError) },
+ { MP_ROM_QSTR(MP_QSTR_KeyboardInterrupt), MP_ROM_PTR(&mp_type_KeyboardInterrupt) },
+ { MP_ROM_QSTR(MP_QSTR_ReloadException), MP_ROM_PTR(&mp_type_ReloadException) },
+ { MP_ROM_QSTR(MP_QSTR_KeyError), MP_ROM_PTR(&mp_type_KeyError) },
+ { MP_ROM_QSTR(MP_QSTR_LookupError), MP_ROM_PTR(&mp_type_LookupError) },
+ { MP_ROM_QSTR(MP_QSTR_MemoryError), MP_ROM_PTR(&mp_type_MemoryError) },
+ { MP_ROM_QSTR(MP_QSTR_MpyError), MP_ROM_PTR(&mp_type_MpyError) },
+ { MP_ROM_QSTR(MP_QSTR_NameError), MP_ROM_PTR(&mp_type_NameError) },
+ { MP_ROM_QSTR(MP_QSTR_NotImplementedError), MP_ROM_PTR(&mp_type_NotImplementedError) },
+ { MP_ROM_QSTR(MP_QSTR_OSError), MP_ROM_PTR(&mp_type_OSError) },
+ { MP_ROM_QSTR(MP_QSTR_TimeoutError), MP_ROM_PTR(&mp_type_TimeoutError) },
+ { MP_ROM_QSTR(MP_QSTR_ConnectionError), MP_ROM_PTR(&mp_type_ConnectionError) },
+ { MP_ROM_QSTR(MP_QSTR_BrokenPipeError), MP_ROM_PTR(&mp_type_BrokenPipeError) },
+ { MP_ROM_QSTR(MP_QSTR_OverflowError), MP_ROM_PTR(&mp_type_OverflowError) },
+ { MP_ROM_QSTR(MP_QSTR_RuntimeError), MP_ROM_PTR(&mp_type_RuntimeError) },
+ #if MICROPY_PY_ASYNC_AWAIT
+ { MP_ROM_QSTR(MP_QSTR_StopAsyncIteration), MP_ROM_PTR(&mp_type_StopAsyncIteration) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_StopIteration), MP_ROM_PTR(&mp_type_StopIteration) },
+ { MP_ROM_QSTR(MP_QSTR_SyntaxError), MP_ROM_PTR(&mp_type_SyntaxError) },
+ { MP_ROM_QSTR(MP_QSTR_SystemExit), MP_ROM_PTR(&mp_type_SystemExit) },
+ { MP_ROM_QSTR(MP_QSTR_TypeError), MP_ROM_PTR(&mp_type_TypeError) },
+ #if MICROPY_PY_BUILTINS_STR_UNICODE
+ { MP_ROM_QSTR(MP_QSTR_UnicodeError), MP_ROM_PTR(&mp_type_UnicodeError) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_ValueError), MP_ROM_PTR(&mp_type_ValueError) },
+ #if MICROPY_EMIT_NATIVE
+ { MP_ROM_QSTR(MP_QSTR_ViperTypeError), MP_ROM_PTR(&mp_type_ViperTypeError) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_ZeroDivisionError), MP_ROM_PTR(&mp_type_ZeroDivisionError) },
+
+ // Extra builtins as defined by a port
+ MICROPY_PORT_BUILTINS
+ MICROPY_PORT_EXTRA_BUILTINS
+};
+
+MP_DEFINE_CONST_DICT(mp_module_builtins_globals, mp_module_builtins_globals_table);
+
+// The builtins module object itself; its globals point at the ROM dict above.
+const mp_obj_module_t mp_module_builtins = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_builtins_globals,
+};
diff --git a/circuitpython/py/modcmath.c b/circuitpython/py/modcmath.c
new file mode 100644
index 0000000..a361ab5
--- /dev/null
+++ b/circuitpython/py/modcmath.c
@@ -0,0 +1,152 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
+
+#include <math.h>
+
+// phase(z): returns the phase of the number z in the range (-pi, +pi]
+// z may be anything mp_obj_get_complex() accepts (int/float/complex).
+STATIC mp_obj_t mp_cmath_phase(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ return mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_phase_obj, mp_cmath_phase);
+
+// polar(z): returns the polar form of z as a tuple
+// Result is (modulus, phase) == (sqrt(re^2 + im^2), atan2(im, re)).
+STATIC mp_obj_t mp_cmath_polar(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ mp_obj_t tuple[2] = {
+ mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(real * real + imag * imag)),
+ mp_obj_new_float(MICROPY_FLOAT_C_FUN(atan2)(imag, real)),
+ };
+ return mp_obj_new_tuple(2, tuple);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_polar_obj, mp_cmath_polar);
+
+// rect(r, phi): returns the complex number with modulus r and phase phi
+// i.e. r*cos(phi) + r*sin(phi)*j — inverse of polar().
+STATIC mp_obj_t mp_cmath_rect(mp_obj_t r_obj, mp_obj_t phi_obj) {
+ mp_float_t r = mp_obj_get_float(r_obj);
+ mp_float_t phi = mp_obj_get_float(phi_obj);
+ return mp_obj_new_complex(r * MICROPY_FLOAT_C_FUN(cos)(phi), r * MICROPY_FLOAT_C_FUN(sin)(phi));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_cmath_rect_obj, mp_cmath_rect);
+
+// exp(z): return the exponential of z
+// Uses exp(a+bj) = e^a * (cos(b) + j*sin(b)).
+STATIC mp_obj_t mp_cmath_exp(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ mp_float_t exp_real = MICROPY_FLOAT_C_FUN(exp)(real);
+ return mp_obj_new_complex(exp_real * MICROPY_FLOAT_C_FUN(cos)(imag), exp_real * MICROPY_FLOAT_C_FUN(sin)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_exp_obj, mp_cmath_exp);
+
+// log(z): return the natural logarithm of z, with branch cut along the negative real axis
+// Real part is 0.5*log(|z|^2) (avoids a separate sqrt); imag part is the phase.
+// TODO can take second argument, being the base
+STATIC mp_obj_t mp_cmath_log(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log)(real * real + imag * imag), MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log_obj, mp_cmath_log);
+
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// log10(z): return the base-10 logarithm of z, with branch cut along the negative real axis
+// The constant 0.4342944819032518 is 1/ln(10), converting the phase to base 10.
+STATIC mp_obj_t mp_cmath_log10(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ return mp_obj_new_complex(MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(log10)(real * real + imag * imag), MICROPY_FLOAT_CONST(0.4342944819032518) * MICROPY_FLOAT_C_FUN(atan2)(imag, real));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_log10_obj, mp_cmath_log10);
+#endif
+
+// sqrt(z): return the square-root of z
+// Polar method: |z|^(1/2) = (re^2+im^2)^(1/4), angle halved via atan2.
+STATIC mp_obj_t mp_cmath_sqrt(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ mp_float_t sqrt_abs = MICROPY_FLOAT_C_FUN(pow)(real * real + imag * imag, MICROPY_FLOAT_CONST(0.25));
+ mp_float_t theta = MICROPY_FLOAT_CONST(0.5) * MICROPY_FLOAT_C_FUN(atan2)(imag, real);
+ return mp_obj_new_complex(sqrt_abs * MICROPY_FLOAT_C_FUN(cos)(theta), sqrt_abs * MICROPY_FLOAT_C_FUN(sin)(theta));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sqrt_obj, mp_cmath_sqrt);
+
+// cos(z): return the cosine of z
+// cos(a+bj) = cos(a)cosh(b) - j*sin(a)sinh(b)
+STATIC mp_obj_t mp_cmath_cos(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), -MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_cos_obj, mp_cmath_cos);
+
+// sin(z): return the sine of z
+// sin(a+bj) = sin(a)cosh(b) + j*cos(a)sinh(b)
+STATIC mp_obj_t mp_cmath_sin(mp_obj_t z_obj) {
+ mp_float_t real, imag;
+ mp_obj_get_complex(z_obj, &real, &imag);
+ return mp_obj_new_complex(MICROPY_FLOAT_C_FUN(sin)(real) * MICROPY_FLOAT_C_FUN(cosh)(imag), MICROPY_FLOAT_C_FUN(cos)(real) * MICROPY_FLOAT_C_FUN(sinh)(imag));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_cmath_sin_obj, mp_cmath_sin);
+
+// Symbol table for the cmath module. Commented-out entries are CPython cmath
+// functions not (yet) implemented in this port.
+STATIC const mp_rom_map_elem_t mp_module_cmath_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cmath) },
+ { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+ { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+ { MP_ROM_QSTR(MP_QSTR_phase), MP_ROM_PTR(&mp_cmath_phase_obj) },
+ { MP_ROM_QSTR(MP_QSTR_polar), MP_ROM_PTR(&mp_cmath_polar_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rect), MP_ROM_PTR(&mp_cmath_rect_obj) },
+ { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_cmath_exp_obj) },
+ { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_cmath_log_obj) },
+ #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+ { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_cmath_log10_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_cmath_sqrt_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_cmath_acos_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_cmath_asin_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_cmath_atan_obj) },
+ { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_cmath_cos_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_cmath_sin_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_cmath_tan_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_cmath_acosh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_cmath_asinh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_cmath_atanh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_cmath_cosh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_cmath_sinh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_cmath_tanh_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_cmath_isfinite_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_cmath_isinf_obj) },
+ // { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_cmath_isnan_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_cmath_globals, mp_module_cmath_globals_table);
+
+// The cmath module object; globals point at the ROM dict above.
+const mp_obj_module_t mp_module_cmath = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_cmath_globals,
+};
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
diff --git a/circuitpython/py/modcollections.c b/circuitpython/py/modcollections.c
new file mode 100644
index 0000000..235745f
--- /dev/null
+++ b/circuitpython/py/modcollections.c
@@ -0,0 +1,49 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+// Symbol table for the collections module; deque and OrderedDict are optional.
+STATIC const mp_rom_map_elem_t mp_module_collections_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_collections) },
+ #if MICROPY_PY_COLLECTIONS_DEQUE
+ { MP_ROM_QSTR(MP_QSTR_deque), MP_ROM_PTR(&mp_type_deque) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_namedtuple), MP_ROM_PTR(&mp_namedtuple_obj) },
+ #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+ { MP_ROM_QSTR(MP_QSTR_OrderedDict), MP_ROM_PTR(&mp_type_ordereddict) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_collections_globals, mp_module_collections_globals_table);
+
+// The collections module object; globals point at the ROM dict above.
+const mp_obj_module_t mp_module_collections = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_collections_globals,
+};
+
+#endif // MICROPY_PY_COLLECTIONS
diff --git a/circuitpython/py/modgc.c b/circuitpython/py/modgc.c
new file mode 100644
index 0000000..e655cfa
--- /dev/null
+++ b/circuitpython/py/modgc.c
@@ -0,0 +1,118 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "py/obj.h"
+#include "py/gc.h"
+
+#if MICROPY_PY_GC && MICROPY_ENABLE_GC
+
+// collect(): run a garbage collection
+// Returns the number of reclaimed objects when COLLECT_RETVAL is enabled,
+// otherwise None.
+STATIC mp_obj_t py_gc_collect(void) {
+ gc_collect();
+ #if MICROPY_PY_GC_COLLECT_RETVAL
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_MEM(gc_collected));
+ #else
+ return mp_const_none;
+ #endif
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_collect_obj, py_gc_collect);
+
+// disable(): disable the garbage collector
+// Only automatic collection is disabled; explicit gc.collect() still works.
+STATIC mp_obj_t gc_disable(void) {
+ MP_STATE_MEM(gc_auto_collect_enabled) = 0;
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_disable_obj, gc_disable);
+
+// enable(): enable the garbage collector
+// Re-enables automatic collection after gc.disable().
+STATIC mp_obj_t gc_enable(void) {
+ MP_STATE_MEM(gc_auto_collect_enabled) = 1;
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_enable_obj, gc_enable);
+
+// isenabled(): report whether automatic garbage collection is on.
+STATIC mp_obj_t gc_isenabled(void) {
+ return mp_obj_new_bool(MP_STATE_MEM(gc_auto_collect_enabled));
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_isenabled_obj, gc_isenabled);
+
+// mem_free(): return the number of bytes of available heap RAM
+// Queries the allocator's statistics; does not trigger a collection first.
+STATIC mp_obj_t gc_mem_free(void) {
+ gc_info_t info;
+ gc_info(&info);
+ return MP_OBJ_NEW_SMALL_INT(info.free);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_free_obj, gc_mem_free);
+
+// mem_alloc(): return the number of bytes of heap RAM that are allocated
+// Counterpart of mem_free(); both come from the same gc_info() snapshot type.
+STATIC mp_obj_t gc_mem_alloc(void) {
+ gc_info_t info;
+ gc_info(&info);
+ return MP_OBJ_NEW_SMALL_INT(info.used);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(gc_mem_alloc_obj, gc_mem_alloc);
+
+#if MICROPY_GC_ALLOC_THRESHOLD
+// threshold([amount]): get or set the byte count of allocation that triggers
+// an automatic collection. (size_t)-1 is the internal "disabled" sentinel,
+// reported to Python as -1; any negative setter value disables the threshold.
+STATIC mp_obj_t gc_threshold(size_t n_args, const mp_obj_t *args) {
+ if (n_args == 0) {
+ if (MP_STATE_MEM(gc_alloc_threshold) == (size_t)-1) {
+ return MP_OBJ_NEW_SMALL_INT(-1);
+ }
+ // stored in GC-block units; convert back to bytes for the caller
+ return mp_obj_new_int(MP_STATE_MEM(gc_alloc_threshold) * MICROPY_BYTES_PER_GC_BLOCK);
+ }
+ mp_int_t val = mp_obj_get_int(args[0]);
+ if (val < 0) {
+ MP_STATE_MEM(gc_alloc_threshold) = (size_t)-1;
+ } else {
+ MP_STATE_MEM(gc_alloc_threshold) = val / MICROPY_BYTES_PER_GC_BLOCK;
+ }
+ return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gc_threshold_obj, 0, 1, gc_threshold);
+#endif
+
+// Symbol table for the gc module; threshold() is config-dependent.
+STATIC const mp_rom_map_elem_t mp_module_gc_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_gc) },
+ { MP_ROM_QSTR(MP_QSTR_collect), MP_ROM_PTR(&gc_collect_obj) },
+ { MP_ROM_QSTR(MP_QSTR_disable), MP_ROM_PTR(&gc_disable_obj) },
+ { MP_ROM_QSTR(MP_QSTR_enable), MP_ROM_PTR(&gc_enable_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isenabled), MP_ROM_PTR(&gc_isenabled_obj) },
+ { MP_ROM_QSTR(MP_QSTR_mem_free), MP_ROM_PTR(&gc_mem_free_obj) },
+ { MP_ROM_QSTR(MP_QSTR_mem_alloc), MP_ROM_PTR(&gc_mem_alloc_obj) },
+ #if MICROPY_GC_ALLOC_THRESHOLD
+ { MP_ROM_QSTR(MP_QSTR_threshold), MP_ROM_PTR(&gc_threshold_obj) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_gc_globals, mp_module_gc_globals_table);
+
+// The gc module object; globals point at the ROM dict above.
+const mp_obj_module_t mp_module_gc = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_gc_globals,
+};
+
+#endif // MICROPY_PY_GC && MICROPY_ENABLE_GC
diff --git a/circuitpython/py/modio.c b/circuitpython/py/modio.c
new file mode 100644
index 0000000..819a997
--- /dev/null
+++ b/circuitpython/py/modio.c
@@ -0,0 +1,249 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stream.h"
+#include "py/binary.h"
+#include "py/objarray.h"
+#include "py/objstringio.h"
+#include "py/frozenmod.h"
+
+#if MICROPY_PY_IO
+
+extern const mp_obj_type_t mp_type_fileio;
+extern const mp_obj_type_t mp_type_textio;
+
+#if MICROPY_PY_IO_IOBASE
+
+STATIC const mp_obj_type_t mp_type_iobase;
+
+STATIC const mp_obj_base_t iobase_singleton = {&mp_type_iobase};
+
+STATIC mp_obj_t iobase_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type;
+ (void)n_args;
+ (void)n_kw;
+ (void)args;
+ return MP_OBJ_FROM_PTR(&iobase_singleton);
+}
+
+STATIC mp_uint_t iobase_read_write(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode, qstr qst) {
+ mp_obj_t dest[3];
+ mp_load_method(obj, qst, dest);
+ mp_obj_array_t ar = {{&mp_type_bytearray}, BYTEARRAY_TYPECODE, 0, size, buf};
+ dest[2] = MP_OBJ_FROM_PTR(&ar);
+ mp_obj_t ret_obj = mp_call_method_n_kw(1, 0, dest);
+ if (ret_obj == mp_const_none) {
+ *errcode = MP_EAGAIN;
+ return MP_STREAM_ERROR;
+ }
+ mp_int_t ret = mp_obj_get_int(ret_obj);
+ if (ret >= 0) {
+ return ret;
+ } else {
+ *errcode = -ret;
+ return MP_STREAM_ERROR;
+ }
+}
+STATIC mp_uint_t iobase_read(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode) {
+ return iobase_read_write(obj, buf, size, errcode, MP_QSTR_readinto);
+}
+
+STATIC mp_uint_t iobase_write(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode) {
+ return iobase_read_write(obj, (void *)buf, size, errcode, MP_QSTR_write);
+}
+
+STATIC mp_uint_t iobase_ioctl(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode) {
+ mp_obj_t dest[4];
+ mp_load_method(obj, MP_QSTR_ioctl, dest);
+ dest[2] = mp_obj_new_int_from_uint(request);
+ dest[3] = mp_obj_new_int_from_uint(arg);
+ mp_int_t ret = mp_obj_get_int(mp_call_method_n_kw(2, 0, dest));
+ if (ret >= 0) {
+ return ret;
+ } else {
+ *errcode = -ret;
+ return MP_STREAM_ERROR;
+ }
+}
+
+STATIC const mp_stream_p_t iobase_p = {
+ MP_PROTO_IMPLEMENT(MP_QSTR_protocol_stream)
+ .read = iobase_read,
+ .write = iobase_write,
+ .ioctl = iobase_ioctl,
+};
+
+STATIC const mp_obj_type_t mp_type_iobase = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_IOBase,
+ .make_new = iobase_make_new,
+ MP_TYPE_EXTENDED_FIELDS(
+ .protocol = &iobase_p,
+ ),
+};
+
+#endif // MICROPY_PY_IO_IOBASE
+
+#if MICROPY_PY_IO_BUFFEREDWRITER
+typedef struct _mp_obj_bufwriter_t {
+ mp_obj_base_t base;
+ mp_obj_t stream;
+ size_t alloc;
+ size_t len;
+ byte buf[0];
+} mp_obj_bufwriter_t;
+
+STATIC mp_obj_t bufwriter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 2, 2, false);
+ size_t alloc = mp_obj_get_int(args[1]);
+ mp_obj_bufwriter_t *o = m_new_obj_var(mp_obj_bufwriter_t, byte, alloc);
+ o->base.type = type;
+ o->stream = args[0];
+ o->alloc = alloc;
+ o->len = 0;
+ return o;
+}
+
+STATIC mp_uint_t bufwriter_write(mp_obj_t self_in, const void *buf, mp_uint_t size, int *errcode) {
+ mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+ mp_uint_t org_size = size;
+
+ while (size > 0) {
+ mp_uint_t rem = self->alloc - self->len;
+ if (size < rem) {
+ memcpy(self->buf + self->len, buf, size);
+ self->len += size;
+ return org_size;
+ }
+
+ // Buffer flushing policy here is to flush entire buffer all the time.
+ // This allows e.g. to have a block device as backing storage and write
+ // entire block to it. memcpy below is not ideal and could be optimized
+ // in some cases. But the way it is now it at least ensures that buffer
+ // is word-aligned, to guard against obscure cases when it matters, e.g.
+ // https://github.com/micropython/micropython/issues/1863
+ memcpy(self->buf + self->len, buf, rem);
+ buf = (byte *)buf + rem;
+ size -= rem;
+ mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->alloc, errcode);
+ (void)out_sz;
+ if (*errcode != 0) {
+ return MP_STREAM_ERROR;
+ }
+ // TODO: try to recover from a case of non-blocking stream, e.g. move
+ // remaining chunk to the beginning of buffer.
+ assert(out_sz == self->alloc);
+ self->len = 0;
+ }
+
+ return org_size;
+}
+
+STATIC mp_obj_t bufwriter_flush(mp_obj_t self_in) {
+ mp_obj_bufwriter_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (self->len != 0) {
+ int err;
+ mp_uint_t out_sz = mp_stream_write_exactly(self->stream, self->buf, self->len, &err);
+ (void)out_sz;
+ // TODO: try to recover from a case of non-blocking stream, e.g. move
+ // remaining chunk to the beginning of buffer.
+ assert(out_sz == self->len);
+ self->len = 0;
+ if (err != 0) {
+ mp_raise_OSError(err);
+ }
+ }
+
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(bufwriter_flush_obj, bufwriter_flush);
+
+STATIC const mp_rom_map_elem_t bufwriter_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) },
+ { MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&bufwriter_flush_obj) },
+};
+STATIC MP_DEFINE_CONST_DICT(bufwriter_locals_dict, bufwriter_locals_dict_table);
+
+STATIC const mp_stream_p_t bufwriter_stream_p = {
+ MP_PROTO_IMPLEMENT(MP_QSTR_protocol_stream)
+ .write = bufwriter_write,
+};
+
+STATIC const mp_obj_type_t mp_type_bufwriter = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_BufferedWriter,
+ .make_new = bufwriter_make_new,
+ .locals_dict = (mp_obj_dict_t *)&bufwriter_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .protocol = &bufwriter_stream_p,
+ ),
+};
+#endif // MICROPY_PY_IO_BUFFEREDWRITER
+
+STATIC const mp_rom_map_elem_t mp_module_io_globals_table[] = {
+ #if CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_io) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_uio) },
+ #endif
+ // Note: mp_builtin_open_obj should be defined by port, it's not
+ // part of the core.
+ { MP_ROM_QSTR(MP_QSTR_open), MP_ROM_PTR(&mp_builtin_open_obj) },
+ #if MICROPY_PY_IO_IOBASE
+ { MP_ROM_QSTR(MP_QSTR_IOBase), MP_ROM_PTR(&mp_type_iobase) },
+ #endif
+ #if MICROPY_PY_IO_FILEIO
+ { MP_ROM_QSTR(MP_QSTR_FileIO), MP_ROM_PTR(&mp_type_fileio) },
+ #if MICROPY_CPYTHON_COMPAT
+ { MP_ROM_QSTR(MP_QSTR_TextIOWrapper), MP_ROM_PTR(&mp_type_textio) },
+ #endif
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_StringIO), MP_ROM_PTR(&mp_type_stringio) },
+ #if MICROPY_PY_IO_BYTESIO
+ { MP_ROM_QSTR(MP_QSTR_BytesIO), MP_ROM_PTR(&mp_type_bytesio) },
+ #endif
+ #if MICROPY_PY_IO_BUFFEREDWRITER
+ { MP_ROM_QSTR(MP_QSTR_BufferedWriter), MP_ROM_PTR(&mp_type_bufwriter) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_io_globals, mp_module_io_globals_table);
+
+const mp_obj_module_t mp_module_io = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_io_globals,
+};
+
+#endif
diff --git a/circuitpython/py/modmath.c b/circuitpython/py/modmath.c
new file mode 100644
index 0000000..103310d
--- /dev/null
+++ b/circuitpython/py/modmath.c
@@ -0,0 +1,435 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH
+
+#include <math.h>
+
+// M_PI is not part of the math.h standard and may not be defined
+// And by defining our own we can ensure it uses the correct const format.
+#define MP_PI MICROPY_FLOAT_CONST(3.14159265358979323846)
+#define MP_PI_4 MICROPY_FLOAT_CONST(0.78539816339744830962)
+#define MP_3_PI_4 MICROPY_FLOAT_CONST(2.35619449019234492885)
+
+STATIC NORETURN void math_error(void) {
+ mp_raise_ValueError(MP_ERROR_TEXT("math domain error"));
+}
+
+STATIC mp_obj_t math_generic_1(mp_obj_t x_obj, mp_float_t (*f)(mp_float_t)) {
+ mp_float_t x = mp_obj_get_float(x_obj);
+ mp_float_t ans = f(x);
+ if ((isnan(ans) && !isnan(x)) || (isinf(ans) && !isinf(x))) {
+ math_error();
+ }
+ return mp_obj_new_float(ans);
+}
+
+STATIC mp_obj_t math_generic_2(mp_obj_t x_obj, mp_obj_t y_obj, mp_float_t (*f)(mp_float_t, mp_float_t)) {
+ mp_float_t x = mp_obj_get_float(x_obj);
+ mp_float_t y = mp_obj_get_float(y_obj);
+ mp_float_t ans = f(x, y);
+ if ((isnan(ans) && !isnan(x) && !isnan(y)) || (isinf(ans) && !isinf(x))) {
+ math_error();
+ }
+ return mp_obj_new_float(ans);
+}
+
+#define MATH_FUN_1(py_name, c_name) \
+ STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { \
+ return math_generic_1(x_obj, MICROPY_FLOAT_C_FUN(c_name)); \
+ } \
+ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_1_TO_BOOL(py_name, c_name) \
+ STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_bool(c_name(mp_obj_get_float(x_obj))); } \
+ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_1_TO_INT(py_name, c_name) \
+ STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj) { return mp_obj_new_int_from_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj))); } \
+ STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_2(py_name, c_name) \
+ STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
+ return math_generic_2(x_obj, y_obj, MICROPY_FLOAT_C_FUN(c_name)); \
+ } \
+ STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#define MATH_FUN_2_FLT_INT(py_name, c_name) \
+ STATIC mp_obj_t mp_math_##py_name(mp_obj_t x_obj, mp_obj_t y_obj) { \
+ return mp_obj_new_float(MICROPY_FLOAT_C_FUN(c_name)(mp_obj_get_float(x_obj), mp_obj_get_int(y_obj))); \
+ } \
+ STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_math_##py_name##_obj, mp_math_##py_name);
+
+#if MP_NEED_LOG2
+#undef log2
+#undef log2f
+// 1.442695040888963407354163704 is 1/_M_LN2
+mp_float_t MICROPY_FLOAT_C_FUN(log2)(mp_float_t x) {
+ return MICROPY_FLOAT_C_FUN(log)(x) * MICROPY_FLOAT_CONST(1.442695040888963407354163704);
+}
+#endif
+
+// sqrt(x): returns the square root of x
+MATH_FUN_1(sqrt, sqrt)
+// pow(x, y): returns x to the power of y
+#if MICROPY_PY_MATH_POW_FIX_NAN
+mp_float_t pow_func(mp_float_t x, mp_float_t y) {
+ // pow(base, 0) returns 1 for any base, even when base is NaN
+ // pow(+1, exponent) returns 1 for any exponent, even when exponent is NaN
+ if (x == MICROPY_FLOAT_CONST(1.0) || y == MICROPY_FLOAT_CONST(0.0)) {
+ return MICROPY_FLOAT_CONST(1.0);
+ }
+ return MICROPY_FLOAT_C_FUN(pow)(x, y);
+}
+MATH_FUN_2(pow, pow_func)
+#else
+MATH_FUN_2(pow, pow)
+#endif
+// exp(x)
+MATH_FUN_1(exp, exp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// expm1(x)
+MATH_FUN_1(expm1, expm1)
+// log2(x)
+MATH_FUN_1(log2, log2)
+// log10(x)
+MATH_FUN_1(log10, log10)
+// cosh(x)
+MATH_FUN_1(cosh, cosh)
+// sinh(x)
+MATH_FUN_1(sinh, sinh)
+// tanh(x)
+MATH_FUN_1(tanh, tanh)
+// acosh(x)
+MATH_FUN_1(acosh, acosh)
+// asinh(x)
+MATH_FUN_1(asinh, asinh)
+// atanh(x)
+MATH_FUN_1(atanh, atanh)
+#endif
+// cos(x)
+MATH_FUN_1(cos, cos)
+// sin(x)
+MATH_FUN_1(sin, sin)
+// tan(x)
+MATH_FUN_1(tan, tan)
+// acos(x)
+MATH_FUN_1(acos, acos)
+// asin(x)
+MATH_FUN_1(asin, asin)
+// atan(x)
+MATH_FUN_1(atan, atan)
+// atan2(y, x)
+#if MICROPY_PY_MATH_ATAN2_FIX_INFNAN
+mp_float_t atan2_func(mp_float_t x, mp_float_t y) {
+ if (isinf(x) && isinf(y)) {
+ return copysign(y < 0 ? MP_3_PI_4 : MP_PI_4, x);
+ }
+ return atan2(x, y);
+}
+MATH_FUN_2(atan2, atan2_func)
+#else
+MATH_FUN_2(atan2, atan2)
+#endif
+// ceil(x)
+MATH_FUN_1_TO_INT(ceil, ceil)
+// copysign(x, y)
+STATIC mp_float_t MICROPY_FLOAT_C_FUN(copysign_func)(mp_float_t x, mp_float_t y) {
+ return MICROPY_FLOAT_C_FUN(copysign)(x, y);
+}
+MATH_FUN_2(copysign, copysign_func)
+// fabs(x)
+STATIC mp_float_t MICROPY_FLOAT_C_FUN(fabs_func)(mp_float_t x) {
+ return MICROPY_FLOAT_C_FUN(fabs)(x);
+}
+MATH_FUN_1(fabs, fabs_func)
+// floor(x)
+MATH_FUN_1_TO_INT(floor, floor) // TODO: delegate to x.__floor__() if x is not a float
+// fmod(x, y)
+#if MICROPY_PY_MATH_FMOD_FIX_INFNAN
+mp_float_t fmod_func(mp_float_t x, mp_float_t y) {
+ return (!isinf(x) && isinf(y)) ? x : fmod(x, y);
+}
+MATH_FUN_2(fmod, fmod_func)
+#else
+MATH_FUN_2(fmod, fmod)
+#endif
+// isfinite(x)
+MATH_FUN_1_TO_BOOL(isfinite, isfinite)
+// isinf(x)
+MATH_FUN_1_TO_BOOL(isinf, isinf)
+// isnan(x)
+MATH_FUN_1_TO_BOOL(isnan, isnan)
+// trunc(x)
+MATH_FUN_1_TO_INT(trunc, trunc)
+// ldexp(x, exp)
+MATH_FUN_2_FLT_INT(ldexp, ldexp)
+#if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+// erf(x): return the error function of x
+MATH_FUN_1(erf, erf)
+// erfc(x): return the complementary error function of x
+MATH_FUN_1(erfc, erfc)
+// gamma(x): return the gamma function of x
+MATH_FUN_1(gamma, tgamma)
+// lgamma(x): return the natural logarithm of the gamma function of x
+MATH_FUN_1(lgamma, lgamma)
+#endif
+// TODO: fsum
+
+#if MICROPY_PY_MATH_ISCLOSE
+STATIC mp_obj_t mp_math_isclose(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ enum { ARG_rel_tol, ARG_abs_tol };
+ static const mp_arg_t allowed_args[] = {
+ {MP_QSTR_rel_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL}},
+ {MP_QSTR_abs_tol, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_obj = MP_OBJ_NEW_SMALL_INT(0)}},
+ };
+ mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+ mp_arg_parse_all(n_args - 2, pos_args + 2, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+ const mp_float_t a = mp_obj_get_float(pos_args[0]);
+ const mp_float_t b = mp_obj_get_float(pos_args[1]);
+ const mp_float_t rel_tol = args[ARG_rel_tol].u_obj == MP_OBJ_NULL
+ ? (mp_float_t)1e-9 : mp_obj_get_float(args[ARG_rel_tol].u_obj);
+ const mp_float_t abs_tol = mp_obj_get_float(args[ARG_abs_tol].u_obj);
+ if (rel_tol < (mp_float_t)0.0 || abs_tol < (mp_float_t)0.0) {
+ math_error();
+ }
+ if (a == b) {
+ return mp_const_true;
+ }
+ const mp_float_t difference = MICROPY_FLOAT_C_FUN(fabs)(a - b);
+ if (isinf(difference)) { // Either a or b is inf
+ return mp_const_false;
+ }
+ if ((difference <= abs_tol) ||
+ (difference <= MICROPY_FLOAT_C_FUN(fabs)(rel_tol * a)) ||
+ (difference <= MICROPY_FLOAT_C_FUN(fabs)(rel_tol * b))) {
+ return mp_const_true;
+ }
+ return mp_const_false;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(mp_math_isclose_obj, 2, mp_math_isclose);
+#endif
+
+// Function that takes a variable number of arguments
+
+// log(x[, base])
+STATIC mp_obj_t mp_math_log(size_t n_args, const mp_obj_t *args) {
+ mp_float_t x = mp_obj_get_float(args[0]);
+ if (x <= (mp_float_t)0.0) {
+ math_error();
+ }
+ mp_float_t l = MICROPY_FLOAT_C_FUN(log)(x);
+ if (n_args == 1) {
+ return mp_obj_new_float(l);
+ } else {
+ mp_float_t base = mp_obj_get_float(args[1]);
+ if (base <= (mp_float_t)0.0) {
+ math_error();
+ } else if (base == (mp_float_t)1.0) {
+ mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("division by zero"));
+ }
+ return mp_obj_new_float(l / MICROPY_FLOAT_C_FUN(log)(base));
+ }
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_math_log_obj, 1, 2, mp_math_log);
+
+// Functions that return a tuple
+
+// frexp(x): converts a floating-point number to fractional and integral components
+STATIC mp_obj_t mp_math_frexp(mp_obj_t x_obj) {
+ int int_exponent = 0;
+ mp_float_t significand = MICROPY_FLOAT_C_FUN(frexp)(mp_obj_get_float(x_obj), &int_exponent);
+ mp_obj_t tuple[2];
+ tuple[0] = mp_obj_new_float(significand);
+ tuple[1] = mp_obj_new_int(int_exponent);
+ return mp_obj_new_tuple(2, tuple);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_frexp_obj, mp_math_frexp);
+
+// modf(x)
+STATIC mp_obj_t mp_math_modf(mp_obj_t x_obj) {
+ mp_float_t int_part = 0.0;
+ mp_float_t x = mp_obj_get_float(x_obj);
+ mp_float_t fractional_part = MICROPY_FLOAT_C_FUN(modf)(x, &int_part);
+ #if MICROPY_PY_MATH_MODF_FIX_NEGZERO
+ if (fractional_part == MICROPY_FLOAT_CONST(0.0)) {
+ fractional_part = copysign(fractional_part, x);
+ }
+ #endif
+ mp_obj_t tuple[2];
+ tuple[0] = mp_obj_new_float(fractional_part);
+ tuple[1] = mp_obj_new_float(int_part);
+ return mp_obj_new_tuple(2, tuple);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_modf_obj, mp_math_modf);
+
+// Angular conversions
+
+// radians(x)
+STATIC mp_obj_t mp_math_radians(mp_obj_t x_obj) {
+ return mp_obj_new_float(mp_obj_get_float(x_obj) * (MP_PI / MICROPY_FLOAT_CONST(180.0)));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_radians_obj, mp_math_radians);
+
+// degrees(x)
+STATIC mp_obj_t mp_math_degrees(mp_obj_t x_obj) {
+ return mp_obj_new_float(mp_obj_get_float(x_obj) * (MICROPY_FLOAT_CONST(180.0) / MP_PI));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_degrees_obj, mp_math_degrees);
+
+#if MICROPY_PY_MATH_FACTORIAL
+
+#if MICROPY_OPT_MATH_FACTORIAL
+
+// factorial(x): slightly efficient recursive implementation
+STATIC mp_obj_t mp_math_factorial_inner(mp_uint_t start, mp_uint_t end) {
+ if (start == end) {
+ return mp_obj_new_int(start);
+ } else if (end - start == 1) {
+ return mp_binary_op(MP_BINARY_OP_MULTIPLY, MP_OBJ_NEW_SMALL_INT(start), MP_OBJ_NEW_SMALL_INT(end));
+ } else if (end - start == 2) {
+ mp_obj_t left = MP_OBJ_NEW_SMALL_INT(start);
+ mp_obj_t middle = MP_OBJ_NEW_SMALL_INT(start + 1);
+ mp_obj_t right = MP_OBJ_NEW_SMALL_INT(end);
+ mp_obj_t tmp = mp_binary_op(MP_BINARY_OP_MULTIPLY, left, middle);
+ return mp_binary_op(MP_BINARY_OP_MULTIPLY, tmp, right);
+ } else {
+ mp_uint_t middle = start + ((end - start) >> 1);
+ mp_obj_t left = mp_math_factorial_inner(start, middle);
+ mp_obj_t right = mp_math_factorial_inner(middle + 1, end);
+ return mp_binary_op(MP_BINARY_OP_MULTIPLY, left, right);
+ }
+}
+STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
+ mp_int_t max = mp_obj_get_int(x_obj);
+ if (max < 0) {
+ mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
+ } else if (max == 0) {
+ return MP_OBJ_NEW_SMALL_INT(1);
+ }
+ return mp_math_factorial_inner(1, max);
+}
+
+#else
+
+// factorial(x): squared difference implementation
+// based on http://www.luschny.de/math/factorial/index.html
+STATIC mp_obj_t mp_math_factorial(mp_obj_t x_obj) {
+ mp_int_t max = mp_obj_get_int(x_obj);
+ if (max < 0) {
+ mp_raise_ValueError(MP_ERROR_TEXT("negative factorial"));
+ } else if (max <= 1) {
+ return MP_OBJ_NEW_SMALL_INT(1);
+ }
+ mp_int_t h = max >> 1;
+ mp_int_t q = h * h;
+ mp_int_t r = q << 1;
+ if (max & 1) {
+ r *= max;
+ }
+ mp_obj_t prod = MP_OBJ_NEW_SMALL_INT(r);
+ for (mp_int_t num = 1; num < max - 2; num += 2) {
+ q -= num;
+ prod = mp_binary_op(MP_BINARY_OP_MULTIPLY, prod, MP_OBJ_NEW_SMALL_INT(q));
+ }
+ return prod;
+}
+
+#endif
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_math_factorial_obj, mp_math_factorial);
+
+#endif
+
+STATIC const mp_rom_map_elem_t mp_module_math_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_math) },
+ { MP_ROM_QSTR(MP_QSTR_e), mp_const_float_e },
+ { MP_ROM_QSTR(MP_QSTR_pi), mp_const_float_pi },
+ { MP_ROM_QSTR(MP_QSTR_sqrt), MP_ROM_PTR(&mp_math_sqrt_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pow), MP_ROM_PTR(&mp_math_pow_obj) },
+ { MP_ROM_QSTR(MP_QSTR_exp), MP_ROM_PTR(&mp_math_exp_obj) },
+ #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+ { MP_ROM_QSTR(MP_QSTR_expm1), MP_ROM_PTR(&mp_math_expm1_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_log), MP_ROM_PTR(&mp_math_log_obj) },
+ #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+ { MP_ROM_QSTR(MP_QSTR_log2), MP_ROM_PTR(&mp_math_log2_obj) },
+ { MP_ROM_QSTR(MP_QSTR_log10), MP_ROM_PTR(&mp_math_log10_obj) },
+ { MP_ROM_QSTR(MP_QSTR_cosh), MP_ROM_PTR(&mp_math_cosh_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sinh), MP_ROM_PTR(&mp_math_sinh_obj) },
+ { MP_ROM_QSTR(MP_QSTR_tanh), MP_ROM_PTR(&mp_math_tanh_obj) },
+ { MP_ROM_QSTR(MP_QSTR_acosh), MP_ROM_PTR(&mp_math_acosh_obj) },
+ { MP_ROM_QSTR(MP_QSTR_asinh), MP_ROM_PTR(&mp_math_asinh_obj) },
+ { MP_ROM_QSTR(MP_QSTR_atanh), MP_ROM_PTR(&mp_math_atanh_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_cos), MP_ROM_PTR(&mp_math_cos_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sin), MP_ROM_PTR(&mp_math_sin_obj) },
+ { MP_ROM_QSTR(MP_QSTR_tan), MP_ROM_PTR(&mp_math_tan_obj) },
+ { MP_ROM_QSTR(MP_QSTR_acos), MP_ROM_PTR(&mp_math_acos_obj) },
+ { MP_ROM_QSTR(MP_QSTR_asin), MP_ROM_PTR(&mp_math_asin_obj) },
+ { MP_ROM_QSTR(MP_QSTR_atan), MP_ROM_PTR(&mp_math_atan_obj) },
+ { MP_ROM_QSTR(MP_QSTR_atan2), MP_ROM_PTR(&mp_math_atan2_obj) },
+ { MP_ROM_QSTR(MP_QSTR_ceil), MP_ROM_PTR(&mp_math_ceil_obj) },
+ { MP_ROM_QSTR(MP_QSTR_copysign), MP_ROM_PTR(&mp_math_copysign_obj) },
+ { MP_ROM_QSTR(MP_QSTR_fabs), MP_ROM_PTR(&mp_math_fabs_obj) },
+ { MP_ROM_QSTR(MP_QSTR_floor), MP_ROM_PTR(&mp_math_floor_obj) },
+ { MP_ROM_QSTR(MP_QSTR_fmod), MP_ROM_PTR(&mp_math_fmod_obj) },
+ { MP_ROM_QSTR(MP_QSTR_frexp), MP_ROM_PTR(&mp_math_frexp_obj) },
+ { MP_ROM_QSTR(MP_QSTR_ldexp), MP_ROM_PTR(&mp_math_ldexp_obj) },
+ { MP_ROM_QSTR(MP_QSTR_modf), MP_ROM_PTR(&mp_math_modf_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isfinite), MP_ROM_PTR(&mp_math_isfinite_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isinf), MP_ROM_PTR(&mp_math_isinf_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isnan), MP_ROM_PTR(&mp_math_isnan_obj) },
+ #if MICROPY_PY_MATH_ISCLOSE
+ { MP_ROM_QSTR(MP_QSTR_isclose), MP_ROM_PTR(&mp_math_isclose_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_trunc), MP_ROM_PTR(&mp_math_trunc_obj) },
+ { MP_ROM_QSTR(MP_QSTR_radians), MP_ROM_PTR(&mp_math_radians_obj) },
+ { MP_ROM_QSTR(MP_QSTR_degrees), MP_ROM_PTR(&mp_math_degrees_obj) },
+ #if MICROPY_PY_MATH_FACTORIAL
+ { MP_ROM_QSTR(MP_QSTR_factorial), MP_ROM_PTR(&mp_math_factorial_obj) },
+ #endif
+ #if MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+ { MP_ROM_QSTR(MP_QSTR_erf), MP_ROM_PTR(&mp_math_erf_obj) },
+ { MP_ROM_QSTR(MP_QSTR_erfc), MP_ROM_PTR(&mp_math_erfc_obj) },
+ { MP_ROM_QSTR(MP_QSTR_gamma), MP_ROM_PTR(&mp_math_gamma_obj) },
+ { MP_ROM_QSTR(MP_QSTR_lgamma), MP_ROM_PTR(&mp_math_lgamma_obj) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_math_globals, mp_module_math_globals_table);
+
+const mp_obj_module_t mp_module_math = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_math_globals,
+};
+
+#endif // MICROPY_PY_BUILTINS_FLOAT && MICROPY_PY_MATH
diff --git a/circuitpython/py/modmicropython.c b/circuitpython/py/modmicropython.c
new file mode 100644
index 0000000..0465a4e
--- /dev/null
+++ b/circuitpython/py/modmicropython.c
@@ -0,0 +1,213 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/builtin.h"
+#include "py/stackctrl.h"
+#include "py/runtime.h"
+#include "py/gc.h"
+#include "py/mphal.h"
+
+#include "supervisor/shared/translate.h"
+
+// Various builtins specific to MicroPython runtime,
+// living in micropython module
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_COMPILER
+STATIC mp_obj_t mp_micropython_opt_level(size_t n_args, const mp_obj_t *args) {
+ if (n_args == 0) {
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_VM(mp_optimise_value));
+ } else {
+ MP_STATE_VM(mp_optimise_value) = mp_obj_get_int(args[0]);
+ return mp_const_none;
+ }
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_opt_level_obj, 0, 1, mp_micropython_opt_level);
+#endif
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if MICROPY_MEM_STATS
+STATIC mp_obj_t mp_micropython_mem_total(void) {
+ return MP_OBJ_NEW_SMALL_INT(m_get_total_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_total_obj, mp_micropython_mem_total);
+
+STATIC mp_obj_t mp_micropython_mem_current(void) {
+ return MP_OBJ_NEW_SMALL_INT(m_get_current_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_current_obj, mp_micropython_mem_current);
+
+STATIC mp_obj_t mp_micropython_mem_peak(void) {
+ return MP_OBJ_NEW_SMALL_INT(m_get_peak_bytes_allocated());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_mem_peak_obj, mp_micropython_mem_peak);
+#endif
+
+mp_obj_t mp_micropython_mem_info(size_t n_args, const mp_obj_t *args) {
+ (void)args;
+ #if MICROPY_MEM_STATS
+ mp_printf(&mp_plat_print, "mem: total=" UINT_FMT ", current=" UINT_FMT ", peak=" UINT_FMT "\n",
+ (mp_uint_t)m_get_total_bytes_allocated(), (mp_uint_t)m_get_current_bytes_allocated(), (mp_uint_t)m_get_peak_bytes_allocated());
+ #endif
+ #if MICROPY_STACK_CHECK
+ mp_printf(&mp_plat_print, "stack: " UINT_FMT " out of " UINT_FMT "\n",
+ mp_stack_usage(), (mp_uint_t)MP_STATE_THREAD(stack_limit));
+ #else
+ mp_printf(&mp_plat_print, "stack: " UINT_FMT "\n", mp_stack_usage());
+ #endif
+ #if MICROPY_ENABLE_GC
+ gc_dump_info();
+ if (n_args == 1) {
+ // arg given means dump gc allocation table
+ gc_dump_alloc_table();
+ }
+ #else
+ (void)n_args;
+ #endif
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_mem_info_obj, 0, 1, mp_micropython_mem_info);
+
+STATIC mp_obj_t mp_micropython_qstr_info(size_t n_args, const mp_obj_t *args) {
+ (void)args;
+ size_t n_pool, n_qstr, n_str_data_bytes, n_total_bytes;
+ qstr_pool_info(&n_pool, &n_qstr, &n_str_data_bytes, &n_total_bytes);
+ mp_printf(&mp_plat_print, "qstr pool: n_pool=%u, n_qstr=%u, n_str_data_bytes=%u, n_total_bytes=%u\n",
+ n_pool, n_qstr, n_str_data_bytes, n_total_bytes);
+ if (n_args == 1) {
+ // arg given means dump qstr data
+ qstr_dump_data();
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_micropython_qstr_info_obj, 0, 1, mp_micropython_qstr_info);
+
+#endif // MICROPY_PY_MICROPYTHON_MEM_INFO
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_PY_MICROPYTHON_STACK_USE
+STATIC mp_obj_t mp_micropython_stack_use(void) {
+ return MP_OBJ_NEW_SMALL_INT(mp_stack_usage());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_stack_use_obj, mp_micropython_stack_use);
+#endif
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_PYSTACK
+STATIC mp_obj_t mp_micropython_pystack_use(void) {
+ return MP_OBJ_NEW_SMALL_INT(mp_pystack_usage());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_pystack_use_obj, mp_micropython_pystack_use);
+#endif
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_GC
+STATIC mp_obj_t mp_micropython_heap_lock(void) {
+ gc_lock();
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_lock_obj, mp_micropython_heap_lock);
+
+STATIC mp_obj_t mp_micropython_heap_unlock(void) {
+ gc_unlock();
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_unlock_obj, mp_micropython_heap_unlock);
+
+#if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+STATIC mp_obj_t mp_micropython_heap_locked(void) {
+ return MP_OBJ_NEW_SMALL_INT(MP_STATE_THREAD(gc_lock_depth));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mp_micropython_heap_locked_obj, mp_micropython_heap_locked);
+#endif
+#endif
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_alloc_emergency_exception_buf_obj, mp_alloc_emergency_exception_buf);
+#endif
+
+#if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_KBD_EXCEPTION
+STATIC mp_obj_t mp_micropython_kbd_intr(mp_obj_t int_chr_in) {
+ mp_hal_set_interrupt_char(mp_obj_get_int(int_chr_in));
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_micropython_kbd_intr_obj, mp_micropython_kbd_intr);
+#endif
+
+#if MICROPY_ENABLE_SCHEDULER
+STATIC mp_obj_t mp_micropython_schedule(mp_obj_t function, mp_obj_t arg) {
+ if (!mp_sched_schedule(function, arg)) {
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("schedule queue full"));
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(mp_micropython_schedule_obj, mp_micropython_schedule);
+#endif
+
+STATIC const mp_rom_map_elem_t mp_module_micropython_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_micropython) },
+ { MP_ROM_QSTR(MP_QSTR_const), MP_ROM_PTR(&mp_identity_obj) },
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_COMPILER
+ { MP_ROM_QSTR(MP_QSTR_opt_level), MP_ROM_PTR(&mp_micropython_opt_level_obj) },
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_PY_MICROPYTHON_MEM_INFO
+ #if MICROPY_MEM_STATS
+ { MP_ROM_QSTR(MP_QSTR_mem_total), MP_ROM_PTR(&mp_micropython_mem_total_obj) },
+ { MP_ROM_QSTR(MP_QSTR_mem_current), MP_ROM_PTR(&mp_micropython_mem_current_obj) },
+ { MP_ROM_QSTR(MP_QSTR_mem_peak), MP_ROM_PTR(&mp_micropython_mem_peak_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_mem_info), MP_ROM_PTR(&mp_micropython_mem_info_obj) },
+ { MP_ROM_QSTR(MP_QSTR_qstr_info), MP_ROM_PTR(&mp_micropython_qstr_info_obj) },
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_PY_MICROPYTHON_STACK_USE
+ { MP_ROM_QSTR(MP_QSTR_stack_use), MP_ROM_PTR(&mp_micropython_stack_use_obj) },
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && (MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0)
+ { MP_ROM_QSTR(MP_QSTR_alloc_emergency_exception_buf), MP_ROM_PTR(&mp_alloc_emergency_exception_buf_obj) },
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_PYSTACK
+ { MP_ROM_QSTR(MP_QSTR_pystack_use), MP_ROM_PTR(&mp_micropython_pystack_use_obj) },
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_ENABLE_GC
+ { MP_ROM_QSTR(MP_QSTR_heap_lock), MP_ROM_PTR(&mp_micropython_heap_lock_obj) },
+ { MP_ROM_QSTR(MP_QSTR_heap_unlock), MP_ROM_PTR(&mp_micropython_heap_unlock_obj) },
+ #if MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+ { MP_ROM_QSTR(MP_QSTR_heap_locked), MP_ROM_PTR(&mp_micropython_heap_locked_obj) },
+ #endif
+ #endif
+ #if CIRCUITPY_MICROPYTHON_ADVANCED && MICROPY_KBD_EXCEPTION
+ { MP_ROM_QSTR(MP_QSTR_kbd_intr), MP_ROM_PTR(&mp_micropython_kbd_intr_obj) },
+ #endif
+ #if MICROPY_ENABLE_SCHEDULER
+ { MP_ROM_QSTR(MP_QSTR_schedule), MP_ROM_PTR(&mp_micropython_schedule_obj) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_micropython_globals, mp_module_micropython_globals_table);
+
+const mp_obj_module_t mp_module_micropython = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_micropython_globals,
+};
diff --git a/circuitpython/py/modstruct.c b/circuitpython/py/modstruct.c
new file mode 100644
index 0000000..c0b0fb7
--- /dev/null
+++ b/circuitpython/py/modstruct.c
@@ -0,0 +1,285 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/objtuple.h"
+#include "py/binary.h"
+#include "py/parsenum.h"
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_STRUCT
+
+/*
+ This module implements most of character typecodes from CPython, with
+ some extensions:
+
+ O - (Pointer to) an arbitrary Python object. This is useful for callback
+ data, etc. Note that you must keep reference to passed object in
+ your Python application, otherwise it may be garbage-collected,
+ and then when you get back this value from callback it may be
+ invalid (and lead to crash).
+ S - Pointer to a string (returned as a Python string). Note the
+ difference from "Ns", - the latter says "in this place of structure
+ is character data of up to N bytes length", while "S" means
+ "in this place of a structure is a pointer to zero-terminated
+ character data".
+ */
+
+// Read an optional byte-order/alignment prefix character from the format
+// string. '!' (network order) is folded into '>' (big-endian). If the first
+// char is not a recognized prefix, the format pointer is left untouched and
+// native '@' is returned; otherwise *fmt is advanced past the prefix.
+STATIC char get_fmt_type(const char **fmt) {
+    char t = **fmt;
+    switch (t) {
+        case '!':
+            t = '>';
+            break;
+        case '@':
+        case '=':
+        case '<':
+        case '>':
+            break;
+        default:
+            return '@';
+    }
+    // Skip type char
+    (*fmt)++;
+    return t;
+}
+
+// Parse a decimal repeat count at *p and advance *p past the digits.
+// Callers only invoke this after checking the first char is a digit.
+STATIC mp_uint_t get_fmt_num(const char **p) {
+    const char *num = *p;
+    uint len = 1;
+    while (unichar_isdigit(*++num)) {
+        len++;
+    }
+    mp_uint_t val = (mp_uint_t)MP_OBJ_SMALL_INT_VALUE(mp_parse_num_integer(*p, len, 10, NULL));
+    *p = num;
+    return val;
+}
+
+// Walk a complete format string, returning the number of items it encodes
+// and storing the total packed size in bytes in *total_sz. An 'Ns' field
+// counts as one item of N bytes; 'x' pad bytes occupy space but no item.
+STATIC size_t calc_size_items(const char *fmt, size_t *total_sz) {
+    char fmt_type = get_fmt_type(&fmt);
+    size_t total_cnt = 0;
+    size_t size;
+    for (size = 0; *fmt; fmt++) {
+        mp_uint_t cnt = 1;
+        if (unichar_isdigit(*fmt)) {
+            cnt = get_fmt_num(&fmt);
+        }
+
+        if (*fmt == 's') {
+            // A string field is a single item whose size is the repeat count.
+            total_cnt += 1;
+            size += cnt;
+        } else {
+            // Pad bytes are skipped and don't get included in the item count.
+            if (*fmt != 'x') {
+                total_cnt += cnt;
+            }
+            size_t align;
+            size_t sz = mp_binary_get_size(fmt_type, *fmt, &align);
+            while (cnt--) {
+                // Apply alignment
+                size = (size + align - 1) & ~(align - 1);
+                size += sz;
+            }
+        }
+    }
+    *total_sz = size;
+    return total_cnt;
+}
+
+// calcsize(fmt): return the number of bytes needed to pack the given format.
+STATIC mp_obj_t struct_calcsize(mp_obj_t fmt_in) {
+    const char *fmt = mp_obj_str_get_str(fmt_in);
+    size_t size;
+    calc_size_items(fmt, &size);
+    return MP_OBJ_NEW_SMALL_INT(size);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(struct_calcsize_obj, struct_calcsize);
+
+// unpack(fmt, buf) / unpack_from(fmt, buf[, offset]): decode buf according
+// to fmt and return a tuple of the decoded items. A negative offset counts
+// back from the end of the buffer. Raises ValueError if the buffer (after
+// applying the offset) is too small for the whole format.
+STATIC mp_obj_t struct_unpack_from(size_t n_args, const mp_obj_t *args) {
+    // unpack requires that the buffer be exactly the right size.
+    // unpack_from requires that the buffer be "big enough".
+    // Since we implement unpack and unpack_from using the same function
+    // we relax the "exact" requirement, and only implement "big enough".
+    const char *fmt = mp_obj_str_get_str(args[0]);
+    size_t total_sz;
+    size_t num_items = calc_size_items(fmt, &total_sz);
+    char fmt_type = get_fmt_type(&fmt);
+    mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_items, NULL));
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);
+    byte *p = bufinfo.buf;
+    byte *end_p = &p[bufinfo.len];
+    mp_int_t offset = 0;
+
+    if (n_args > 2) {
+        // offset arg provided
+        offset = mp_obj_get_int(args[2]);
+        if (offset < 0) {
+            // negative offsets are relative to the end of the buffer
+            offset = bufinfo.len + offset;
+            if (offset < 0) {
+                mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+            }
+        }
+        p += offset;
+    }
+    // p_base is used by mp_binary_get_val for alignment calculations.
+    byte *p_base = p;
+
+    // Check that the input buffer is big enough to unpack all the values
+    if (p + total_sz > end_p) {
+        mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+    }
+
+    for (size_t i = 0; i < num_items;) {
+        mp_uint_t cnt = 1;
+        if (unichar_isdigit(*fmt)) {
+            cnt = get_fmt_num(&fmt);
+        }
+        mp_obj_t item;
+        if (*fmt == 's') {
+            // 'Ns' decodes to a single bytes object of length N.
+            item = mp_obj_new_bytes(p, cnt);
+            p += cnt;
+            res->items[i++] = item;
+        } else {
+            while (cnt--) {
+                item = mp_binary_get_val(fmt_type, *fmt, p_base, &p);
+                // Pad bytes ('x') are just skipped.
+                if (*fmt != 'x') {
+                    res->items[i++] = item;
+                }
+            }
+        }
+        fmt++;
+    }
+    return MP_OBJ_FROM_PTR(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_unpack_from_obj, 2, 3, struct_unpack_from);
+
+// This function assumes there is enough room in p to store all the values
+STATIC void struct_pack_into_internal(mp_obj_t fmt_in, byte *p, size_t n_args, const mp_obj_t *args) {
+ size_t size;
+ size_t count = calc_size_items(mp_obj_str_get_str(fmt_in), &size);
+ if (count != n_args) {
+ #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_ValueError(NULL);
+ #else
+ mp_raise_ValueError_varg(MP_ERROR_TEXT("pack expected %d items for packing (got %d)"), count, n_args);
+ #endif
+ }
+ const char *fmt = mp_obj_str_get_str(fmt_in);
+ char fmt_type = get_fmt_type(&fmt);
+
+ byte *p_base = p;
+ size_t i;
+ for (i = 0; i < n_args;) {
+ mp_uint_t cnt = 1;
+ if (unichar_isdigit(*fmt)) {
+ cnt = get_fmt_num(&fmt);
+ }
+
+ if (*fmt == 's') {
+ mp_buffer_info_t bufinfo;
+ mp_get_buffer_raise(args[i++], &bufinfo, MP_BUFFER_READ);
+ mp_uint_t to_copy = cnt;
+ if (bufinfo.len < to_copy) {
+ to_copy = bufinfo.len;
+ }
+ memcpy(p, bufinfo.buf, to_copy);
+ memset(p + to_copy, 0, cnt - to_copy);
+ p += cnt;
+ } else {
+ while (cnt--) {
+ // Pad bytes don't have a corresponding argument.
+ if (*fmt == 'x') {
+ mp_binary_set_val(fmt_type, *fmt, MP_OBJ_NEW_SMALL_INT(0), p_base, &p);
+ } else {
+ mp_binary_set_val(fmt_type, *fmt, args[i], p_base, &p);
+ i++;
+ }
+ }
+ }
+ fmt++;
+ }
+}
+
+// pack(fmt, v1, v2, ...): return a new bytes object containing the values
+// packed according to fmt. The buffer is zeroed first so alignment padding
+// between fields is deterministic.
+STATIC mp_obj_t struct_pack(size_t n_args, const mp_obj_t *args) {
+    mp_int_t size = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
+    vstr_t vstr;
+    vstr_init_len(&vstr, size);
+    byte *p = (byte *)vstr.buf;
+    memset(p, 0, size);
+    struct_pack_into_internal(args[0], p, n_args - 1, &args[1]);
+    return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_obj, 1, MP_OBJ_FUN_ARGS_MAX, struct_pack);
+
+// pack_into(fmt, buf, offset, v1, v2, ...): pack values into a writable
+// buffer at the given offset. A negative offset counts back from the end
+// of the buffer. Raises ValueError if the packed data would not fit.
+STATIC mp_obj_t struct_pack_into(size_t n_args, const mp_obj_t *args) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE);
+    mp_int_t offset = mp_obj_get_int(args[2]);
+    if (offset < 0) {
+        // negative offsets are relative to the end of the buffer
+        offset = (mp_int_t)bufinfo.len + offset;
+        if (offset < 0) {
+            mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+        }
+    }
+    byte *p = (byte *)bufinfo.buf;
+    byte *end_p = &p[bufinfo.len];
+    p += offset;
+
+    // Check that the output buffer is big enough to hold all the values
+    mp_int_t sz = MP_OBJ_SMALL_INT_VALUE(struct_calcsize(args[0]));
+    if (p + sz > end_p) {
+        mp_raise_ValueError(MP_ERROR_TEXT("buffer too small"));
+    }
+
+    struct_pack_into_internal(args[0], p, n_args - 3, &args[3]);
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(struct_pack_into_obj, 3, MP_OBJ_FUN_ARGS_MAX, struct_pack_into);
+
+STATIC const mp_rom_map_elem_t mp_module_struct_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_ustruct) },
+ { MP_ROM_QSTR(MP_QSTR_calcsize), MP_ROM_PTR(&struct_calcsize_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pack), MP_ROM_PTR(&struct_pack_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pack_into), MP_ROM_PTR(&struct_pack_into_obj) },
+ { MP_ROM_QSTR(MP_QSTR_unpack), MP_ROM_PTR(&struct_unpack_from_obj) },
+ { MP_ROM_QSTR(MP_QSTR_unpack_from), MP_ROM_PTR(&struct_unpack_from_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_struct_globals, mp_module_struct_globals_table);
+
+const mp_obj_module_t mp_module_ustruct = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_struct_globals,
+};
+
+#endif
diff --git a/circuitpython/py/modsys.c b/circuitpython/py/modsys.c
new file mode 100644
index 0000000..0ec5de1
--- /dev/null
+++ b/circuitpython/py/modsys.c
@@ -0,0 +1,238 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/builtin.h"
+#include "py/objlist.h"
+#include "py/objtuple.h"
+#include "py/objstr.h"
+#include "py/objint.h"
+#include "py/objtype.h"
+#include "py/stream.h"
+#include "py/smallint.h"
+#include "py/runtime.h"
+#include "py/persistentcode.h"
+
+#if MICROPY_PY_SYS_SETTRACE
+#include "py/objmodule.h"
+#include "py/profile.h"
+#endif
+
+#if MICROPY_PY_SYS
+
+#include "genhdr/mpversion.h"
+
+// defined per port; type of these is irrelevant, just need pointer
+extern struct _mp_dummy_t mp_sys_stdin_obj;
+extern struct _mp_dummy_t mp_sys_stdout_obj;
+extern struct _mp_dummy_t mp_sys_stderr_obj;
+
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+const mp_print_t mp_sys_stdout_print = {&mp_sys_stdout_obj, mp_stream_write_adaptor};
+#endif
+
+// version - Python language version that this implementation conforms to, as a string
+STATIC const MP_DEFINE_STR_OBJ(mp_sys_version_obj, "3.4.0");
+
+// version_info - Python language version that this implementation conforms to, as a tuple of ints
+#define I(n) MP_OBJ_NEW_SMALL_INT(n)
+// TODO: CPython is now at 5-element array, but save 2 els so far...
+STATIC const mp_obj_tuple_t mp_sys_version_info_obj = {{&mp_type_tuple}, 3, {I(3), I(4), I(0)}};
+
+// sys.implementation object
+// this holds the MicroPython version
+STATIC const mp_obj_tuple_t mp_sys_implementation_version_info_obj = {
+ {&mp_type_tuple},
+ 3,
+ { I(MICROPY_VERSION_MAJOR), I(MICROPY_VERSION_MINOR), I(MICROPY_VERSION_MICRO) }
+};
+#if MICROPY_PERSISTENT_CODE_LOAD
+#define SYS_IMPLEMENTATION_ELEMS \
+ MP_ROM_QSTR(MP_QSTR_circuitpython), \
+ MP_ROM_PTR(&mp_sys_implementation_version_info_obj), \
+ MP_ROM_INT(MPY_FILE_HEADER_INT)
+#else
+#define SYS_IMPLEMENTATION_ELEMS \
+ MP_ROM_QSTR(MP_QSTR_circuitpython), \
+ MP_ROM_PTR(&mp_sys_implementation_version_info_obj)
+#endif
+#if MICROPY_PY_ATTRTUPLE
+STATIC const qstr impl_fields[] = {
+ MP_QSTR_name,
+ MP_QSTR_version,
+ #if MICROPY_PERSISTENT_CODE_LOAD
+ MP_QSTR_mpy,
+ #endif
+};
+STATIC MP_DEFINE_ATTRTUPLE(
+ mp_sys_implementation_obj,
+ impl_fields,
+ 2 + MICROPY_PERSISTENT_CODE_LOAD,
+ SYS_IMPLEMENTATION_ELEMS
+ );
+#else
+STATIC const mp_rom_obj_tuple_t mp_sys_implementation_obj = {
+ {&mp_type_tuple},
+ 2 + MICROPY_PERSISTENT_CODE_LOAD,
+ {
+ SYS_IMPLEMENTATION_ELEMS
+ }
+};
+#endif
+
+#undef I
+
+#ifdef MICROPY_PY_SYS_PLATFORM
+// platform - the platform that MicroPython is running on
+STATIC const MP_DEFINE_STR_OBJ(mp_sys_platform_obj, MICROPY_PY_SYS_PLATFORM);
+#endif
+
+// exit([retval]): raise SystemExit, with optional argument given to the exception
+// Never returns normally; control leaves via the raised exception.
+STATIC mp_obj_t mp_sys_exit(size_t n_args, const mp_obj_t *args) {
+    if (n_args == 0) {
+        mp_raise_type(&mp_type_SystemExit);
+    } else {
+        mp_raise_type_arg(&mp_type_SystemExit, args[0]);
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_sys_exit_obj, 0, 1, mp_sys_exit);
+
+#if MICROPY_PY_SYS_EXC_INFO
+// exc_info(): return (type, value, traceback) for the exception currently
+// being handled, or (None, None, None) when no exception is active.
+STATIC mp_obj_t mp_sys_exc_info(void) {
+    mp_obj_t cur_exc = MP_OBJ_FROM_PTR(MP_STATE_VM(cur_exception));
+    mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(3, NULL));
+
+    if (cur_exc == MP_OBJ_NULL) {
+        t->items[0] = mp_const_none;
+        t->items[1] = mp_const_none;
+        t->items[2] = mp_const_none;
+        return MP_OBJ_FROM_PTR(t);
+    }
+
+    t->items[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(cur_exc));
+    t->items[1] = cur_exc;
+    t->items[2] = mp_obj_exception_get_traceback_obj(cur_exc);
+    return MP_OBJ_FROM_PTR(t);
+}
+MP_DEFINE_CONST_FUN_OBJ_0(mp_sys_exc_info_obj, mp_sys_exc_info);
+#endif
+
+#if MICROPY_PY_SYS_GETSIZEOF
+// getsizeof(obj): delegate to the object's MP_UNARY_OP_SIZEOF handler.
+STATIC mp_obj_t mp_sys_getsizeof(mp_obj_t obj) {
+    return mp_unary_op(MP_UNARY_OP_SIZEOF, obj);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_getsizeof_obj, mp_sys_getsizeof);
+#endif
+
+#if MICROPY_PY_SYS_ATEXIT
+// atexit(callback): Callback is called when sys.exit is called.
+// Replaces any previously registered callback and returns the old one
+// (only a single exit callback is stored).
+STATIC mp_obj_t mp_sys_atexit(mp_obj_t obj) {
+    mp_obj_t old = MP_STATE_VM(sys_exitfunc);
+    MP_STATE_VM(sys_exitfunc) = obj;
+    return old;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_atexit_obj, mp_sys_atexit);
+#endif
+
+#if MICROPY_PY_SYS_SETTRACE
+// settrace(tracefunc): Set the system's trace function.
+// Thin wrapper over the profiling module's mp_prof_settrace.
+STATIC mp_obj_t mp_sys_settrace(mp_obj_t obj) {
+    return mp_prof_settrace(obj);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_sys_settrace_obj, mp_sys_settrace);
+#endif // MICROPY_PY_SYS_SETTRACE
+
+STATIC const mp_rom_map_elem_t mp_module_sys_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_sys) },
+
+ { MP_ROM_QSTR(MP_QSTR_path), MP_ROM_PTR(&MP_STATE_VM(mp_sys_path_obj)) },
+ { MP_ROM_QSTR(MP_QSTR_argv), MP_ROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)) },
+ { MP_ROM_QSTR(MP_QSTR_version), MP_ROM_PTR(&mp_sys_version_obj) },
+ { MP_ROM_QSTR(MP_QSTR_version_info), MP_ROM_PTR(&mp_sys_version_info_obj) },
+ { MP_ROM_QSTR(MP_QSTR_implementation), MP_ROM_PTR(&mp_sys_implementation_obj) },
+ #ifdef MICROPY_PY_SYS_PLATFORM
+ { MP_ROM_QSTR(MP_QSTR_platform), MP_ROM_PTR(&mp_sys_platform_obj) },
+ #endif
+ #if MP_ENDIANNESS_LITTLE
+ { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_little) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR_byteorder), MP_ROM_QSTR(MP_QSTR_big) },
+ #endif
+
+ #if MICROPY_PY_SYS_MAXSIZE
+ #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+ // Maximum mp_int_t value is not representable as small int, so we have
+ // little choice but to use MP_SMALL_INT_MAX. Apps also should be careful
+ // to not try to compare sys.maxsize to some literal number (as this
+ // number might not fit in available int size), but instead count number
+ // of "one" bits in sys.maxsize.
+ { MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_INT(MP_SMALL_INT_MAX) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR_maxsize), MP_ROM_PTR(&mp_sys_maxsize_obj) },
+ #endif
+ #endif
+
+ #if MICROPY_PY_SYS_EXIT
+ { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mp_sys_exit_obj) },
+ #endif
+
+ #if MICROPY_PY_SYS_SETTRACE
+ { MP_ROM_QSTR(MP_QSTR_settrace), MP_ROM_PTR(&mp_sys_settrace_obj) },
+ #endif
+
+ #if MICROPY_PY_SYS_STDFILES
+ { MP_ROM_QSTR(MP_QSTR_stdin), MP_ROM_PTR(&mp_sys_stdin_obj) },
+ { MP_ROM_QSTR(MP_QSTR_stdout), MP_ROM_PTR(&mp_sys_stdout_obj) },
+ { MP_ROM_QSTR(MP_QSTR_stderr), MP_ROM_PTR(&mp_sys_stderr_obj) },
+ #endif
+
+ #if MICROPY_PY_SYS_MODULES
+ { MP_ROM_QSTR(MP_QSTR_modules), MP_ROM_PTR(&MP_STATE_VM(mp_loaded_modules_dict)) },
+ #endif
+ #if MICROPY_PY_SYS_EXC_INFO
+ { MP_ROM_QSTR(MP_QSTR_exc_info), MP_ROM_PTR(&mp_sys_exc_info_obj) },
+ #endif
+ #if MICROPY_PY_SYS_GETSIZEOF
+ { MP_ROM_QSTR(MP_QSTR_getsizeof), MP_ROM_PTR(&mp_sys_getsizeof_obj) },
+ #endif
+
+ /*
+ * Extensions to CPython
+ */
+
+ #if MICROPY_PY_SYS_ATEXIT
+ { MP_ROM_QSTR(MP_QSTR_atexit), MP_ROM_PTR(&mp_sys_atexit_obj) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_sys_globals, mp_module_sys_globals_table);
+
+const mp_obj_module_t mp_module_sys = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_sys_globals,
+};
+
+#endif
diff --git a/circuitpython/py/modthread.c b/circuitpython/py/modthread.c
new file mode 100644
index 0000000..333d750
--- /dev/null
+++ b/circuitpython/py/modthread.c
@@ -0,0 +1,306 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_THREAD
+
+#include "py/mpthread.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+/****************************************************************/
+// Lock object
+
+STATIC const mp_obj_type_t mp_type_thread_lock;
+
+typedef struct _mp_obj_thread_lock_t {
+ mp_obj_base_t base;
+ mp_thread_mutex_t mutex;
+ volatile bool locked;
+} mp_obj_thread_lock_t;
+
+// Allocate and initialize a new lock object in the unlocked state.
+STATIC mp_obj_thread_lock_t *mp_obj_new_thread_lock(void) {
+    mp_obj_thread_lock_t *self = m_new_obj(mp_obj_thread_lock_t);
+    self->base.type = &mp_type_thread_lock;
+    mp_thread_mutex_init(&self->mutex);
+    self->locked = false;
+    return self;
+}
+
+// lock.acquire([waitflag[, timeout]]): try to take the lock.
+// Returns True on success, False if non-blocking and the lock is held.
+// A negative result from the port's mutex lock is raised as OSError.
+// The GIL is released around the blocking mutex call.
+STATIC mp_obj_t thread_lock_acquire(size_t n_args, const mp_obj_t *args) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(args[0]);
+    bool wait = true;
+    if (n_args > 1) {
+        wait = mp_obj_get_int(args[1]);
+        // TODO support timeout arg
+    }
+    MP_THREAD_GIL_EXIT();
+    int ret = mp_thread_mutex_lock(&self->mutex, wait);
+    MP_THREAD_GIL_ENTER();
+    if (ret == 0) {
+        return mp_const_false;
+    } else if (ret == 1) {
+        self->locked = true;
+        return mp_const_true;
+    } else {
+        mp_raise_OSError(-ret);
+    }
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock_acquire_obj, 1, 3, thread_lock_acquire);
+
+// lock.release(): release the lock; raises RuntimeError if it is not held.
+// The locked flag is cleared before unlocking the underlying mutex.
+STATIC mp_obj_t thread_lock_release(mp_obj_t self_in) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
+    if (!self->locked) {
+        mp_raise_msg(&mp_type_RuntimeError, NULL);
+    }
+    self->locked = false;
+    MP_THREAD_GIL_EXIT();
+    mp_thread_mutex_unlock(&self->mutex);
+    MP_THREAD_GIL_ENTER();
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_release_obj, thread_lock_release);
+
+// lock.locked(): report whether the lock is currently held.
+STATIC mp_obj_t thread_lock_locked(mp_obj_t self_in) {
+    mp_obj_thread_lock_t *self = MP_OBJ_TO_PTR(self_in);
+    return mp_obj_new_bool(self->locked);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(thread_lock_locked_obj, thread_lock_locked);
+
+// Context-manager exit: release the lock regardless of exception state
+// (the three exception arguments are ignored).
+STATIC mp_obj_t thread_lock___exit__(size_t n_args, const mp_obj_t *args) {
+    (void)n_args; // unused
+    return thread_lock_release(args[0]);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(thread_lock___exit___obj, 4, 4, thread_lock___exit__);
+
+STATIC const mp_rom_map_elem_t thread_lock_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_acquire), MP_ROM_PTR(&thread_lock_acquire_obj) },
+ { MP_ROM_QSTR(MP_QSTR_release), MP_ROM_PTR(&thread_lock_release_obj) },
+ { MP_ROM_QSTR(MP_QSTR_locked), MP_ROM_PTR(&thread_lock_locked_obj) },
+ { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&thread_lock_acquire_obj) },
+ { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&thread_lock___exit___obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(thread_lock_locals_dict, thread_lock_locals_dict_table);
+
+STATIC const mp_obj_type_t mp_type_thread_lock = {
+ { &mp_type_type },
+ .name = MP_QSTR_lock,
+ .locals_dict = (mp_obj_dict_t *)&thread_lock_locals_dict,
+};
+
+/****************************************************************/
+// _thread module
+
+STATIC size_t thread_stack_size = 0;
+
+// get_ident(): use the address of the thread-state structure as a unique
+// per-thread identifier.
+STATIC mp_obj_t mod_thread_get_ident(void) {
+    return mp_obj_new_int_from_uint((uintptr_t)mp_thread_get_state());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_get_ident_obj, mod_thread_get_ident);
+
+// stack_size([size]): return the stack size used for new threads.
+// With no argument, also resets the size to 0 (meaning the port default);
+// with an argument, sets the new size. The previous value is returned.
+STATIC mp_obj_t mod_thread_stack_size(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t ret = mp_obj_new_int_from_uint(thread_stack_size);
+    if (n_args == 0) {
+        thread_stack_size = 0;
+    } else {
+        thread_stack_size = mp_obj_get_int(args[0]);
+    }
+    return ret;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_stack_size_obj, 0, 1, mod_thread_stack_size);
+
+typedef struct _thread_entry_args_t {
+ mp_obj_dict_t *dict_locals;
+ mp_obj_dict_t *dict_globals;
+ size_t stack_size;
+ mp_obj_t fun;
+ size_t n_args;
+ size_t n_kw;
+ mp_obj_t args[];
+} thread_entry_args_t;
+
+// Entry point run in the context of each newly created thread. Sets up the
+// per-thread MicroPython state (stack limits, optional pystack, GC lock
+// depth, locals/globals), acquires the GIL, runs the Python function from
+// args_in, and reports any uncaught exception (SystemExit is swallowed).
+STATIC void *thread_entry(void *args_in) {
+    // Execution begins here for a new thread. We do not have the GIL.
+
+    thread_entry_args_t *args = (thread_entry_args_t *)args_in;
+
+    mp_state_thread_t ts;
+    mp_thread_set_state(&ts);
+
+    mp_stack_set_top(&ts + 1); // need to include ts in root-pointer scan
+    mp_stack_set_limit(args->stack_size);
+
+    #if MICROPY_ENABLE_PYSTACK
+    // TODO threading and pystack is not fully supported, for now just make a small stack
+    mp_obj_t mini_pystack[128];
+    mp_pystack_init(mini_pystack, &mini_pystack[128]);
+    #endif
+
+    // The GC starts off unlocked on this thread.
+    ts.gc_lock_depth = 0;
+
+    ts.mp_pending_exception = MP_OBJ_NULL;
+
+    // set locals and globals from the calling context
+    mp_locals_set(args->dict_locals);
+    mp_globals_set(args->dict_globals);
+
+    MP_THREAD_GIL_ENTER();
+
+    // signal that we are set up and running
+    mp_thread_start();
+
+    // TODO set more thread-specific state here:
+    // cur_exception (root pointer)
+
+    DEBUG_printf("[thread] start ts=%p args=%p stack=%p\n", &ts, &args, MP_STATE_THREAD(stack_top));
+
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_call_function_n_kw(args->fun, args->n_args, args->n_kw, args->args);
+        nlr_pop();
+    } else {
+        // uncaught exception
+        // check for SystemExit
+        mp_obj_base_t *exc = (mp_obj_base_t *)nlr.ret_val;
+        if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(exc->type), MP_OBJ_FROM_PTR(&mp_type_SystemExit))) {
+            // swallow exception silently
+        } else {
+            // print exception out
+            mp_printf(MICROPY_ERROR_PRINTER, "Unhandled exception in thread started by ");
+            mp_obj_print_helper(MICROPY_ERROR_PRINTER, args->fun, PRINT_REPR);
+            mp_printf(MICROPY_ERROR_PRINTER, "\n");
+            mp_obj_print_exception(MICROPY_ERROR_PRINTER, MP_OBJ_FROM_PTR(exc));
+        }
+    }
+
+    DEBUG_printf("[thread] finish ts=%p\n", &ts);
+
+    // signal that we are finished
+    mp_thread_finish();
+
+    MP_THREAD_GIL_EXIT();
+
+    return NULL;
+}
+
+// start_new_thread(function, args[, kwargs]): spawn a new thread running
+// function(*args, **kwargs). Positional args come from a tuple/list;
+// optional keyword args come from a dict. All arguments are copied into a
+// heap-allocated thread_entry_args_t handed to the new thread.
+STATIC mp_obj_t mod_thread_start_new_thread(size_t n_args, const mp_obj_t *args) {
+    // This structure holds the Python function and arguments for thread entry.
+    // We copy all arguments into this structure to keep ownership of them.
+    // We must be very careful about root pointers because this pointer may
+    // disappear from our address space before the thread is created.
+    thread_entry_args_t *th_args;
+
+    // get positional arguments
+    size_t pos_args_len;
+    mp_obj_t *pos_args_items;
+    mp_obj_get_array(args[1], &pos_args_len, &pos_args_items);
+
+    // check for keyword arguments
+    if (n_args == 2) {
+        // just position arguments
+        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len);
+        th_args->n_kw = 0;
+    } else {
+        // positional and keyword arguments
+        if (mp_obj_get_type(args[2]) != &mp_type_dict) {
+            mp_raise_TypeError(MP_ERROR_TEXT("expecting a dict for keyword args"));
+        }
+        mp_map_t *map = &((mp_obj_dict_t *)MP_OBJ_TO_PTR(args[2]))->map;
+        // Each keyword argument occupies two slots: key then value.
+        th_args = m_new_obj_var(thread_entry_args_t, mp_obj_t, pos_args_len + 2 * map->used);
+        th_args->n_kw = map->used;
+        // copy across the keyword arguments
+        for (size_t i = 0, n = pos_args_len; i < map->alloc; ++i) {
+            if (mp_map_slot_is_filled(map, i)) {
+                th_args->args[n++] = map->table[i].key;
+                th_args->args[n++] = map->table[i].value;
+            }
+        }
+    }
+
+    // copy across the positional arguments
+    th_args->n_args = pos_args_len;
+    memcpy(th_args->args, pos_args_items, pos_args_len * sizeof(mp_obj_t));
+
+    // pass our locals and globals into the new thread
+    th_args->dict_locals = mp_locals_get();
+    th_args->dict_globals = mp_globals_get();
+
+    // set the stack size to use
+    th_args->stack_size = thread_stack_size;
+
+    // set the function for thread entry
+    th_args->fun = args[0];
+
+    // spawn the thread!
+    mp_thread_create(thread_entry, th_args, &th_args->stack_size);
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mod_thread_start_new_thread_obj, 2, 3, mod_thread_start_new_thread);
+
+// exit(): terminate the current thread by raising SystemExit
+// (caught and swallowed by thread_entry).
+STATIC mp_obj_t mod_thread_exit(void) {
+    mp_raise_type(&mp_type_SystemExit);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_exit_obj, mod_thread_exit);
+
+// allocate_lock(): create and return a new, initially unlocked lock object.
+STATIC mp_obj_t mod_thread_allocate_lock(void) {
+    return MP_OBJ_FROM_PTR(mp_obj_new_thread_lock());
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(mod_thread_allocate_lock_obj, mod_thread_allocate_lock);
+
+STATIC const mp_rom_map_elem_t mp_module_thread_globals_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR__thread) },
+ { MP_ROM_QSTR(MP_QSTR_LockType), MP_ROM_PTR(&mp_type_thread_lock) },
+ { MP_ROM_QSTR(MP_QSTR_get_ident), MP_ROM_PTR(&mod_thread_get_ident_obj) },
+ { MP_ROM_QSTR(MP_QSTR_stack_size), MP_ROM_PTR(&mod_thread_stack_size_obj) },
+ { MP_ROM_QSTR(MP_QSTR_start_new_thread), MP_ROM_PTR(&mod_thread_start_new_thread_obj) },
+ { MP_ROM_QSTR(MP_QSTR_exit), MP_ROM_PTR(&mod_thread_exit_obj) },
+ { MP_ROM_QSTR(MP_QSTR_allocate_lock), MP_ROM_PTR(&mod_thread_allocate_lock_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_thread_globals, mp_module_thread_globals_table);
+
+const mp_obj_module_t mp_module_thread = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_thread_globals,
+};
+
+#endif // MICROPY_PY_THREAD
diff --git a/circuitpython/py/moduerrno.c b/circuitpython/py/moduerrno.c
new file mode 100644
index 0000000..743be0b
--- /dev/null
+++ b/circuitpython/py/moduerrno.c
@@ -0,0 +1,180 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/mperrno.h"
+
+#include "supervisor/shared/translate.h"
+
+// This list can be defined per port in mpconfigport.h to tailor it to a
+// specific port's needs. If it's not defined then we provide a default.
+#ifndef MICROPY_PY_UERRNO_LIST
+#define MICROPY_PY_UERRNO_LIST \
+ X(EPERM) \
+ X(ENOENT) \
+ X(EIO) \
+ X(EBADF) \
+ X(EAGAIN) \
+ X(ENOMEM) \
+ X(EACCES) \
+ X(EEXIST) \
+ X(ENODEV) \
+ X(EISDIR) \
+ X(EINVAL) \
+ X(EOPNOTSUPP) \
+ X(EADDRINUSE) \
+ X(ECONNABORTED) \
+ X(ECONNRESET) \
+ X(ENOBUFS) \
+ X(ENOTCONN) \
+ X(ETIMEDOUT) \
+ X(ECONNREFUSED) \
+ X(EHOSTUNREACH) \
+ X(EALREADY) \
+ X(EINPROGRESS) \
+
+#endif
+
+#if MICROPY_PY_UERRNO
+
+#if MICROPY_PY_UERRNO_ERRORCODE
+STATIC const mp_rom_map_elem_t errorcode_table[] = {
+ #define X(e) { MP_ROM_INT(MP_##e), MP_ROM_QSTR(MP_QSTR_##e) },
+ MICROPY_PY_UERRNO_LIST
+#undef X
+};
+
+STATIC const mp_obj_dict_t errorcode_dict = {
+ .base = {&mp_type_dict},
+ .map = {
+ .all_keys_are_qstrs = 0, // keys are integers
+ .is_fixed = 1,
+ .is_ordered = 1,
+ .used = MP_ARRAY_SIZE(errorcode_table),
+ .alloc = MP_ARRAY_SIZE(errorcode_table),
+ .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)errorcode_table,
+ },
+};
+#endif
+
+STATIC const mp_rom_map_elem_t mp_module_uerrno_globals_table[] = {
+ #if CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_errno) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_uerrno) },
+ #endif
+ #if MICROPY_PY_UERRNO_ERRORCODE
+ { MP_ROM_QSTR(MP_QSTR_errorcode), MP_ROM_PTR(&errorcode_dict) },
+ #endif
+
+ #define X(e) { MP_ROM_QSTR(MP_QSTR_##e), MP_ROM_INT(MP_##e) },
+ MICROPY_PY_UERRNO_LIST
+#undef X
+};
+
+STATIC MP_DEFINE_CONST_DICT(mp_module_uerrno_globals, mp_module_uerrno_globals_table);
+
+const mp_obj_module_t mp_module_uerrno = {
+ .base = { &mp_type_module },
+ .globals = (mp_obj_dict_t *)&mp_module_uerrno_globals,
+};
+
+MP_REGISTER_MODULE(MP_QSTR_errno, mp_module_uerrno, MICROPY_PY_UERRNO);
+
+// Map an errno value (as a Python int object) to its Exxxx name as a qstr,
+// or MP_QSTRnull if the code is not in the module's error list.
+qstr mp_errno_to_str(mp_obj_t errno_val) {
+    // Otherwise, return the Exxxx string for that error code
+    #if MICROPY_PY_UERRNO_ERRORCODE
+    // We have the errorcode dict so can do a lookup using the hash map
+    mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&errorcode_dict.map, errno_val, MP_MAP_LOOKUP);
+    if (elem == NULL) {
+        return MP_QSTRnull;
+    } else {
+        return MP_OBJ_QSTR_VALUE(elem->value);
+    }
+    #else
+    // We don't have the errorcode dict so do a simple search in the modules dict
+    for (size_t i = 0; i < MP_ARRAY_SIZE(mp_module_uerrno_globals_table); ++i) {
+        if (errno_val == mp_module_uerrno_globals_table[i].value) {
+            return MP_OBJ_QSTR_VALUE(mp_module_uerrno_globals_table[i].key);
+        }
+    }
+    return MP_QSTRnull;
+    #endif
+}
+
+#endif // MICROPY_PY_UERRNO
+
+
+// For commonly encountered errors, return human readable strings, otherwise try errno name
+// The readable description is decompressed into buf (only if it fits in
+// len bytes). Returns buf, a static Exxxx name, or NULL if errno_val is
+// not a small int or has no known description.
+const char *mp_common_errno_to_str(mp_obj_t errno_val, char *buf, size_t len) {
+    if (!mp_obj_is_small_int(errno_val)) {
+        return NULL;
+    }
+
+    const compressed_string_t *desc = NULL;
+    switch (MP_OBJ_SMALL_INT_VALUE(errno_val)) {
+        case EPERM:
+            desc = MP_ERROR_TEXT("Operation not permitted");
+            break;
+        case ENOENT:
+            desc = MP_ERROR_TEXT("No such file/directory");
+            break;
+        case EIO:
+            desc = MP_ERROR_TEXT("Input/output error");
+            break;
+        case EACCES:
+            desc = MP_ERROR_TEXT("Permission denied");
+            break;
+        case EEXIST:
+            desc = MP_ERROR_TEXT("File exists");
+            break;
+        case ENODEV:
+            desc = MP_ERROR_TEXT("No such device");
+            break;
+        case EINVAL:
+            desc = MP_ERROR_TEXT("Invalid argument");
+            break;
+        case ENOSPC:
+            desc = MP_ERROR_TEXT("No space left on device");
+            break;
+        case EROFS:
+            desc = MP_ERROR_TEXT("Read-only filesystem");
+            break;
+    }
+    // Only use the friendly text if the decompressed form fits the buffer.
+    if (desc != NULL && decompress_length(desc) <= len) {
+        decompress(desc, buf);
+        return buf;
+    }
+
+    // Fall back to the Exxxx name from the uerrno module, if available.
+    const char *msg = "";
+    #if MICROPY_PY_UERRNO
+    msg = qstr_str(mp_errno_to_str(errno_val));
+    #endif
+    return msg[0] != '\0' ? msg : NULL;
+}
diff --git a/circuitpython/py/mpconfig.h b/circuitpython/py/mpconfig.h
new file mode 100644
index 0000000..386a976
--- /dev/null
+++ b/circuitpython/py/mpconfig.h
@@ -0,0 +1,1897 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPCONFIG_H
+#define MICROPY_INCLUDED_PY_MPCONFIG_H
+
+// This file contains default configuration settings for MicroPython.
+// You can override any of the options below using mpconfigport.h file
+// located in a directory of your port.
+
+// mpconfigport.h is a file containing configuration settings for a
+// particular port. mpconfigport.h is actually a default name for
+// such config, and it can be overridden using MP_CONFIGFILE preprocessor
+// define (you can do that by passing CFLAGS_EXTRA='-DMP_CONFIGFILE="<file.h>"'
+// argument to make when using standard MicroPython makefiles).
+// This is useful to have more than one config per port, for example,
+// release vs debug configs, etc. Note that if you switch from one config
+// to another, you must rebuild from scratch using "-B" switch to make.
+
+#ifdef MP_CONFIGFILE
+#include MP_CONFIGFILE
+#else
+#include <mpconfigport.h>
+#endif
+
+// Is this a CircuitPython build?
+#ifndef CIRCUITPY
+#define CIRCUITPY 0
+#endif
+
+// Disable all optional features (i.e. minimal port).
+#define MICROPY_CONFIG_ROM_LEVEL_MINIMUM (0)
+// Only enable core features (constrained flash, e.g. STM32L072)
+#define MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES (10)
+// Enable most common features (small on-device flash, e.g. STM32F411)
+#define MICROPY_CONFIG_ROM_LEVEL_BASIC_FEATURES (20)
+// Enable convenience features (medium on-device flash, e.g. STM32F405)
+#define MICROPY_CONFIG_ROM_LEVEL_EXTRA_FEATURES (30)
+// Enable all common features (large/external flash, rp2, unix)
+#define MICROPY_CONFIG_ROM_LEVEL_FULL_FEATURES (40)
+// Enable everything (e.g. coverage)
+#define MICROPY_CONFIG_ROM_LEVEL_EVERYTHING (50)
+
+// Ports/boards should set this, but default to level=core.
+#ifndef MICROPY_CONFIG_ROM_LEVEL
+#define MICROPY_CONFIG_ROM_LEVEL (MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES)
+#endif
+
+// Helper macros for "have at least this level".
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_CORE_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_BASIC_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_BASIC_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_EXTRA_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_FULL_FEATURES (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_FULL_FEATURES)
+#define MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EVERYTHING (MICROPY_CONFIG_ROM_LEVEL >= MICROPY_CONFIG_ROM_LEVEL_EVERYTHING)
+
+// Any options not explicitly set in mpconfigport.h will get default
+// values below.
+
+/*****************************************************************************/
+/* Object representation */
+
+// A MicroPython object is a machine word having the following form:
+// - xxxx...xxx1 : a small int, bits 1 and above are the value
+// - xxxx...x010 : a qstr, bits 3 and above are the value
+// - xxxx...x110 : an immediate object, bits 3 and above are the value
+// - xxxx...xx00 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_A (0)
+
+// A MicroPython object is a machine word having the following form:
+// - xxxx...xx01 : a small int, bits 2 and above are the value
+// - xxxx...x011 : a qstr, bits 3 and above are the value
+// - xxxx...x111 : an immediate object, bits 3 and above are the value
+// - xxxx...xxx0 : a pointer to an mp_obj_base_t (unless a fake object)
+#define MICROPY_OBJ_REPR_B (1)
+
+// A MicroPython object is a machine word having the following form (called R):
+// - iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int with 31-bit signed value
+// - 01111111 1qqqqqqq qqqqqqqq qqqq0110 str with 19-bit qstr value
+// - 01111111 10000000 00000000 ssss1110 immediate object with 4-bit value
+// - s1111111 10000000 00000000 00000010 +/- inf
+// - s1111111 1xxxxxxx xxxxxxxx xxxxx010 nan, x != 0
+// - seeeeeee efffffff ffffffff ffffff10 30-bit fp, e != 0xff
+// - pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Str, immediate and float stored as O = R + 0x80800000, retrieved as R = O - 0x80800000.
+// This makes strs/immediates easier to encode/decode as they have zeros in the top 9 bits.
+// This scheme only works with 32-bit word size and float enabled.
+#define MICROPY_OBJ_REPR_C (2)
+
+// A MicroPython object is a 64-bit word having the following form (called R):
+// - seeeeeee eeeeffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff 64-bit fp, e != 0x7ff
+// - s1111111 11110000 00000000 00000000 00000000 00000000 00000000 00000000 +/- inf
+// - 01111111 11111000 00000000 00000000 00000000 00000000 00000000 00000000 normalised nan
+// - 01111111 11111101 iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiiii iiiiiii1 small int
+// - 01111111 11111110 00000000 00000000 qqqqqqqq qqqqqqqq qqqqqqqq qqqqqqq1 str
+// - 01111111 11111111 ss000000 00000000 00000000 00000000 00000000 00000000 immediate object
+// - 01111111 11111100 00000000 00000000 pppppppp pppppppp pppppppp pppppp00 ptr (4 byte alignment)
+// Stored as O = R + 0x8004000000000000, retrieved as R = O - 0x8004000000000000.
+// This makes pointers have all zeros in the top 32 bits.
+// Small-ints and strs have 1 as LSB to make sure they don't look like pointers
+// to the garbage collector.
+#define MICROPY_OBJ_REPR_D (3)
+
+#ifndef MICROPY_OBJ_REPR
+#define MICROPY_OBJ_REPR (MICROPY_OBJ_REPR_A)
+#endif
+
+// Whether to encode None/False/True as immediate objects instead of pointers to
+// real objects. Reduces code size by a decent amount without hurting
+// performance, for all representations except D on some architectures.
+#ifndef MICROPY_OBJ_IMMEDIATE_OBJS
+#define MICROPY_OBJ_IMMEDIATE_OBJS (MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D)
+#endif
+
+/*****************************************************************************/
+/* Memory allocation policy */
+
+// Number of bytes in memory allocation/GC block. Any size allocated will be
+// rounded up to be multiples of this.
+#ifndef MICROPY_BYTES_PER_GC_BLOCK
+#define MICROPY_BYTES_PER_GC_BLOCK (4 * MP_BYTES_PER_OBJ_WORD)
+#endif
+
+// Number of words allocated (in BSS) to the GC stack (minimum is 1)
+#ifndef MICROPY_ALLOC_GC_STACK_SIZE
+#define MICROPY_ALLOC_GC_STACK_SIZE (64)
+#endif
+
+// The C-type to use for entries in the GC stack. By default it allows the
+// heap to be as large as the address space, but the bit-width of this type can
+// be reduced to save memory when the heap is small enough. The type must be
+// big enough to index all blocks in the heap, which is set by
+// heap-size-in-bytes / MICROPY_BYTES_PER_GC_BLOCK.
+#ifndef MICROPY_GC_STACK_ENTRY_TYPE
+#define MICROPY_GC_STACK_ENTRY_TYPE size_t
+#endif
+
+// Be conservative and always clear to zero newly (re)allocated memory in the GC.
+// This helps eliminate stray pointers that hold on to memory that's no longer
+// used. It decreases performance due to unnecessary memory clearing.
+// A memory manager which always clears memory can set this to 0.
+// TODO Do analysis to understand why some memory is not properly cleared and
+// find a more efficient way to clear it.
+#ifndef MICROPY_GC_CONSERVATIVE_CLEAR
+#define MICROPY_GC_CONSERVATIVE_CLEAR (MICROPY_ENABLE_GC)
+#endif
+
+// Support automatic GC when reaching allocation threshold,
+// configurable by gc.threshold().
+#ifndef MICROPY_GC_ALLOC_THRESHOLD
+#define MICROPY_GC_ALLOC_THRESHOLD (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// interned string data. Smaller numbers lead to more chunks being needed
+// and more wastage at the end of the chunk. Larger numbers lead to wasted
+// space at the end when no more strings need interning.
+#ifndef MICROPY_ALLOC_QSTR_CHUNK_INIT
+#define MICROPY_ALLOC_QSTR_CHUNK_INIT (128)
+#endif
+
+// Max number of entries in newly allocated QSTR pools. Smaller numbers may make QSTR lookups
+// slightly slower but reduce the waste of unused spots.
+#ifndef MICROPY_QSTR_POOL_MAX_ENTRIES
+#define MICROPY_QSTR_POOL_MAX_ENTRIES (64)
+#endif
+
+// Initial amount for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXER_INDENT_INIT
+#define MICROPY_ALLOC_LEXER_INDENT_INIT (10)
+#endif
+
+// Increment for lexer indentation level
+#ifndef MICROPY_ALLOC_LEXEL_INDENT_INC
+#define MICROPY_ALLOC_LEXEL_INDENT_INC (8)
+#endif
+
+// Initial amount for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INIT
+#define MICROPY_ALLOC_PARSE_RULE_INIT (64)
+#endif
+
+// Increment for parse rule stack
+#ifndef MICROPY_ALLOC_PARSE_RULE_INC
+#define MICROPY_ALLOC_PARSE_RULE_INC (16)
+#endif
+
+// Initial amount for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INIT
+#define MICROPY_ALLOC_PARSE_RESULT_INIT (32)
+#endif
+
+// Increment for parse result stack
+#ifndef MICROPY_ALLOC_PARSE_RESULT_INC
+#define MICROPY_ALLOC_PARSE_RESULT_INC (16)
+#endif
+
+// Strings this length or less will be interned by the parser
+#ifndef MICROPY_ALLOC_PARSE_INTERN_STRING_LEN
+#define MICROPY_ALLOC_PARSE_INTERN_STRING_LEN (10)
+#endif
+
+// Number of bytes to allocate initially when creating new chunks to store
+// parse nodes. Small leads to fragmentation, large leads to excess use.
+#ifndef MICROPY_ALLOC_PARSE_CHUNK_INIT
+#define MICROPY_ALLOC_PARSE_CHUNK_INIT (128)
+#endif
+
+// Initial amount for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INIT
+#define MICROPY_ALLOC_SCOPE_ID_INIT (4)
+#endif
+
+// Increment for ids in a scope
+#ifndef MICROPY_ALLOC_SCOPE_ID_INC
+#define MICROPY_ALLOC_SCOPE_ID_INC (6)
+#endif
+
+// Maximum length of a path in the filesystem
+// So we can allocate a buffer on the stack for path manipulation in import
+#ifndef MICROPY_ALLOC_PATH_MAX
+#define MICROPY_ALLOC_PATH_MAX (512)
+#endif
+
+// Initial size of module dict
+#ifndef MICROPY_MODULE_DICT_SIZE
+#define MICROPY_MODULE_DICT_SIZE (1)
+#endif
+
+// Initial size of sys.modules dict
+#ifndef MICROPY_LOADED_MODULES_DICT_SIZE
+#define MICROPY_LOADED_MODULES_DICT_SIZE (3)
+#endif
+
+// Whether realloc/free should be passed allocated memory region size
+// You must enable this if MICROPY_MEM_STATS is enabled
+#ifndef MICROPY_MALLOC_USES_ALLOCATED_SIZE
+#define MICROPY_MALLOC_USES_ALLOCATED_SIZE (0)
+#endif
+
+// Number of bytes used to store qstr length
+// Dictates hard limit on maximum Python identifier length, but 1 byte
+// (limit of 255 bytes in an identifier) should be enough for everyone
+#ifndef MICROPY_QSTR_BYTES_IN_LEN
+#define MICROPY_QSTR_BYTES_IN_LEN (1)
+#endif
+
+// Number of bytes used to store qstr hash
+#ifndef MICROPY_QSTR_BYTES_IN_HASH
+#if MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES
+#define MICROPY_QSTR_BYTES_IN_HASH (2)
+#else
+#define MICROPY_QSTR_BYTES_IN_HASH (1)
+#endif
+#endif
+
+// Avoid using C stack when making Python function calls. C stack still
+// may be used if there's no free heap.
+#ifndef MICROPY_STACKLESS
+#define MICROPY_STACKLESS (0)
+#endif
+
+// Never use C stack when making Python function calls. This may break
+// testsuite as will subtly change which exception is thrown in case
+// of too deep recursion and other similar cases.
+#ifndef MICROPY_STACKLESS_STRICT
+#define MICROPY_STACKLESS_STRICT (0)
+#endif
+
+// Don't use alloca calls. As alloca() is not part of ANSI C, this
+// workaround option is provided for compilers lacking this de-facto
+// standard function. The way it works is allocating from heap, and
+// relying on garbage collection to free it eventually. This is of
+// course much less optimal than real alloca().
+#if defined(MICROPY_NO_ALLOCA) && MICROPY_NO_ALLOCA
+#undef alloca
+#define alloca(x) m_malloc(x)
+#endif
+
+// Number of atb indices to cache. Allocations of fewer blocks will be faster
+// because the search will be accelerated by the index cache. This only applies
+// to short lived allocations because we assume the long lived allocations are
+// contiguous.
+#ifndef MICROPY_ATB_INDICES
+#define MICROPY_ATB_INDICES (8)
+#endif
+
+/*****************************************************************************/
+/* MicroPython emitters */
+
+// Whether to support loading of persistent code
+#ifndef MICROPY_PERSISTENT_CODE_LOAD
+#define MICROPY_PERSISTENT_CODE_LOAD (0)
+#endif
+
+// Whether to support saving of persistent code
+#ifndef MICROPY_PERSISTENT_CODE_SAVE
+#define MICROPY_PERSISTENT_CODE_SAVE (0)
+#endif
+
+// Whether to support saving persistent code to a file via mp_raw_code_save_file
+#ifndef MICROPY_PERSISTENT_CODE_SAVE_FILE
+#define MICROPY_PERSISTENT_CODE_SAVE_FILE (0)
+#endif
+
+// Whether generated code can persist independently of the VM/runtime instance
+// This is enabled automatically when needed by other features
+#ifndef MICROPY_PERSISTENT_CODE
+#define MICROPY_PERSISTENT_CODE (MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE || MICROPY_MODULE_FROZEN_MPY)
+#endif
+
+// Whether to emit x64 native code
+#ifndef MICROPY_EMIT_X64
+#define MICROPY_EMIT_X64 (0)
+#endif
+
+// Whether to emit x86 native code
+#ifndef MICROPY_EMIT_X86
+#define MICROPY_EMIT_X86 (0)
+#endif
+
+// Whether to emit thumb native code
+#ifndef MICROPY_EMIT_THUMB
+#define MICROPY_EMIT_THUMB (0)
+#endif
+
+// Whether to emit ARMv7-M instruction support in thumb native code
+#ifndef MICROPY_EMIT_THUMB_ARMV7M
+#define MICROPY_EMIT_THUMB_ARMV7M (1)
+#endif
+
+// Whether to enable the thumb inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB
+#define MICROPY_EMIT_INLINE_THUMB (0)
+#endif
+
+// Whether to enable ARMv7-M instruction support in the Thumb2 inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB_ARMV7M
+#define MICROPY_EMIT_INLINE_THUMB_ARMV7M (1)
+#endif
+
+// Whether to enable float support in the Thumb2 inline assembler
+#ifndef MICROPY_EMIT_INLINE_THUMB_FLOAT
+#define MICROPY_EMIT_INLINE_THUMB_FLOAT (1)
+#endif
+
+// Whether to emit ARM native code
+#ifndef MICROPY_EMIT_ARM
+#define MICROPY_EMIT_ARM (0)
+#endif
+
+// Whether to emit Xtensa native code
+#ifndef MICROPY_EMIT_XTENSA
+#define MICROPY_EMIT_XTENSA (0)
+#endif
+
+// Whether to enable the Xtensa inline assembler
+#ifndef MICROPY_EMIT_INLINE_XTENSA
+#define MICROPY_EMIT_INLINE_XTENSA (0)
+#endif
+
+// Whether to emit Xtensa-Windowed native code
+#ifndef MICROPY_EMIT_XTENSAWIN
+#define MICROPY_EMIT_XTENSAWIN (0)
+#endif
+
+// Convenience definition for whether any native emitter is enabled
+#define MICROPY_EMIT_NATIVE (MICROPY_EMIT_X64 || MICROPY_EMIT_X86 || MICROPY_EMIT_THUMB || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA || MICROPY_EMIT_XTENSAWIN)
+
+// Select prelude-as-bytes-object for certain emitters
+#define MICROPY_EMIT_NATIVE_PRELUDE_AS_BYTES_OBJ (MICROPY_EMIT_XTENSAWIN)
+
+// Convenience definition for whether any inline assembler emitter is enabled
+#define MICROPY_EMIT_INLINE_ASM (MICROPY_EMIT_INLINE_THUMB || MICROPY_EMIT_INLINE_XTENSA)
+
+// Convenience definition for whether any native or inline assembler emitter is enabled
+#define MICROPY_EMIT_MACHINE_CODE (MICROPY_EMIT_NATIVE || MICROPY_EMIT_INLINE_ASM)
+
+// Whether native relocatable code loaded from .mpy files is explicitly tracked
+// so that the GC cannot reclaim it. Needed on architectures that allocate
+// executable memory on the MicroPython heap and don't explicitly track this
+// data some other way.
+#ifndef MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
+#if !MICROPY_EMIT_MACHINE_CODE || defined(MP_PLAT_ALLOC_EXEC) || defined(MP_PLAT_COMMIT_EXEC)
+#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (0)
+#else
+#define MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE (1)
+#endif
+#endif
+
+/*****************************************************************************/
+/* Compiler configuration */
+
+// Whether to include the compiler
+#ifndef MICROPY_ENABLE_COMPILER
+#define MICROPY_ENABLE_COMPILER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether the compiler is dynamically configurable (ie at runtime)
+// This will disable the ability to execute native/viper code
+#ifndef MICROPY_DYNAMIC_COMPILER
+#define MICROPY_DYNAMIC_COMPILER (0)
+#endif
+
+// Configure dynamic compiler macros
+#if MICROPY_DYNAMIC_COMPILER
+#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC (mp_dynamic_compiler.py_builtins_str_unicode)
+#else
+#define MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC MICROPY_PY_BUILTINS_STR_UNICODE
+#endif
+
+// Whether to enable constant folding; eg 1+2 rewritten as 3
+#ifndef MICROPY_COMP_CONST_FOLDING
+#define MICROPY_COMP_CONST_FOLDING (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisations for constant literals, eg OrderedDict
+#ifndef MICROPY_COMP_CONST_LITERAL
+#define MICROPY_COMP_CONST_LITERAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable lookup of constants in modules; eg module.CONST
+#ifndef MICROPY_COMP_MODULE_CONST
+#define MICROPY_COMP_MODULE_CONST (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable constant optimisation; id = const(value)
+#ifndef MICROPY_COMP_CONST
+#define MICROPY_COMP_CONST (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisation of: a, b = c, d
+// Costs 124 bytes (Thumb2)
+#ifndef MICROPY_COMP_DOUBLE_TUPLE_ASSIGN
+#define MICROPY_COMP_DOUBLE_TUPLE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to enable optimisation of: a, b, c = d, e, f
+// Requires MICROPY_COMP_DOUBLE_TUPLE_ASSIGN and costs 68 bytes (Thumb2)
+#ifndef MICROPY_COMP_TRIPLE_TUPLE_ASSIGN
+#define MICROPY_COMP_TRIPLE_TUPLE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable optimisation of: return a if b else c
+// Costs about 80 bytes (Thumb2) and saves 2 bytes of bytecode for each use
+#ifndef MICROPY_COMP_RETURN_IF_EXPR
+#define MICROPY_COMP_RETURN_IF_EXPR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include parsing of f-string literals
+#ifndef MICROPY_COMP_FSTRING_LITERAL
+#define MICROPY_COMP_FSTRING_LITERAL (1)
+#endif
+
+/*****************************************************************************/
+/* Internal debugging stuff */
+
+// Whether to collect memory allocation stats
+#ifndef MICROPY_MEM_STATS
+#define MICROPY_MEM_STATS (0)
+#endif
+
+// The mp_print_t printer used for debugging output
+#ifndef MICROPY_DEBUG_PRINTER
+#define MICROPY_DEBUG_PRINTER (&mp_plat_print)
+#endif
+
+// Whether to build functions that print debugging info:
+// mp_bytecode_print
+// mp_parse_node_print
+#ifndef MICROPY_DEBUG_PRINTERS
+#define MICROPY_DEBUG_PRINTERS (0)
+#endif
+
+// Whether to enable all debugging outputs (it will be extremely verbose)
+#ifndef MICROPY_DEBUG_VERBOSE
+#define MICROPY_DEBUG_VERBOSE (0)
+#endif
+
+// Whether to enable debugging versions of MP_OBJ_NULL/STOP_ITERATION/SENTINEL
+#ifndef MICROPY_DEBUG_MP_OBJ_SENTINELS
+#define MICROPY_DEBUG_MP_OBJ_SENTINELS (0)
+#endif
+
+// Whether to print parse rule names (rather than integers) in mp_parse_node_print
+#ifndef MICROPY_DEBUG_PARSE_RULE_NAME
+#define MICROPY_DEBUG_PARSE_RULE_NAME (0)
+#endif
+
+// Whether to enable a simple VM stack overflow check
+#ifndef MICROPY_DEBUG_VM_STACK_OVERFLOW
+#define MICROPY_DEBUG_VM_STACK_OVERFLOW (0)
+#endif
+
+// Whether to enable extra instrumentation for valgrind
+#ifndef MICROPY_DEBUG_VALGRIND
+#define MICROPY_DEBUG_VALGRIND (0)
+#endif
+
+/*****************************************************************************/
+/* Optimisations */
+
+// Whether to use computed gotos in the VM, or a switch
+// Computed gotos are roughly 10% faster, and increase VM code size by a little,
+// e.g. ~1kiB on Cortex M4.
+// Note: enabling this will use the gcc-specific extensions of ranged designated
+// initialisers and addresses of labels, which are not part of the C99 standard.
+#ifndef MICROPY_OPT_COMPUTED_GOTO
+#define MICROPY_OPT_COMPUTED_GOTO (0)
+#endif
+
+// Whether to trade flash space for speed in MICROPY_OPT_COMPUTED_GOTO.
+// Costs about 3% speed, saves about 1500 bytes space. In addition to the assumptions
+// of MICROPY_OPT_COMPUTED_GOTO, also assumes that mp_execute_bytecode is less than
+// 32kB in size.
+#ifndef MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE
+#define MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE (0)
+#endif
+
+// Optimise the fast path for loading attributes from instance types. Increases
+// Thumb2 code size by about 48 bytes.
+#ifndef MICROPY_OPT_LOAD_ATTR_FAST_PATH
+#define MICROPY_OPT_LOAD_ATTR_FAST_PATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Use extra RAM to cache map lookups by remembering the likely location of
+// the index. Avoids the hash computation on unordered maps, and avoids the
+// linear search on ordered (especially in-ROM) maps. Can provide a +10-15%
+// performance improvement on benchmarks involving lots of attribute access
+// or dictionary lookup.
+#ifndef MICROPY_OPT_MAP_LOOKUP_CACHE
+#define MICROPY_OPT_MAP_LOOKUP_CACHE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// How much RAM (in bytes) to use for the map lookup cache.
+#ifndef MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE
+#define MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE (128)
+#endif
+
+// Whether to use fast versions of bitwise operations (and, or, xor) when the
+// arguments are both positive. Increases Thumb2 code size by about 250 bytes.
+#ifndef MICROPY_OPT_MPZ_BITWISE
+#define MICROPY_OPT_MPZ_BITWISE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+
+// Whether math.factorial is large, fast and recursive (1) or small and slow (0).
+#ifndef MICROPY_OPT_MATH_FACTORIAL
+#define MICROPY_OPT_MATH_FACTORIAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+/*****************************************************************************/
+/* Python internal features */
+
+// Whether to enable import of external modules
+// When disabled, only importing of built-in modules is supported
+// When enabled, a port must implement mp_import_stat (among other things)
+#ifndef MICROPY_ENABLE_EXTERNAL_IMPORT
+#define MICROPY_ENABLE_EXTERNAL_IMPORT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to use the POSIX reader for importing files
+#ifndef MICROPY_READER_POSIX
+#define MICROPY_READER_POSIX (0)
+#endif
+
+// Whether to use the VFS reader for importing files
+#ifndef MICROPY_READER_VFS
+#define MICROPY_READER_VFS (0)
+#endif
+
+// Whether any readers have been defined
+#ifndef MICROPY_HAS_FILE_READER
+#define MICROPY_HAS_FILE_READER (MICROPY_READER_POSIX || MICROPY_READER_VFS)
+#endif
+
+// Number of VFS mounts to persist across soft-reset.
+#ifndef MICROPY_FATFS_NUM_PERSISTENT
+#define MICROPY_FATFS_NUM_PERSISTENT (0)
+#endif
+
+// Hook for the VM at the start of the opcode loop (can contain variable
+// definitions usable by the other hook functions)
+#ifndef MICROPY_VM_HOOK_INIT
+#define MICROPY_VM_HOOK_INIT
+#endif
+
+// Hook for the VM during the opcode loop (but only after jump opcodes)
+#ifndef MICROPY_VM_HOOK_LOOP
+#define MICROPY_VM_HOOK_LOOP
+#endif
+
+// Hook for the VM just before return opcode is finished being interpreted
+#ifndef MICROPY_VM_HOOK_RETURN
+#define MICROPY_VM_HOOK_RETURN
+#endif
+
+// Hook for mp_sched_schedule when a function gets scheduled on sched_queue
+// (this macro executes within an atomic section)
+#ifndef MICROPY_SCHED_HOOK_SCHEDULED
+#define MICROPY_SCHED_HOOK_SCHEDULED
+#endif
+
+// Whether to include the garbage collector
+#ifndef MICROPY_ENABLE_GC
+#define MICROPY_ENABLE_GC (0)
+#endif
+
+// Hook to run code during time consuming garbage collector operations
+#ifndef MICROPY_GC_HOOK_LOOP
+#define MICROPY_GC_HOOK_LOOP
+#endif
+
+// Whether to enable finalisers in the garbage collector (ie call __del__)
+#ifndef MICROPY_ENABLE_FINALISER
+#define MICROPY_ENABLE_FINALISER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable a separate allocator for the Python stack.
+// If enabled then the code must call mp_pystack_init before mp_init.
+#ifndef MICROPY_ENABLE_PYSTACK
+#define MICROPY_ENABLE_PYSTACK (0)
+#endif
+
+// Number of bytes that memory returned by mp_pystack_alloc will be aligned by.
+#ifndef MICROPY_PYSTACK_ALIGN
+#define MICROPY_PYSTACK_ALIGN (8)
+#endif
+
+// Whether to check C stack usage. C stack used for calling Python functions,
+// etc. Not checking means segfault on overflow.
+#ifndef MICROPY_STACK_CHECK
+#define MICROPY_STACK_CHECK (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to measure maximum stack excursion
+#ifndef MICROPY_MAX_STACK_USAGE
+#define MICROPY_MAX_STACK_USAGE (0)
+#endif
+
+// Whether to have an emergency exception buffer
+#ifndef MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+#define MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF (0)
+#endif
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+#ifndef MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
+#define MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE (0) // 0 - implies dynamic allocation
+#endif
+#endif
+
+// Whether to provide the mp_kbd_exception object, and micropython.kbd_intr function
+#ifndef MICROPY_KBD_EXCEPTION
+#define MICROPY_KBD_EXCEPTION (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Prefer to raise KeyboardInterrupt asynchronously (from signal or interrupt
+// handler) - if supported by a particular port.
+#ifndef MICROPY_ASYNC_KBD_INTR
+#define MICROPY_ASYNC_KBD_INTR (0)
+#endif
+
+// Whether to include REPL helper function
+#ifndef MICROPY_HELPER_REPL
+#define MICROPY_HELPER_REPL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Allow enabling debug prints after each REPL line
+#ifndef MICROPY_REPL_INFO
+#define MICROPY_REPL_INFO (0)
+#endif
+
+// Whether to include emacs-style readline behavior in REPL
+#ifndef MICROPY_REPL_EMACS_KEYS
+#define MICROPY_REPL_EMACS_KEYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include emacs-style word movement/kill readline behavior in REPL.
+// This adds Alt+F, Alt+B, Alt+D and Alt+Backspace for forward-word, backward-word, forward-kill-word
+// and backward-kill-word, respectively.
+#ifndef MICROPY_REPL_EMACS_WORDS_MOVE
+#define MICROPY_REPL_EMACS_WORDS_MOVE (0)
+#endif
+
+// Whether to include extra convenience keys for word movement/kill in readline REPL.
+// This adds Ctrl+Right, Ctrl+Left and Ctrl+W for forward-word, backward-word and backward-kill-word
+// respectively. Ctrl+Delete is not implemented because it's a very different escape sequence.
+// Depends on MICROPY_REPL_EMACS_WORDS_MOVE.
+#ifndef MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE
+#define MICROPY_REPL_EMACS_EXTRA_WORDS_MOVE (0)
+#endif
+
+// Whether to implement auto-indent in REPL
+#ifndef MICROPY_REPL_AUTO_INDENT
+#define MICROPY_REPL_AUTO_INDENT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether port requires event-driven REPL functions
+#ifndef MICROPY_REPL_EVENT_DRIVEN
+#define MICROPY_REPL_EVENT_DRIVEN (0)
+#endif
+
+// Whether to include lexer helper function for unix
+#ifndef MICROPY_HELPER_LEXER_UNIX
+#define MICROPY_HELPER_LEXER_UNIX (0)
+#endif
+
+// Long int implementation
+#define MICROPY_LONGINT_IMPL_NONE (0)
+#define MICROPY_LONGINT_IMPL_LONGLONG (1)
+#define MICROPY_LONGINT_IMPL_MPZ (2)
+
+#ifndef MICROPY_LONGINT_IMPL
+#define MICROPY_LONGINT_IMPL (MICROPY_LONGINT_IMPL_NONE)
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+typedef long long mp_longint_impl_t;
+#endif
+
+// Whether to include information in the byte code to determine source
+// line number (increases RAM usage, but doesn't slow byte code execution)
+#ifndef MICROPY_ENABLE_SOURCE_LINE
+#define MICROPY_ENABLE_SOURCE_LINE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include doc strings (increases RAM usage)
+#ifndef MICROPY_ENABLE_DOC_STRING
+#define MICROPY_ENABLE_DOC_STRING (0)
+#endif
+
+// Exception messages are removed (requires disabling MICROPY_ROM_TEXT_COMPRESSION)
+#define MICROPY_ERROR_REPORTING_NONE (0)
+// Exception messages are short static strings
+#define MICROPY_ERROR_REPORTING_TERSE (1)
+// Exception messages provide basic error details
+#define MICROPY_ERROR_REPORTING_NORMAL (2)
+// Exception messages provide full info, e.g. object names
+#define MICROPY_ERROR_REPORTING_DETAILED (3)
+
+#ifndef MICROPY_ERROR_REPORTING
+#if MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_FULL_FEATURES
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_DETAILED)
+#elif MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_NORMAL)
+#else
+#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_TERSE)
+#endif
+#endif
+
+// Whether issue warnings during compiling/execution
+#ifndef MICROPY_WARNINGS
+#define MICROPY_WARNINGS (0)
+#endif
+
+// Whether to support warning categories
+#ifndef MICROPY_WARNINGS_CATEGORY
+#define MICROPY_WARNINGS_CATEGORY (0)
+#endif
+
+// This macro is used when printing runtime warnings and errors
+#ifndef MICROPY_ERROR_PRINTER
+#define MICROPY_ERROR_PRINTER (&mp_plat_print)
+#endif
+
+// Float and complex implementation
+#define MICROPY_FLOAT_IMPL_NONE (0)
+#define MICROPY_FLOAT_IMPL_FLOAT (1)
+#define MICROPY_FLOAT_IMPL_DOUBLE (2)
+
+#ifndef MICROPY_FLOAT_IMPL
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_NONE)
+#endif
+
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_CONST(x) x##F
+#define MICROPY_FLOAT_C_FUN(fun) fun##f
+typedef float mp_float_t;
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define MICROPY_PY_BUILTINS_FLOAT (1)
+#define MICROPY_FLOAT_CONST(x) x
+#define MICROPY_FLOAT_C_FUN(fun) fun
+typedef double mp_float_t;
+#else
+#define MICROPY_PY_BUILTINS_FLOAT (0)
+#endif
+
+#ifndef MICROPY_PY_BUILTINS_COMPLEX
+#define MICROPY_PY_BUILTINS_COMPLEX (MICROPY_PY_BUILTINS_FLOAT)
+#endif
+
+// Whether to provide a high-quality hash for float and complex numbers.
+// Otherwise the default is a very simple but correct hashing function.
+#ifndef MICROPY_FLOAT_HIGH_QUALITY_HASH
+#define MICROPY_FLOAT_HIGH_QUALITY_HASH (0)
+#endif
+
+// Enable features which improve CPython compatibility
+// but may lead to more code size/memory usage.
+// TODO: Originally intended as generic category to not
+// add bunch of once-off options. May need refactoring later
+#ifndef MICROPY_CPYTHON_COMPAT
+#define MICROPY_CPYTHON_COMPAT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Perform full checks as done by CPython. Disabling this
+// may produce incorrect results, if incorrect data is fed,
+// but should not lead to MicroPython crashes or similar
+// grave issues (in other words, only user app should be
+// affected, not system).
+#ifndef MICROPY_FULL_CHECKS
+#define MICROPY_FULL_CHECKS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether POSIX-semantics non-blocking streams are supported
+#ifndef MICROPY_STREAMS_NON_BLOCK
+#define MICROPY_STREAMS_NON_BLOCK (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide stream functions with POSIX-like signatures
+// (useful for porting existing libraries to MicroPython).
+#ifndef MICROPY_STREAMS_POSIX_API
+#define MICROPY_STREAMS_POSIX_API (0)
+#endif
+
+// Whether to call __init__ when importing builtin modules for the first time
+#ifndef MICROPY_MODULE_BUILTIN_INIT
+#define MICROPY_MODULE_BUILTIN_INIT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support module-level __getattr__ (see PEP 562)
+#ifndef MICROPY_MODULE_GETATTR
+#define MICROPY_MODULE_GETATTR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether module weak links are supported
+#ifndef MICROPY_MODULE_WEAK_LINKS
+#define MICROPY_MODULE_WEAK_LINKS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable importing foo.py with __name__ set to '__main__'
+// Used by the unix port for the -m flag.
+#ifndef MICROPY_MODULE_OVERRIDE_MAIN_IMPORT
+#define MICROPY_MODULE_OVERRIDE_MAIN_IMPORT (0)
+#endif
+
+// Whether frozen modules are supported in the form of strings
+#ifndef MICROPY_MODULE_FROZEN_STR
+#define MICROPY_MODULE_FROZEN_STR (0)
+#endif
+
+// Whether frozen modules are supported in the form of .mpy files
+#ifndef MICROPY_MODULE_FROZEN_MPY
+#define MICROPY_MODULE_FROZEN_MPY (0)
+#endif
+
+// Convenience macro for whether frozen modules are supported
+#ifndef MICROPY_MODULE_FROZEN
+#define MICROPY_MODULE_FROZEN (MICROPY_MODULE_FROZEN_STR || MICROPY_MODULE_FROZEN_MPY)
+#endif
+
+// Whether you can override builtins in the builtins module
+#ifndef MICROPY_CAN_OVERRIDE_BUILTINS
+#define MICROPY_CAN_OVERRIDE_BUILTINS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to check that the "self" argument of a builtin method has the
+// correct type. Such an explicit check is only needed if a builtin
+// method escapes to Python land without a first argument, eg
+// list.append([], 1). Without this check such calls will have undefined
+// behaviour (usually segfault) if the first argument is the wrong type.
+#ifndef MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+#define MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to use internally defined errno's (otherwise system provided ones)
+#ifndef MICROPY_USE_INTERNAL_ERRNO
+#define MICROPY_USE_INTERNAL_ERRNO (0)
+#endif
+
+// Whether to use internally defined *printf() functions (otherwise external ones)
+#ifndef MICROPY_USE_INTERNAL_PRINTF
+#define MICROPY_USE_INTERNAL_PRINTF (1)
+#endif
+
+// Support for internal scheduler
+#ifndef MICROPY_ENABLE_SCHEDULER
+#define MICROPY_ENABLE_SCHEDULER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Maximum number of entries in the scheduler
+#ifndef MICROPY_SCHEDULER_DEPTH
+#define MICROPY_SCHEDULER_DEPTH (4)
+#endif
+
+// Support for generic VFS sub-system
+#ifndef MICROPY_VFS
+#define MICROPY_VFS (0)
+#endif
+
+// Support for VFS POSIX component, to mount a POSIX filesystem within VFS
+// NOTE: guard must test MICROPY_VFS_POSIX itself (not MICROPY_VFS), otherwise
+// a port that defines MICROPY_VFS would leave MICROPY_VFS_POSIX undefined.
+#ifndef MICROPY_VFS_POSIX
+#define MICROPY_VFS_POSIX (0)
+#endif
+
+// Support for VFS FAT component, to mount a FAT filesystem within VFS
+// NOTE: guard must test MICROPY_VFS_FAT itself (not MICROPY_VFS), otherwise
+// a port that defines MICROPY_VFS would leave MICROPY_VFS_FAT undefined.
+#ifndef MICROPY_VFS_FAT
+#define MICROPY_VFS_FAT (0)
+#endif
+
+// 1 when building C code for native mpy files. 0 otherwise.
+#ifndef MICROPY_ENABLE_DYNRUNTIME
+#define MICROPY_ENABLE_DYNRUNTIME (0)
+#endif
+
+/*****************************************************************************/
+/* Fine control over Python builtins, classes, modules, etc */
+
+// Whether to support multiple inheritance of Python classes. Multiple
+// inheritance makes some C functions inherently recursive, and adds a bit of
+// code overhead.
+#ifndef MICROPY_MULTIPLE_INHERITANCE
+#define MICROPY_MULTIPLE_INHERITANCE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to implement attributes on functions
+#ifndef MICROPY_PY_FUNCTION_ATTRS
+#define MICROPY_PY_FUNCTION_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the descriptors __get__, __set__, __delete__
+// This costs some code size and makes load/store/delete of instance
+// attributes slower for the classes that use this feature
+#ifndef MICROPY_PY_DESCRIPTORS
+#define MICROPY_PY_DESCRIPTORS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support class __delattr__ and __setattr__ methods
+// This costs some code size and makes store/delete of instance
+// attributes slower for the classes that use this feature
+#ifndef MICROPY_PY_DELATTR_SETATTR
+#define MICROPY_PY_DELATTR_SETATTR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Support for async/await/async for/async with
+#ifndef MICROPY_PY_ASYNC_AWAIT
+#define MICROPY_PY_ASYNC_AWAIT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Support for literal string interpolation, f-strings (see PEP 498, Python 3.6+)
+#ifndef MICROPY_PY_FSTRINGS
+#define MICROPY_PY_FSTRINGS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Support for assignment expressions with := (see PEP 572, Python 3.8+)
+#ifndef MICROPY_PY_ASSIGN_EXPR
+#define MICROPY_PY_ASSIGN_EXPR (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Non-standard .pend_throw() method for generators, allowing for
+// Future-like behavior with respect to exception handling: an
+// exception set with .pend_throw() will activate on the next call
+// to generator's .send() or .__next__(). (This is useful to implement
+// async schedulers.)
+#ifndef MICROPY_PY_GENERATOR_PEND_THROW
+#define MICROPY_PY_GENERATOR_PEND_THROW (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Issue a warning when comparing str and bytes objects
+#ifndef MICROPY_PY_STR_BYTES_CMP_WARN
+#define MICROPY_PY_STR_BYTES_CMP_WARN (0)
+#endif
+
+// Whether str object is proper unicode
+#ifndef MICROPY_PY_BUILTINS_STR_UNICODE
+#define MICROPY_PY_BUILTINS_STR_UNICODE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to check for valid UTF-8 when converting bytes to str
+#ifndef MICROPY_PY_BUILTINS_STR_UNICODE_CHECK
+#define MICROPY_PY_BUILTINS_STR_UNICODE_CHECK (MICROPY_PY_BUILTINS_STR_UNICODE)
+#endif
+
+// Whether str.center() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_CENTER
+#define MICROPY_PY_BUILTINS_STR_CENTER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether str.count() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_COUNT
+#define MICROPY_PY_BUILTINS_STR_COUNT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether str % (...) formatting operator provided
+#ifndef MICROPY_PY_BUILTINS_STR_OP_MODULO
+#define MICROPY_PY_BUILTINS_STR_OP_MODULO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether str.partition()/str.rpartition() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_PARTITION
+#define MICROPY_PY_BUILTINS_STR_PARTITION (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether str.splitlines() method provided
+#ifndef MICROPY_PY_BUILTINS_STR_SPLITLINES
+#define MICROPY_PY_BUILTINS_STR_SPLITLINES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support bytearray object
+#ifndef MICROPY_PY_BUILTINS_BYTEARRAY
+#define MICROPY_PY_BUILTINS_BYTEARRAY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support dict.fromkeys() class method
+#ifndef MICROPY_PY_BUILTINS_DICT_FROMKEYS
+#define MICROPY_PY_BUILTINS_DICT_FROMKEYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support memoryview object
+#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW
+#define MICROPY_PY_BUILTINS_MEMORYVIEW (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support memoryview.itemsize attribute
+#ifndef MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE
+#define MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE (0)
+#endif
+
+// Whether to support set object
+#ifndef MICROPY_PY_BUILTINS_SET
+#define MICROPY_PY_BUILTINS_SET (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice subscript operators and slice object
+#ifndef MICROPY_PY_BUILTINS_SLICE
+#define MICROPY_PY_BUILTINS_SLICE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice attribute read access,
+// i.e. slice.start, slice.stop, slice.step
+#ifndef MICROPY_PY_BUILTINS_SLICE_ATTRS
+#define MICROPY_PY_BUILTINS_SLICE_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the .indices(len) method on slice objects
+#ifndef MICROPY_PY_BUILTINS_SLICE_INDICES
+#define MICROPY_PY_BUILTINS_SLICE_INDICES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support frozenset object
+#ifndef MICROPY_PY_BUILTINS_FROZENSET
+#define MICROPY_PY_BUILTINS_FROZENSET (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support property object
+#ifndef MICROPY_PY_BUILTINS_PROPERTY
+#define MICROPY_PY_BUILTINS_PROPERTY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to optimize property flash storage size (requires linker script support)
+// NOTE(review): the original block re-guarded MICROPY_PY_BUILTINS_PROPERTY
+// (already defaulted above), leaving this option with no default. The comment
+// describes the property-flash-size optimization macro; guard/define that one.
+#ifndef MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE
+#define MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE (0)
+#endif
+
+// Whether to implement the start/stop/step attributes (readback) on
+// the "range" builtin type. Rarely used, and costs ~60 bytes (x86).
+#ifndef MICROPY_PY_BUILTINS_RANGE_ATTRS
+#define MICROPY_PY_BUILTINS_RANGE_ATTRS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support binary ops [only (in)equality is defined] between range
+// objects. With this option disabled all range objects that are not exactly
+// the same object will compare as not-equal. With it enabled the semantics
+// match CPython and ranges are equal if they yield the same sequence of items.
+#ifndef MICROPY_PY_BUILTINS_RANGE_BINOP
+#define MICROPY_PY_BUILTINS_RANGE_BINOP (0)
+#endif
+
+// Support for calling next() with second argument
+#ifndef MICROPY_PY_BUILTINS_NEXT2
+#define MICROPY_PY_BUILTINS_NEXT2 (0)
+#endif
+
+// Whether to support rounding of integers (incl bignum); eg round(123,-1)=120
+#ifndef MICROPY_PY_BUILTINS_ROUND_INT
+#define MICROPY_PY_BUILTINS_ROUND_INT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support complete set of special methods for user
+// classes, or only the most used ones. "Inplace" methods are
+// controlled by MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS below.
+// "Reverse" methods are controlled by
+// MICROPY_PY_REVERSE_SPECIAL_METHODS below.
+#ifndef MICROPY_PY_ALL_SPECIAL_METHODS
+#define MICROPY_PY_ALL_SPECIAL_METHODS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support all inplace arithmetic operation methods
+// (__imul__, etc.)
+#ifndef MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
+#define MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS (0)
+#endif
+
+// Whether to support reverse arithmetic operation methods
+// (__radd__, etc.). Additionally gated by
+// MICROPY_PY_ALL_SPECIAL_METHODS.
+#ifndef MICROPY_PY_REVERSE_SPECIAL_METHODS
+#define MICROPY_PY_REVERSE_SPECIAL_METHODS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support compile function
+#ifndef MICROPY_PY_BUILTINS_COMPILE
+#define MICROPY_PY_BUILTINS_COMPILE (MICROPY_ENABLE_COMPILER && MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support enumerate function(type)
+#ifndef MICROPY_PY_BUILTINS_ENUMERATE
+#define MICROPY_PY_BUILTINS_ENUMERATE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support eval and exec functions
+// By default they are supported if the compiler is enabled
+#ifndef MICROPY_PY_BUILTINS_EVAL_EXEC
+#define MICROPY_PY_BUILTINS_EVAL_EXEC (MICROPY_ENABLE_COMPILER)
+#endif
+
+// Whether to support the Python 2 execfile function
+#ifndef MICROPY_PY_BUILTINS_EXECFILE
+#define MICROPY_PY_BUILTINS_EXECFILE (MICROPY_ENABLE_COMPILER && MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support filter function(type)
+#ifndef MICROPY_PY_BUILTINS_FILTER
+#define MICROPY_PY_BUILTINS_FILTER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support reversed function(type)
+#ifndef MICROPY_PY_BUILTINS_REVERSED
+#define MICROPY_PY_BUILTINS_REVERSED (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to define "NotImplemented" special constant
+#ifndef MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+#define MICROPY_PY_BUILTINS_NOTIMPLEMENTED (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the built-in input() function. The implementation of this
+// uses shared/readline, so can only be enabled if the port uses this readline.
+#ifndef MICROPY_PY_BUILTINS_INPUT
+#define MICROPY_PY_BUILTINS_INPUT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support min/max functions
+#ifndef MICROPY_PY_BUILTINS_MIN_MAX
+#define MICROPY_PY_BUILTINS_MIN_MAX (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Support for calls to pow() with 3 integer arguments
+#ifndef MICROPY_PY_BUILTINS_POW3
+#define MICROPY_PY_BUILTINS_POW3 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the help function
+#ifndef MICROPY_PY_BUILTINS_HELP
+#define MICROPY_PY_BUILTINS_HELP (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Use this to configure the help text shown for help(). It should be a
+// variable with the type "const char*". A sensible default is provided.
+#ifndef MICROPY_PY_BUILTINS_HELP_TEXT
+#define MICROPY_PY_BUILTINS_HELP_TEXT mp_help_default_text
+#endif
+
+// Add the ability to list the available modules when executing help('modules')
+#ifndef MICROPY_PY_BUILTINS_HELP_MODULES
+#define MICROPY_PY_BUILTINS_HELP_MODULES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to set __file__ for imported modules
+#ifndef MICROPY_PY___FILE__
+#define MICROPY_PY___FILE__ (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide mem-info related functions in micropython module
+#ifndef MICROPY_PY_MICROPYTHON_MEM_INFO
+#define MICROPY_PY_MICROPYTHON_MEM_INFO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "micropython.stack_use" function
+#ifndef MICROPY_PY_MICROPYTHON_STACK_USE
+#define MICROPY_PY_MICROPYTHON_STACK_USE (MICROPY_PY_MICROPYTHON_MEM_INFO)
+#endif
+
+// Whether to provide the "micropython.heap_locked" function
+#ifndef MICROPY_PY_MICROPYTHON_HEAP_LOCKED
+#define MICROPY_PY_MICROPYTHON_HEAP_LOCKED (0)
+#endif
+
+// Whether to provide "array" module. Note that large chunk of the
+// underlying code is shared with "bytearray" builtin type, so to
+// get real savings, it should be disabled too.
+#ifndef MICROPY_PY_ARRAY
+#define MICROPY_PY_ARRAY (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to support slice assignments for array (and bytearray).
+// This is rarely used, but adds ~0.5K of code.
+#ifndef MICROPY_PY_ARRAY_SLICE_ASSIGN
+#define MICROPY_PY_ARRAY_SLICE_ASSIGN (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support nonstandard typecodes "O", "P" and "S"
+// in array and struct modules.
+#ifndef MICROPY_NONSTANDARD_TYPECODES
+#define MICROPY_NONSTANDARD_TYPECODES (1)
+#endif
+
+// Whether to support attrtuple type (MicroPython extension)
+// It provides space-efficient tuples with attribute access
+#ifndef MICROPY_PY_ATTRTUPLE
+#define MICROPY_PY_ATTRTUPLE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "collections" module
+#ifndef MICROPY_PY_COLLECTIONS
+#define MICROPY_PY_COLLECTIONS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "ucollections.deque" type
+#ifndef MICROPY_PY_COLLECTIONS_DEQUE
+#define MICROPY_PY_COLLECTIONS_DEQUE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "collections.OrderedDict" type
+#ifndef MICROPY_PY_COLLECTIONS_ORDEREDDICT
+#define MICROPY_PY_COLLECTIONS_ORDEREDDICT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the _asdict function for namedtuple
+#ifndef MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT
+#define MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT (0)
+#endif
+
+// Whether to provide "math" module
+#ifndef MICROPY_PY_MATH
+#define MICROPY_PY_MATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide special math functions: math.{erf,erfc,gamma,lgamma}
+#ifndef MICROPY_PY_MATH_SPECIAL_FUNCTIONS
+#define MICROPY_PY_MATH_SPECIAL_FUNCTIONS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide math.factorial function
+#ifndef MICROPY_PY_MATH_FACTORIAL
+#define MICROPY_PY_MATH_FACTORIAL (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide math.isclose function
+#ifndef MICROPY_PY_MATH_ISCLOSE
+#define MICROPY_PY_MATH_ISCLOSE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide fix for atan2 Inf handling.
+#ifndef MICROPY_PY_MATH_ATAN2_FIX_INFNAN
+#define MICROPY_PY_MATH_ATAN2_FIX_INFNAN (0)
+#endif
+
+// Whether to provide fix for fmod Inf handling.
+#ifndef MICROPY_PY_MATH_FMOD_FIX_INFNAN
+#define MICROPY_PY_MATH_FMOD_FIX_INFNAN (0)
+#endif
+
+// Whether to provide fix for modf negative zero handling.
+#ifndef MICROPY_PY_MATH_MODF_FIX_NEGZERO
+#define MICROPY_PY_MATH_MODF_FIX_NEGZERO (0)
+#endif
+
+// Whether to provide fix for pow(1, NaN) and pow(NaN, 0), which both should be 1 not NaN.
+#ifndef MICROPY_PY_MATH_POW_FIX_NAN
+#define MICROPY_PY_MATH_POW_FIX_NAN (0)
+#endif
+
+// Whether to provide "cmath" module
+#ifndef MICROPY_PY_CMATH
+#define MICROPY_PY_CMATH (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "gc" module
+#ifndef MICROPY_PY_GC
+#define MICROPY_PY_GC (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to return number of collected objects from gc.collect()
+#ifndef MICROPY_PY_GC_COLLECT_RETVAL
+#define MICROPY_PY_GC_COLLECT_RETVAL (0)
+#endif
+
+// Whether to provide "io" module
+#ifndef MICROPY_PY_IO
+#define MICROPY_PY_IO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "io.IOBase" class to support user streams
+#ifndef MICROPY_PY_IO_IOBASE
+#define MICROPY_PY_IO_IOBASE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "io.FileIO" class
+#ifndef MICROPY_PY_IO_FILEIO
+#define MICROPY_PY_IO_FILEIO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "io.BytesIO" class
+#ifndef MICROPY_PY_IO_BYTESIO
+#define MICROPY_PY_IO_BYTESIO (1)
+#endif
+
+// Whether to provide "io.BufferedWriter" class
+#ifndef MICROPY_PY_IO_BUFFEREDWRITER
+#define MICROPY_PY_IO_BUFFEREDWRITER (0)
+#endif
+
+// Whether to provide "struct" module
+#ifndef MICROPY_PY_STRUCT
+#define MICROPY_PY_STRUCT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to provide "sys" module
+#ifndef MICROPY_PY_SYS
+#define MICROPY_PY_SYS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_CORE_FEATURES)
+#endif
+
+// Whether to initialise "sys.path" and "sys.argv" to their defaults in mp_init()
+#ifndef MICROPY_PY_SYS_PATH_ARGV_DEFAULTS
+#define MICROPY_PY_SYS_PATH_ARGV_DEFAULTS (MICROPY_PY_SYS)
+#endif
+
+// Whether to provide "sys.maxsize" constant
+#ifndef MICROPY_PY_SYS_MAXSIZE
+#define MICROPY_PY_SYS_MAXSIZE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "sys.modules" dictionary
+#ifndef MICROPY_PY_SYS_MODULES
+#define MICROPY_PY_SYS_MODULES (1)
+#endif
+
+// Whether to provide "sys.exc_info" function
+// Avoid enabling this, this function is Python2 heritage
+#ifndef MICROPY_PY_SYS_EXC_INFO
+#define MICROPY_PY_SYS_EXC_INFO (0)
+#endif
+
+// Whether to provide "sys.exit" function
+#ifndef MICROPY_PY_SYS_EXIT
+#define MICROPY_PY_SYS_EXIT (1)
+#endif
+
+// Whether to provide "sys.atexit" function (MicroPython extension)
+#ifndef MICROPY_PY_SYS_ATEXIT
+#define MICROPY_PY_SYS_ATEXIT (0)
+#endif
+
+// Whether to provide "sys.settrace" function
+#ifndef MICROPY_PY_SYS_SETTRACE
+#define MICROPY_PY_SYS_SETTRACE (0)
+#endif
+
+// Whether to provide "sys.getsizeof" function
+#ifndef MICROPY_PY_SYS_GETSIZEOF
+#define MICROPY_PY_SYS_GETSIZEOF (0)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr} objects
+#ifndef MICROPY_PY_SYS_STDFILES
+#define MICROPY_PY_SYS_STDFILES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide sys.{stdin,stdout,stderr}.buffer object
+// This is implemented per-port
+#ifndef MICROPY_PY_SYS_STDIO_BUFFER
+#define MICROPY_PY_SYS_STDIO_BUFFER (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide "uerrno" module
+#ifndef MICROPY_PY_UERRNO
+#define MICROPY_PY_UERRNO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide the uerrno.errorcode dict
+#ifndef MICROPY_PY_UERRNO_ERRORCODE
+#define MICROPY_PY_UERRNO_ERRORCODE (1)
+#endif
+
+// Whether to provide "uselect" module (baremetal implementation)
+#ifndef MICROPY_PY_USELECT
+#define MICROPY_PY_USELECT (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to enable the select() function in the "uselect" module (baremetal
+// implementation). This is present for compatibility but can be disabled to
+// save space.
+#ifndef MICROPY_PY_USELECT_SELECT
+#define MICROPY_PY_USELECT_SELECT (1)
+#endif
+
+// Whether to provide "utime" module functions implementation
+// in terms of mp_hal_* functions.
+#ifndef MICROPY_PY_UTIME_MP_HAL
+#define MICROPY_PY_UTIME_MP_HAL (0)
+#endif
+
+// Period of values returned by utime.ticks_ms(), ticks_us(), ticks_cpu()
+// functions. Should be power of two. All functions above use the same
+// period, so if underlying hardware/API has different periods, the
+// minimum of them should be used. The value below is the maximum value
+// this parameter can take (corresponding to 30 bit tick values on 32-bit
+// system).
+#ifndef MICROPY_PY_UTIME_TICKS_PERIOD
+#define MICROPY_PY_UTIME_TICKS_PERIOD (MP_SMALL_INT_POSITIVE_MASK + 1)
+#endif
+
+// Whether to provide "_thread" module
+#ifndef MICROPY_PY_THREAD
+#define MICROPY_PY_THREAD (0)
+#endif
+
+// Whether to make the VM/runtime thread-safe using a global lock
+// If not enabled then thread safety must be provided at the Python level
+#ifndef MICROPY_PY_THREAD_GIL
+#define MICROPY_PY_THREAD_GIL (MICROPY_PY_THREAD)
+#endif
+
+// Number of VM jump-loops to do before releasing the GIL.
+// Set this to 0 to disable the divisor.
+#ifndef MICROPY_PY_THREAD_GIL_VM_DIVISOR
+#define MICROPY_PY_THREAD_GIL_VM_DIVISOR (32)
+#endif
+
+// Extended modules
+
+#ifndef MICROPY_PY_UASYNCIO
+#define MICROPY_PY_UASYNCIO (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_UCTYPES
+#define MICROPY_PY_UCTYPES (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to provide SHORT, INT, LONG, etc. types in addition to
+// exact-bitness types like INT16, INT32, etc.
+#ifndef MICROPY_PY_UCTYPES_NATIVE_C_TYPES
+#define MICROPY_PY_UCTYPES_NATIVE_C_TYPES (1)
+#endif
+
+#ifndef MICROPY_PY_UZLIB
+#define MICROPY_PY_UZLIB (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_UJSON
+#define MICROPY_PY_UJSON (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to support the "separators" argument to dump, dumps
+#ifndef MICROPY_PY_UJSON_SEPARATORS
+#define MICROPY_PY_UJSON_SEPARATORS (1)
+#endif
+
+#ifndef CIRCUITPY_ULAB
+#define CIRCUITPY_ULAB (0)
+#endif
+
+#ifndef MICROPY_PY_URE
+#define MICROPY_PY_URE (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_URE_DEBUG
+#define MICROPY_PY_URE_DEBUG (0)
+#endif
+
+#ifndef MICROPY_PY_URE_MATCH_GROUPS
+#define MICROPY_PY_URE_MATCH_GROUPS (0)
+#endif
+
+#ifndef MICROPY_PY_URE_MATCH_SPAN_START_END
+#define MICROPY_PY_URE_MATCH_SPAN_START_END (0)
+#endif
+
+#ifndef MICROPY_PY_URE_SUB
+#define MICROPY_PY_URE_SUB (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_UHEAPQ
+#define MICROPY_PY_UHEAPQ (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Optimized heap queue for relative timestamps (only used by uasyncio v2)
+#ifndef MICROPY_PY_UTIMEQ
+#define MICROPY_PY_UTIMEQ (0)
+#endif
+
+#ifndef MICROPY_PY_UHASHLIB
+#define MICROPY_PY_UHASHLIB (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_UHASHLIB_MD5
+#define MICROPY_PY_UHASHLIB_MD5 (0)
+#endif
+
+#ifndef MICROPY_PY_UHASHLIB_SHA1
+#define MICROPY_PY_UHASHLIB_SHA1 (0)
+#endif
+
+#ifndef MICROPY_PY_UHASHLIB_SHA256
+#define MICROPY_PY_UHASHLIB_SHA256 (1)
+#endif
+
+#ifndef MICROPY_PY_UCRYPTOLIB
+#define MICROPY_PY_UCRYPTOLIB (0)
+#endif
+
+// Depends on MICROPY_PY_UCRYPTOLIB
+#ifndef MICROPY_PY_UCRYPTOLIB_CTR
+#define MICROPY_PY_UCRYPTOLIB_CTR (0)
+#endif
+
+#ifndef MICROPY_PY_UCRYPTOLIB_CONSTS
+#define MICROPY_PY_UCRYPTOLIB_CONSTS (0)
+#endif
+
+#ifndef MICROPY_PY_UBINASCII
+#define MICROPY_PY_UBINASCII (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Depends on MICROPY_PY_UZLIB
+#ifndef MICROPY_PY_UBINASCII_CRC32
+#define MICROPY_PY_UBINASCII_CRC32 (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_URANDOM
+#define MICROPY_PY_URANDOM (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+// Whether to include: randrange, randint, choice, random, uniform
+#ifndef MICROPY_PY_URANDOM_EXTRA_FUNCS
+#define MICROPY_PY_URANDOM_EXTRA_FUNCS (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_MACHINE
+#define MICROPY_PY_MACHINE (0)
+#endif
+
+// Whether to include: bitstream
+#ifndef MICROPY_PY_MACHINE_BITSTREAM
+#define MICROPY_PY_MACHINE_BITSTREAM (0)
+#endif
+
+// Whether to include: time_pulse_us
+#ifndef MICROPY_PY_MACHINE_PULSE
+#define MICROPY_PY_MACHINE_PULSE (0)
+#endif
+
+#ifndef MICROPY_PY_MACHINE_I2C
+#define MICROPY_PY_MACHINE_I2C (0)
+#endif
+
+// Whether to provide the "machine.SoftI2C" class
+#ifndef MICROPY_PY_MACHINE_SOFTI2C
+#define MICROPY_PY_MACHINE_SOFTI2C (0)
+#endif
+
+#ifndef MICROPY_PY_MACHINE_SPI
+#define MICROPY_PY_MACHINE_SPI (0)
+#endif
+
+// Whether to provide the "machine.SoftSPI" class
+#ifndef MICROPY_PY_MACHINE_SOFTSPI
+#define MICROPY_PY_MACHINE_SOFTSPI (0)
+#endif
+
+#ifndef MICROPY_PY_USSL
+#define MICROPY_PY_USSL (0)
+#endif
+
+// Whether to add finaliser code to ussl objects
+#ifndef MICROPY_PY_USSL_FINALISER
+#define MICROPY_PY_USSL_FINALISER (0)
+#endif
+
+#ifndef MICROPY_PY_UWEBSOCKET
+#define MICROPY_PY_UWEBSOCKET (0)
+#endif
+
+#ifndef MICROPY_PY_FRAMEBUF
+#define MICROPY_PY_FRAMEBUF (MICROPY_CONFIG_ROM_LEVEL_AT_LEAST_EXTRA_FEATURES)
+#endif
+
+#ifndef MICROPY_PY_BTREE
+#define MICROPY_PY_BTREE (0)
+#endif
+
+#ifndef MICROPY_HW_ENABLE_USB
+#define MICROPY_HW_ENABLE_USB (0)
+#endif
+
+// Whether to provide the low-level "_onewire" module
+#ifndef MICROPY_PY_ONEWIRE
+#define MICROPY_PY_ONEWIRE (0)
+#endif
+
+/*****************************************************************************/
+/* Hooks for a port to add builtins */
+
+// Additional builtin function definitions - see modbuiltins.c:mp_module_builtins_globals_table for format.
+#ifndef MICROPY_PORT_BUILTINS
+#define MICROPY_PORT_BUILTINS
+#endif
+
+// Additional builtin function definitions for extension by command-line, boards or variants.
+// See modbuiltins.c:mp_module_builtins_globals_table for format.
+#ifndef MICROPY_PORT_EXTRA_BUILTINS
+#define MICROPY_PORT_EXTRA_BUILTINS
+#endif
+
+// Additional builtin module definitions - see objmodule.c:mp_builtin_module_table for format.
+#ifndef MICROPY_PORT_BUILTIN_MODULES
+#define MICROPY_PORT_BUILTIN_MODULES
+#endif
+
+// Additional constant definitions for the compiler - see compile.c:mp_constants_table.
+#ifndef MICROPY_PORT_CONSTANTS
+#define MICROPY_PORT_CONSTANTS
+#endif
+
+// Any root pointers for GC scanning - see mpstate.c
+#ifndef MICROPY_PORT_ROOT_POINTERS
+#define MICROPY_PORT_ROOT_POINTERS
+#endif
+
+/*****************************************************************************/
+/* Hooks for a port to wrap functions with attributes */
+
+#ifndef MICROPY_WRAP_MP_BINARY_OP
+#define MICROPY_WRAP_MP_BINARY_OP(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_EXECUTE_BYTECODE
+#define MICROPY_WRAP_MP_EXECUTE_BYTECODE(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_LOAD_GLOBAL
+#define MICROPY_WRAP_MP_LOAD_GLOBAL(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_LOAD_NAME
+#define MICROPY_WRAP_MP_LOAD_NAME(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_MAP_LOOKUP
+#define MICROPY_WRAP_MP_MAP_LOOKUP(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_OBJ_GET_TYPE
+#define MICROPY_WRAP_MP_OBJ_GET_TYPE(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_EXCEPTION
+#define MICROPY_WRAP_MP_SCHED_EXCEPTION(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT
+#define MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT(f) f
+#endif
+
+#ifndef MICROPY_WRAP_MP_SCHED_SCHEDULE
+#define MICROPY_WRAP_MP_SCHED_SCHEDULE(f) f
+#endif
+
+/*****************************************************************************/
+/* Miscellaneous settings */
+
+// All uPy objects in ROM must be aligned on at least a 4 byte boundary
+// so that the small-int/qstr/pointer distinction can be made. For machines
+// that don't do this (eg 16-bit CPU), define the following macro to something
+// like __attribute__((aligned(4))).
+#ifndef MICROPY_OBJ_BASE_ALIGNMENT
+#define MICROPY_OBJ_BASE_ALIGNMENT
+#endif
+
+// On embedded platforms, these will typically enable/disable irqs.
+#ifndef MICROPY_BEGIN_ATOMIC_SECTION
+#define MICROPY_BEGIN_ATOMIC_SECTION() (0)
+#endif
+#ifndef MICROPY_END_ATOMIC_SECTION
+#define MICROPY_END_ATOMIC_SECTION(state) (void)(state)
+#endif
+
+// Allow to override static modifier for global objects, e.g. to use with
+// object code analysis tools which don't support static symbols.
+#ifndef STATIC
+#define STATIC static
+#endif
+
+// Number of bytes in an object word: mp_obj_t, mp_uint_t, mp_int_t
+#ifndef MP_BYTES_PER_OBJ_WORD
+#define MP_BYTES_PER_OBJ_WORD (sizeof(mp_uint_t))
+#endif
+
+// Number of bits in a byte
+#ifndef MP_BITS_PER_BYTE
+#define MP_BITS_PER_BYTE (8)
+#endif
+// mp_int_t value with most significant bit set
+#define MP_OBJ_WORD_MSBIT_HIGH (((mp_uint_t)1) << (MP_BYTES_PER_OBJ_WORD * MP_BITS_PER_BYTE - 1))
+
+// Make sure both MP_ENDIANNESS_LITTLE and MP_ENDIANNESS_BIG are
+// defined and that they are the opposite of each other.
+#if defined(MP_ENDIANNESS_LITTLE)
+#define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#elif defined(MP_ENDIANNESS_BIG)
+#define MP_ENDIANNESS_LITTLE (!MP_ENDIANNESS_BIG)
+#else
+// Endianness not defined by port so try to autodetect it.
+ #if defined(__BYTE_ORDER__)
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define MP_ENDIANNESS_LITTLE (1)
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define MP_ENDIANNESS_LITTLE (0)
+ #endif
+ #else
+ #include <endian.h>
+ #if defined(__BYTE_ORDER)
+ #if __BYTE_ORDER == __LITTLE_ENDIAN
+ #define MP_ENDIANNESS_LITTLE (1)
+ #elif __BYTE_ORDER == __BIG_ENDIAN
+ #define MP_ENDIANNESS_LITTLE (0)
+ #endif
+ #endif
+ #endif
+ #ifndef MP_ENDIANNESS_LITTLE
+ #error endianness not defined and cannot detect it
+ #endif
+ #define MP_ENDIANNESS_BIG (!MP_ENDIANNESS_LITTLE)
+#endif
+
+// Make a pointer to RAM callable (eg set lower bit for Thumb code)
+// (This scheme won't work if we want to mix Thumb and normal ARM code.)
+#ifndef MICROPY_MAKE_POINTER_CALLABLE
+#define MICROPY_MAKE_POINTER_CALLABLE(p) (p)
+#endif
+
+// If these MP_PLAT_*_EXEC macros are overridden then the memory allocated by them
+// must be somehow reachable for marking by the GC, since the native code
+// generators store pointers to GC managed memory in the code.
+#ifndef MP_PLAT_ALLOC_EXEC
+#define MP_PLAT_ALLOC_EXEC(min_size, ptr, size) do { *ptr = m_new(byte, min_size); *size = min_size; } while (0)
+#endif
+
+#ifndef MP_PLAT_FREE_EXEC
+#define MP_PLAT_FREE_EXEC(ptr, size) m_del(byte, ptr, size)
+#endif
+
+// This macro is used to do all output (except when MICROPY_PY_IO is defined)
+#ifndef MP_PLAT_PRINT_STRN
+#define MP_PLAT_PRINT_STRN(str, len) mp_hal_stdout_tx_strn_cooked(str, len)
+#endif
+
+#ifndef MP_SSIZE_MAX
+#define MP_SSIZE_MAX SSIZE_MAX
+#endif
+
+// printf format spec to use for mp_int_t and friends
+#ifndef INT_FMT
+#if defined(__LP64__)
+// Archs where mp_int_t == long, long != int
+#define UINT_FMT "%lu"
+#define INT_FMT "%ld"
+#elif defined(_WIN64)
+#define UINT_FMT "%llu"
+#define INT_FMT "%lld"
+#else
+// Archs where mp_int_t == int
+#define UINT_FMT "%u"
+#define INT_FMT "%d"
+#endif
+#endif // INT_FMT
+
+// Modifier for function which doesn't return
+#ifndef NORETURN
+#define NORETURN __attribute__((noreturn))
+#endif
+
+// Modifier for weak functions
+#ifndef MP_WEAK
+#define MP_WEAK __attribute__((weak))
+#endif
+
+// Modifier for functions which should be never inlined
+#ifndef MP_NOINLINE
+#define MP_NOINLINE __attribute__((noinline))
+#endif
+
+// Modifier for functions which should be always inlined
+#ifndef MP_ALWAYSINLINE
+#define MP_ALWAYSINLINE __attribute__((always_inline))
+#endif
+
+// Condition is likely to be true, to help branch prediction
+#ifndef MP_LIKELY
+#define MP_LIKELY(x) __builtin_expect((x), 1)
+#endif
+
+// Condition is likely to be false, to help branch prediction
+#ifndef MP_UNLIKELY
+#define MP_UNLIKELY(x) __builtin_expect((x), 0)
+#endif
+
+// To annotate that code is unreachable
+#ifndef MP_UNREACHABLE
+#if defined(__GNUC__)
+#define MP_UNREACHABLE __builtin_unreachable();
+#else
+#define MP_UNREACHABLE for (;;);
+#endif
+#endif
+
+// Explicitly annotate switch case fall throughs
+#if defined(__GNUC__) && __GNUC__ >= 7
+#define MP_FALLTHROUGH __attribute__((fallthrough));
+#else
+#define MP_FALLTHROUGH
+#endif
+
+#ifndef MP_HTOBE16
+#if MP_ENDIANNESS_LITTLE
+#define MP_HTOBE16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))
+#define MP_BE16TOH(x) MP_HTOBE16(x)
+#else
+#define MP_HTOBE16(x) (x)
+#define MP_BE16TOH(x) (x)
+#endif
+#endif
+
+#ifndef MP_HTOBE32
+#if MP_ENDIANNESS_LITTLE
+#define MP_HTOBE32(x) ((uint32_t)((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | (((x) >> 8) & 0xff00) | (((x) >> 24) & 0xff)))
+#define MP_BE32TOH(x) MP_HTOBE32(x)
+#else
+#define MP_HTOBE32(x) (x)
+#define MP_BE32TOH(x) (x)
+#endif
+#endif
+
+// Warning categories are by default implemented as strings, though
+// hook is left for a port to define them as something else.
+#if MICROPY_WARNINGS_CATEGORY
+#ifndef MP_WARN_CAT
+#define MP_WARN_CAT(x) #x
+#endif
+#else
+#undef MP_WARN_CAT
+#define MP_WARN_CAT(x) (NULL)
+#endif
+
+// Feature dependency check.
+#if MICROPY_PY_SYS_SETTRACE
+#if !MICROPY_PERSISTENT_CODE_SAVE
+#error "MICROPY_PY_SYS_SETTRACE requires MICROPY_PERSISTENT_CODE_SAVE to be enabled"
+#endif
+#if MICROPY_COMP_CONST
+#error "MICROPY_PY_SYS_SETTRACE requires MICROPY_COMP_CONST to be disabled"
+#endif
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPCONFIG_H
diff --git a/circuitpython/py/mperrno.h b/circuitpython/py/mperrno.h
new file mode 100644
index 0000000..f981916
--- /dev/null
+++ b/circuitpython/py/mperrno.h
@@ -0,0 +1,148 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPERRNO_H
+#define MICROPY_INCLUDED_PY_MPERRNO_H
+
+#include "py/mpconfig.h"
+#include "py/obj.h"
+
+#if MICROPY_USE_INTERNAL_ERRNO
+
+// MP_Exxx errno's are defined directly as numeric values
+// (Linux constants are used as a reference)
+
+#define MP_EPERM (1) // Operation not permitted
+#define MP_ENOENT (2) // No such file or directory
+#define MP_ESRCH (3) // No such process
+#define MP_EINTR (4) // Interrupted system call
+#define MP_EIO (5) // I/O error
+#define MP_ENXIO (6) // No such device or address
+#define MP_E2BIG (7) // Argument list too long
+#define MP_ENOEXEC (8) // Exec format error
+#define MP_EBADF (9) // Bad file number
+#define MP_ECHILD (10) // No child processes
+#define MP_EAGAIN (11) // Try again
+#define MP_ENOMEM (12) // Out of memory
+#define MP_EACCES (13) // Permission denied
+#define MP_EFAULT (14) // Bad address
+#define MP_ENOTBLK (15) // Block device required
+#define MP_EBUSY (16) // Device or resource busy
+#define MP_EEXIST (17) // File exists
+#define MP_EXDEV (18) // Cross-device link
+#define MP_ENODEV (19) // No such device
+#define MP_ENOTDIR (20) // Not a directory
+#define MP_EISDIR (21) // Is a directory
+#define MP_EINVAL (22) // Invalid argument
+#define MP_ENFILE (23) // File table overflow
+#define MP_EMFILE (24) // Too many open files
+#define MP_ENOTTY (25) // Not a typewriter
+#define MP_ETXTBSY (26) // Text file busy
+#define MP_EFBIG (27) // File too large
+#define MP_ENOSPC (28) // No space left on device
+#define MP_ESPIPE (29) // Illegal seek
+#define MP_EROFS (30) // Read-only file system
+#define MP_EMLINK (31) // Too many links
+#define MP_EPIPE (32) // Broken pipe
+#define MP_EDOM (33) // Math argument out of domain of func
+#define MP_ERANGE (34) // Math result not representable
+#define MP_EWOULDBLOCK MP_EAGAIN // Operation would block
+#define MP_EOPNOTSUPP (95) // Operation not supported on transport endpoint
+#define MP_EAFNOSUPPORT (97) // Address family not supported by protocol
+#define MP_EADDRINUSE (98) // Address already in use
+#define MP_ECONNABORTED (103) // Software caused connection abort
+#define MP_ECONNRESET (104) // Connection reset by peer
+#define MP_ENOBUFS (105) // No buffer space available
+#define MP_EISCONN (106) // Transport endpoint is already connected
+#define MP_ENOTCONN (107) // Transport endpoint is not connected
+#define MP_ETIMEDOUT (110) // Connection timed out
+#define MP_ECONNREFUSED (111) // Connection refused
+#define MP_EHOSTUNREACH (113) // No route to host
+#define MP_EALREADY (114) // Operation already in progress
+#define MP_EINPROGRESS (115) // Operation now in progress
+#define MP_ECANCELED (125) // Operation canceled
+
+#else
+
+// MP_Exxx errno's are defined in terms of system supplied ones
+
+#include <errno.h>
+
+#define MP_EPERM EPERM
+#define MP_ENOENT ENOENT
+#define MP_ESRCH ESRCH
+#define MP_EINTR EINTR
+#define MP_EIO EIO
+#define MP_ENXIO ENXIO
+#define MP_E2BIG E2BIG
+#define MP_ENOEXEC ENOEXEC
+#define MP_EBADF EBADF
+#define MP_ECHILD ECHILD
+#define MP_EAGAIN EAGAIN
+#define MP_ENOMEM ENOMEM
+#define MP_EACCES EACCES
+#define MP_EFAULT EFAULT
+#define MP_ENOTBLK ENOTBLK
+#define MP_EBUSY EBUSY
+#define MP_EEXIST EEXIST
+#define MP_EXDEV EXDEV
+#define MP_ENODEV ENODEV
+#define MP_ENOTDIR ENOTDIR
+#define MP_EISDIR EISDIR
+#define MP_EINVAL EINVAL
+#define MP_ENFILE ENFILE
+#define MP_EMFILE EMFILE
+#define MP_ENOTTY ENOTTY
+#define MP_ETXTBSY ETXTBSY
+#define MP_EFBIG EFBIG
+#define MP_ENOSPC ENOSPC
+#define MP_ESPIPE ESPIPE
+#define MP_EROFS EROFS
+#define MP_EMLINK EMLINK
+#define MP_EPIPE EPIPE
+#define MP_EDOM EDOM
+#define MP_ERANGE ERANGE
+#define MP_EWOULDBLOCK EWOULDBLOCK
+#define MP_EOPNOTSUPP EOPNOTSUPP
+#define MP_EAFNOSUPPORT EAFNOSUPPORT
+#define MP_EADDRINUSE EADDRINUSE
+#define MP_ECONNABORTED ECONNABORTED
+#define MP_ECONNRESET ECONNRESET
+#define MP_ENOBUFS ENOBUFS
+#define MP_EISCONN EISCONN
+#define MP_ENOTCONN ENOTCONN
+#define MP_ETIMEDOUT ETIMEDOUT
+#define MP_ECONNREFUSED ECONNREFUSED
+#define MP_EHOSTUNREACH EHOSTUNREACH
+#define MP_EALREADY EALREADY
+#define MP_EINPROGRESS EINPROGRESS
+#define MP_ECANCELED ECANCELED
+
+#endif
+
+qstr mp_errno_to_str(mp_obj_t errno_val);
+const char *mp_common_errno_to_str(mp_obj_t errno_val, char *buf, size_t len);
+
+#endif // MICROPY_INCLUDED_PY_MPERRNO_H
diff --git a/circuitpython/py/mphal.h b/circuitpython/py/mphal.h
new file mode 100644
index 0000000..13aae19
--- /dev/null
+++ b/circuitpython/py/mphal.h
@@ -0,0 +1,93 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPHAL_H
+#define MICROPY_INCLUDED_PY_MPHAL_H
+
+#include <stdint.h>
+#include "py/mpconfig.h"
+
+#ifdef MICROPY_MPHALPORT_H
+#include MICROPY_MPHALPORT_H
+#else
+#include <mphalport.h>
+#endif
+
+#ifndef mp_hal_stdio_poll
+uintptr_t mp_hal_stdio_poll(uintptr_t poll_flags);
+#endif
+
+#ifndef mp_hal_stdin_rx_chr
+int mp_hal_stdin_rx_chr(void);
+#endif
+
+#ifndef mp_hal_stdout_tx_str
+void mp_hal_stdout_tx_str(const char *str);
+#endif
+
+#ifndef mp_hal_stdout_tx_strn
+void mp_hal_stdout_tx_strn(const char *str, size_t len);
+#endif
+
+#ifndef mp_hal_stdout_tx_strn_cooked
+void mp_hal_stdout_tx_strn_cooked(const char *str, size_t len);
+#endif
+
+#ifndef mp_hal_delay_ms
+void mp_hal_delay_ms(mp_uint_t ms);
+#endif
+
+#ifndef mp_hal_delay_us
+void mp_hal_delay_us(mp_uint_t us);
+#endif
+
+#ifndef mp_hal_ticks_ms
+mp_uint_t mp_hal_ticks_ms(void);
+#endif
+
+#ifndef mp_hal_ticks_us
+mp_uint_t mp_hal_ticks_us(void);
+#endif
+
+#ifndef mp_hal_ticks_cpu
+mp_uint_t mp_hal_ticks_cpu(void);
+#endif
+
+#ifndef mp_hal_time_ns
+// Nanoseconds since the Epoch.
+uint64_t mp_hal_time_ns(void);
+#endif
+
+// If port HAL didn't define its own pin API, use generic
+// "virtual pin" API from the core.
+#ifndef mp_hal_pin_obj_t
+#define mp_hal_pin_obj_t mp_obj_t
+#define mp_hal_get_pin_obj(pin) (pin)
+#define mp_hal_pin_read(pin) mp_virtual_pin_read(pin)
+#define mp_hal_pin_write(pin, v) mp_virtual_pin_write(pin, v)
+#include "extmod/virtpin.h"
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPHAL_H
diff --git a/circuitpython/py/mpprint.c b/circuitpython/py/mpprint.c
new file mode 100644
index 0000000..df73587
--- /dev/null
+++ b/circuitpython/py/mpprint.c
@@ -0,0 +1,609 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "py/mphal.h"
+#include "py/mpprint.h"
+#include "py/obj.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include "py/formatfloat.h"
+#endif
+
+static const char pad_spaces[] = " ";
+static const char pad_zeroes[] = "0000000000000000";
+
+// Adapter matching the mp_print_strn_t signature that forwards output to
+// the port-supplied MP_PLAT_PRINT_STRN macro; the env argument is unused.
+STATIC void plat_print_strn(void *env, const char *str, size_t len) {
+    (void)env;
+    MP_PLAT_PRINT_STRN(str, len);
+}
+
+// Printer instance that writes via the platform's raw output channel.
+const mp_print_t mp_plat_print = {NULL, plat_print_strn};
+
+// Write a NUL-terminated string to the given printer.
+// Returns the number of characters written (the string's length).
+int mp_print_str(const mp_print_t *print, const char *str) {
+    size_t n = strlen(str);
+    if (n == 0) {
+        return 0;
+    }
+    print->print_strn(print->data, str, n);
+    return n;
+}
+
+// Print exactly len bytes of str, padded with the fill character to at
+// least width characters. flags selects left/center/right alignment
+// (PF_FLAG_LEFT_ADJUST / PF_FLAG_CENTER_ADJUST; default is right-adjust,
+// i.e. pad on the left). Returns the total characters written, padding
+// included.
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width) {
+    int left_pad = 0;
+    int right_pad = 0;
+    int pad = width - len;
+    int pad_size;
+    int total_chars_printed = 0;
+    const char *pad_chars;
+
+    // For the common fills (space and zero) use a preformatted run so the
+    // padding can be emitted in chunks rather than one char at a time.
+    if (!fill || fill == ' ') {
+        pad_chars = pad_spaces;
+        pad_size = sizeof(pad_spaces) - 1;
+    } else if (fill == '0') {
+        pad_chars = pad_zeroes;
+        pad_size = sizeof(pad_zeroes) - 1;
+    } else {
+        // Other pad characters are fairly unusual, so we'll take the hit
+        // and output them 1 at a time (pad_chars points at the local fill).
+        pad_chars = &fill;
+        pad_size = 1;
+    }
+
+    // Split the total pad between the two sides according to alignment.
+    if (flags & PF_FLAG_CENTER_ADJUST) {
+        left_pad = pad / 2;
+        right_pad = pad - left_pad;
+    } else if (flags & PF_FLAG_LEFT_ADJUST) {
+        right_pad = pad;
+    } else {
+        left_pad = pad;
+    }
+
+    if (left_pad > 0) {
+        total_chars_printed += left_pad;
+        while (left_pad > 0) {
+            // Emit at most pad_size pad characters per call.
+            int p = left_pad;
+            if (p > pad_size) {
+                p = pad_size;
+            }
+            print->print_strn(print->data, pad_chars, p);
+            left_pad -= p;
+        }
+    }
+    if (len) {
+        print->print_strn(print->data, str, len);
+        total_chars_printed += len;
+    }
+    if (right_pad > 0) {
+        total_chars_printed += right_pad;
+        while (right_pad > 0) {
+            int p = right_pad;
+            if (p > pad_size) {
+                p = pad_size;
+            }
+            print->print_strn(print->data, pad_chars, p);
+            right_pad -= p;
+        }
+    }
+    return total_chars_printed;
+}
+
+// 32-bits is 10 digits, add 3 for commas, 1 for sign, 1 for terminating null
+// We can use 16 characters for 32-bit and 32 characters for 64-bit
+#define INT_BUF_SIZE (sizeof(mp_int_t) * 4)
+
+// Our mp_vprintf function below does not support the '#' format modifier to
+// print the prefix of a non-base-10 number, so we don't need code for this.
+#define SUPPORT_INT_BASE_PREFIX (0)
+
+// This function is used exclusively by mp_vprintf to format ints.
+// It needs to be a separate function to mp_print_mp_int, since converting to a mp_int looses the MSB.
+// Format the machine integer x in the given base into a small stack
+// buffer (filled backwards from the end) and print it via mp_print_strn.
+// sgn non-zero means treat x as signed; base_char is the first letter
+// used for digits >= 10 ('a' or 'A'). Returns the characters written.
+// NOTE(review): the do/while below stops when the buffer is full
+// (b > buf), so values too large for INT_BUF_SIZE would be truncated
+// silently — presumably the buffer is sized so this cannot happen for
+// the bases used; confirm against INT_BUF_SIZE.
+STATIC int mp_print_int(const mp_print_t *print, mp_uint_t x, int sgn, int base, int base_char, int flags, char fill, int width) {
+    char sign = 0;
+    if (sgn) {
+        if ((mp_int_t)x < 0) {
+            sign = '-';
+            x = -x;
+        } else if (flags & PF_FLAG_SHOW_SIGN) {
+            sign = '+';
+        } else if (flags & PF_FLAG_SPACE_SIGN) {
+            sign = ' ';
+        }
+    }
+
+    char buf[INT_BUF_SIZE];
+    char *b = buf + INT_BUF_SIZE;
+
+    if (x == 0) {
+        *(--b) = '0';
+    } else {
+        // Emit digits least-significant first, moving b backwards.
+        do {
+            int c = x % base;
+            x /= base;
+            if (c >= 10) {
+                c += base_char - 10;
+            } else {
+                c += '0';
+            }
+            *(--b) = c;
+        } while (b > buf && x != 0);
+    }
+
+    #if SUPPORT_INT_BASE_PREFIX
+    char prefix_char = '\0';
+
+    if (flags & PF_FLAG_SHOW_PREFIX) {
+        if (base == 2) {
+            prefix_char = base_char + 'b' - 'a';
+        } else if (base == 8) {
+            prefix_char = base_char + 'o' - 'a';
+        } else if (base == 16) {
+            prefix_char = base_char + 'x' - 'a';
+        }
+    }
+    #endif
+
+    int len = 0;
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // Sign (and prefix) go out first so zero-padding lands after them.
+        if (sign) {
+            len += mp_print_strn(print, &sign, 1, flags, fill, 1);
+            width--;
+        }
+        #if SUPPORT_INT_BASE_PREFIX
+        if (prefix_char) {
+            len += mp_print_strn(print, "0", 1, flags, fill, 1);
+            len += mp_print_strn(print, &prefix_char, 1, flags, fill, 1);
+            width -= 2;
+        }
+        #endif
+    } else {
+        // Otherwise prepend prefix/sign into the digit buffer itself.
+        #if SUPPORT_INT_BASE_PREFIX
+        if (prefix_char && b > &buf[1]) {
+            *(--b) = prefix_char;
+            *(--b) = '0';
+        }
+        #endif
+        if (sign && b > buf) {
+            *(--b) = sign;
+        }
+    }
+
+    len += mp_print_strn(print, b, buf + INT_BUF_SIZE - b, flags, fill, width);
+    return len;
+}
+
+// Print a MicroPython integer object x (small int or big int) in the
+// given base, honouring sign/prefix/comma flags, field width, and
+// precision (minimum digit count, zero-padded). Delegates the digit
+// formatting to mp_obj_int_formatted, which may allocate a heap buffer
+// when the stack buffer is too small. Returns the characters written.
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec) {
+    // These are the only values for "base" that are required to be supported by this
+    // function, since Python only allows the user to format integers in these bases.
+    // If needed this function could be generalised to handle other values.
+    assert(base == 2 || base == 8 || base == 10 || base == 16);
+
+    if (!mp_obj_is_int(x)) {
+        // This will convert booleans to int, or raise an error for
+        // non-integer types.
+        x = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int(x));
+    }
+
+    // Right-adjusted zero fill behaves like a precision of `width`.
+    if ((flags & (PF_FLAG_LEFT_ADJUST | PF_FLAG_CENTER_ADJUST)) == 0 && fill == '0') {
+        if (prec > width) {
+            width = prec;
+        }
+        prec = 0;
+    }
+    // Build the sign/base prefix ("+0x" at most) into a small buffer.
+    char prefix_buf[4];
+    char *prefix = prefix_buf;
+
+    if (mp_obj_int_sign(x) >= 0) {
+        if (flags & PF_FLAG_SHOW_SIGN) {
+            *prefix++ = '+';
+        } else if (flags & PF_FLAG_SPACE_SIGN) {
+            *prefix++ = ' ';
+        }
+    }
+
+    if (flags & PF_FLAG_SHOW_PREFIX) {
+        if (base == 2) {
+            *prefix++ = '0';
+            *prefix++ = base_char + 'b' - 'a';
+        } else if (base == 8) {
+            *prefix++ = '0';
+            if (flags & PF_FLAG_SHOW_OCTAL_LETTER) {
+                *prefix++ = base_char + 'o' - 'a';
+            }
+        } else if (base == 16) {
+            *prefix++ = '0';
+            *prefix++ = base_char + 'x' - 'a';
+        }
+    }
+    *prefix = '\0';
+    int prefix_len = prefix - prefix_buf;
+    prefix = prefix_buf;
+
+    char comma = '\0';
+    if (flags & PF_FLAG_SHOW_COMMA) {
+        comma = ',';
+    }
+
+    // The size of this buffer is rather arbitrary. If it's not large
+    // enough, a dynamic one will be allocated.
+    char stack_buf[sizeof(mp_int_t) * 4];
+    char *buf = stack_buf;
+    size_t buf_size = sizeof(stack_buf);
+    size_t fmt_size = 0;
+    char *str;
+
+    if (prec > 1) {
+        flags |= PF_FLAG_PAD_AFTER_SIGN;
+    }
+    char sign = '\0';
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // We add the pad in this function, so since the pad goes after
+        // the sign & prefix, we format without a prefix
+        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
+            x, base, NULL, base_char, comma);
+        if (*str == '-') {
+            // Peel the '-' off so it can be printed before the padding.
+            sign = *str++;
+            fmt_size--;
+        }
+    } else {
+        str = mp_obj_int_formatted(&buf, &buf_size, &fmt_size,
+            x, base, prefix, base_char, comma);
+    }
+
+    int spaces_before = 0;
+    int spaces_after = 0;
+
+    if (prec > 1) {
+        // If prec was specified, then prec specifies the width to zero-pad the
+        // the number to. This zero-padded number then gets left or right
+        // aligned in width characters.
+
+        int prec_width = fmt_size; // The digits
+        if (prec_width < prec) {
+            prec_width = prec;
+        }
+        if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+            if (sign) {
+                prec_width++;
+            }
+            prec_width += prefix_len;
+        }
+        if (prec_width < width) {
+            if (flags & PF_FLAG_LEFT_ADJUST) {
+                spaces_after = width - prec_width;
+            } else {
+                spaces_before = width - prec_width;
+            }
+        }
+        // Digits themselves are always zero-padded right-adjusted to prec.
+        fill = '0';
+        flags &= ~PF_FLAG_LEFT_ADJUST;
+    }
+
+    int len = 0;
+    if (spaces_before) {
+        len += mp_print_strn(print, "", 0, 0, ' ', spaces_before);
+    }
+    if (flags & PF_FLAG_PAD_AFTER_SIGN) {
+        // pad after sign implies pad after prefix as well.
+        if (sign) {
+            len += mp_print_strn(print, &sign, 1, 0, 0, 1);
+            width--;
+        }
+        if (prefix_len) {
+            len += mp_print_strn(print, prefix, prefix_len, 0, 0, 1);
+            width -= prefix_len;
+        }
+    }
+    if (prec > 1) {
+        width = prec;
+    }
+
+    len += mp_print_strn(print, str, fmt_size, flags, fill, width);
+
+    if (spaces_after) {
+        len += mp_print_strn(print, "", 0, 0, ' ', spaces_after);
+    }
+
+    // Free the dynamic buffer if mp_obj_int_formatted had to allocate one.
+    if (buf != stack_buf) {
+        m_del(char, buf, buf_size);
+    }
+    return len;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Format the float f with mp_format_float (fmt is one of e/E/f/F/g/G)
+// into a fixed 32-byte buffer, optionally appending '%' and handling
+// pad-after-sign, then print it padded to width. Returns the characters
+// written. Values whose formatted form exceeds the buffer are clipped
+// by mp_format_float itself.
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec) {
+    char buf[32];
+    char sign = '\0';
+    int chrs = 0;
+
+    if (flags & PF_FLAG_SHOW_SIGN) {
+        sign = '+';
+    } else
+    if (flags & PF_FLAG_SPACE_SIGN) {
+        sign = ' ';
+    }
+
+    int len = mp_format_float(f, buf, sizeof(buf), fmt, prec, sign);
+
+    char *s = buf;
+
+    // Append '%' for percent-style formatting if there is room left.
+    if ((flags & PF_FLAG_ADD_PERCENT) && (size_t)(len + 1) < sizeof(buf)) {
+        buf[len++] = '%';
+        buf[len] = '\0';
+    }
+
+    // buf[0] < '0' returns true if the first character is space, + or -
+    if ((flags & PF_FLAG_PAD_AFTER_SIGN) && buf[0] < '0') {
+        // We have a sign character: print it first so zero padding goes
+        // between the sign and the digits.
+        s++;
+        chrs += mp_print_strn(print, &buf[0], 1, 0, 0, 1);
+        width--;
+        len--;
+    }
+
+    chrs += mp_print_strn(print, s, len, flags, fill, width);
+
+    return chrs;
+}
+#endif
+
+// Print str clipped to at most prec characters (when prec is
+// non-negative), delegating fill/width handling to mp_print_strn.
+static int print_str_common(const mp_print_t *print, const char *str, int prec, size_t len, int flags, int fill, int width) {
+    size_t limit = (prec >= 0) ? (size_t)prec : len;
+    if (limit < len) {
+        len = limit;
+    }
+    return mp_print_strn(print, str, len, flags, fill, width);
+}
+
+// printf-style formatted output to the given printer; see mp_vprintf
+// for the supported subset of format specifiers. Returns the number of
+// characters written.
+int mp_printf(const mp_print_t *print, const char *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    int n_printed = mp_vprintf(print, fmt, args);
+    va_end(args);
+    return n_printed;
+}
+
+// Minimal vprintf-style formatter. Supports the flags - + space ! 0,
+// a decimal width, a precision (including '*'), the 'l' length modifier
+// (meaningful only on LP64 targets; 'll' only under OBJ_REPR_D/_WIN64),
+// and the conversions: %% b c q S s d u x X p P, plus e/E/f/F/g/G when
+// MICROPY_PY_BUILTINS_FLOAT is enabled. '#' is NOT supported (see
+// SUPPORT_INT_BASE_PREFIX above). Returns the characters written.
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args) {
+    int chrs = 0;
+    for (;;) {
+        {
+            // Emit the literal run up to the next '%' (or end of string).
+            const char *f = fmt;
+            while (*f != '\0' && *f != '%') {
+                ++f; // XXX UTF8 advance char
+            }
+            if (f > fmt) {
+                print->print_strn(print->data, fmt, f - fmt);
+                chrs += f - fmt;
+                fmt = f;
+            }
+        }
+
+        if (*fmt == '\0') {
+            break;
+        }
+
+        // move past % character
+        ++fmt;
+
+        // parse flags, if they exist
+        int flags = 0;
+        char fill = ' ';
+        while (*fmt != '\0') {
+            if (*fmt == '-') {
+                flags |= PF_FLAG_LEFT_ADJUST;
+            } else if (*fmt == '+') {
+                flags |= PF_FLAG_SHOW_SIGN;
+            } else if (*fmt == ' ') {
+                flags |= PF_FLAG_SPACE_SIGN;
+            } else if (*fmt == '!') {
+                flags |= PF_FLAG_NO_TRAILZ;
+            } else if (*fmt == '0') {
+                flags |= PF_FLAG_PAD_AFTER_SIGN;
+                fill = '0';
+            } else {
+                break;
+            }
+            ++fmt;
+        }
+
+        // parse width, if it exists
+        int width = 0;
+        for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+            width = width * 10 + *fmt - '0';
+        }
+
+        // parse precision, if it exists
+        int prec = -1;
+        if (*fmt == '.') {
+            ++fmt;
+            if (*fmt == '*') {
+                // '*' takes the precision from the argument list.
+                ++fmt;
+                prec = va_arg(args, int);
+            } else {
+                prec = 0;
+                for (; '0' <= *fmt && *fmt <= '9'; ++fmt) {
+                    prec = prec * 10 + *fmt - '0';
+                }
+            }
+            if (prec < 0) {
+                prec = 0;
+            }
+        }
+
+        // parse long specifiers (only for LP64 model where they make a difference)
+        #ifndef __LP64__
+        const
+        #endif
+        bool long_arg = false;
+        if (*fmt == 'l') {
+            ++fmt;
+            #ifdef __LP64__
+            long_arg = true;
+            #endif
+        }
+
+        if (*fmt == '\0') {
+            break;
+        }
+
+        switch (*fmt) {
+            case 'b':
+                // Boolean: prints "true"/"false" from an int argument.
+                if (va_arg(args, int)) {
+                    chrs += mp_print_strn(print, "true", 4, flags, fill, width);
+                } else {
+                    chrs += mp_print_strn(print, "false", 5, flags, fill, width);
+                }
+                break;
+            case 'c': {
+                char str = va_arg(args, int);
+                chrs += mp_print_strn(print, &str, 1, flags, fill, width);
+                break;
+            }
+            case 'q': {
+                // qstr (interned string) argument.
+                qstr qst = va_arg(args, qstr);
+                size_t len;
+                const char *str = (const char *)qstr_data(qst, &len);
+                chrs += print_str_common(print, str, prec, len, flags, fill, width);
+                break;
+            }
+            case 'S': {
+                // CircuitPython compressed string: decompress into a VLA
+                // (decompress_length includes the terminating NUL).
+                compressed_string_t *arg = va_arg(args, compressed_string_t *);
+                size_t len_with_nul = decompress_length(arg);
+                size_t len = len_with_nul - 1;
+                char str[len_with_nul];
+                decompress(arg, str);
+                chrs += print_str_common(print, str, prec, len, flags, fill, width);
+                break;
+            }
+            case 's': {
+                const char *str = va_arg(args, const char *);
+                #ifndef NDEBUG
+                // With debugging enabled, catch printing of null string pointers
+                if (str == NULL) {
+                    str = "(null)";
+                }
+                #endif
+                size_t len = strlen(str);
+                if (prec >= 0 && (size_t)prec < len) {
+                    len = prec;
+                }
+                chrs += mp_print_strn(print, str, len, flags, fill, width);
+                break;
+            }
+            case 'd': {
+                mp_int_t val;
+                if (long_arg) {
+                    val = va_arg(args, long int);
+                } else {
+                    val = va_arg(args, int);
+                }
+                chrs += mp_print_int(print, val, 1, 10, 'a', flags, fill, width);
+                break;
+            }
+            case 'u':
+            case 'x':
+            case 'X': {
+                int base = 16 - ((*fmt + 1) & 6); // maps char u/x/X to base 10/16/16
+                char fmt_c = (*fmt & 0xf0) - 'P' + 'A'; // maps char u/x/X to char a/a/A
+                mp_uint_t val;
+                if (long_arg) {
+                    val = va_arg(args, unsigned long int);
+                } else {
+                    val = va_arg(args, unsigned int);
+                }
+                chrs += mp_print_int(print, val, 0, base, fmt_c, flags, fill, width);
+                break;
+            }
+            case 'p':
+            case 'P': // don't bother to handle upcase for 'P'
+                // Use unsigned long int to work on both ILP32 and LP64 systems
+                #if SUPPORT_INT_BASE_PREFIX
+                chrs += mp_print_int(print, va_arg(args, unsigned long int), 0, 16, 'a', flags | PF_FLAG_SHOW_PREFIX, fill, width);
+                #else
+                print->print_strn(print->data, "0x", 2);
+                chrs += mp_print_int(print, va_arg(args, unsigned long int), 0, 16, 'a', flags, fill, width) + 2;
+                #endif
+                break;
+            #if MICROPY_PY_BUILTINS_FLOAT
+            case 'e':
+            case 'E':
+            case 'f':
+            case 'F':
+            case 'g':
+            case 'G': {
+                #if ((MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT) || (MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE))
+                // Varargs always promote float to double; narrow back here.
+                mp_float_t f = (mp_float_t)va_arg(args, double);
+                chrs += mp_print_float(print, f, *fmt, flags, fill, width, prec);
+                #else
+                #error Unknown MICROPY FLOAT IMPL
+                #endif
+                break;
+            }
+            #endif
+            // Because 'l' is eaten above, another 'l' means %ll. We need to support
+            // this length specifier for OBJ_REPR_D (64-bit NaN boxing).
+            // TODO Either enable this unconditionally, or provide a specific config var.
+            #if (MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D) || defined(_WIN64)
+            case 'l': {
+                unsigned long long int arg_value = va_arg(args, unsigned long long int);
+                ++fmt;
+                assert(*fmt == 'u' || *fmt == 'd' || !"unsupported fmt char");
+                chrs += mp_print_int(print, arg_value, *fmt == 'd', 10, 'a', flags, fill, width);
+                break;
+            }
+            #endif
+            default:
+                // if it's not %% then it's an unsupported format character
+                assert(*fmt == '%' || !"unsupported fmt char");
+                print->print_strn(print->data, fmt, 1);
+                chrs += 1;
+                break;
+        }
+        ++fmt;
+    }
+    return chrs;
+}
+
+// printf-style output whose format string is stored compressed; simply
+// collects the varargs and defers to mp_vcprintf.
+int mp_cprintf(const mp_print_t *print, const compressed_string_t *compressed_fmt, ...) {
+    va_list args;
+    va_start(args, compressed_fmt);
+    int result = mp_vcprintf(print, compressed_fmt, args);
+    va_end(args);
+    return result;
+}
+
+// Decompress compressed_fmt into a temporary stack buffer (a VLA sized
+// by decompress_length, which includes the NUL) and format via mp_vprintf.
+int mp_vcprintf(const mp_print_t *print, const compressed_string_t *compressed_fmt, va_list args) {
+    char fmt[decompress_length(compressed_fmt)];
+    // TODO: Optimise this to format-while-decompressing (and not require the temp stack space).
+    decompress(compressed_fmt, fmt);
+
+    return mp_vprintf(print, fmt, args);
+}
diff --git a/circuitpython/py/mpprint.h b/circuitpython/py/mpprint.h
new file mode 100644
index 0000000..616f21a
--- /dev/null
+++ b/circuitpython/py/mpprint.h
@@ -0,0 +1,88 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPPRINT_H
+#define MICROPY_INCLUDED_PY_MPPRINT_H
+
+#include "py/mpconfig.h"
+
+#define PF_FLAG_LEFT_ADJUST (0x001)
+#define PF_FLAG_SHOW_SIGN (0x002)
+#define PF_FLAG_SPACE_SIGN (0x004)
+#define PF_FLAG_NO_TRAILZ (0x008)
+#define PF_FLAG_SHOW_PREFIX (0x010)
+#define PF_FLAG_SHOW_COMMA (0x020)
+#define PF_FLAG_PAD_AFTER_SIGN (0x040)
+#define PF_FLAG_CENTER_ADJUST (0x080)
+#define PF_FLAG_ADD_PERCENT (0x100)
+#define PF_FLAG_SHOW_OCTAL_LETTER (0x200)
+
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+#define MP_PYTHON_PRINTER &mp_sys_stdout_print
+#else
+#define MP_PYTHON_PRINTER &mp_plat_print
+#endif
+
+typedef void (*mp_print_strn_t)(void *data, const char *str, size_t len);
+
+typedef struct _mp_print_t {
+ void *data;
+ mp_print_strn_t print_strn;
+} mp_print_t;
+
+typedef struct _mp_print_ext_t {
+ mp_print_t base;
+ const char *item_separator;
+ const char *key_separator;
+}mp_print_ext_t;
+
+#define MP_PRINT_GET_EXT(print) ((mp_print_ext_t *)print)
+
+// All (non-debug) prints go through one of the two interfaces below.
+// 1) Wrapper for platform print function, which wraps MP_PLAT_PRINT_STRN.
+extern const mp_print_t mp_plat_print;
+#if MICROPY_PY_IO && MICROPY_PY_SYS_STDFILES
+// 2) Wrapper for printing to sys.stdout.
+extern const mp_print_t mp_sys_stdout_print;
+#endif
+
+int mp_print_str(const mp_print_t *print, const char *str);
+int mp_print_strn(const mp_print_t *print, const char *str, size_t len, int flags, char fill, int width);
+#if MICROPY_PY_BUILTINS_FLOAT
+int mp_print_float(const mp_print_t *print, mp_float_t f, char fmt, int flags, char fill, int width, int prec);
+#endif
+
+int mp_printf(const mp_print_t *print, const char *fmt, ...);
+#ifdef va_start
+int mp_vprintf(const mp_print_t *print, const char *fmt, va_list args);
+#endif
+
+struct compressed_string;
+int mp_cprintf(const mp_print_t *print, const struct compressed_string *compressed_fmt, ...);
+#ifdef va_start
+int mp_vcprintf(const mp_print_t *print, const struct compressed_string *compressed_fmt, va_list args);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPPRINT_H
diff --git a/circuitpython/py/mpstate.c b/circuitpython/py/mpstate.c
new file mode 100644
index 0000000..b3957cc
--- /dev/null
+++ b/circuitpython/py/mpstate.c
@@ -0,0 +1,34 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+#include "supervisor/linker.h"
+
+#if MICROPY_DYNAMIC_COMPILER
+// Runtime-configurable compiler settings; present only when the dynamic
+// compiler feature is enabled.
+mp_dynamic_compiler_t mp_dynamic_compiler = {0};
+#endif
+
+// The single global instance of the VM/runtime state, optionally placed
+// into DTCM BSS by the supervisor's linker macro.
+mp_state_ctx_t PLACE_IN_DTCM_BSS(mp_state_ctx);
diff --git a/circuitpython/py/mpstate.h b/circuitpython/py/mpstate.h
new file mode 100644
index 0000000..bfbc29d
--- /dev/null
+++ b/circuitpython/py/mpstate.h
@@ -0,0 +1,310 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPSTATE_H
+#define MICROPY_INCLUDED_PY_MPSTATE_H
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/mpthread.h"
+#include "py/misc.h"
+#include "py/nlr.h"
+#include "py/obj.h"
+#include "py/objlist.h"
+#include "py/objexcept.h"
+
+// This file contains structures defining the state of the MicroPython
+// memory system, runtime and virtual machine. The state is a global
+// variable, but in the future it is hoped that the state can become local.
+
+// This structure contains dynamic configuration for the compiler.
+#if MICROPY_DYNAMIC_COMPILER
+typedef struct mp_dynamic_compiler_t {
+    uint8_t small_int_bits; // must be <= host small_int_bits
+    bool py_builtins_str_unicode; // presumably whether target str is unicode-capable -- confirm against MICROPY_PY_BUILTINS_STR_UNICODE
+    uint8_t native_arch; // target architecture for native emitters
+    uint8_t nlr_buf_num_regs; // registers saved in the target's nlr_buf_t
+} mp_dynamic_compiler_t;
+extern mp_dynamic_compiler_t mp_dynamic_compiler;
+#endif
+
+// These are the values for sched_state
+#define MP_SCHED_IDLE (1)
+#define MP_SCHED_LOCKED (-1)
+#define MP_SCHED_PENDING (0) // 0 so it's a quick check in the VM
+
+// One entry in the scheduler queue: a callable plus the single argument it
+// will be invoked with when the scheduler runs it.
+typedef struct _mp_sched_item_t {
+    mp_obj_t func;
+    mp_obj_t arg;
+} mp_sched_item_t;
+
+// This structure hold information about the memory allocation system.
+typedef struct _mp_state_mem_t {
+    // optional counters for total/current/peak heap usage
+    #if MICROPY_MEM_STATS
+    size_t total_bytes_allocated;
+    size_t current_bytes_allocated;
+    size_t peak_bytes_allocated;
+    #endif
+
+    // allocation table (ATB) tracking the state of each block in the GC heap
+    byte *gc_alloc_table_start;
+    size_t gc_alloc_table_byte_len;
+    #if MICROPY_ENABLE_FINALISER
+    byte *gc_finaliser_table_start;
+    #endif
+    // bounds of the heap region the GC hands out blocks from
+    byte *gc_pool_start;
+    byte *gc_pool_end;
+
+    // NOTE(review): presumably the boundary of the long-lived allocation
+    // area used by CircuitPython's GC -- confirm in gc.c
+    void *gc_lowest_long_lived_ptr;
+
+    // work stack used while the GC scans references; gc_stack_overflow is a
+    // flag, presumably set when the stack fills -- confirm in gc.c
+    int gc_stack_overflow;
+    MICROPY_GC_STACK_ENTRY_TYPE gc_stack[MICROPY_ALLOC_GC_STACK_SIZE];
+
+    // This variable controls auto garbage collection. If set to false then the
+    // GC won't automatically run when gc_alloc can't find enough blocks. But
+    // you can still allocate/free memory and also explicitly call gc_collect.
+    bool gc_auto_collect_enabled;
+
+    #if MICROPY_GC_ALLOC_THRESHOLD
+    size_t gc_alloc_amount;
+    size_t gc_alloc_threshold;
+    #endif
+
+    // cached indices into the ATB, presumably to speed the search for free
+    // blocks -- confirm in gc.c
+    size_t gc_first_free_atb_index[MICROPY_ATB_INDICES];
+    size_t gc_last_free_atb_index;
+
+    #if MICROPY_PY_GC_COLLECT_RETVAL
+    size_t gc_collected;
+    #endif
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make the GC thread-safe.
+    mp_thread_mutex_t gc_mutex;
+    #endif
+
+    // NOTE(review): appears to be CircuitPython-specific pointers that must
+    // survive across VM restarts -- verify against supervisor code
+    void **permanent_pointers;
+} mp_state_mem_t;
+
+// This structure hold runtime and VM information. It includes a section
+// which contains root pointers that must be scanned by the GC.
+typedef struct _mp_state_vm_t {
+    //
+    // CONTINUE ROOT POINTER SECTION
+    // This must start at the start of this structure and follows
+    // the state in the mp_state_thread_t structure, continuing
+    // the root pointer section from there.
+    //
+
+    // most recently created pool of interned strings (qstrs)
+    qstr_pool_t *last_pool;
+
+    // non-heap memory for creating a traceback if we can't allocate RAM
+    mp_obj_traceback_t mp_emergency_traceback_obj;
+
+    // non-heap memory for creating an exception if we can't allocate RAM
+    mp_obj_exception_t mp_emergency_exception_obj;
+
+    // memory for exception arguments if we can't allocate RAM
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    #if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+    // statically allocated buf (needs to be aligned to mp_obj_t)
+    mp_obj_t mp_emergency_exception_buf[MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE / sizeof(mp_obj_t)];
+    #else
+    // dynamically allocated buf
+    byte *mp_emergency_exception_buf;
+    #endif
+    #endif
+
+    #if MICROPY_KBD_EXCEPTION
+    // exception object of type KeyboardInterrupt
+    mp_obj_exception_t mp_kbd_exception;
+    #endif
+
+    // exception object of type ReloadException
+    mp_obj_exception_t mp_reload_exception;
+
+    // dictionary with loaded modules (may be exposed as sys.modules)
+    mp_obj_dict_t mp_loaded_modules_dict;
+
+    // pending scheduled callbacks; managed together with sched_state,
+    // sched_len and sched_idx further down in this structure
+    #if MICROPY_ENABLE_SCHEDULER
+    mp_sched_item_t sched_queue[MICROPY_SCHEDULER_DEPTH];
+    #endif
+
+    // current exception being handled, for sys.exc_info()
+    #if MICROPY_PY_SYS_EXC_INFO
+    mp_obj_base_t *cur_exception;
+    #endif
+
+    #if MICROPY_PY_SYS_ATEXIT
+    // exposed through sys.atexit function
+    mp_obj_t sys_exitfunc;
+    #endif
+
+    // dictionary for the __main__ module
+    mp_obj_dict_t dict_main;
+
+    #if MICROPY_PY_SYS
+    // If MICROPY_PY_SYS_PATH_ARGV_DEFAULTS is not enabled then these two lists
+    // must be initialised after the call to mp_init.
+    mp_obj_list_t mp_sys_path_obj;
+    mp_obj_list_t mp_sys_argv_obj;
+    #endif
+
+    // dictionary for overridden builtins
+    #if MICROPY_CAN_OVERRIDE_BUILTINS
+    mp_obj_dict_t *mp_module_builtins_override_dict;
+    #endif
+
+    #if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
+    // An mp_obj_list_t that tracks relocated native code to prevent the GC from reclaiming them.
+    mp_obj_t track_reloc_code_list;
+    #endif
+
+    // include any root pointers defined by a port
+    MICROPY_PORT_ROOT_POINTERS
+
+    // root pointers for extmod
+
+    #if MICROPY_REPL_EVENT_DRIVEN
+    vstr_t *repl_line;
+    #endif
+
+    #if MICROPY_VFS
+    struct _mp_vfs_mount_t *vfs_cur;
+    struct _mp_vfs_mount_t *vfs_mount_table;
+    #endif
+
+    //
+    // END ROOT POINTER SECTION
+    ////////////////////////////////////////////////////////////
+
+    // pointer and sizes to store interned string data
+    // (qstr_last_chunk can be root pointer but is also stored in qstr pool)
+    char *qstr_last_chunk;
+    size_t qstr_last_alloc;
+    size_t qstr_last_used;
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make qstr interning thread-safe.
+    mp_thread_mutex_t qstr_mutex;
+    #endif
+
+    #if MICROPY_ENABLE_COMPILER
+    mp_uint_t mp_optimise_value;
+    #if MICROPY_EMIT_NATIVE
+    uint8_t default_emit_opt; // one of MP_EMIT_OPT_xxx
+    #endif
+    #endif
+
+    // size of the emergency exception buf, if it's dynamically allocated
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF && MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE == 0
+    mp_int_t mp_emergency_exception_buf_size;
+    #endif
+
+    // sched_state takes the MP_SCHED_* values defined near the top of this
+    // file; sched_len/sched_idx track the contents of sched_queue above
+    #if MICROPY_ENABLE_SCHEDULER
+    volatile int16_t sched_state;
+    uint8_t sched_len;
+    uint8_t sched_idx;
+    #endif
+
+    #if MICROPY_PY_THREAD_GIL
+    // This is a global mutex used to make the VM/runtime thread-safe.
+    mp_thread_mutex_t gil_mutex;
+    #endif
+
+    #if MICROPY_OPT_MAP_LOOKUP_CACHE
+    // See mp_map_lookup.
+    uint8_t map_lookup_cache[MICROPY_OPT_MAP_LOOKUP_CACHE_SIZE];
+    #endif
+} mp_state_vm_t;
+
+// This structure holds state that is specific to a given thread.
+// Everything in this structure is scanned for root pointers.
+typedef struct _mp_state_thread_t {
+    // Stack top at the start of program
+    char *stack_top;
+
+    #if MICROPY_MAX_STACK_USAGE
+    char *stack_bottom;
+    #endif
+
+    #if MICROPY_STACK_CHECK
+    size_t stack_limit;
+    #endif
+
+    // optional separate stack for Python state (MICROPY_ENABLE_PYSTACK)
+    #if MICROPY_ENABLE_PYSTACK
+    uint8_t *pystack_start;
+    uint8_t *pystack_end;
+    uint8_t *pystack_cur;
+    #endif
+
+    // Locking of the GC is done per thread.
+    // Nesting count; presumably collection is disallowed while non-zero.
+    uint16_t gc_lock_depth;
+
+    ////////////////////////////////////////////////////////////
+    // START ROOT POINTER SECTION
+    // Everything that needs GC scanning must start here, and
+    // is followed by state in the mp_state_vm_t structure.
+    //
+
+    mp_obj_dict_t *dict_locals;
+    mp_obj_dict_t *dict_globals;
+
+    // top of the chain of nlr (non-local return) buffers for this thread
+    nlr_buf_t *nlr_top;
+
+    // pending exception object (MP_OBJ_NULL if not pending)
+    volatile mp_obj_t mp_pending_exception;
+
+    // If MP_OBJ_STOP_ITERATION is propagated then this holds its argument.
+    mp_obj_t stop_iteration_arg;
+
+    #if MICROPY_PY_SYS_SETTRACE
+    mp_obj_t prof_trace_callback;
+    bool prof_callback_is_executing;
+    struct _mp_code_state_t *current_code_state;
+    #endif
+} mp_state_thread_t;
+
+// This structure combines the above 3 structures.
+// The order of the entries are important for root pointer scanning in the GC to work.
+typedef struct _mp_state_ctx_t {
+    mp_state_thread_t thread; // must come first: its root pointers continue into vm
+    mp_state_vm_t vm;
+    mp_state_mem_t mem; // not scanned for roots; placed last
+} mp_state_ctx_t;
+
+extern mp_state_ctx_t mp_state_ctx;
+
+#define MP_STATE_VM(x) (mp_state_ctx.vm.x)
+#define MP_STATE_MEM(x) (mp_state_ctx.mem.x)
+#define MP_STATE_MAIN_THREAD(x) (mp_state_ctx.thread.x)
+
+#if MICROPY_PY_THREAD
+extern mp_state_thread_t *mp_thread_get_state(void);
+#define MP_STATE_THREAD(x) (mp_thread_get_state()->x)
+#else
+#define MP_STATE_THREAD(x) MP_STATE_MAIN_THREAD(x)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPSTATE_H
diff --git a/circuitpython/py/mpthread.h b/circuitpython/py/mpthread.h
new file mode 100644
index 0000000..fa9e054
--- /dev/null
+++ b/circuitpython/py/mpthread.h
@@ -0,0 +1,61 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPTHREAD_H
+#define MICROPY_INCLUDED_PY_MPTHREAD_H
+
+#include "py/mpconfig.h"
+
+#if MICROPY_PY_THREAD
+
+struct _mp_state_thread_t;
+
+#ifdef MICROPY_MPTHREADPORT_H
+#include MICROPY_MPTHREADPORT_H
+#else
+#include <mpthreadport.h>
+#endif
+
+struct _mp_state_thread_t *mp_thread_get_state(void);
+void mp_thread_set_state(struct _mp_state_thread_t *state);
+void mp_thread_create(void *(*entry)(void *), void *arg, size_t *stack_size);
+void mp_thread_start(void);
+void mp_thread_finish(void);
+void mp_thread_mutex_init(mp_thread_mutex_t *mutex);
+int mp_thread_mutex_lock(mp_thread_mutex_t *mutex, int wait);
+void mp_thread_mutex_unlock(mp_thread_mutex_t *mutex);
+
+#endif // MICROPY_PY_THREAD
+
+#if MICROPY_PY_THREAD && MICROPY_PY_THREAD_GIL
+#include "py/mpstate.h"
+#define MP_THREAD_GIL_ENTER() mp_thread_mutex_lock(&MP_STATE_VM(gil_mutex), 1)
+#define MP_THREAD_GIL_EXIT() mp_thread_mutex_unlock(&MP_STATE_VM(gil_mutex))
+#else
+#define MP_THREAD_GIL_ENTER()
+#define MP_THREAD_GIL_EXIT()
+#endif
+
+#endif // MICROPY_INCLUDED_PY_MPTHREAD_H
diff --git a/circuitpython/py/mpz.c b/circuitpython/py/mpz.c
new file mode 100644
index 0000000..b52e051
--- /dev/null
+++ b/circuitpython/py/mpz.c
@@ -0,0 +1,1750 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpz.h"
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+
+#define DIG_SIZE (MPZ_DIG_SIZE)
+#define DIG_MASK ((MPZ_LONG_1 << DIG_SIZE) - 1)
+#define DIG_MSB (MPZ_LONG_1 << (DIG_SIZE - 1))
+#define DIG_BASE (MPZ_LONG_1 << DIG_SIZE)
+
+/*
+ mpz is an arbitrary precision integer type with a public API.
+
+ mpn functions act on non-negative integers represented by an array of generalised
+ digits (eg a word per digit). You also need to specify separately the length of the
+ array. There is no public API for mpn. Rather, the functions are used by mpz to
+ implement its features.
+
+ Integer values are stored little endian (first digit is first in memory).
+
+ Definition of normalise: ?
+*/
+
+/* Strips high (most significant) zero digits from the array [oidig, idig).
+   oidig points at the least significant digit, idig one past the last digit.
+   Returns the normalised length: the number of digits up to and including the
+   most significant non-zero digit (0 if all digits are zero). */
+STATIC size_t mpn_remove_trailing_zeros(mpz_dig_t *oidig, mpz_dig_t *idig) {
+    // Walk down from the high end; idig[-1] is the current top digit.
+    // Unlike the decrement-first formulation, idig never moves below oidig,
+    // so a pointer before the start of the array is never formed (doing so
+    // is undefined behaviour per C11 6.5.6).
+    while (idig > oidig && idig[-1] == 0) {
+        --idig;
+    }
+    return idig - oidig;
+}
+
+/* compares i with j
+   returns sign(i - j)
+   assumes i, j are normalised
+*/
+STATIC int mpn_cmp(const mpz_dig_t *idig, size_t ilen, const mpz_dig_t *jdig, size_t jlen) {
+    // a normalised value with more digits is strictly larger in magnitude
+    if (ilen != jlen) {
+        return ilen < jlen ? -1 : 1;
+    }
+
+    // same length: compare digit by digit from the most significant end
+    while (ilen > 0) {
+        --ilen;
+        mpz_dig_t di = idig[ilen];
+        mpz_dig_t dj = jdig[ilen];
+        if (di != dj) {
+            return di < dj ? -1 : 1;
+        }
+    }
+
+    return 0;
+}
+
+/* computes i = j << n
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j; assumes n > 0
+   can have i, j pointing to same memory
+*/
+STATIC size_t mpn_shl(mpz_dig_t *idig, mpz_dig_t *jdig, size_t jlen, mp_uint_t n) {
+    // Split the shift into whole digits (n_whole, rounded up) and a
+    // within-digit bit count n_part mapped into the range (0, DIG_SIZE];
+    // using DIG_SIZE instead of 0 keeps the (DIG_SIZE - n_part) shifts
+    // below in range (a shift by the full width would be UB).
+    mp_uint_t n_whole = (n + DIG_SIZE - 1) / DIG_SIZE;
+    mp_uint_t n_part = n % DIG_SIZE;
+    if (n_part == 0) {
+        n_part = DIG_SIZE;
+    }
+
+    // start from the high end of the digit arrays
+    idig += jlen + n_whole - 1;
+    jdig += jlen - 1;
+
+    // shift the digits
+    // d carries each source digit plus the low bits of its predecessor,
+    // working in the wider mpz_dbl_dig_t type
+    mpz_dbl_dig_t d = 0;
+    for (size_t i = jlen; i > 0; i--, idig--, jdig--) {
+        d |= *jdig;
+        *idig = (d >> (DIG_SIZE - n_part)) & DIG_MASK;
+        d <<= DIG_SIZE;
+    }
+
+    // store remaining bits
+    *idig = (d >> (DIG_SIZE - n_part)) & DIG_MASK;
+    idig -= n_whole - 1;
+    // the lowest n_whole - 1 digits of the result are all zero
+    memset(idig, 0, (n_whole - 1) * sizeof(mpz_dig_t));
+
+    // work out length of result
+    // (the top digit may be zero if the shift did not spill into it)
+    jlen += n_whole;
+    while (jlen != 0 && idig[jlen - 1] == 0) {
+        jlen--;
+    }
+
+    // return length of result
+    return jlen;
+}
+
+/* computes i = j >> n
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j; assumes n > 0
+   can have i, j pointing to same memory
+*/
+STATIC size_t mpn_shr(mpz_dig_t *idig, mpz_dig_t *jdig, size_t jlen, mp_uint_t n) {
+    // split the shift into whole-digit and within-digit parts
+    mp_uint_t n_whole = n / DIG_SIZE;
+    mp_uint_t n_part = n % DIG_SIZE;
+
+    // shifting away at least as many digits as j has yields zero
+    if (n_whole >= jlen) {
+        return 0;
+    }
+
+    // drop the low n_whole digits of j entirely
+    jdig += n_whole;
+    jlen -= n_whole;
+
+    for (size_t i = jlen; i > 0; i--, idig++, jdig++) {
+        // combine the current digit with the low bits of the next-higher
+        // digit (if any), then shift out n_part bits
+        mpz_dbl_dig_t d = *jdig;
+        if (i > 1) {
+            d |= (mpz_dbl_dig_t)jdig[1] << DIG_SIZE;
+        }
+        d >>= n_part;
+        *idig = d & DIG_MASK;
+    }
+
+    // idig now points one past the result; the top digit may have become
+    // zero, in which case the normalised length shrinks by one
+    if (idig[-1] == 0) {
+        jlen--;
+    }
+
+    return jlen;
+}
+
+/* computes i = j + k
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_add(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *result_start = idig;
+    mpz_dbl_dig_t acc = 0;
+
+    // jlen now counts only the digits of j extending beyond k
+    jlen -= klen;
+
+    // sum the digits common to both operands, propagating the carry
+    while (klen > 0) {
+        acc += (mpz_dbl_dig_t)*jdig + (mpz_dbl_dig_t)*kdig;
+        *idig = acc & DIG_MASK;
+        acc >>= DIG_SIZE;
+        --klen;
+        ++idig;
+        ++jdig;
+        ++kdig;
+    }
+
+    // copy the remaining high digits of j, still propagating the carry
+    while (jlen > 0) {
+        acc += *jdig;
+        *idig = acc & DIG_MASK;
+        acc >>= DIG_SIZE;
+        --jlen;
+        ++idig;
+        ++jdig;
+    }
+
+    // a final carry extends the result by one digit
+    if (acc != 0) {
+        *idig++ = acc;
+    }
+
+    return idig - result_start;
+}
+
+/* computes i = j - k
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes j >= k
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_sub(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *oidig = idig;
+    // borrow is signed: after the right shift below it is 0 or -1, which is
+    // exactly the value to fold into the next digit (relies on arithmetic
+    // right shift of negative values, as the surrounding code does)
+    mpz_dbl_dig_signed_t borrow = 0;
+
+    // jlen now counts only the digits of j extending beyond k
+    jlen -= klen;
+
+    // subtract the digits common to both operands
+    for (; klen > 0; --klen, ++idig, ++jdig, ++kdig) {
+        borrow += (mpz_dbl_dig_t)*jdig - (mpz_dbl_dig_t)*kdig;
+        *idig = borrow & DIG_MASK;
+        borrow >>= DIG_SIZE;
+    }
+
+    // propagate the borrow through the remaining high digits of j
+    for (; jlen > 0; --jlen, ++idig, ++jdig) {
+        borrow += *jdig;
+        *idig = borrow & DIG_MASK;
+        borrow >>= DIG_SIZE;
+    }
+
+    // the top digits may have cancelled, so normalise the result
+    return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j & k
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen (jlen argument not needed)
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_and(mpz_dig_t *idig, const mpz_dig_t *jdig, const mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *result_start = idig;
+
+    // only the low klen digits can be non-zero: the high digits of the
+    // longer operand are ANDed against implicit zeros
+    while (klen > 0) {
+        *idig++ = *jdig++ & *kdig++;
+        --klen;
+    }
+
+    return mpn_remove_trailing_zeros(result_start, idig);
+}
+
+#endif
+
+/* i = -((-j) & (-k)) = ~((~j + 1) & (~k + 1)) + 1
+   i = (j & (-k)) = (j & (~k + 1)) = ( j & (~k + 1))
+   i = ((-j) & k) = ((~j + 1) & k) = ((~j + 1) & k )
+   computes general form:
+   i = (im ^ (((j ^ jm) + jc) & ((k ^ km) + kc))) + ic where Xm = Xc == 0 ? 0 : DIG_MASK
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_and_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
+    mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+    mpz_dig_t *oidig = idig;
+    // an all-ones mask together with an initial carry of 1 implements the
+    // two's-complement transform (~x + 1) of the corresponding operand
+    mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
+    mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+    mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+    for (; jlen > 0; ++idig, ++jdig) {
+        carryj += *jdig ^ jmask;
+        // once k is exhausted, --klen wraps around (size_t is unsigned) and
+        // the comparison stays false, so only kmask is consumed thereafter,
+        // ie k's missing high digits are treated as zeros
+        carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+        carryi += ((carryj & carryk) ^ imask) & DIG_MASK;
+        *idig = carryi & DIG_MASK;
+        carryk >>= DIG_SIZE;
+        carryj >>= DIG_SIZE;
+        carryi >>= DIG_SIZE;
+    }
+
+    // a final carry extends the result by one digit
+    if (0 != carryi) {
+        *idig++ = carryi;
+    }
+
+    return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j | k
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_or(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *result_start = idig;
+
+    // jlen now counts only the digits of j extending beyond k
+    jlen -= klen;
+
+    // OR the digits common to both operands
+    while (klen > 0) {
+        *idig++ = *jdig++ | *kdig++;
+        --klen;
+    }
+
+    // the high digits of j pass through unchanged (ORed with implicit zeros)
+    while (jlen > 0) {
+        *idig++ = *jdig++;
+        --jlen;
+    }
+
+    // no normalisation needed: j is normalised and OR cannot clear bits
+    return idig - result_start;
+}
+
+#endif
+
+/* i = -((-j) | (-k)) = ~((~j + 1) | (~k + 1)) + 1
+ i = -(j | (-k)) = -(j | (~k + 1)) = ~( j | (~k + 1)) + 1
+ i = -((-j) | k) = -((~j + 1) | k) = ~((~j + 1) | k ) + 1
+ computes general form:
+ i = ~(((j ^ jm) + jc) | ((k ^ km) + kc)) + 1 where Xm = Xc == 0 ? 0 : DIG_MASK
+ returns number of digits in i
+ assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+ can have i, j, k pointing to same memory
+*/
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+// See the general-form comment above the #if: computes i = -(j | k) where
+// the sign of each operand is encoded by its carry (carry == 1 means that
+// operand is negative and is transformed as ~x + 1 on the fly).
+STATIC size_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
+    mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+    mpz_dig_t *oidig = idig;
+    // the result of OR on negatives is always negative, so the result's
+    // two's-complement carry is fixed at 1 and its mask at DIG_MASK
+    mpz_dbl_dig_t carryi = 1;
+    mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+    mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+    for (; jlen > 0; ++idig, ++jdig) {
+        carryj += *jdig ^ jmask;
+        // once k is exhausted, --klen wraps (unsigned) and the test stays
+        // false, so k's missing high digits are treated as zeros
+        carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+        carryi += ((carryj | carryk) ^ DIG_MASK) & DIG_MASK;
+        *idig = carryi & DIG_MASK;
+        carryk >>= DIG_SIZE;
+        carryj >>= DIG_SIZE;
+        carryi >>= DIG_SIZE;
+    }
+
+    // At least one of j,k must be negative so the above for-loop runs at least
+    // once. For carryi to be non-zero here it must be equal to 1 at the end of
+    // each iteration of the loop. So the accumulation of carryi must overflow
+    // each time, ie carryi += 0xff..ff. So carryj|carryk must be 0 in the
+    // DIG_MASK bits on each iteration. But considering all cases of signs of
+    // j,k one sees that this is not possible.
+    assert(carryi == 0);
+
+    return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#else
+
+// Generic variant (used when MICROPY_OPT_MPZ_BITWISE is disabled): computes
+// the general form from the comment above the #if, with the result's own
+// two's-complement transform selected by carryi instead of being fixed.
+STATIC size_t mpn_or_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
+    mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+    mpz_dig_t *oidig = idig;
+    // an all-ones mask with carry 1 implements ~x + 1 for that operand
+    mpz_dig_t imask = (0 == carryi) ? 0 : DIG_MASK;
+    mpz_dig_t jmask = (0 == carryj) ? 0 : DIG_MASK;
+    mpz_dig_t kmask = (0 == carryk) ? 0 : DIG_MASK;
+
+    for (; jlen > 0; ++idig, ++jdig) {
+        carryj += *jdig ^ jmask;
+        // once k is exhausted, --klen wraps (unsigned) and the test stays
+        // false, so k's missing high digits are treated as zeros
+        carryk += (--klen <= --jlen) ? (*kdig++ ^ kmask) : kmask;
+        carryi += ((carryj | carryk) ^ imask) & DIG_MASK;
+        *idig = carryi & DIG_MASK;
+        carryk >>= DIG_SIZE;
+        carryj >>= DIG_SIZE;
+        carryi >>= DIG_SIZE;
+    }
+
+    // See comment in above mpn_or_neg for why carryi must be 0.
+    assert(carryi == 0);
+
+    return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+#endif
+
+#if MICROPY_OPT_MPZ_BITWISE
+
+/* computes i = j ^ k
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes jlen >= klen
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_xor(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *result_start = idig;
+
+    // jlen now counts only the digits of j extending beyond k
+    jlen -= klen;
+
+    // XOR the digits common to both operands
+    while (klen > 0) {
+        *idig++ = *jdig++ ^ *kdig++;
+        --klen;
+    }
+
+    // the high digits of j pass through unchanged (XORed with implicit zeros)
+    while (jlen > 0) {
+        *idig++ = *jdig++;
+        --jlen;
+    }
+
+    // XOR can cancel the top digits, so normalise the result
+    return mpn_remove_trailing_zeros(result_start, idig);
+}
+
+#endif
+
+/* i = (-j) ^ (-k) = ~(j - 1) ^ ~(k - 1) = (j - 1) ^ (k - 1)
+   i = -(j ^ (-k)) = -(j ^ ~(k - 1)) = ~(j ^ ~(k - 1)) + 1 = (j ^ (k - 1)) + 1
+   i = -((-j) ^ k) = -(~(j - 1) ^ k) = ~(~(j - 1) ^ k) + 1 = ((j - 1) ^ k) + 1
+   computes general form:
+   i = ((j - 1 + jc) ^ (k - 1 + kc)) + ic
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised j, k; assumes length j >= length k
+   can have i, j, k pointing to same memory
+*/
+STATIC size_t mpn_xor_neg(mpz_dig_t *idig, const mpz_dig_t *jdig, size_t jlen, const mpz_dig_t *kdig, size_t klen,
+    mpz_dbl_dig_t carryi, mpz_dbl_dig_t carryj, mpz_dbl_dig_t carryk) {
+    mpz_dig_t *oidig = idig;
+
+    for (; jlen > 0; ++idig, ++jdig) {
+        // digit + DIG_MASK with an initial carry of 0 computes (digit - 1)
+        // modulo the digit base; carry 1 leaves the digit unchanged
+        carryj += *jdig + DIG_MASK;
+        // once k is exhausted, --klen wraps (unsigned) and the test stays
+        // false, so k's missing high digits are treated as zeros
+        carryk += (--klen <= --jlen) ? (*kdig++ + DIG_MASK) : DIG_MASK;
+        carryi += (carryj ^ carryk) & DIG_MASK;
+        *idig = carryi & DIG_MASK;
+        carryk >>= DIG_SIZE;
+        carryj >>= DIG_SIZE;
+        carryi >>= DIG_SIZE;
+    }
+
+    // a final carry extends the result by one digit
+    if (0 != carryi) {
+        *idig++ = carryi;
+    }
+
+    return mpn_remove_trailing_zeros(oidig, idig);
+}
+
+/* computes i = i * d1 + d2
+   returns number of digits in i
+   assumes enough memory in i; assumes normalised i; assumes dmul != 0
+*/
+STATIC size_t mpn_mul_dig_add_dig(mpz_dig_t *idig, size_t ilen, mpz_dig_t dmul, mpz_dig_t dadd) {
+    mpz_dig_t *result_start = idig;
+    mpz_dbl_dig_t acc = dadd; // seed the carry chain with the addend
+
+    while (ilen > 0) {
+        // will never overflow so long as DIG_SIZE <= 8*sizeof(mpz_dbl_dig_t)/2
+        acc += (mpz_dbl_dig_t)*idig * (mpz_dbl_dig_t)dmul;
+        *idig++ = acc & DIG_MASK;
+        acc >>= DIG_SIZE;
+        --ilen;
+    }
+
+    // a final carry extends the result by one digit
+    if (acc != 0) {
+        *idig++ = acc;
+    }
+
+    return idig - result_start;
+}
+
+/* computes i = j * k
+   returns number of digits in i
+   assumes enough memory in i; assumes i is zeroed; assumes normalised j, k
+   can have j, k point to same memory
+*/
+STATIC size_t mpn_mul(mpz_dig_t *idig, mpz_dig_t *jdig, size_t jlen, mpz_dig_t *kdig, size_t klen) {
+    mpz_dig_t *oidig = idig;
+    size_t ilen = 0;
+
+    // schoolbook multiplication: for each digit of k, accumulate j times
+    // that digit into the (pre-zeroed) result at the matching offset
+    for (; klen > 0; --klen, ++idig, ++kdig) {
+        mpz_dig_t *id = idig;
+        mpz_dbl_dig_t carry = 0;
+
+        size_t jl = jlen;
+        for (mpz_dig_t *jd = jdig; jl > 0; --jl, ++jd, ++id) {
+            carry += (mpz_dbl_dig_t)*id + (mpz_dbl_dig_t)*jd * (mpz_dbl_dig_t)*kdig; // will never overflow so long as DIG_SIZE <= 8*sizeof(mpz_dbl_dig_t)/2
+            *id = carry & DIG_MASK;
+            carry >>= DIG_SIZE;
+        }
+
+        // a leftover carry extends the partial product by one digit
+        if (carry != 0) {
+            *id++ = carry;
+        }
+
+        ilen = id - oidig;
+        // check to prevent usb starvation
+        #ifdef RUN_BACKGROUND_TASKS
+        RUN_BACKGROUND_TASKS;
+        #endif
+    }
+
+    return ilen;
+}
+
+/* natural_div - quo * den + new_num = old_num (ie num is replaced with rem)
+   assumes den != 0
+   assumes num_dig has enough memory to be extended by 1 digit
+   assumes quo_dig has enough memory (as many digits as num)
+   assumes quo_dig is filled with zeros
+*/
+STATIC void mpn_div(mpz_dig_t *num_dig, size_t *num_len, const mpz_dig_t *den_dig, size_t den_len, mpz_dig_t *quo_dig, size_t *quo_len) {
+    mpz_dig_t *orig_num_dig = num_dig;
+    mpz_dig_t *orig_quo_dig = quo_dig;
+    mpz_dig_t norm_shift = 0;
+    mpz_dbl_dig_t lead_den_digit;
+
+    // handle simple cases
+    {
+        int cmp = mpn_cmp(num_dig, *num_len, den_dig, den_len);
+        if (cmp == 0) {
+            // num == den: quotient is exactly 1, remainder 0
+            *num_len = 0;
+            quo_dig[0] = 1;
+            *quo_len = 1;
+            return;
+        } else if (cmp < 0) {
+            // num < den: quotient is 0 and num is already the remainder
+            // numerator remains the same
+            *quo_len = 0;
+            return;
+        }
+    }
+
+    // We need to normalise the denominator (leading bit of leading digit is 1)
+    // so that the division routine works. Since the denominator memory is
+    // read-only we do the normalisation on the fly, each time a digit of the
+    // denominator is needed. We need to know is how many bits to shift by.
+
+    // count number of leading zeros in leading digit of denominator
+    {
+        mpz_dig_t d = den_dig[den_len - 1];
+        while ((d & DIG_MSB) == 0) {
+            d <<= 1;
+            ++norm_shift;
+        }
+    }
+
+    // now need to shift numerator by same amount as denominator
+    // first, increase length of numerator in case we need more room to shift
+    num_dig[*num_len] = 0;
+    ++(*num_len);
+    // note: when norm_shift == 0 the right shift below is by DIG_SIZE, which
+    // is well-defined because it is performed in the wider mpz_dbl_dig_t type
+    for (mpz_dig_t *num = num_dig, carry = 0; num < num_dig + *num_len; ++num) {
+        mpz_dig_t n = *num;
+        *num = ((n << norm_shift) | carry) & DIG_MASK;
+        carry = (mpz_dbl_dig_t)n >> (DIG_SIZE - norm_shift);
+    }
+
+    // cache the leading digit of the denominator
+    lead_den_digit = (mpz_dbl_dig_t)den_dig[den_len - 1] << norm_shift;
+    if (den_len >= 2) {
+        lead_den_digit |= (mpz_dbl_dig_t)den_dig[den_len - 2] >> (DIG_SIZE - norm_shift);
+    }
+
+    // point num_dig to last digit in numerator
+    num_dig += *num_len - 1;
+
+    // calculate number of digits in quotient
+    *quo_len = *num_len - den_len;
+
+    // point to last digit to store for quotient
+    quo_dig += *quo_len - 1;
+
+    // keep going while we have enough digits to divide
+    while (*num_len > den_len) {
+        // estimate this quotient digit from the top two digits of num and
+        // the (normalised) leading digit of den
+        mpz_dbl_dig_t quo = ((mpz_dbl_dig_t)*num_dig << DIG_SIZE) | num_dig[-1];
+
+        // get approximate quotient
+        quo /= lead_den_digit;
+
+        // Multiply quo by den and subtract from num to get remainder.
+        // Must be careful with overflow of the borrow variable. Both
+        // borrow and low_digs are signed values and need signed right-shift,
+        // but x is unsigned and may take a full-range value.
+        const mpz_dig_t *d = den_dig;
+        mpz_dbl_dig_t d_norm = 0;
+        mpz_dbl_dig_signed_t borrow = 0;
+        for (mpz_dig_t *n = num_dig - den_len; n < num_dig; ++n, ++d) {
+            // Get the next digit in (den).
+            d_norm = ((mpz_dbl_dig_t)*d << norm_shift) | (d_norm >> DIG_SIZE);
+            // Multiply the next digit in (quo * den).
+            mpz_dbl_dig_t x = (mpz_dbl_dig_t)quo * (d_norm & DIG_MASK);
+            // Compute the low DIG_MASK bits of the next digit in (num - quo * den)
+            mpz_dbl_dig_signed_t low_digs = (borrow & DIG_MASK) + *n - (x & DIG_MASK);
+            // Store the digit result for (num).
+            *n = low_digs & DIG_MASK;
+            // Compute the borrow, shifted right before summing to avoid overflow.
+            borrow = (borrow >> DIG_SIZE) - (x >> DIG_SIZE) + (low_digs >> DIG_SIZE);
+        }
+
+        // At this point we have either:
+        //
+        // 1. quo was the correct value and the most-sig-digit of num is exactly
+        // cancelled by borrow (borrow + *num_dig == 0). In this case there is
+        // nothing more to do.
+        //
+        // 2. quo was too large, we subtracted too many den from num, and the
+        // most-sig-digit of num is less than needed (borrow + *num_dig < 0).
+        // In this case we must reduce quo and add back den to num until the
+        // carry from this operation cancels out the borrow.
+        //
+        borrow += *num_dig;
+        for (; borrow != 0; --quo) {
+            d = den_dig;
+            d_norm = 0;
+            mpz_dbl_dig_t carry = 0;
+            for (mpz_dig_t *n = num_dig - den_len; n < num_dig; ++n, ++d) {
+                d_norm = ((mpz_dbl_dig_t)*d << norm_shift) | (d_norm >> DIG_SIZE);
+                carry += (mpz_dbl_dig_t)*n + (d_norm & DIG_MASK);
+                *n = carry & DIG_MASK;
+                carry >>= DIG_SIZE;
+            }
+            borrow += carry;
+        }
+
+        // store this digit of the quotient
+        *quo_dig = quo & DIG_MASK;
+        --quo_dig;
+
+        // move down to next digit of numerator
+        --num_dig;
+        --(*num_len);
+    }
+
+    // unnormalise numerator (remainder now)
+    for (mpz_dig_t *num = orig_num_dig + *num_len - 1, carry = 0; num >= orig_num_dig; --num) {
+        mpz_dig_t n = *num;
+        *num = ((n >> norm_shift) | carry) & DIG_MASK;
+        carry = (mpz_dbl_dig_t)n << (DIG_SIZE - norm_shift);
+    }
+
+    // strip trailing zeros
+
+    while (*quo_len > 0 && orig_quo_dig[*quo_len - 1] == 0) {
+        --(*quo_len);
+    }
+
+    while (*num_len > 0 && orig_num_dig[*num_len - 1] == 0) {
+        --(*num_len);
+    }
+}
+
+#define MIN_ALLOC (2)
+
+// Initialise z to the integer 0, with no digit storage attached.
+void mpz_init_zero(mpz_t *z) {
+    z->dig = NULL;
+    z->alloc = 0;
+    z->len = 0;
+    z->fixed_dig = 0;
+    z->neg = 0;
+}
+
+// Initialise z and set its value from the machine integer val.
+void mpz_init_from_int(mpz_t *z, mp_int_t val) {
+    mpz_init_zero(z);
+    mpz_set_from_int(z, val);
+}
+
+// Initialise z with a caller-provided, fixed-size digit buffer and set its
+// value from the machine integer val.  The buffer is never reallocated or
+// freed by the mpz code.
+void mpz_init_fixed_from_int(mpz_t *z, mpz_dig_t *dig, size_t alloc, mp_int_t val) {
+    z->dig = dig;
+    z->alloc = alloc;
+    z->len = 0;
+    z->fixed_dig = 1; // mark the digit buffer as externally owned
+    z->neg = 0;
+    mpz_set_from_int(z, val);
+}
+
+// Release the digit storage of z (but not z itself).  Safe to call with
+// NULL, and a no-op for values backed by a caller-provided fixed buffer.
+void mpz_deinit(mpz_t *z) {
+    if (z == NULL || z->fixed_dig) {
+        return;
+    }
+    m_del(mpz_dig_t, z->dig, z->alloc);
+}
+
+#if 0
+these functions are unused
+
+mpz_t *mpz_zero(void) {
+ mpz_t *z = m_new_obj(mpz_t);
+ mpz_init_zero(z);
+ return z;
+}
+
+mpz_t *mpz_from_int(mp_int_t val) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_int(z, val);
+ return z;
+}
+
+mpz_t *mpz_from_ll(long long val, bool is_signed) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_ll(z, val, is_signed);
+ return z;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+mpz_t *mpz_from_float(mp_float_t val) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_float(z, val);
+ return z;
+}
+#endif
+
+mpz_t *mpz_from_str(const char *str, size_t len, bool neg, unsigned int base) {
+ mpz_t *z = mpz_zero();
+ mpz_set_from_str(z, str, len, neg, base);
+ return z;
+}
+#endif
+
+// Free both the digit storage of z and the mpz_t object itself.
+STATIC void mpz_free(mpz_t *z) {
+    if (z == NULL) {
+        return;
+    }
+    m_del(mpz_dig_t, z->dig, z->alloc);
+    m_del_obj(mpz_t, z);
+}
+
+// Ensure z has storage for at least "need" digits, growing (or first
+// allocating) the digit buffer as necessary; existing digits are preserved
+// by m_renew.  A minimum of MIN_ALLOC digits is always allocated.
+STATIC void mpz_need_dig(mpz_t *z, size_t need) {
+    if (need < MIN_ALLOC) {
+        need = MIN_ALLOC;
+    }
+
+    if (z->dig == NULL || z->alloc < need) {
+        // if z has fixed digit buffer there's not much we can do as the caller will
+        // be expecting a buffer with at least "need" bytes (but it shouldn't happen)
+        assert(!z->fixed_dig);
+        z->dig = m_renew(mpz_dig_t, z->dig, z->alloc, need);
+        z->alloc = need;
+    }
+}
+
+// Return a heap-allocated deep copy of src (digit buffer included).
+// src must have a non-zero allocation; the clone is always growable
+// (fixed_dig = 0) even if src used a fixed buffer.
+STATIC mpz_t *mpz_clone(const mpz_t *src) {
+    assert(src->alloc != 0);
+    mpz_t *z = m_new_obj(mpz_t);
+    z->neg = src->neg;
+    z->fixed_dig = 0;
+    z->alloc = src->alloc;
+    z->len = src->len;
+    z->dig = m_new(mpz_dig_t, z->alloc);
+    // copy the full allocation, not just len digits, to match alloc above
+    memcpy(z->dig, src->dig, src->alloc * sizeof(mpz_dig_t));
+    return z;
+}
+
+/* sets dest = src
+   can have dest, src the same
+*/
+void mpz_set(mpz_t *dest, const mpz_t *src) {
+    // grows dest's digit buffer if needed; no-op when dest == src
+    mpz_need_dig(dest, src->len);
+    dest->neg = src->neg;
+    dest->len = src->len;
+    memcpy(dest->dig, src->dig, src->len * sizeof(mpz_dig_t));
+}
+
+// Set z to the machine integer val.  Digits are stored little-endian,
+// DIG_SIZE bits per digit; zero is the canonical len == 0, neg == 0 form.
+void mpz_set_from_int(mpz_t *z, mp_int_t val) {
+    if (val == 0) {
+        z->neg = 0;
+        z->len = 0;
+        return;
+    }
+
+    mpz_need_dig(z, MPZ_NUM_DIG_FOR_INT);
+
+    mp_uint_t uval;
+    if (val < 0) {
+        z->neg = 1;
+        // negate via unsigned arithmetic so the minimum mp_int_t value is not
+        // signed overflow (undefined behaviour); matches mpz_set_from_ll
+        uval = -(mp_uint_t)val;
+    } else {
+        z->neg = 0;
+        uval = val;
+    }
+
+    // peel off DIG_SIZE bits at a time, least-significant digit first
+    z->len = 0;
+    while (uval > 0) {
+        z->dig[z->len++] = uval & DIG_MASK;
+        uval >>= DIG_SIZE;
+    }
+}
+
+// Set z from a long long, treated as signed or unsigned per is_signed.
+void mpz_set_from_ll(mpz_t *z, long long val, bool is_signed) {
+    mpz_need_dig(z, MPZ_NUM_DIG_FOR_LL);
+
+    unsigned long long uval;
+    if (is_signed && val < 0) {
+        z->neg = 1;
+        // unsigned negation avoids signed-overflow UB for LLONG_MIN
+        uval = -(unsigned long long)val;
+    } else {
+        z->neg = 0;
+        uval = val;
+    }
+
+    // peel off DIG_SIZE bits at a time, least-significant digit first
+    z->len = 0;
+    while (uval > 0) {
+        z->dig[z->len++] = uval & DIG_MASK;
+        uval >>= DIG_SIZE;
+    }
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Set z to src truncated toward zero, by unpacking the IEEE fields directly.
+// Inf and NaN both map to 0 here -- the caller is expected to reject them first.
+void mpz_set_from_float(mpz_t *z, mp_float_t src) {
+    mp_float_union_t u = {src};
+    z->neg = u.p.sgn;
+    if (u.p.exp == 0) {
+        // value == 0 || value < 1
+        mpz_set_from_int(z, 0);
+    } else if (u.p.exp == ((1 << MP_FLOAT_EXP_BITS) - 1)) {
+        // u.p.frc == 0 indicates inf, else NaN
+        // should be handled by caller
+        mpz_set_from_int(z, 0);
+    } else {
+        const int adj_exp = (int)u.p.exp - MP_FLOAT_EXP_BIAS;
+        if (adj_exp < 0) {
+            // value < 1 , truncates to 0
+            mpz_set_from_int(z, 0);
+        } else if (adj_exp == 0) {
+            // 1 <= value < 2 , so truncates to 1
+            mpz_set_from_int(z, 1);
+        } else {
+            // 2 <= value
+            const int dig_cnt = (adj_exp + 1 + (DIG_SIZE - 1)) / DIG_SIZE;
+            const unsigned int rem = adj_exp % DIG_SIZE;
+            int dig_ind, shft;
+            // restore the implicit leading 1 bit of the mantissa
+            mp_float_uint_t frc = u.p.frc | ((mp_float_uint_t)1 << MP_FLOAT_FRAC_BITS);
+
+            if (adj_exp < MP_FLOAT_FRAC_BITS) {
+                // fractional bits exist below the binary point: drop them
+                shft = 0;
+                dig_ind = 0;
+                frc >>= MP_FLOAT_FRAC_BITS - adj_exp;
+            } else {
+                // whole mantissa is integral; low digits are zero padding
+                shft = (rem - MP_FLOAT_FRAC_BITS) % DIG_SIZE;
+                dig_ind = (adj_exp - MP_FLOAT_FRAC_BITS) / DIG_SIZE;
+            }
+            mpz_need_dig(z, dig_cnt);
+            z->len = dig_cnt;
+            if (dig_ind != 0) {
+                memset(z->dig, 0, dig_ind * sizeof(mpz_dig_t));
+            }
+            if (shft != 0) {
+                z->dig[dig_ind++] = (frc << shft) & DIG_MASK;
+                frc >>= DIG_SIZE - shft;
+            }
+            #if DIG_SIZE < (MP_FLOAT_FRAC_BITS + 1)
+            while (dig_ind != dig_cnt) {
+                z->dig[dig_ind++] = frc & DIG_MASK;
+                frc >>= DIG_SIZE;
+            }
+            #else
+            if (dig_ind != dig_cnt) {
+                z->dig[dig_ind] = frc;
+            }
+            #endif
+        }
+    }
+}
+#endif
+
+// returns number of bytes from str that were processed
+// Parses digits in the given base (2..36, letters case-insensitive) and stops
+// at the first character that is not a valid digit for that base.
+size_t mpz_set_from_str(mpz_t *z, const char *str, size_t len, bool neg, unsigned int base) {
+    assert(base <= 36);
+
+    const char *cur = str;
+    const char *top = str + len;
+
+    // worst case: base > 16 needs fewer digits than 8 bits per char allows
+    mpz_need_dig(z, len * 8 / DIG_SIZE + 1);
+
+    if (neg) {
+        z->neg = 1;
+    } else {
+        z->neg = 0;
+    }
+
+    z->len = 0;
+    for (; cur < top; ++cur) { // XXX UTF8 next char
+        // mp_uint_t v = char_to_numeric(cur#); // XXX UTF8 get char
+        mp_uint_t v = *cur;
+        if ('0' <= v && v <= '9') {
+            v -= '0';
+        } else if ('A' <= v && v <= 'Z') {
+            v -= 'A' - 10;
+        } else if ('a' <= v && v <= 'z') {
+            v -= 'a' - 10;
+        } else {
+            break;
+        }
+        if (v >= base) {
+            break;
+        }
+        // z = z * base + v
+        z->len = mpn_mul_dig_add_dig(z->dig, z->len, base, v);
+    }
+
+    return cur - str;
+}
+
+// Set z from len raw bytes in buf (unsigned interpretation, result >= 0).
+// Bytes are consumed least-significant first; for big_endian the walk starts
+// at the end of the buffer and moves backwards.
+void mpz_set_from_bytes(mpz_t *z, bool big_endian, size_t len, const byte *buf) {
+    int delta = 1;
+    if (big_endian) {
+        buf += len - 1;
+        delta = -1;
+    }
+
+    mpz_need_dig(z, (len * 8 + DIG_SIZE - 1) / DIG_SIZE);
+
+    // accumulate 8 bits at a time into d, emitting a digit every DIG_SIZE bits
+    mpz_dig_t d = 0;
+    int num_bits = 0;
+    z->neg = 0;
+    z->len = 0;
+    while (len) {
+        while (len && num_bits < DIG_SIZE) {
+            d |= *buf << num_bits;
+            num_bits += 8;
+            buf += delta;
+            len--;
+        }
+        z->dig[z->len++] = d & DIG_MASK;
+        // Need this #if because it's C undefined behavior to do: uint32_t >> 32
+        #if DIG_SIZE != 8 && DIG_SIZE != 16 && DIG_SIZE != 32
+        d >>= DIG_SIZE;
+        #else
+        d = 0;
+        #endif
+        num_bits -= DIG_SIZE;
+    }
+
+    // normalise: strip high zero digits so len reflects the true magnitude
+    z->len = mpn_remove_trailing_zeros(z->dig, z->dig + z->len);
+}
+
+#if 0
+these functions are unused
+// NOTE: compiled out; simple sign/parity predicates kept for reference.
+
+bool mpz_is_pos(const mpz_t *z) {
+    return z->len > 0 && z->neg == 0;
+}
+
+bool mpz_is_odd(const mpz_t *z) {
+    return z->len > 0 && (z->dig[0] & 1) != 0;
+}
+
+bool mpz_is_even(const mpz_t *z) {
+    return z->len == 0 || (z->dig[0] & 1) == 0;
+}
+#endif
+
+// Three-way comparison: returns negative if z1 < z2, 0 if equal,
+// positive if z1 > z2.
+int mpz_cmp(const mpz_t *z1, const mpz_t *z2) {
+    // differing signs decide immediately (neg is 0 or 1)
+    int result = (int)z2->neg - (int)z1->neg;
+    if (result == 0) {
+        // same sign: compare magnitudes, flipping the answer for negatives
+        result = mpn_cmp(z1->dig, z1->len, z2->dig, z2->len);
+        if (z1->neg != 0) {
+            result = -result;
+        }
+    }
+    return result;
+}
+
+#if 0
+// obsolete
+// compares mpz with an integer that fits within DIG_SIZE bits
+// NOTE: compiled out; returns -1/0/1 like mpz_cmp.
+mp_int_t mpz_cmp_sml_int(const mpz_t *z, mp_int_t sml_int) {
+    mp_int_t cmp;
+    if (z->neg == 0) {
+        if (sml_int < 0) {
+            return 1;
+        }
+        if (sml_int == 0) {
+            if (z->len == 0) {
+                return 0;
+            }
+            return 1;
+        }
+        if (z->len == 0) {
+            return -1;
+        }
+        assert(sml_int < (1 << DIG_SIZE));
+        if (z->len != 1) {
+            return 1;
+        }
+        cmp = z->dig[0] - sml_int;
+    } else {
+        if (sml_int > 0) {
+            return -1;
+        }
+        if (sml_int == 0) {
+            if (z->len == 0) {
+                return 0;
+            }
+            return -1;
+        }
+        if (z->len == 0) {
+            return 1;
+        }
+        assert(sml_int > -(1 << DIG_SIZE));
+        if (z->len != 1) {
+            return -1;
+        }
+        cmp = -z->dig[0] - sml_int;
+    }
+    if (cmp < 0) {
+        return -1;
+    }
+    if (cmp > 0) {
+        return 1;
+    }
+    return 0;
+}
+#endif
+
+#if 0
+these functions are unused
+// NOTE: compiled out; heap-allocating wrappers around the *_inpl functions,
+// kept for reference.
+
+/* returns abs(z)
+*/
+mpz_t *mpz_abs(const mpz_t *z) {
+    // TODO: handle case of z->alloc=0
+    mpz_t *z2 = mpz_clone(z);
+    z2->neg = 0;
+    return z2;
+}
+
+/* returns -z
+*/
+mpz_t *mpz_neg(const mpz_t *z) {
+    // TODO: handle case of z->alloc=0
+    mpz_t *z2 = mpz_clone(z);
+    z2->neg = 1 - z2->neg;
+    return z2;
+}
+
+/* returns lhs + rhs
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_add(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t *z = mpz_zero();
+    mpz_add_inpl(z, lhs, rhs);
+    return z;
+}
+
+/* returns lhs - rhs
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_sub(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t *z = mpz_zero();
+    mpz_sub_inpl(z, lhs, rhs);
+    return z;
+}
+
+/* returns lhs * rhs
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_mul(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t *z = mpz_zero();
+    mpz_mul_inpl(z, lhs, rhs);
+    return z;
+}
+
+/* returns lhs ** rhs
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_pow(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t *z = mpz_zero();
+    mpz_pow_inpl(z, lhs, rhs);
+    return z;
+}
+
+/* computes new integers in quo and rem such that:
+       quo * rhs + rem = lhs
+       0 <= rem < rhs
+   can have lhs, rhs the same
+*/
+void mpz_divmod(const mpz_t *lhs, const mpz_t *rhs, mpz_t **quo, mpz_t **rem) {
+    *quo = mpz_zero();
+    *rem = mpz_zero();
+    mpz_divmod_inpl(*quo, *rem, lhs, rhs);
+}
+#endif
+
+/* computes dest = abs(z)
+   can have dest, z the same
+*/
+void mpz_abs_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    // zero already has neg == 0, so clearing the sign is always valid
+    dest->neg = 0;
+}
+
+/* computes dest = -z
+   can have dest, z the same
+*/
+void mpz_neg_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    // only flip the sign for non-zero values (negative zero is not allowed)
+    if (dest->len) {
+        dest->neg = 1 - dest->neg;
+    }
+}
+
+/* computes dest = ~z (= -z - 1)
+   can have dest, z the same
+*/
+void mpz_not_inpl(mpz_t *dest, const mpz_t *z) {
+    if (dest != z) {
+        mpz_set(dest, z);
+    }
+    if (dest->len == 0) {
+        // ~0 == -1
+        mpz_need_dig(dest, 1);
+        dest->dig[0] = 1;
+        dest->len = 1;
+        dest->neg = 1;
+    } else if (dest->neg) {
+        // z < 0: ~z == |z| - 1, which is non-negative
+        dest->neg = 0;
+        mpz_dig_t k = 1;
+        dest->len = mpn_sub(dest->dig, dest->dig, dest->len, &k, 1);
+    } else {
+        // z > 0: ~z == -(z + 1); the +1 may carry into a new digit
+        mpz_need_dig(dest, dest->len + 1);
+        mpz_dig_t k = 1;
+        dest->len = mpn_add(dest->dig, dest->dig, dest->len, &k, 1);
+        dest->neg = 1;
+    }
+}
+
+/* computes dest = lhs << rhs
+   can have dest, lhs the same
+*/
+void mpz_shl_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs) {
+    if (lhs->len == 0 || rhs == 0) {
+        // shifting zero, or shifting by zero, is a plain copy
+        mpz_set(dest, lhs);
+    } else {
+        // result needs at most ceil(rhs / DIG_SIZE) extra digits
+        mpz_need_dig(dest, lhs->len + (rhs + DIG_SIZE - 1) / DIG_SIZE);
+        dest->len = mpn_shl(dest->dig, lhs->dig, lhs->len, rhs);
+        dest->neg = lhs->neg;
+    }
+}
+
+/* computes dest = lhs >> rhs
+   can have dest, lhs the same
+*/
+void mpz_shr_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs) {
+    if (lhs->len == 0 || rhs == 0) {
+        mpz_set(dest, lhs);
+    } else {
+        mpz_need_dig(dest, lhs->len);
+        // mpn_shr truncates toward zero
+        dest->len = mpn_shr(dest->dig, lhs->dig, lhs->len, rhs);
+        dest->neg = lhs->neg;
+        if (dest->neg) {
+            // arithmetic shift right, rounding to negative infinity
+            // (Python semantics): if any shifted-out bit was set, the
+            // truncated magnitude must be bumped by one
+            mp_uint_t n_whole = rhs / DIG_SIZE;
+            mp_uint_t n_part = rhs % DIG_SIZE;
+            mpz_dig_t round_up = 0;
+            for (size_t i = 0; i < lhs->len && i < n_whole; i++) {
+                if (lhs->dig[i] != 0) {
+                    round_up = 1;
+                    break;
+                }
+            }
+            if (n_whole < lhs->len && (lhs->dig[n_whole] & ((1 << n_part) - 1)) != 0) {
+                round_up = 1;
+            }
+            if (round_up) {
+                if (dest->len == 0) {
+                    // dest == 0, so need to add 1 by hand (answer will be -1)
+                    dest->dig[0] = 1;
+                    dest->len = 1;
+                } else {
+                    // dest > 0, so can use mpn_add to add 1
+                    dest->len = mpn_add(dest->dig, dest->dig, dest->len, &round_up, 1);
+                }
+            }
+        }
+    }
+}
+
+/* computes dest = lhs + rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_add_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // ensure lhs has the larger magnitude so mpn_sub's result is non-negative
+    if (mpn_cmp(lhs->dig, lhs->len, rhs->dig, rhs->len) < 0) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    if (lhs->neg == rhs->neg) {
+        // same sign: add magnitudes (may carry into one extra digit)
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_add(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    } else {
+        // opposite signs: subtract the smaller magnitude from the larger
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_sub(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    }
+
+    // sign follows the larger-magnitude operand; zero is forced non-negative
+    dest->neg = lhs->neg & !!dest->len;
+}
+
+/* computes dest = lhs - rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_sub_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    bool neg = false;
+
+    // ensure lhs has the larger magnitude; remember if operands were swapped
+    // so the result's sign can be corrected below
+    if (mpn_cmp(lhs->dig, lhs->len, rhs->dig, rhs->len) < 0) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+        neg = true;
+    }
+
+    if (lhs->neg != rhs->neg) {
+        // opposite signs: subtraction becomes addition of magnitudes
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_add(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    } else {
+        // same sign: subtract the smaller magnitude from the larger
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_sub(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+    }
+
+    if (dest->len == 0) {
+        // canonical zero is non-negative
+        dest->neg = 0;
+    } else if (neg) {
+        dest->neg = 1 - lhs->neg;
+    } else {
+        dest->neg = lhs->neg;
+    }
+}
+
+/* computes dest = lhs & rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if ((0 == lhs->neg) && (0 == rhs->neg)) {
+        // fast path: both operands non-negative, plain digit-wise AND
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_and(dest->dig, lhs->dig, rhs->dig, rhs->len);
+        dest->neg = 0;
+    } else {
+        // negative operands are handled by the mpn_and_neg helper
+        // (sign-magnitude emulation of two's-complement bitwise ops)
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+            lhs->neg == rhs->neg, 0 != lhs->neg, 0 != rhs->neg);
+        dest->neg = lhs->neg & rhs->neg;
+    }
+
+    #else
+
+    // generic path: mpn_and_neg handles all sign combinations
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_and_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+        (lhs->neg == rhs->neg) ? lhs->neg : 0, lhs->neg, rhs->neg);
+    dest->neg = lhs->neg & rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs | rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if ((0 == lhs->neg) && (0 == rhs->neg)) {
+        // fast path: both operands non-negative, plain digit-wise OR
+        mpz_need_dig(dest, lhs->len);
+        dest->len = mpn_or(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+        dest->neg = 0;
+    } else {
+        // any negative operand makes the OR result negative
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+            0 != lhs->neg, 0 != rhs->neg);
+        dest->neg = 1;
+    }
+
+    #else
+
+    // generic path: mpn_or_neg handles all sign combinations
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_or_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+        (lhs->neg || rhs->neg), lhs->neg, rhs->neg);
+    dest->neg = lhs->neg | rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs ^ rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // make sure lhs has the most digits
+    if (lhs->len < rhs->len) {
+        const mpz_t *temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    #if MICROPY_OPT_MPZ_BITWISE
+
+    if (lhs->neg == rhs->neg) {
+        // equal signs always yield a non-negative XOR result
+        mpz_need_dig(dest, lhs->len);
+        if (lhs->neg == 0) {
+            dest->len = mpn_xor(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+        } else {
+            dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len, 0, 0, 0);
+        }
+        dest->neg = 0;
+    } else {
+        // differing signs yield a negative XOR result
+        mpz_need_dig(dest, lhs->len + 1);
+        dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len, 1,
+            0 == lhs->neg, 0 == rhs->neg);
+        dest->neg = 1;
+    }
+
+    #else
+
+    // generic path: mpn_xor_neg handles all sign combinations
+    mpz_need_dig(dest, lhs->len + (lhs->neg || rhs->neg));
+    dest->len = mpn_xor_neg(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len,
+        (lhs->neg != rhs->neg), 0 == lhs->neg, 0 == rhs->neg);
+    dest->neg = lhs->neg ^ rhs->neg;
+
+    #endif
+}
+
+/* computes dest = lhs * rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_mul_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    if (lhs->len == 0 || rhs->len == 0) {
+        mpz_set_from_int(dest, 0);
+        return;
+    }
+
+    // mpn_mul cannot work in place, so clone any operand that aliases dest
+    mpz_t *temp = NULL;
+    if (lhs == dest) {
+        lhs = temp = mpz_clone(lhs);
+        if (rhs == dest) {
+            // dest * dest: both operands alias, one clone suffices
+            rhs = lhs;
+        }
+    } else if (rhs == dest) {
+        rhs = temp = mpz_clone(rhs);
+    }
+
+    mpz_need_dig(dest, lhs->len + rhs->len); // min mem l+r-1, max mem l+r
+    memset(dest->dig, 0, dest->alloc * sizeof(mpz_dig_t));
+    dest->len = mpn_mul(dest->dig, lhs->dig, lhs->len, rhs->dig, rhs->len);
+
+    // result is negative iff operand signs differ
+    if (lhs->neg == rhs->neg) {
+        dest->neg = 0;
+    } else {
+        dest->neg = 1;
+    }
+
+    // mpz_free(NULL) is a no-op, so this is safe when nothing was cloned
+    mpz_free(temp);
+}
+
+/* computes dest = lhs ** rhs
+   can have dest, lhs, rhs the same
+*/
+void mpz_pow_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs) {
+    // 0 ** n == 0, and a negative exponent yields 0 here
+    // (fractional results are not representable)
+    if (lhs->len == 0 || rhs->neg != 0) {
+        mpz_set_from_int(dest, 0);
+        return;
+    }
+
+    // anything ** 0 == 1
+    if (rhs->len == 0) {
+        mpz_set_from_int(dest, 1);
+        return;
+    }
+
+    // binary exponentiation: clone operands so dest can alias lhs/rhs
+    mpz_t *x = mpz_clone(lhs);
+    mpz_t *n = mpz_clone(rhs);
+
+    mpz_set_from_int(dest, 1);
+
+    while (n->len > 0) {
+        if ((n->dig[0] & 1) != 0) {
+            // low bit of exponent set: multiply this power of x into dest
+            mpz_mul_inpl(dest, dest, x);
+        }
+        n->len = mpn_shr(n->dig, n->dig, n->len, 1);
+        if (n->len == 0) {
+            break;
+        }
+        // square x for the next exponent bit
+        mpz_mul_inpl(x, x, x);
+    }
+
+    mpz_free(x);
+    mpz_free(n);
+}
+
+/* computes dest = (lhs ** rhs) % mod
+   can have dest, lhs, rhs the same; mod can't be the same as dest
+*/
+void mpz_pow3_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs, const mpz_t *mod) {
+    // 0 ** n == 0; negative exponent yields 0; anything mod 1 == 0
+    if (lhs->len == 0 || rhs->neg != 0 || (mod->len == 1 && mod->dig[0] == 1)) {
+        mpz_set_from_int(dest, 0);
+        return;
+    }
+
+    mpz_set_from_int(dest, 1);
+
+    // anything ** 0 == 1 (mod handled by the check above)
+    if (rhs->len == 0) {
+        return;
+    }
+
+    mpz_t *x = mpz_clone(lhs);
+    mpz_t *n = mpz_clone(rhs);
+    mpz_t quo;
+    mpz_init_zero(&quo);
+
+    // binary exponentiation, reducing mod after every multiply so the
+    // intermediate operands stay bounded by mod's size
+    while (n->len > 0) {
+        if ((n->dig[0] & 1) != 0) {
+            mpz_mul_inpl(dest, dest, x);
+            mpz_divmod_inpl(&quo, dest, dest, mod);
+        }
+        n->len = mpn_shr(n->dig, n->dig, n->len, 1);
+        if (n->len == 0) {
+            break;
+        }
+        mpz_mul_inpl(x, x, x);
+        mpz_divmod_inpl(&quo, x, x, mod);
+    }
+
+    mpz_deinit(&quo);
+    mpz_free(x);
+    mpz_free(n);
+}
+
+#if 0
+these functions are unused
+// NOTE: compiled out; gcd/lcm implementations kept for reference.
+
+/* computes gcd(z1, z2)
+   based on Knuth's modified gcd algorithm (I think?)
+   gcd(z1, z2) >= 0
+   gcd(0, 0) = 0
+   gcd(z, 0) = abs(z)
+*/
+mpz_t *mpz_gcd(const mpz_t *z1, const mpz_t *z2) {
+    if (z1->len == 0) {
+        // TODO: handle case of z2->alloc=0
+        mpz_t *a = mpz_clone(z2);
+        a->neg = 0;
+        return a;
+    } else if (z2->len == 0) {
+        mpz_t *a = mpz_clone(z1);
+        a->neg = 0;
+        return a;
+    }
+
+    mpz_t *a = mpz_clone(z1);
+    mpz_t *b = mpz_clone(z2);
+    mpz_t c;
+    mpz_init_zero(&c);
+    a->neg = 0;
+    b->neg = 0;
+
+    for (;;) {
+        if (mpz_cmp(a, b) < 0) {
+            if (a->len == 0) {
+                mpz_free(a);
+                mpz_deinit(&c);
+                return b;
+            }
+            mpz_t *t = a;
+            a = b;
+            b = t;
+        }
+        if (!(b->len >= 2 || (b->len == 1 && b->dig[0] > 1))) { // compute b > 0; could be mpz_cmp_small_int(b, 1) > 0
+            break;
+        }
+        mpz_set(&c, b);
+        do {
+            mpz_add_inpl(&c, &c, &c);
+        } while (mpz_cmp(&c, a) <= 0);
+        c.len = mpn_shr(c.dig, c.dig, c.len, 1);
+        mpz_sub_inpl(a, a, &c);
+    }
+
+    mpz_deinit(&c);
+
+    if (b->len == 1 && b->dig[0] == 1) { // compute b == 1; could be mpz_cmp_small_int(b, 1) == 0
+        mpz_free(a);
+        return b;
+    } else {
+        mpz_free(b);
+        return a;
+    }
+}
+
+/* computes lcm(z1, z2)
+     = abs(z1) / gcd(z1, z2) * abs(z2)
+   lcm(z1, z1) >= 0
+   lcm(0, 0) = 0
+   lcm(z, 0) = 0
+*/
+mpz_t *mpz_lcm(const mpz_t *z1, const mpz_t *z2) {
+    if (z1->len == 0 || z2->len == 0) {
+        return mpz_zero();
+    }
+
+    mpz_t *gcd = mpz_gcd(z1, z2);
+    mpz_t *quo = mpz_zero();
+    mpz_t *rem = mpz_zero();
+    mpz_divmod_inpl(quo, rem, z1, gcd);
+    mpz_mul_inpl(rem, quo, z2);
+    mpz_free(gcd);
+    mpz_free(quo);
+    rem->neg = 0;
+    return rem;
+}
+#endif
+
+/* computes new integers in quo and rem such that:
+       quo * rhs + rem = lhs
+       0 <= rem < rhs
+   can have lhs, rhs the same
+   assumes rhs != 0 (undefined behaviour if it is)
+*/
+void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const mpz_t *rhs) {
+    assert(!mpz_is_zero(rhs));
+
+    mpz_need_dig(dest_quo, lhs->len + 1); // +1 necessary?
+    memset(dest_quo->dig, 0, (lhs->len + 1) * sizeof(mpz_dig_t));
+    dest_quo->neg = 0;
+    dest_quo->len = 0;
+    mpz_need_dig(dest_rem, lhs->len + 1); // +1 necessary?
+    // mpn_div works in place on the numerator, so copy lhs into dest_rem first
+    mpz_set(dest_rem, lhs);
+    mpn_div(dest_rem->dig, &dest_rem->len, rhs->dig, rhs->len, dest_quo->dig, &dest_quo->len);
+    // a zero remainder must be canonical (non-negative)
+    dest_rem->neg &= !!dest_rem->len;
+
+    // check signs and do Python style modulo
+    // (quotient rounds toward -inf; remainder takes the sign of rhs)
+    if (lhs->neg != rhs->neg) {
+        dest_quo->neg = !!dest_quo->len;
+        if (!mpz_is_zero(dest_rem)) {
+            mpz_t mpzone;
+            mpz_init_from_int(&mpzone, -1);
+            mpz_add_inpl(dest_quo, dest_quo, &mpzone);
+            mpz_add_inpl(dest_rem, dest_rem, rhs);
+        }
+    }
+}
+
+#if 0
+these functions are unused
+// NOTE: compiled out; heap-allocating div/mod wrappers kept for reference.
+
+/* computes floor(lhs / rhs)
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_div(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t *quo = mpz_zero();
+    mpz_t rem;
+    mpz_init_zero(&rem);
+    mpz_divmod_inpl(quo, &rem, lhs, rhs);
+    mpz_deinit(&rem);
+    return quo;
+}
+
+/* computes lhs % rhs ( >= 0)
+   can have lhs, rhs the same
+*/
+mpz_t *mpz_mod(const mpz_t *lhs, const mpz_t *rhs) {
+    mpz_t quo;
+    mpz_init_zero(&quo);
+    mpz_t *rem = mpz_zero();
+    mpz_divmod_inpl(&quo, rem, lhs, rhs);
+    mpz_deinit(&quo);
+    return rem;
+}
+#endif
+
+// must return actual int value if it fits in mp_int_t
+// Folds the digits into a single machine word (truncating high bits) and
+// negates for negative values; unsigned negation is well-defined.
+mp_int_t mpz_hash(const mpz_t *z) {
+    mp_uint_t val = 0;
+    for (size_t i = z->len; i > 0; --i) {
+        val = (val << DIG_SIZE) | z->dig[i - 1];
+    }
+
+    if (z->neg != 0) {
+        val = -val;
+    }
+
+    return val;
+}
+
+// Convert i to a machine mp_int_t, returning false (and leaving *value
+// untouched) if it does not fit.  NOTE(review): the guard appears to be
+// conservative near the limits (e.g. the most negative value may be
+// rejected even though representable) -- confirm against callers.
+bool mpz_as_int_checked(const mpz_t *i, mp_int_t *value) {
+    mp_uint_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    while (d-- > i->dig) {
+        // bail out before the shift would push bits past the sign bit
+        if (val > (~(MP_OBJ_WORD_MSBIT_HIGH) >> DIG_SIZE)) {
+            // will overflow
+            return false;
+        }
+        val = (val << DIG_SIZE) | *d;
+    }
+
+    if (i->neg != 0) {
+        // unsigned negation: well-defined two's-complement result
+        val = -val;
+    }
+
+    *value = val;
+    return true;
+}
+
+// Convert i to a machine mp_uint_t, returning false (and leaving *value
+// untouched) if it is negative or does not fit.
+bool mpz_as_uint_checked(const mpz_t *i, mp_uint_t *value) {
+    if (i->neg != 0) {
+        // can't represent signed values
+        return false;
+    }
+
+    mp_uint_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    while (d-- > i->dig) {
+        // bail out before the shift would lose high bits
+        if (val > (~(MP_OBJ_WORD_MSBIT_HIGH) >> (DIG_SIZE - 1))) {
+            // will overflow
+            return false;
+        }
+        val = (val << DIG_SIZE) | *d;
+    }
+
+    *value = val;
+    return true;
+}
+
+// Write z into buf as exactly len bytes, least-significant byte first
+// (or last, for big_endian).  Negative values are emitted in two's
+// complement (the `carry` implements the +1 of ~x + 1).  If z needs more
+// than len bytes the high bytes are silently truncated; if it needs fewer,
+// the rest of buf is zero- or sign-filled.
+void mpz_as_bytes(const mpz_t *z, bool big_endian, size_t len, byte *buf) {
+    byte *b = buf;
+    if (big_endian) {
+        b += len;
+    }
+    mpz_dig_t *zdig = z->dig;
+    int bits = 0;
+    mpz_dbl_dig_t d = 0;
+    mpz_dbl_dig_t carry = 1;
+    for (size_t zlen = z->len; zlen > 0; --zlen) {
+        // accumulate DIG_SIZE bits, then emit full bytes from the low end
+        bits += DIG_SIZE;
+        d = (d << DIG_SIZE) | *zdig++;
+        for (; bits >= 8; bits -= 8, d >>= 8) {
+            mpz_dig_t val = d;
+            if (z->neg) {
+                // two's complement: invert and propagate the initial +1
+                val = (~val & 0xff) + carry;
+                carry = val >> 8;
+            }
+            if (big_endian) {
+                *--b = val;
+                if (b == buf) {
+                    return;
+                }
+            } else {
+                *b++ = val;
+                if (b == buf + len) {
+                    return;
+                }
+            }
+        }
+    }
+
+    // fill remainder of buf with zero/sign extension of the integer
+    if (big_endian) {
+        len = b - buf;
+    } else {
+        len = buf + len - b;
+        buf = b;
+    }
+    memset(buf, z->neg ? 0xff : 0x00, len);
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// Convert i to a float by Horner evaluation over the digits
+// (most-significant first); precision is lost for values wider than the
+// float mantissa.
+mp_float_t mpz_as_float(const mpz_t *i) {
+    mp_float_t val = 0;
+    mpz_dig_t *d = i->dig + i->len;
+
+    while (d-- > i->dig) {
+        val = val * DIG_BASE + *d;
+    }
+
+    if (i->neg != 0) {
+        val = -val;
+    }
+
+    return val;
+}
+#endif
+
+#if 0
+this function is unused
+// NOTE: compiled out; heap-allocating wrapper around mpz_as_str_inpl.
+char *mpz_as_str(const mpz_t *i, unsigned int base) {
+    char *s = m_new(char, mp_int_format_size(mpz_max_num_bits(i), base, NULL, '\0'));
+    mpz_as_str_inpl(i, base, NULL, 'a', '\0', s);
+    return s;
+}
+#endif
+
+// assumes enough space in str as calculated by mp_int_format_size
+// base must be between 2 and 32 inclusive
+// returns length of string, not including null byte
+// Algorithm: repeatedly divide a scratch copy of the digits by base,
+// emitting characters least-significant first, then reverse the string.
+size_t mpz_as_str_inpl(const mpz_t *i, unsigned int base, const char *prefix, char base_char, char comma, char *str) {
+    assert(str != NULL);
+    assert(2 <= base && base <= 32);
+
+    size_t ilen = i->len;
+
+    char *s = str;
+    if (ilen == 0) {
+        // zero is a special case: "<prefix>0"
+        if (prefix) {
+            while (*prefix) {
+                *s++ = *prefix++;
+            }
+        }
+        *s++ = '0';
+        *s = '\0';
+        return s - str;
+    }
+
+    // make a copy of mpz digits, so we can do the div/mod calculation
+    mpz_dig_t *dig = m_new(mpz_dig_t, ilen);
+    memcpy(dig, i->dig, ilen * sizeof(mpz_dig_t));
+
+    // convert
+    char *last_comma = str;
+    bool done;
+    do {
+        mpz_dig_t *d = dig + ilen;
+        mpz_dbl_dig_t a = 0;
+
+        // compute next remainder: long division of the digit array by base
+        while (--d >= dig) {
+            a = (a << DIG_SIZE) | *d;
+            *d = a / base;
+            a %= base;
+        }
+
+        // convert to character
+        a += '0';
+        if (a > '9') {
+            a += base_char - '9' - 1;
+        }
+        *s++ = a;
+
+        // check if number is zero
+        done = true;
+        for (d = dig; d < dig + ilen; ++d) {
+            if (*d != 0) {
+                done = false;
+                break;
+            }
+        }
+        // insert a separator every 3 characters (string is still reversed)
+        if (comma && (s - last_comma) == 3) {
+            *s++ = comma;
+            last_comma = s;
+        }
+    }
+    while (!done);
+
+    // free the copy of the digits array
+    m_del(mpz_dig_t, dig, ilen);
+
+    // prefix and sign are appended reversed too, then fixed by the reversal below
+    if (prefix) {
+        const char *p = &prefix[strlen(prefix)];
+        while (p > prefix) {
+            *s++ = *--p;
+        }
+    }
+    if (i->neg != 0) {
+        *s++ = '-';
+    }
+
+    // reverse string
+    for (char *u = str, *v = s - 1; u < v; ++u, --v) {
+        char temp = *u;
+        *u = *v;
+        *v = temp;
+    }
+
+    *s = '\0'; // null termination
+
+    return s - str;
+}
+
+#endif // MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
diff --git a/circuitpython/py/mpz.h b/circuitpython/py/mpz.h
new file mode 100644
index 0000000..0fdcf52
--- /dev/null
+++ b/circuitpython/py/mpz.h
@@ -0,0 +1,161 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_MPZ_H
+#define MICROPY_INCLUDED_PY_MPZ_H
+
+#include <stdint.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// This mpz module implements arbitrary precision integers.
+//
+// The storage for each digit is defined by mpz_dig_t. The actual number of
+// bits in mpz_dig_t that are used is defined by MPZ_DIG_SIZE. The machine must
+// also provide a type that is twice as wide as mpz_dig_t, in both signed and
+// unsigned versions.
+//
+// MPZ_DIG_SIZE can be between 4 and 8*sizeof(mpz_dig_t), but it makes most
+// sense to have it as large as possible. If MPZ_DIG_SIZE is not already
+// defined then it is auto-detected below, depending on the machine. The types
+// are then set based on the value of MPZ_DIG_SIZE (although they can be freely
+// changed so long as the constraints mentioned above are met).
+
+#ifndef MPZ_DIG_SIZE
+    #if defined(__x86_64__) || defined(_WIN64)
+// 64-bit machine, using 32-bit storage for digits
+    #define MPZ_DIG_SIZE (32)
+    #else
+// default: 32-bit machine, using 16-bit storage for digits
+    #define MPZ_DIG_SIZE (16)
+    #endif
+#endif
+
+// pick digit and double-digit types wide enough for MPZ_DIG_SIZE bits
+#if MPZ_DIG_SIZE > 16
+#define MPZ_DBL_DIG_SIZE (64)
+typedef uint32_t mpz_dig_t;
+typedef uint64_t mpz_dbl_dig_t;
+typedef int64_t mpz_dbl_dig_signed_t;
+#elif MPZ_DIG_SIZE > 8
+#define MPZ_DBL_DIG_SIZE (32)
+typedef uint16_t mpz_dig_t;
+typedef uint32_t mpz_dbl_dig_t;
+typedef int32_t mpz_dbl_dig_signed_t;
+#elif MPZ_DIG_SIZE > 4
+#define MPZ_DBL_DIG_SIZE (16)
+typedef uint8_t mpz_dig_t;
+typedef uint16_t mpz_dbl_dig_t;
+typedef int16_t mpz_dbl_dig_signed_t;
+#else
+#define MPZ_DBL_DIG_SIZE (8)
+typedef uint8_t mpz_dig_t;
+typedef uint8_t mpz_dbl_dig_t;
+typedef int8_t mpz_dbl_dig_signed_t;
+#endif
+
+// spelling of a "long 1" literal: MSVC on Win64 uses 1i64
+#ifdef _WIN64
+    #ifdef __MINGW32__
+    #define MPZ_LONG_1 1LL
+    #else
+    #define MPZ_LONG_1 1i64
+    #endif
+#else
+    #define MPZ_LONG_1 1L
+#endif
+
+// these define the maximum storage needed to hold an int or long long
+#define MPZ_NUM_DIG_FOR_INT ((sizeof(mp_int_t) * 8 + MPZ_DIG_SIZE - 1) / MPZ_DIG_SIZE)
+#define MPZ_NUM_DIG_FOR_LL ((sizeof(long long) * 8 + MPZ_DIG_SIZE - 1) / MPZ_DIG_SIZE)
+
+typedef struct _mpz_t {
+    // Zero has neg=0, len=0.  Negative zero is not allowed.
+    size_t neg : 1;
+    // fixed_dig=1 means dig is a caller-owned buffer: never resized or freed
+    size_t fixed_dig : 1;
+    // number of digits allocated in dig
+    size_t alloc : (8 * sizeof(size_t) - 2);
+    // number of digits in use; magnitude is stored little-endian in dig
+    size_t len;
+    mpz_dig_t *dig;
+} mpz_t;
+
+// convenience macro to declare an mpz with a digit array from the stack, initialised by an integer
+#define MPZ_CONST_INT(z, val) mpz_t z; mpz_dig_t z##_digits[MPZ_NUM_DIG_FOR_INT]; mpz_init_fixed_from_int(&z, z_digits, MPZ_NUM_DIG_FOR_INT, val);
+
+// initialisation / teardown
+void mpz_init_zero(mpz_t *z);
+void mpz_init_from_int(mpz_t *z, mp_int_t val);
+void mpz_init_fixed_from_int(mpz_t *z, mpz_dig_t *dig, size_t dig_alloc, mp_int_t val);
+void mpz_deinit(mpz_t *z);
+
+// setters (z must already be initialised)
+void mpz_set(mpz_t *dest, const mpz_t *src);
+void mpz_set_from_int(mpz_t *z, mp_int_t src);
+void mpz_set_from_ll(mpz_t *z, long long i, bool is_signed);
+#if MICROPY_PY_BUILTINS_FLOAT
+void mpz_set_from_float(mpz_t *z, mp_float_t src);
+#endif
+size_t mpz_set_from_str(mpz_t *z, const char *str, size_t len, bool neg, unsigned int base);
+void mpz_set_from_bytes(mpz_t *z, bool big_endian, size_t len, const byte *buf);
+
+// predicates and comparison
+static inline bool mpz_is_zero(const mpz_t *z) {
+    return z->len == 0;
+}
+static inline bool mpz_is_neg(const mpz_t *z) {
+    return z->neg != 0;
+}
+int mpz_cmp(const mpz_t *lhs, const mpz_t *rhs);
+
+// in-place arithmetic; see mpz.c for aliasing rules of each function
+void mpz_abs_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_neg_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_not_inpl(mpz_t *dest, const mpz_t *z);
+void mpz_shl_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs);
+void mpz_shr_inpl(mpz_t *dest, const mpz_t *lhs, mp_uint_t rhs);
+void mpz_add_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_sub_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_mul_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_pow_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_pow3_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs, const mpz_t *mod);
+void mpz_and_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_or_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_xor_inpl(mpz_t *dest, const mpz_t *lhs, const mpz_t *rhs);
+void mpz_divmod_inpl(mpz_t *dest_quo, mpz_t *dest_rem, const mpz_t *lhs, const mpz_t *rhs);
+
+// upper bound on the bit length: every digit counted at full width
+static inline size_t mpz_max_num_bits(const mpz_t *z) {
+    return z->len * MPZ_DIG_SIZE;
+}
+// exact bit length; 0 for zero.  Uses the GCC/Clang __builtin_clzl builtin.
+static inline size_t mpz_num_bits(const mpz_t *z) {
+    if (mpz_is_zero(z)) {
+        return 0;
+    }
+    // last_bits == bitlen(top digit) - 8*sizeof(mpz_dig_t), which is <= 0;
+    // the size_t wrap-around cancels in the sum below, yielding
+    // (len-1)*MPZ_DIG_SIZE + bitlen(top digit).
+    // NOTE(review): assumes MPZ_DIG_SIZE == 8*sizeof(mpz_dig_t), true for the
+    // default 8/16/32-bit configurations -- confirm for custom MPZ_DIG_SIZE.
+    size_t last_bits = (8 * (sizeof(long) - sizeof(mpz_dig_t))) - __builtin_clzl(z->dig[z->len - 1]);
+    return z->len * MPZ_DIG_SIZE + last_bits;
+}
+// conversions out of mpz; the *_checked forms return false on overflow
+mp_int_t mpz_hash(const mpz_t *z);
+bool mpz_as_int_checked(const mpz_t *z, mp_int_t *value);
+bool mpz_as_uint_checked(const mpz_t *z, mp_uint_t *value);
+void mpz_as_bytes(const mpz_t *z, bool big_endian, size_t len, byte *buf);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mpz_as_float(const mpz_t *z);
+#endif
+size_t mpz_as_str_inpl(const mpz_t *z, unsigned int base, const char *prefix, char base_char, char comma, char *str);
diff --git a/circuitpython/py/nativeglue.c b/circuitpython/py/nativeglue.c
new file mode 100644
index 0000000..29ca77b
--- /dev/null
+++ b/circuitpython/py/nativeglue.c
@@ -0,0 +1,349 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/smallint.h"
+#include "py/nativeglue.h"
+#include "py/objtype.h"
+#include "py/gc.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+#if MICROPY_EMIT_NATIVE
+
+int mp_native_type_from_qstr(qstr qst) {
+ switch (qst) {
+ case MP_QSTR_object:
+ return MP_NATIVE_TYPE_OBJ;
+ case MP_QSTR_bool:
+ return MP_NATIVE_TYPE_BOOL;
+ case MP_QSTR_int:
+ return MP_NATIVE_TYPE_INT;
+ case MP_QSTR_uint:
+ return MP_NATIVE_TYPE_UINT;
+ case MP_QSTR_ptr:
+ return MP_NATIVE_TYPE_PTR;
+ case MP_QSTR_ptr8:
+ return MP_NATIVE_TYPE_PTR8;
+ case MP_QSTR_ptr16:
+ return MP_NATIVE_TYPE_PTR16;
+ case MP_QSTR_ptr32:
+ return MP_NATIVE_TYPE_PTR32;
+ default:
+ return -1;
+ }
+}
+
+// convert a MicroPython object to a valid native value based on type
+mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type) {
+ DEBUG_printf("mp_native_from_obj(%p, " UINT_FMT ")\n", obj, type);
+ switch (type & 0xf) {
+ case MP_NATIVE_TYPE_OBJ:
+ return (mp_uint_t)obj;
+ case MP_NATIVE_TYPE_BOOL:
+ return mp_obj_is_true(obj);
+ case MP_NATIVE_TYPE_INT:
+ case MP_NATIVE_TYPE_UINT:
+ return mp_obj_get_int_truncated(obj);
+ default: { // cast obj to a pointer
+ mp_buffer_info_t bufinfo;
+ if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_READ)) {
+ return (mp_uint_t)bufinfo.buf;
+ } else {
+ // assume obj is an integer that represents an address
+ return mp_obj_get_int_truncated(obj);
+ }
+ }
+ }
+}
+
+#endif
+
+#if MICROPY_EMIT_MACHINE_CODE
+
+// convert a native value to a MicroPython object based on type
+mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type) {
+ DEBUG_printf("mp_native_to_obj(" UINT_FMT ", " UINT_FMT ")\n", val, type);
+ switch (type & 0xf) {
+ case MP_NATIVE_TYPE_OBJ:
+ return (mp_obj_t)val;
+ case MP_NATIVE_TYPE_BOOL:
+ return mp_obj_new_bool(val);
+ case MP_NATIVE_TYPE_INT:
+ return mp_obj_new_int(val);
+ case MP_NATIVE_TYPE_UINT:
+ return mp_obj_new_int_from_uint(val);
+ default: // a pointer
+ // we return just the value of the pointer as an integer
+ return mp_obj_new_int_from_uint(val);
+ }
+}
+
+#endif
+
+#if MICROPY_EMIT_NATIVE && !MICROPY_DYNAMIC_COMPILER
+
+#if !MICROPY_PY_BUILTINS_SET
+mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items) {
+ (void)n_args;
+ (void)items;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
+}
+
+void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) {
+ (void)self_in;
+ (void)item;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("set unsupported"));
+}
+#endif
+
+#if !MICROPY_PY_BUILTINS_SLICE
+mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) {
+ (void)ostart;
+ (void)ostop;
+ (void)ostep;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("slice unsupported"));
+}
+#endif
+
+STATIC mp_obj_dict_t *mp_native_swap_globals(mp_obj_dict_t *new_globals) {
+ if (new_globals == NULL) {
+ // Globals were originally the same, so don't restore them
+ return NULL;
+ }
+ mp_obj_dict_t *old_globals = mp_globals_get();
+ if (old_globals == new_globals) {
+ // Don't set globals if they are the same, and return NULL to indicate this
+ return NULL;
+ }
+ mp_globals_set(new_globals);
+ return old_globals;
+}
+
+// wrapper that accepts n_args and n_kw in one argument
+// (native emitter can only pass at most 3 arguments to a function)
+STATIC mp_obj_t mp_native_call_function_n_kw(mp_obj_t fun_in, size_t n_args_kw, const mp_obj_t *args) {
+ return mp_call_function_n_kw(fun_in, n_args_kw & 0xff, (n_args_kw >> 8) & 0xff, args);
+}
+
+// wrapper that builds a raise object from o and raises it
+// END_FINALLY opcode requires that we don't raise if o==None
+STATIC void mp_native_raise(mp_obj_t o) {
+ if (o != MP_OBJ_NULL && o != mp_const_none) {
+ nlr_raise(mp_make_raise_obj(o));
+ }
+}
+
+// wrapper that handles iterator buffer
+STATIC mp_obj_t mp_native_getiter(mp_obj_t obj, mp_obj_iter_buf_t *iter) {
+ if (iter == NULL) {
+ return mp_getiter(obj, NULL);
+ } else {
+ obj = mp_getiter(obj, iter);
+ if (obj != MP_OBJ_FROM_PTR(iter)) {
+ // Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
+ iter->base.type = MP_OBJ_NULL;
+ iter->buf[0] = obj;
+ }
+ return NULL;
+ }
+}
+
+// wrapper that handles iterator buffer
+STATIC mp_obj_t mp_native_iternext(mp_obj_iter_buf_t *iter) {
+ mp_obj_t obj;
+ if (iter->base.type == MP_OBJ_NULL) {
+ obj = iter->buf[0];
+ } else {
+ obj = MP_OBJ_FROM_PTR(iter);
+ }
+ return mp_iternext(obj);
+}
+
+STATIC bool mp_native_yield_from(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *ret_value) {
+ mp_vm_return_kind_t ret_kind;
+ nlr_buf_t nlr_buf;
+ mp_obj_t throw_value = *ret_value;
+ if (nlr_push(&nlr_buf) == 0) {
+ if (throw_value != MP_OBJ_NULL) {
+ send_value = MP_OBJ_NULL;
+ }
+ ret_kind = mp_resume(gen, send_value, throw_value, ret_value);
+ nlr_pop();
+ } else {
+ ret_kind = MP_VM_RETURN_EXCEPTION;
+ *ret_value = nlr_buf.ret_val;
+ }
+
+ if (ret_kind == MP_VM_RETURN_YIELD) {
+ return true;
+ } else if (ret_kind == MP_VM_RETURN_NORMAL) {
+ if (*ret_value == MP_OBJ_STOP_ITERATION) {
+ *ret_value = mp_const_none;
+ }
+ } else {
+ assert(ret_kind == MP_VM_RETURN_EXCEPTION);
+ if (!mp_obj_exception_match(*ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ nlr_raise(*ret_value);
+ }
+ *ret_value = mp_obj_exception_get_value(*ret_value);
+ }
+
+ if (throw_value != MP_OBJ_NULL && mp_obj_exception_match(throw_value, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
+ nlr_raise(mp_make_raise_obj(throw_value));
+ }
+
+ return false;
+}
+
+#if !MICROPY_PY_BUILTINS_FLOAT
+
+STATIC mp_obj_t mp_obj_new_float_from_f(float f) {
+ (void)f;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
+}
+
+STATIC mp_obj_t mp_obj_new_float_from_d(double d) {
+ (void)d;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
+}
+
+STATIC float mp_obj_get_float_to_f(mp_obj_t o) {
+ (void)o;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
+}
+
+STATIC double mp_obj_get_float_to_d(mp_obj_t o) {
+ (void)o;
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("float unsupported"));
+}
+
+#endif
+
+// these must correspond to the respective enum in nativeglue.h
+const mp_fun_table_t mp_fun_table = {
+ mp_const_none,
+ mp_const_false,
+ mp_const_true,
+ mp_native_from_obj,
+ mp_native_to_obj,
+ mp_native_swap_globals,
+ mp_load_name,
+ mp_load_global,
+ mp_load_build_class,
+ mp_load_attr,
+ mp_load_method,
+ mp_load_super_method,
+ mp_store_name,
+ mp_store_global,
+ mp_store_attr,
+ mp_obj_subscr,
+ mp_obj_is_true,
+ mp_unary_op,
+ mp_binary_op,
+ mp_obj_new_tuple,
+ mp_obj_new_list,
+ mp_obj_new_dict,
+ mp_obj_new_set,
+ mp_obj_set_store,
+ mp_obj_list_append,
+ mp_obj_dict_store,
+ mp_make_function_from_raw_code,
+ mp_native_call_function_n_kw,
+ mp_call_method_n_kw,
+ mp_call_method_n_kw_var,
+ mp_native_getiter,
+ mp_native_iternext,
+ #if MICROPY_NLR_SETJMP
+ nlr_push_tail,
+ #else
+ nlr_push,
+ #endif
+ nlr_pop,
+ mp_native_raise,
+ mp_import_name,
+ mp_import_from,
+ mp_import_all,
+ mp_obj_new_slice,
+ mp_unpack_sequence,
+ mp_unpack_ex,
+ mp_delete_name,
+ mp_delete_global,
+ mp_make_closure_from_raw_code,
+ mp_arg_check_num_sig,
+ mp_setup_code_state,
+ mp_small_int_floor_divide,
+ mp_small_int_modulo,
+ mp_native_yield_from,
+ #if MICROPY_NLR_SETJMP
+ setjmp,
+ #else
+ NULL,
+ #endif
+ // Additional entries for dynamic runtime, starts at index 50
+ memset,
+ memmove,
+ gc_realloc,
+ mp_printf,
+ mp_vprintf,
+ mp_raise_msg_str,
+ mp_obj_get_type,
+ mp_obj_new_str,
+ mp_obj_new_bytes,
+ mp_obj_new_bytearray_by_ref,
+ mp_obj_new_float_from_f,
+ mp_obj_new_float_from_d,
+ mp_obj_get_float_to_f,
+ mp_obj_get_float_to_d,
+ mp_get_buffer_raise,
+ mp_get_stream_raise,
+ mp_obj_assert_native_inited,
+ &mp_plat_print,
+ &mp_type_type,
+ &mp_type_str,
+ &mp_type_list,
+ &mp_type_dict,
+ &mp_type_fun_builtin_0,
+ &mp_type_fun_builtin_1,
+ &mp_type_fun_builtin_2,
+ &mp_type_fun_builtin_3,
+ &mp_type_fun_builtin_var,
+ &mp_stream_read_obj,
+ &mp_stream_readinto_obj,
+ &mp_stream_unbuffered_readline_obj,
+ &mp_stream_write_obj,
+};
+
+#endif // MICROPY_EMIT_NATIVE
diff --git a/circuitpython/py/nativeglue.h b/circuitpython/py/nativeglue.h
new file mode 100644
index 0000000..e1f0be3
--- /dev/null
+++ b/circuitpython/py/nativeglue.h
@@ -0,0 +1,178 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2013-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_NATIVEGLUE_H
+#define MICROPY_INCLUDED_PY_NATIVEGLUE_H
+
+#include <stdarg.h>
+#include "py/obj.h"
+#include "py/persistentcode.h"
+#include "py/stream.h"
+
+typedef enum {
+ MP_F_CONST_NONE_OBJ = 0,
+ MP_F_CONST_FALSE_OBJ,
+ MP_F_CONST_TRUE_OBJ,
+ MP_F_CONVERT_OBJ_TO_NATIVE,
+ MP_F_CONVERT_NATIVE_TO_OBJ,
+ MP_F_NATIVE_SWAP_GLOBALS,
+ MP_F_LOAD_NAME,
+ MP_F_LOAD_GLOBAL,
+ MP_F_LOAD_BUILD_CLASS,
+ MP_F_LOAD_ATTR,
+ MP_F_LOAD_METHOD,
+ MP_F_LOAD_SUPER_METHOD,
+ MP_F_STORE_NAME,
+ MP_F_STORE_GLOBAL,
+ MP_F_STORE_ATTR,
+ MP_F_OBJ_SUBSCR,
+ MP_F_OBJ_IS_TRUE,
+ MP_F_UNARY_OP,
+ MP_F_BINARY_OP,
+ MP_F_BUILD_TUPLE,
+ MP_F_BUILD_LIST,
+ MP_F_BUILD_MAP,
+ MP_F_BUILD_SET,
+ MP_F_STORE_SET,
+ MP_F_LIST_APPEND,
+ MP_F_STORE_MAP,
+ MP_F_MAKE_FUNCTION_FROM_RAW_CODE,
+ MP_F_NATIVE_CALL_FUNCTION_N_KW,
+ MP_F_CALL_METHOD_N_KW,
+ MP_F_CALL_METHOD_N_KW_VAR,
+ MP_F_NATIVE_GETITER,
+ MP_F_NATIVE_ITERNEXT,
+ MP_F_NLR_PUSH,
+ MP_F_NLR_POP,
+ MP_F_NATIVE_RAISE,
+ MP_F_IMPORT_NAME,
+ MP_F_IMPORT_FROM,
+ MP_F_IMPORT_ALL,
+ MP_F_NEW_SLICE,
+ MP_F_UNPACK_SEQUENCE,
+ MP_F_UNPACK_EX,
+ MP_F_DELETE_NAME,
+ MP_F_DELETE_GLOBAL,
+ MP_F_MAKE_CLOSURE_FROM_RAW_CODE,
+ MP_F_ARG_CHECK_NUM_SIG,
+ MP_F_SETUP_CODE_STATE,
+ MP_F_SMALL_INT_FLOOR_DIVIDE,
+ MP_F_SMALL_INT_MODULO,
+ MP_F_NATIVE_YIELD_FROM,
+ MP_F_SETJMP,
+ MP_F_NUMBER_OF,
+} mp_fun_kind_t;
+
+typedef struct _mp_fun_table_t {
+ mp_const_obj_t const_none;
+ mp_const_obj_t const_false;
+ mp_const_obj_t const_true;
+ mp_uint_t (*native_from_obj)(mp_obj_t obj, mp_uint_t type);
+ mp_obj_t (*native_to_obj)(mp_uint_t val, mp_uint_t type);
+ mp_obj_dict_t *(*swap_globals)(mp_obj_dict_t * new_globals);
+ mp_obj_t (*load_name)(qstr qst);
+ mp_obj_t (*load_global)(qstr qst);
+ mp_obj_t (*load_build_class)(void);
+ mp_obj_t (*load_attr)(mp_obj_t base, qstr attr);
+ void (*load_method)(mp_obj_t base, qstr attr, mp_obj_t *dest);
+ void (*load_super_method)(qstr attr, mp_obj_t *dest);
+ void (*store_name)(qstr qst, mp_obj_t obj);
+ void (*store_global)(qstr qst, mp_obj_t obj);
+ void (*store_attr)(mp_obj_t base, qstr attr, mp_obj_t val);
+ mp_obj_t (*obj_subscr)(mp_obj_t base, mp_obj_t index, mp_obj_t val);
+ bool (*obj_is_true)(mp_obj_t arg);
+ mp_obj_t (*unary_op)(mp_unary_op_t op, mp_obj_t arg);
+ mp_obj_t (*binary_op)(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs);
+ mp_obj_t (*new_tuple)(size_t n, const mp_obj_t *items);
+ mp_obj_t (*new_list)(size_t n, mp_obj_t *items);
+ mp_obj_t (*new_dict)(size_t n_args);
+ mp_obj_t (*new_set)(size_t n_args, mp_obj_t *items);
+ void (*set_store)(mp_obj_t self_in, mp_obj_t item);
+ mp_obj_t (*list_append)(mp_obj_t self_in, mp_obj_t arg);
+ mp_obj_t (*dict_store)(mp_obj_t self_in, mp_obj_t key, mp_obj_t value);
+ mp_obj_t (*make_function_from_raw_code)(const mp_raw_code_t *rc, mp_obj_t def_args, mp_obj_t def_kw_args);
+ mp_obj_t (*call_function_n_kw)(mp_obj_t fun_in, size_t n_args_kw, const mp_obj_t *args);
+ mp_obj_t (*call_method_n_kw)(size_t n_args, size_t n_kw, const mp_obj_t *args);
+ mp_obj_t (*call_method_n_kw_var)(bool have_self, size_t n_args_n_kw, const mp_obj_t *args);
+ mp_obj_t (*getiter)(mp_obj_t obj, mp_obj_iter_buf_t *iter);
+ mp_obj_t (*iternext)(mp_obj_iter_buf_t *iter);
+ unsigned int (*nlr_push)(nlr_buf_t *);
+ void (*nlr_pop)(void);
+ void (*raise)(mp_obj_t o);
+ mp_obj_t (*import_name)(qstr name, mp_obj_t fromlist, mp_obj_t level);
+ mp_obj_t (*import_from)(mp_obj_t module, qstr name);
+ void (*import_all)(mp_obj_t module);
+ mp_obj_t (*new_slice)(mp_obj_t start, mp_obj_t stop, mp_obj_t step);
+ void (*unpack_sequence)(mp_obj_t seq, size_t num, mp_obj_t *items);
+ void (*unpack_ex)(mp_obj_t seq, size_t num, mp_obj_t *items);
+ void (*delete_name)(qstr qst);
+ void (*delete_global)(qstr qst);
+ mp_obj_t (*make_closure_from_raw_code)(const mp_raw_code_t *rc, mp_uint_t n_closed_over, const mp_obj_t *args);
+ void (*arg_check_num_sig)(size_t n_args, size_t n_kw, uint32_t sig);
+ void (*setup_code_state)(mp_code_state_t *code_state, size_t n_args, size_t n_kw, const mp_obj_t *args);
+ mp_int_t (*small_int_floor_divide)(mp_int_t num, mp_int_t denom);
+ mp_int_t (*small_int_modulo)(mp_int_t dividend, mp_int_t divisor);
+ bool (*yield_from)(mp_obj_t gen, mp_obj_t send_value, mp_obj_t *ret_value);
+ void *setjmp_;
+ // Additional entries for dynamic runtime, starts at index 50
+ void *(*memset_)(void *s, int c, size_t n);
+ void *(*memmove_)(void *dest, const void *src, size_t n);
+ void *(*realloc_)(void *ptr, size_t n_bytes, bool allow_move);
+ int (*printf_)(const mp_print_t *print, const char *fmt, ...);
+ int (*vprintf_)(const mp_print_t *print, const char *fmt, va_list args);
+ #if defined(__GNUC__)
+ NORETURN // Only certain compilers support no-return attributes in function pointer declarations
+ #endif
+ void (*raise_msg_str)(const mp_obj_type_t *exc_type, const char *msg);
+ const mp_obj_type_t *(*obj_get_type)(mp_const_obj_t o_in);
+ mp_obj_t (*obj_new_str)(const char *data, size_t len);
+ mp_obj_t (*obj_new_bytes)(const byte *data, size_t len);
+ mp_obj_t (*obj_new_bytearray_by_ref)(size_t n, void *items);
+ mp_obj_t (*obj_new_float_from_f)(float f);
+ mp_obj_t (*obj_new_float_from_d)(double d);
+ float (*obj_get_float_to_f)(mp_obj_t o);
+ double (*obj_get_float_to_d)(mp_obj_t o);
+ void (*get_buffer_raise)(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+ const mp_stream_p_t *(*get_stream_raise)(mp_obj_t self_in, int flags);
+ void (*assert_native_inited)(mp_obj_t native_object);
+ const mp_print_t *plat_print;
+ const mp_obj_type_t *type_type;
+ const mp_obj_type_t *type_str;
+ const mp_obj_type_t *type_list;
+ const mp_obj_type_t *type_dict;
+ const mp_obj_type_t *type_fun_builtin_0;
+ const mp_obj_type_t *type_fun_builtin_1;
+ const mp_obj_type_t *type_fun_builtin_2;
+ const mp_obj_type_t *type_fun_builtin_3;
+ const mp_obj_type_t *type_fun_builtin_var;
+ const mp_obj_fun_builtin_var_t *stream_read_obj;
+ const mp_obj_fun_builtin_var_t *stream_readinto_obj;
+ const mp_obj_fun_builtin_var_t *stream_unbuffered_readline_obj;
+ const mp_obj_fun_builtin_var_t *stream_write_obj;
+} mp_fun_table_t;
+
+extern const mp_fun_table_t mp_fun_table;
+
+#endif // MICROPY_INCLUDED_PY_NATIVEGLUE_H
diff --git a/circuitpython/py/nlr.c b/circuitpython/py/nlr.c
new file mode 100644
index 0000000..6dfd162
--- /dev/null
+++ b/circuitpython/py/nlr.c
@@ -0,0 +1,51 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if !MICROPY_NLR_SETJMP
+// When not using setjmp, nlr_push_tail is called from inline asm so needs special care
+#if defined(MICROPY_NLR_X86) && MICROPY_NLR_X86 && defined(MICROPY_NLR_OS_WINDOWS) && MICROPY_NLR_OS_WINDOWS
+// On these 32-bit platforms make sure nlr_push_tail doesn't have a leading underscore
+unsigned int nlr_push_tail(nlr_buf_t *nlr) asm ("nlr_push_tail");
+#else
+// LTO can't see inside inline asm functions so explicitly mark nlr_push_tail as used
+__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
+#endif
+#endif
+
+unsigned int nlr_push_tail(nlr_buf_t *nlr) {
+ nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
+ nlr->prev = *top;
+ MP_NLR_SAVE_PYSTACK(nlr);
+ *top = nlr;
+ return 0; // normal return
+}
+
+void nlr_pop(void) {
+ nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
+ *top = (*top)->prev;
+}
diff --git a/circuitpython/py/nlr.h b/circuitpython/py/nlr.h
new file mode 100644
index 0000000..1fb51f3
--- /dev/null
+++ b/circuitpython/py/nlr.h
@@ -0,0 +1,190 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_NLR_H
+#define MICROPY_INCLUDED_PY_NLR_H
+
+// non-local return
+// exception handling, basically a stack of setjmp/longjmp buffers
+
+#include <limits.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+
+#define MICROPY_NLR_NUM_REGS_X86 (6)
+#define MICROPY_NLR_NUM_REGS_X64 (8)
+#define MICROPY_NLR_NUM_REGS_X64_WIN (10)
+#define MICROPY_NLR_NUM_REGS_ARM_THUMB (10)
+#define MICROPY_NLR_NUM_REGS_ARM_THUMB_FP (10 + 6)
+#define MICROPY_NLR_NUM_REGS_AARCH64 (13)
+#define MICROPY_NLR_NUM_REGS_XTENSA (10)
+#define MICROPY_NLR_NUM_REGS_XTENSAWIN (17)
+
+// *FORMAT-OFF*
+
+// If MICROPY_NLR_SETJMP is not enabled then auto-detect the machine arch
+#if !defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP
+// A lot of nlr-related things need different treatment on Windows
+#if defined(_WIN32) || defined(__CYGWIN__)
+#define MICROPY_NLR_OS_WINDOWS 1
+#else
+#define MICROPY_NLR_OS_WINDOWS 0
+#endif
+#if defined(__i386__)
+ #define MICROPY_NLR_SETJMP (0)
+ #define MICROPY_NLR_X86 (1)
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_X86)
+#elif defined(__x86_64__)
+ #define MICROPY_NLR_SETJMP (0)
+ #define MICROPY_NLR_X64 (1)
+ #if MICROPY_NLR_OS_WINDOWS
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_X64_WIN)
+ #else
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_X64)
+ #endif
+#elif defined(__thumb2__) || defined(__thumb__) || defined(__arm__)
+ #define MICROPY_NLR_SETJMP (0)
+ #define MICROPY_NLR_THUMB (1)
+ #if defined(__SOFTFP__)
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_ARM_THUMB)
+ #else
+ // With hardware FP registers s16-s31 are callee save so in principle
+ // should be saved and restored by the NLR code. gcc only uses s16-s21
+ // so only save/restore those as an optimisation.
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_ARM_THUMB_FP)
+ #endif
+#elif defined(__aarch64__)
+ #define MICROPY_NLR_AARCH64 (1)
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_AARCH64)
+#elif defined(__xtensa__)
+ #define MICROPY_NLR_SETJMP (0)
+ #define MICROPY_NLR_XTENSA (1)
+ #define MICROPY_NLR_NUM_REGS (MICROPY_NLR_NUM_REGS_XTENSA)
+#elif defined(__powerpc__)
+ #define MICROPY_NLR_SETJMP (0)
+ #define MICROPY_NLR_POWERPC (1)
+ // this could be less but using 128 for safety
+ #define MICROPY_NLR_NUM_REGS (128)
+#else
+ #define MICROPY_NLR_SETJMP (1)
+// #warning "No native NLR support for this arch, using setjmp implementation"
+#endif
+#endif
+
+// If MICROPY_NLR_SETJMP is not defined above - define/disable it here
+#if !defined(MICROPY_NLR_SETJMP)
+ #define MICROPY_NLR_SETJMP (0)
+#endif
+
+// *FORMAT-ON*
+
+#if MICROPY_NLR_SETJMP
+#include <setjmp.h>
+#endif
+
+typedef struct _nlr_buf_t nlr_buf_t;
+struct _nlr_buf_t {
+ // the entries here must all be machine word size
+ nlr_buf_t *prev;
+ void *ret_val; // always a concrete object (an exception instance)
+
+ #if MICROPY_NLR_SETJMP
+ jmp_buf jmpbuf;
+ #else
+ void *regs[MICROPY_NLR_NUM_REGS];
+ #endif
+
+ #if MICROPY_ENABLE_PYSTACK
+ void *pystack;
+ #endif
+};
+
+// Helper macros to save/restore the pystack state
+#if MICROPY_ENABLE_PYSTACK
+#define MP_NLR_SAVE_PYSTACK(nlr_buf) (nlr_buf)->pystack = MP_STATE_THREAD(pystack_cur)
+#define MP_NLR_RESTORE_PYSTACK(nlr_buf) MP_STATE_THREAD(pystack_cur) = (nlr_buf)->pystack
+#else
+#define MP_NLR_SAVE_PYSTACK(nlr_buf) (void)nlr_buf
+#define MP_NLR_RESTORE_PYSTACK(nlr_buf) (void)nlr_buf
+#endif
+
+// Helper macro to use at the start of a specific nlr_jump implementation
+#define MP_NLR_JUMP_HEAD(val, top) \
+ nlr_buf_t **_top_ptr = &MP_STATE_THREAD(nlr_top); \
+ nlr_buf_t *top = *_top_ptr; \
+ if (top == NULL) { \
+ nlr_jump_fail(val); \
+ } \
+ top->ret_val = val; \
+ MP_NLR_RESTORE_PYSTACK(top); \
+ *_top_ptr = top->prev; \
+
+#if MICROPY_NLR_SETJMP
+// nlr_push() must be defined as a macro, because "The stack context will be
+// invalidated if the function which called setjmp() returns."
+// For this case it is safe to call nlr_push_tail() first.
+#define nlr_push(buf) (nlr_push_tail(buf), setjmp((buf)->jmpbuf))
+#else
+unsigned int nlr_push(nlr_buf_t *);
+#endif
+
+unsigned int nlr_push_tail(nlr_buf_t *top);
+void nlr_pop(void);
+NORETURN void nlr_jump(void *val);
+
+// This must be implemented by a port. It's called by nlr_jump
+// if no nlr buf has been pushed. It must not return, but rather
+// should bail out with a fatal error.
+NORETURN void nlr_jump_fail(void *val);
+
+// use nlr_raise instead of nlr_jump so that debugging is easier
+#ifndef MICROPY_DEBUG_NLR
+#define nlr_raise(val) nlr_jump(MP_OBJ_TO_PTR(val))
+#else
+#include "mpstate.h"
+#define nlr_raise(val) \
+ do { \
+ /*printf("nlr_raise: nlr_top=%p\n", MP_STATE_THREAD(nlr_top)); \
+ fflush(stdout);*/ \
+ void *_val = MP_OBJ_TO_PTR(val); \
+ assert(_val != NULL); \
+ assert(mp_obj_is_exception_instance(val)); \
+ nlr_jump(_val); \
+ } while (0)
+
+#if !MICROPY_NLR_SETJMP
+#define nlr_push(val) \
+ assert(MP_STATE_THREAD(nlr_top) != val),nlr_push(val)
+
+/*
+#define nlr_push(val) \
+ printf("nlr_push: before: nlr_top=%p, val=%p\n", MP_STATE_THREAD(nlr_top), val),assert(MP_STATE_THREAD(nlr_top) != val),nlr_push(val)
+*/
+#endif
+
+#endif
+
+#endif // MICROPY_INCLUDED_PY_NLR_H
diff --git a/circuitpython/py/nlraarch64.c b/circuitpython/py/nlraarch64.c
new file mode 100644
index 0000000..9b00a28
--- /dev/null
+++ b/circuitpython/py/nlraarch64.c
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2021 Yonatan Goldschmidt
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h" // needed for NLR defs
+
+#if defined(MICROPY_NLR_AARCH64) && MICROPY_NLR_AARCH64
+
+// AArch64 callee-saved registers are x19-x29.
+// https://en.wikipedia.org/wiki/Calling_convention#ARM_(A64)
+
+// Implemented purely as inline assembly; inside a function, we have to deal with undoing the prologue, restoring
+// SP and LR. This way, we don't.
+__asm(
+ #if defined(__APPLE__) && defined(__MACH__)
+ "_nlr_push: \n"
+ ".global _nlr_push \n"
+ #else
+ "nlr_push: \n"
+ ".global nlr_push \n"
+ #endif
+ "mov x9, sp \n"
+ "stp lr, x9, [x0, #16]\n" // 16 == offsetof(nlr_buf_t, regs)
+ "stp x19, x20, [x0, #32]\n"
+ "stp x21, x22, [x0, #48]\n"
+ "stp x23, x24, [x0, #64]\n"
+ "stp x25, x26, [x0, #80]\n"
+ "stp x27, x28, [x0, #96]\n"
+ "str x29, [x0, #112]\n"
+ #if defined(__APPLE__) && defined(__MACH__)
+ "b _nlr_push_tail \n" // do the rest in C
+ #else
+ "b nlr_push_tail \n" // do the rest in C
+ #endif
+ );
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ MP_STATIC_ASSERT(offsetof(nlr_buf_t, regs) == 16); // asm assumes it
+
+ __asm volatile (
+ "ldr x29, [%0, #112]\n"
+ "ldp x27, x28, [%0, #96]\n"
+ "ldp x25, x26, [%0, #80]\n"
+ "ldp x23, x24, [%0, #64]\n"
+ "ldp x21, x22, [%0, #48]\n"
+ "ldp x19, x20, [%0, #32]\n"
+ "ldp lr, x9, [%0, #16]\n" // 16 == offsetof(nlr_buf_t, regs)
+ "mov sp, x9 \n"
+ "mov x0, #1 \n" // non-local return
+ "ret \n"
+ :
+ : "r" (top)
+ :
+ );
+
+ MP_UNREACHABLE
+}
+
+#endif // MICROPY_NLR_AARCH64
diff --git a/circuitpython/py/nlrpowerpc.c b/circuitpython/py/nlrpowerpc.c
new file mode 100644
index 0000000..43eb345
--- /dev/null
+++ b/circuitpython/py/nlrpowerpc.c
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019, Michael Neuling, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if defined(MICROPY_NLR_POWERPC) && MICROPY_NLR_POWERPC
+
+#undef nlr_push
+
+// Saving all ABI non-vol registers here
+
+unsigned int nlr_push(nlr_buf_t *nlr) {
+
+ __asm__ volatile (
+ "li 4, 0x4eed ; " // Store canary
+ "std 4, 0x00(%0) ;"
+ "std 0, 0x08(%0) ;"
+ "std 1, 0x10(%0) ;"
+ "std 2, 0x18(%0) ;"
+ "std 14, 0x20(%0) ;"
+ "std 15, 0x28(%0) ;"
+ "std 16, 0x30(%0) ;"
+ "std 17, 0x38(%0) ;"
+ "std 18, 0x40(%0) ;"
+ "std 19, 0x48(%0) ;"
+ "std 20, 0x50(%0) ;"
+ "std 21, 0x58(%0) ;"
+ "std 22, 0x60(%0) ;"
+ "std 23, 0x68(%0) ;"
+ "std 24, 0x70(%0) ;"
+ "std 25, 0x78(%0) ;"
+ "std 26, 0x80(%0) ;"
+ "std 27, 0x88(%0) ;"
+ "std 28, 0x90(%0) ;"
+ "std 29, 0x98(%0) ;"
+ "std 30, 0xA0(%0) ;"
+ "std 31, 0xA8(%0) ;"
+
+ "mfcr 4 ; "
+ "std 4, 0xB0(%0) ;"
+ "mflr 4 ;"
+ "std 4, 0xB8(%0) ;"
+ "li 4, nlr_push_tail@l ;"
+ "oris 4, 4, nlr_push_tail@h ;"
+ "mtctr 4 ;"
+ "mr 3, %1 ; "
+ "bctr ;"
+ :
+ : "r" (&nlr->regs), "r" (nlr)
+ :
+ );
+
+ return 0;
+}
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ __asm__ volatile (
+ "ld 3, 0x0(%0) ;"
+ "cmpdi 3, 0x4eed ; " // Check canary
+ "bne . ; "
+ "ld 0, 0x08(%0) ;"
+ "ld 1, 0x10(%0) ;"
+ "ld 2, 0x18(%0) ;"
+ "ld 14, 0x20(%0) ;"
+ "ld 15, 0x28(%0) ;"
+ "ld 16, 0x30(%0) ;"
+ "ld 17, 0x38(%0) ;"
+ "ld 18, 0x40(%0) ;"
+ "ld 19, 0x48(%0) ;"
+ "ld 20, 0x50(%0) ;"
+ "ld 21, 0x58(%0) ;"
+ "ld 22, 0x60(%0) ;"
+ "ld 23, 0x68(%0) ;"
+ "ld 24, 0x70(%0) ;"
+ "ld 25, 0x78(%0) ;"
+ "ld 26, 0x80(%0) ;"
+ "ld 27, 0x88(%0) ;"
+ "ld 28, 0x90(%0) ;"
+ "ld 29, 0x98(%0) ;"
+ "ld 30, 0xA0(%0) ;"
+ "ld 31, 0xA8(%0) ;"
+ "ld 3, 0xB0(%0) ;"
+ "mtcr 3 ;"
+ "ld 3, 0xB8(%0) ;"
+ "mtlr 3 ; "
+ "li 3, 1;"
+ "blr ;"
+ :
+ : "r" (&top->regs)
+ :
+ );
+
+ MP_UNREACHABLE;
+}
+
+#endif // MICROPY_NLR_POWERPC
diff --git a/circuitpython/py/nlrsetjmp.c b/circuitpython/py/nlrsetjmp.c
new file mode 100644
index 0000000..a93595d
--- /dev/null
+++ b/circuitpython/py/nlrsetjmp.c
@@ -0,0 +1,43 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if MICROPY_NLR_SETJMP
+
+void nlr_jump(void *val) {
+ nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top);
+ nlr_buf_t *top = *top_ptr;
+ if (top == NULL) {
+ nlr_jump_fail(val);
+ }
+ top->ret_val = val;
+ MP_NLR_RESTORE_PYSTACK(top);
+ *top_ptr = top->prev;
+ longjmp(top->jmpbuf, 1);
+}
+
+#endif
diff --git a/circuitpython/py/nlrthumb.c b/circuitpython/py/nlrthumb.c
new file mode 100644
index 0000000..30f9f2c
--- /dev/null
+++ b/circuitpython/py/nlrthumb.c
@@ -0,0 +1,141 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if defined(MICROPY_NLR_THUMB) && MICROPY_NLR_THUMB
+
+#undef nlr_push
+
+// We only need the functions here if we are on arm/thumb, and we are not
+// using setjmp/longjmp.
+//
+// For reference, arm/thumb callee save regs are:
+// r4-r11, r13=sp
+
+__attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
+
+ __asm volatile (
+ "str r4, [r0, #12] \n" // store r4 into nlr_buf
+ "str r5, [r0, #16] \n" // store r5 into nlr_buf
+ "str r6, [r0, #20] \n" // store r6 into nlr_buf
+ "str r7, [r0, #24] \n" // store r7 into nlr_buf
+
+ #if !defined(__thumb2__)
+ "mov r1, r8 \n"
+ "str r1, [r0, #28] \n" // store r8 into nlr_buf
+ "mov r1, r9 \n"
+ "str r1, [r0, #32] \n" // store r9 into nlr_buf
+ "mov r1, r10 \n"
+ "str r1, [r0, #36] \n" // store r10 into nlr_buf
+ "mov r1, r11 \n"
+ "str r1, [r0, #40] \n" // store r11 into nlr_buf
+ "mov r1, r13 \n"
+ "str r1, [r0, #44] \n" // store r13=sp into nlr_buf
+ "mov r1, lr \n"
+ "str r1, [r0, #8] \n" // store lr into nlr_buf
+ #else
+ "str r8, [r0, #28] \n" // store r8 into nlr_buf
+ "str r9, [r0, #32] \n" // store r9 into nlr_buf
+ "str r10, [r0, #36] \n" // store r10 into nlr_buf
+ "str r11, [r0, #40] \n" // store r11 into nlr_buf
+ "str r13, [r0, #44] \n" // store r13=sp into nlr_buf
+ #if MICROPY_NLR_NUM_REGS == 16
+ "vstr d8, [r0, #48] \n" // store s16-s17 into nlr_buf
+ "vstr d9, [r0, #56] \n" // store s18-s19 into nlr_buf
+ "vstr d10, [r0, #64] \n" // store s20-s21 into nlr_buf
+ #endif
+ "str lr, [r0, #8] \n" // store lr into nlr_buf
+ #endif
+
+ #if !defined(__thumb2__)
+ "ldr r1, nlr_push_tail_var \n"
+ "bx r1 \n" // do the rest in C
+ ".align 2 \n"
+ "nlr_push_tail_var: .word nlr_push_tail \n"
+ #else
+ #if defined(__APPLE__) || defined(__MACH__)
+ "b _nlr_push_tail \n" // do the rest in C
+ #else
+ "b nlr_push_tail \n" // do the rest in C
+ #endif
+ #endif
+ );
+
+ #if !defined(__clang__) && defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8))
+ // Older versions of gcc give an error when naked functions don't return a value
+ // Additionally exclude Clang as it also defines __GNUC__ but doesn't need this statement
+ return 0;
+ #endif
+}
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ __asm volatile (
+ "mov r0, %0 \n" // r0 points to nlr_buf
+ "ldr r4, [r0, #12] \n" // load r4 from nlr_buf
+ "ldr r5, [r0, #16] \n" // load r5 from nlr_buf
+ "ldr r6, [r0, #20] \n" // load r6 from nlr_buf
+ "ldr r7, [r0, #24] \n" // load r7 from nlr_buf
+
+ #if !defined(__thumb2__)
+ "ldr r1, [r0, #28] \n" // load r8 from nlr_buf
+ "mov r8, r1 \n"
+ "ldr r1, [r0, #32] \n" // load r9 from nlr_buf
+ "mov r9, r1 \n"
+ "ldr r1, [r0, #36] \n" // load r10 from nlr_buf
+ "mov r10, r1 \n"
+ "ldr r1, [r0, #40] \n" // load r11 from nlr_buf
+ "mov r11, r1 \n"
+ "ldr r1, [r0, #44] \n" // load r13=sp from nlr_buf
+ "mov r13, r1 \n"
+ "ldr r1, [r0, #8] \n" // load lr from nlr_buf
+ "mov lr, r1 \n"
+ #else
+ "ldr r8, [r0, #28] \n" // load r8 from nlr_buf
+ "ldr r9, [r0, #32] \n" // load r9 from nlr_buf
+ "ldr r10, [r0, #36] \n" // load r10 from nlr_buf
+ "ldr r11, [r0, #40] \n" // load r11 from nlr_buf
+ "ldr r13, [r0, #44] \n" // load r13=sp from nlr_buf
+ #if MICROPY_NLR_NUM_REGS == 16
+ "vldr d8, [r0, #48] \n" // load s16-s17 from nlr_buf
+ "vldr d9, [r0, #56] \n" // load s18-s19 from nlr_buf
+ "vldr d10, [r0, #64] \n" // load s20-s21 from nlr_buf
+ #endif
+ "ldr lr, [r0, #8] \n" // load lr from nlr_buf
+ #endif
+ "movs r0, #1 \n" // return 1, non-local return
+ "bx lr \n" // return
+ : // output operands
+ : "r" (top) // input operands
+ : // clobbered registers
+ );
+
+ MP_UNREACHABLE
+}
+
+#endif // MICROPY_NLR_THUMB
diff --git a/circuitpython/py/nlrx64.c b/circuitpython/py/nlrx64.c
new file mode 100644
index 0000000..f7e9260
--- /dev/null
+++ b/circuitpython/py/nlrx64.c
@@ -0,0 +1,114 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if defined(MICROPY_NLR_X64) && MICROPY_NLR_X64
+
+#undef nlr_push
+
+// x86-64 callee-save registers are:
+// rbx, rbp, rsp, r12, r13, r14, r15
+
+__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
+
+unsigned int nlr_push(nlr_buf_t *nlr) {
+ (void)nlr;
+
+ #if MICROPY_NLR_OS_WINDOWS
+
+ __asm volatile (
+ "movq (%rsp), %rax \n" // load return %rip
+ "movq %rax, 16(%rcx) \n" // store %rip into nlr_buf
+ "movq %rbp, 24(%rcx) \n" // store %rbp into nlr_buf
+ "movq %rsp, 32(%rcx) \n" // store %rsp into nlr_buf
+ "movq %rbx, 40(%rcx) \n" // store %rbx into nlr_buf
+ "movq %r12, 48(%rcx) \n" // store %r12 into nlr_buf
+ "movq %r13, 56(%rcx) \n" // store %r13 into nlr_buf
+ "movq %r14, 64(%rcx) \n" // store %r14 into nlr_buf
+ "movq %r15, 72(%rcx) \n" // store %r15 into nlr_buf
+        "movq   %rdi, 80(%rcx)     \n" // store %rdi into nlr_buf
+ "movq %rsi, 88(%rcx) \n" // store %rsi into nlr_buf
+ "jmp nlr_push_tail \n" // do the rest in C
+ );
+
+ #else
+
+ __asm volatile (
+ #if defined(__APPLE__) && defined(__MACH__)
+ "pop %rbp \n" // undo function's prelude
+ #endif
+ "movq (%rsp), %rax \n" // load return %rip
+ "movq %rax, 16(%rdi) \n" // store %rip into nlr_buf
+ "movq %rbp, 24(%rdi) \n" // store %rbp into nlr_buf
+ "movq %rsp, 32(%rdi) \n" // store %rsp into nlr_buf
+ "movq %rbx, 40(%rdi) \n" // store %rbx into nlr_buf
+ "movq %r12, 48(%rdi) \n" // store %r12 into nlr_buf
+ "movq %r13, 56(%rdi) \n" // store %r13 into nlr_buf
+ "movq %r14, 64(%rdi) \n" // store %r14 into nlr_buf
+ "movq %r15, 72(%rdi) \n" // store %r15 into nlr_buf
+ #if defined(__APPLE__) && defined(__MACH__)
+ "jmp _nlr_push_tail \n" // do the rest in C
+ #else
+ "jmp nlr_push_tail \n" // do the rest in C
+ #endif
+ );
+
+ #endif
+
+ return 0; // needed to silence compiler warning
+}
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ __asm volatile (
+ "movq %0, %%rcx \n" // %rcx points to nlr_buf
+ #if MICROPY_NLR_OS_WINDOWS
+ "movq 88(%%rcx), %%rsi \n" // load saved %rsi
+ "movq 80(%%rcx), %%rdi \n" // load saved %rdi
+ #endif
+ "movq 72(%%rcx), %%r15 \n" // load saved %r15
+ "movq 64(%%rcx), %%r14 \n" // load saved %r14
+ "movq 56(%%rcx), %%r13 \n" // load saved %r13
+ "movq 48(%%rcx), %%r12 \n" // load saved %r12
+ "movq 40(%%rcx), %%rbx \n" // load saved %rbx
+ "movq 32(%%rcx), %%rsp \n" // load saved %rsp
+ "movq 24(%%rcx), %%rbp \n" // load saved %rbp
+ "movq 16(%%rcx), %%rax \n" // load saved %rip
+ "movq %%rax, (%%rsp) \n" // store saved %rip to stack
+ "xorq %%rax, %%rax \n" // clear return register
+ "inc %%al \n" // increase to make 1, non-local return
+ "ret \n" // return
+ : // output operands
+ : "r" (top) // input operands
+ : // clobbered registers
+ );
+
+ MP_UNREACHABLE
+}
+
+#endif // MICROPY_NLR_X64
diff --git a/circuitpython/py/nlrx86.c b/circuitpython/py/nlrx86.c
new file mode 100644
index 0000000..0cc8b9e
--- /dev/null
+++ b/circuitpython/py/nlrx86.c
@@ -0,0 +1,104 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if defined(MICROPY_NLR_X86) && MICROPY_NLR_X86
+
+#undef nlr_push
+
+// For reference, x86 callee save regs are:
+// ebx, esi, edi, ebp, esp, eip
+
+#if defined(MICROPY_NLR_OS_WINDOWS) && MICROPY_NLR_OS_WINDOWS
+unsigned int nlr_push_tail(nlr_buf_t *nlr) asm ("nlr_push_tail");
+#else
+__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr);
+#endif
+
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 8
+// Since gcc 8.0 the naked attribute is supported
+#define USE_NAKED (1)
+#define UNDO_PRELUDE (0)
+#elif defined(__ZEPHYR__) || defined(__ANDROID__)
+// Zephyr and Android use a different calling convention by default
+#define USE_NAKED (0)
+#define UNDO_PRELUDE (0)
+#else
+#define USE_NAKED (0)
+#define UNDO_PRELUDE (1)
+#endif
+
+#if USE_NAKED
+__attribute__((naked))
+#endif
+unsigned int nlr_push(nlr_buf_t *nlr) {
+ (void)nlr;
+
+ __asm volatile (
+ #if UNDO_PRELUDE
+ "pop %ebp \n" // undo function's prelude
+ #endif
+ "mov 4(%esp), %edx \n" // load nlr_buf
+ "mov (%esp), %eax \n" // load return %eip
+ "mov %eax, 8(%edx) \n" // store %eip into nlr_buf
+ "mov %ebp, 12(%edx) \n" // store %ebp into nlr_buf
+ "mov %esp, 16(%edx) \n" // store %esp into nlr_buf
+ "mov %ebx, 20(%edx) \n" // store %ebx into nlr_buf
+ "mov %edi, 24(%edx) \n" // store %edi into nlr_buf
+ "mov %esi, 28(%edx) \n" // store %esi into nlr_buf
+ "jmp nlr_push_tail \n" // do the rest in C
+ );
+
+ #if !USE_NAKED
+ return 0; // needed to silence compiler warning
+ #endif
+}
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ __asm volatile (
+ "mov %0, %%edx \n" // %edx points to nlr_buf
+ "mov 28(%%edx), %%esi \n" // load saved %esi
+ "mov 24(%%edx), %%edi \n" // load saved %edi
+ "mov 20(%%edx), %%ebx \n" // load saved %ebx
+ "mov 16(%%edx), %%esp \n" // load saved %esp
+ "mov 12(%%edx), %%ebp \n" // load saved %ebp
+ "mov 8(%%edx), %%eax \n" // load saved %eip
+ "mov %%eax, (%%esp) \n" // store saved %eip to stack
+ "xor %%eax, %%eax \n" // clear return register
+ "inc %%al \n" // increase to make 1, non-local return
+ "ret \n" // return
+ : // output operands
+ : "r" (top) // input operands
+ : // clobbered registers
+ );
+
+ MP_UNREACHABLE
+}
+
+#endif // MICROPY_NLR_X86
diff --git a/circuitpython/py/nlrxtensa.c b/circuitpython/py/nlrxtensa.c
new file mode 100644
index 0000000..5aa767a
--- /dev/null
+++ b/circuitpython/py/nlrxtensa.c
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpstate.h"
+
+#if defined(MICROPY_NLR_XTENSA) && MICROPY_NLR_XTENSA
+
+#undef nlr_push
+
+// Xtensa calling conventions:
+// a0 = return address
+// a1 = stack pointer
+// a2 = first arg, return value
+// a3-a7 = rest of args
+
+unsigned int nlr_push(nlr_buf_t *nlr) {
+
+ __asm volatile (
+ "s32i.n a0, a2, 8 \n" // save regs...
+ "s32i.n a1, a2, 12 \n"
+ "s32i.n a8, a2, 16 \n"
+ "s32i.n a9, a2, 20 \n"
+ "s32i.n a10, a2, 24 \n"
+ "s32i.n a11, a2, 28 \n"
+ "s32i.n a12, a2, 32 \n"
+ "s32i.n a13, a2, 36 \n"
+ "s32i.n a14, a2, 40 \n"
+ "s32i.n a15, a2, 44 \n"
+ "j nlr_push_tail \n" // do the rest in C
+ );
+
+ return 0; // needed to silence compiler warning
+}
+
+NORETURN void nlr_jump(void *val) {
+ MP_NLR_JUMP_HEAD(val, top)
+
+ __asm volatile (
+ "mov.n a2, %0 \n" // a2 points to nlr_buf
+ "l32i.n a0, a2, 8 \n" // restore regs...
+ "l32i.n a1, a2, 12 \n"
+ "l32i.n a8, a2, 16 \n"
+ "l32i.n a9, a2, 20 \n"
+ "l32i.n a10, a2, 24 \n"
+ "l32i.n a11, a2, 28 \n"
+ "l32i.n a12, a2, 32 \n"
+ "l32i.n a13, a2, 36 \n"
+ "l32i.n a14, a2, 40 \n"
+ "l32i.n a15, a2, 44 \n"
+ "movi.n a2, 1 \n" // return 1, non-local return
+ "ret.n \n" // return
+ : // output operands
+ : "r" (top) // input operands
+ : // clobbered registers
+ );
+
+ MP_UNREACHABLE
+}
+
+#endif // MICROPY_NLR_XTENSA
diff --git a/circuitpython/py/obj.c b/circuitpython/py/obj.c
new file mode 100644
index 0000000..cc7f900
--- /dev/null
+++ b/circuitpython/py/obj.c
@@ -0,0 +1,776 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <assert.h>
+
+#include "shared/runtime/interrupt_char.h"
+#include "py/obj.h"
+#include "py/objtype.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/qstr.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+#include "py/stream.h" // for mp_obj_print
+
+#include "supervisor/linker.h"
+#include "supervisor/shared/stack.h"
+#include "supervisor/shared/translate.h"
+
+const mp_obj_type_t *MICROPY_WRAP_MP_OBJ_GET_TYPE(mp_obj_get_type)(mp_const_obj_t o_in) {
+ #if MICROPY_OBJ_IMMEDIATE_OBJS && MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A
+
+ if (mp_obj_is_obj(o_in)) {
+ const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
+ return o->type;
+ } else {
+ static const mp_obj_type_t *const types[] = {
+ NULL, &mp_type_int, &mp_type_str, &mp_type_int,
+ NULL, &mp_type_int, &mp_type_NoneType, &mp_type_int,
+ NULL, &mp_type_int, &mp_type_str, &mp_type_int,
+ NULL, &mp_type_int, &mp_type_bool, &mp_type_int,
+ };
+ return types[(uintptr_t)o_in & 0xf];
+ }
+
+ #elif MICROPY_OBJ_IMMEDIATE_OBJS && MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+
+ if (mp_obj_is_small_int(o_in)) {
+ return &mp_type_int;
+ } else if (mp_obj_is_obj(o_in)) {
+ const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
+ return o->type;
+ #if MICROPY_PY_BUILTINS_FLOAT
+ } else if ((((mp_uint_t)(o_in)) & 0xff800007) != 0x00000006) {
+ return &mp_type_float;
+ #endif
+ } else {
+ static const mp_obj_type_t *const types[] = {
+ &mp_type_str, &mp_type_NoneType, &mp_type_str, &mp_type_bool,
+ };
+ return types[((uintptr_t)o_in >> 3) & 3];
+ }
+
+ #else
+
+ if (mp_obj_is_small_int(o_in)) {
+ return &mp_type_int;
+ } else if (mp_obj_is_qstr(o_in)) {
+ return &mp_type_str;
+ #if MICROPY_PY_BUILTINS_FLOAT && ( \
+ MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D)
+ } else if (mp_obj_is_float(o_in)) {
+ return &mp_type_float;
+ #endif
+ #if MICROPY_OBJ_IMMEDIATE_OBJS
+ } else if (mp_obj_is_immediate_obj(o_in)) {
+ static const mp_obj_type_t *const types[2] = {&mp_type_NoneType, &mp_type_bool};
+ return types[MP_OBJ_IMMEDIATE_OBJ_VALUE(o_in) & 1];
+ #endif
+ } else {
+ const mp_obj_base_t *o = MP_OBJ_TO_PTR(o_in);
+ return o->type;
+ }
+
+ #endif
+}
+
+const mp_obj_full_type_t *mp_obj_get_full_type(mp_const_obj_t o_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(o_in);
+ assert(type->flags & MP_TYPE_FLAG_EXTENDED);
+ return (mp_obj_full_type_t *)type;
+}
+
+const char *mp_obj_get_type_str(mp_const_obj_t o_in) {
+ return qstr_str(mp_obj_get_type_qstr(o_in));
+}
+
+void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ // There can be data structures nested too deep, or just recursive
+ MP_STACK_CHECK();
+ #ifdef RUN_BACKGROUND_TASKS
+ RUN_BACKGROUND_TASKS;
+ #endif
+ #if MICROPY_KBD_EXCEPTION
+ // Stop printing if we've been interrupted.
+ if (mp_hal_is_interrupted()) {
+ return;
+ }
+ #endif
+
+ #ifndef NDEBUG
+ if (o_in == MP_OBJ_NULL) {
+ mp_print_str(print, "(nil)");
+ return;
+ }
+ #endif
+ const mp_obj_type_t *type = mp_obj_get_type(o_in);
+ if (type->print != NULL) {
+ type->print((mp_print_t *)print, o_in, kind);
+ } else {
+ mp_printf(print, "<%q>", type->name);
+ }
+}
+
+void mp_obj_print(mp_obj_t o_in, mp_print_kind_t kind) {
+ mp_obj_print_helper(MP_PYTHON_PRINTER, o_in, kind);
+}
+
+// helper function to print an exception with traceback
+void mp_obj_print_exception_with_limit(const mp_print_t *print, mp_obj_t exc, mp_int_t limit) {
+ if (mp_obj_is_exception_instance(exc) && stack_ok()) {
+ size_t n, *values;
+ mp_obj_exception_get_traceback(exc, &n, &values);
+ if (n > 0) {
+ assert(n % 3 == 0);
+ #if MICROPY_ENABLE_SOURCE_LINE
+ const compressed_string_t *frame = MP_ERROR_TEXT(" File \"%q\", line %d");
+ #else
+ const compressed_string_t *frame = MP_ERROR_TEXT(" File \"%q\"");
+ #endif
+ const compressed_string_t *block_fmt = MP_ERROR_TEXT(", in %q\n");
+
+ // Set traceback formatting
+ // Default: Print full traceback
+ limit = limit * 3;
+ mp_int_t i = n - 3, j;
+            if (limit > 0) {
+                // Print up to `limit` traceback
+                // entries from caller's frame
+ if ((unsigned)limit > n) {
+ limit = n;
+ }
+ limit = n - limit;
+            } else if (limit < 0) {
+                // Print up to `limit` traceback
+                // entries from last
+ if ((unsigned)-limit > n) {
+ limit = -n;
+ }
+ i = 0, limit = limit + 3;
+ }
+
+ // Print the traceback
+ mp_cprintf(print, MP_ERROR_TEXT("Traceback (most recent call last):\n"));
+
+ for (; i >= limit; i -= 3) {
+ j = (i < 0) ? -i : i;
+ #if MICROPY_ENABLE_SOURCE_LINE
+ mp_cprintf(print, frame, values[j], (int)values[j + 1]);
+ #else
+ mp_cprintf(print, frame, values[j]);
+ #endif
+ // The block name can be NULL if it's unknown
+ qstr block = values[j + 2];
+ if (block == MP_QSTRnull) {
+ mp_print_str(print, "\n");
+ } else {
+ mp_cprintf(print, block_fmt, block);
+ }
+ }
+ }
+ }
+ mp_obj_print_helper(print, exc, PRINT_EXC);
+ mp_print_str(print, "\n");
+}
+
+void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc) {
+ mp_obj_print_exception_with_limit(print, exc, 0);
+}
+
+bool PLACE_IN_ITCM(mp_obj_is_true)(mp_obj_t arg) {
+ if (arg == mp_const_false) {
+ return 0;
+ } else if (arg == mp_const_true) {
+ return 1;
+ } else if (arg == mp_const_none) {
+ return 0;
+ } else if (mp_obj_is_small_int(arg)) {
+ if (arg == MP_OBJ_NEW_SMALL_INT(0)) {
+ return 0;
+ } else {
+ return 1;
+ }
+ } else {
+ const mp_obj_type_t *type = mp_obj_get_type(arg);
+ mp_unary_op_fun_t unary_op = mp_type_get_unary_op_slot(type);
+ if (unary_op) {
+ mp_obj_t result = unary_op(MP_UNARY_OP_BOOL, arg);
+ if (result != MP_OBJ_NULL) {
+ return result == mp_const_true;
+ }
+ }
+
+ mp_obj_t len = mp_obj_len_maybe(arg);
+ if (len != MP_OBJ_NULL) {
+ // obj has a length, truth determined if len != 0
+ return len != MP_OBJ_NEW_SMALL_INT(0);
+ } else {
+ // any other obj is true per Python semantics
+ return 1;
+ }
+ }
+}
+
+bool mp_obj_is_callable(mp_obj_t o_in) {
+ const mp_call_fun_t call = mp_type_get_call_slot(mp_obj_get_type(o_in));
+ if (call != mp_obj_instance_call) {
+ return call != NULL;
+ }
+ return mp_obj_instance_is_callable(o_in);
+}
+
+// This function implements the '==' and '!=' operators.
+//
+// From the Python language reference:
+// (https://docs.python.org/3/reference/expressions.html#not-in)
+// "The objects need not have the same type. If both are numbers, they are converted
+// to a common type. Otherwise, the == and != operators always consider objects of
+// different types to be unequal."
+//
+// This means that False==0 and True==1 are true expressions.
+//
+// Furthermore, from the v3.4.2 code for object.c: "Practical amendments: If rich
+// comparison returns NotImplemented, == and != are decided by comparing the object
+// pointer."
+mp_obj_t mp_obj_equal_not_equal(mp_binary_op_t op, mp_obj_t o1, mp_obj_t o2) {
+ mp_obj_t local_true = (op == MP_BINARY_OP_NOT_EQUAL) ? mp_const_false : mp_const_true;
+ mp_obj_t local_false = (op == MP_BINARY_OP_NOT_EQUAL) ? mp_const_true : mp_const_false;
+ int pass_number = 0;
+
+ // Shortcut for very common cases
+ if (o1 == o2 &&
+ (mp_obj_is_small_int(o1) || !(mp_obj_get_type(o1)->flags & MP_TYPE_FLAG_EQ_NOT_REFLEXIVE))) {
+ return local_true;
+ }
+
+ // fast path for strings
+ if (mp_obj_is_str(o1)) {
+ if (mp_obj_is_str(o2)) {
+ // both strings, use special function
+ return mp_obj_str_equal(o1, o2) ? local_true : local_false;
+ #if MICROPY_PY_STR_BYTES_CMP_WARN
+ } else if (mp_obj_is_type(o2, &mp_type_bytes)) {
+ str_bytes_cmp:
+ mp_warning(MP_WARN_CAT(BytesWarning), "Comparison between bytes and str");
+ return local_false;
+ #endif
+ } else {
+ goto skip_one_pass;
+ }
+ #if MICROPY_PY_STR_BYTES_CMP_WARN
+ } else if (mp_obj_is_str(o2) && mp_obj_is_type(o1, &mp_type_bytes)) {
+ // o1 is not a string (else caught above), so the objects are not equal
+ goto str_bytes_cmp;
+ #endif
+ }
+
+ // fast path for small ints
+ if (mp_obj_is_small_int(o1)) {
+ if (mp_obj_is_small_int(o2)) {
+ // both SMALL_INT, and not equal if we get here
+ return local_false;
+ } else {
+ goto skip_one_pass;
+ }
+ }
+
+ // generic type, call binary_op(MP_BINARY_OP_EQUAL)
+ while (pass_number < 2) {
+ const mp_obj_type_t *type = mp_obj_get_type(o1);
+ // If a full equality test is not needed and the other object is a different
+ // type then we don't need to bother trying the comparison.
+ mp_binary_op_fun_t binary_op = mp_type_get_binary_op_slot(type);
+ if (binary_op != NULL &&
+ ((type->flags & MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE) || mp_obj_get_type(o2) == type)) {
+ // CPython is asymmetric: it will try __eq__ if there's no __ne__ but not the
+ // other way around. If the class doesn't need a full test we can skip __ne__.
+ if (op == MP_BINARY_OP_NOT_EQUAL && (type->flags & MP_TYPE_FLAG_EQ_HAS_NEQ_TEST)) {
+ mp_obj_t r = binary_op(MP_BINARY_OP_NOT_EQUAL, o1, o2);
+ if (r != MP_OBJ_NULL) {
+ return r;
+ }
+ }
+
+ // Try calling __eq__.
+ mp_obj_t r = binary_op(MP_BINARY_OP_EQUAL, o1, o2);
+ if (r != MP_OBJ_NULL) {
+ if (op == MP_BINARY_OP_EQUAL) {
+ return r;
+ } else {
+ return mp_obj_is_true(r) ? local_true : local_false;
+ }
+ }
+ }
+
+ skip_one_pass:
+ // Try the other way around if none of the above worked
+ ++pass_number;
+ mp_obj_t temp = o1;
+ o1 = o2;
+ o2 = temp;
+ }
+
+    // equality not implemented, so fall back to pointer comparison
+ return (o1 == o2) ? local_true : local_false;
+}
+
+bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2) {
+ return mp_obj_is_true(mp_obj_equal_not_equal(MP_BINARY_OP_EQUAL, o1, o2));
+}
+
+mp_int_t mp_obj_get_int(mp_const_obj_t arg) {
+ // This function essentially performs implicit type conversion to int
+ // Note that Python does NOT provide implicit type conversion from
+ // float to int in the core expression language, try some_list[1.0].
+ if (arg == mp_const_false) {
+ return 0;
+ } else if (arg == mp_const_true) {
+ return 1;
+ } else if (mp_obj_is_small_int(arg)) {
+ return MP_OBJ_SMALL_INT_VALUE(arg);
+ } else if (mp_obj_is_type(arg, &mp_type_int)) {
+ return mp_obj_int_get_checked(arg);
+ } else {
+ mp_obj_t res = mp_unary_op(MP_UNARY_OP_INT, (mp_obj_t)arg);
+ return mp_obj_int_get_checked(res);
+ }
+}
+
+mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg) {
+ if (mp_obj_is_int(arg)) {
+ return mp_obj_int_get_truncated(arg);
+ } else {
+ return mp_obj_get_int(arg);
+ }
+}
+
+// returns false if arg is not of integral type
+// returns true and sets *value if it is of integral type
+// can throw OverflowError if arg is of integral type, but doesn't fit in a mp_int_t
+bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value) {
+ if (arg == mp_const_false) {
+ *value = 0;
+ } else if (arg == mp_const_true) {
+ *value = 1;
+ } else if (mp_obj_is_small_int(arg)) {
+ *value = MP_OBJ_SMALL_INT_VALUE(arg);
+ } else if (mp_obj_is_type(arg, &mp_type_int)) {
+ *value = mp_obj_int_get_checked(arg);
+ } else {
+ return false;
+ }
+ return true;
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+bool mp_obj_get_float_maybe(mp_obj_t arg, mp_float_t *value) {
+ mp_float_t val;
+
+ if (arg == mp_const_false) {
+ val = 0;
+ } else if (arg == mp_const_true) {
+ val = 1;
+ } else if (mp_obj_is_small_int(arg)) {
+ val = (mp_float_t)MP_OBJ_SMALL_INT_VALUE(arg);
+ #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+ } else if (mp_obj_is_type(arg, &mp_type_int)) {
+ val = mp_obj_int_as_float_impl(arg);
+ #endif
+ } else if (mp_obj_is_float(arg)) {
+ val = mp_obj_float_get(arg);
+ } else {
+ return false;
+ }
+
+ *value = val;
+ return true;
+}
+
+mp_float_t mp_obj_get_float(mp_obj_t arg) {
+ mp_float_t val;
+
+ if (!mp_obj_get_float_maybe(arg, &val)) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("can't convert to %q"), MP_QSTR_float);
+ #else
+ mp_raise_TypeError_varg(
+ MP_ERROR_TEXT("can't convert %q to %q"), mp_obj_get_type_qstr(arg), MP_QSTR_float);
+ #endif
+ }
+
+ return val;
+}
+
+#if MICROPY_PY_BUILTINS_COMPLEX
+bool mp_obj_get_complex_maybe(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
+ if (arg == mp_const_false) {
+ *real = 0;
+ *imag = 0;
+ } else if (arg == mp_const_true) {
+ *real = 1;
+ *imag = 0;
+ } else if (mp_obj_is_small_int(arg)) {
+ *real = (mp_float_t)MP_OBJ_SMALL_INT_VALUE(arg);
+ *imag = 0;
+ #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+ } else if (mp_obj_is_type(arg, &mp_type_int)) {
+ *real = mp_obj_int_as_float_impl(arg);
+ *imag = 0;
+ #endif
+ } else if (mp_obj_is_float(arg)) {
+ *real = mp_obj_float_get(arg);
+ *imag = 0;
+ } else if (mp_obj_is_type(arg, &mp_type_complex)) {
+ mp_obj_complex_get(arg, real, imag);
+ } else {
+ return false;
+ }
+ return true;
+}
+
+void mp_obj_get_complex(mp_obj_t arg, mp_float_t *real, mp_float_t *imag) {
+ if (!mp_obj_get_complex_maybe(arg, real, imag)) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("can't convert to complex"));
+ #else
+ mp_raise_TypeError_varg(
+ MP_ERROR_TEXT("can't convert %s to complex"), mp_obj_get_type_str(arg));
+ #endif
+ }
+}
+#endif
+#endif
+
+// note: returned value in *items may point to the interior of a GC block
+// Expose the underlying item array of a tuple(-compatible) or list object.
+// Raises TypeError for any other type. The returned *items pointer aliases
+// the object's internal storage (may point into a GC block, per the note
+// above) and must not be cached across allocations.
+void mp_obj_get_array(mp_obj_t o, size_t *len, mp_obj_t **items) {
+    if (mp_obj_is_tuple_compatible(o)) {
+        mp_obj_tuple_get(o, len, items);
+    } else if (mp_obj_is_type(o, &mp_type_list)) {
+        mp_obj_list_get(o, len, items);
+    } else {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("expected tuple/list"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("object '%s' isn't a tuple or list"), mp_obj_get_type_str(o));
+    #endif
+    }
+}
+
+// note: returned value in *items may point to the interior of a GC block
+// Like mp_obj_get_array() but additionally requires the tuple/list to have
+// exactly `len` elements, raising ValueError otherwise.
+void mp_obj_get_array_fixed_n(mp_obj_t o, size_t len, mp_obj_t **items) {
+    size_t seq_len;
+    mp_obj_get_array(o, &seq_len, items);
+    if (seq_len != len) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_ValueError(MP_ERROR_TEXT("tuple/list has wrong length"));
+    #else
+        mp_raise_ValueError_varg(
+            MP_ERROR_TEXT("requested length %d but object has length %d"), (int)len, (int)seq_len);
+    #endif
+    }
+}
+
+// is_slice determines whether the index is a slice index
+// Normalise a Python index object into a valid size_t offset for a sequence
+// of length `len`. `type` is only used for error messages. Negative indices
+// count from the end. Slice indices are clamped into [0, len]; plain indices
+// out of range raise IndexError; non-integer indices raise TypeError.
+size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool is_slice) {
+    mp_int_t i;
+    if (mp_obj_is_small_int(index)) {
+        i = MP_OBJ_SMALL_INT_VALUE(index);
+    } else if (!mp_obj_get_int_maybe(index, &i)) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("indices must be integers"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("%q indices must be integers, not %s"),
+            type->name, mp_obj_get_type_str(index));
+    #endif
+    }
+
+    // Negative index counts from the end of the sequence.
+    if (i < 0) {
+        i += len;
+    }
+    if (is_slice) {
+        // Slices never raise: clamp to the valid range [0, len].
+        if (i < 0) {
+            i = 0;
+        } else if ((mp_uint_t)i > len) {
+            i = len;
+        }
+    } else {
+        if (i < 0 || (mp_uint_t)i >= len) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_raise_IndexError(MP_ERROR_TEXT("index out of range"));
+    #else
+            mp_raise_msg_varg(&mp_type_IndexError,
+                MP_ERROR_TEXT("%q index out of range"), type->name);
+    #endif
+        }
+    }
+
+    // By this point 0 <= i <= len and so fits in a size_t
+    return (size_t)i;
+}
+
+// Implementation of builtin id(): return an integer identity for the object.
+// For non-pointer objects (small ints, qstrs, immediates) the raw word is
+// returned as an int; for heap objects the address is encoded as described
+// in the comments below.
+mp_obj_t mp_obj_id(mp_obj_t o_in) {
+    mp_int_t id = (mp_int_t)o_in;
+    if (!mp_obj_is_obj(o_in)) {
+        return mp_obj_new_int(id);
+    } else if (id >= 0) {
+        // Many OSes and CPUs have affinity for putting "user" memories
+        // into low half of address space, and "system" into upper half.
+        // We're going to take advantage of that and return small int
+        // (signed) for such "user" addresses.
+        return MP_OBJ_NEW_SMALL_INT(id);
+    } else {
+        // If that didn't work, well, let's return long int, just as
+        // a (big) positive value, so it will never clash with the range
+        // of small int returned in previous case.
+        return mp_obj_new_int_from_uint((mp_uint_t)id);
+    }
+}
+
+// will raise a TypeError if object has no length
+// Implementation of builtin len(): returns the object's length as an object,
+// raising TypeError (per the note above) when the object has no length.
+mp_obj_t mp_obj_len(mp_obj_t o_in) {
+    mp_obj_t len = mp_obj_len_maybe(o_in);
+    if (len == MP_OBJ_NULL) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("object has no len"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("object of type '%s' has no len()"), mp_obj_get_type_str(o_in));
+    #endif
+    } else {
+        return len;
+    }
+}
+
+// may return MP_OBJ_NULL
+// Return the object's length, fast-pathing non-unicode str and bytes,
+// otherwise dispatching MP_UNARY_OP_LEN via the type's unary_op slot.
+// Returns MP_OBJ_NULL (does not raise) when the type has no length.
+mp_obj_t mp_obj_len_maybe(mp_obj_t o_in) {
+    if (
+        #if !MICROPY_PY_BUILTINS_STR_UNICODE
+        // It's simple - unicode is slow, non-unicode is fast
+        mp_obj_is_str(o_in) ||
+        #endif
+        mp_obj_is_type(o_in, &mp_type_bytes)) {
+        GET_STR_LEN(o_in, l);
+        return MP_OBJ_NEW_SMALL_INT(l);
+    } else {
+        const mp_obj_type_t *type = mp_obj_get_type(o_in);
+        mp_unary_op_fun_t unary_op = mp_type_get_unary_op_slot(type);
+        if (unary_op != NULL) {
+            return unary_op(MP_UNARY_OP_LEN, o_in);
+        } else {
+            return MP_OBJ_NULL;
+        }
+    }
+}
+
+// Dispatch subscripting on base[index]. The `value` argument selects the
+// operation (matching the subscr slot protocol declared in obj.h):
+//   value == MP_OBJ_SENTINEL  -> load (returns the item)
+//   value == MP_OBJ_NULL      -> delete
+//   anything else             -> store `value`
+// If the type has no subscr slot, or the slot returns MP_OBJ_NULL
+// (operation unsupported), an operation-specific TypeError is raised.
+mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value) {
+    const mp_obj_type_t *type = mp_obj_get_type(base);
+    mp_subscr_fun_t subscr = mp_type_get_subscr_slot(type);
+    if (subscr != NULL) {
+        mp_obj_t ret = subscr(base, index, value);
+        // May have called port specific C code. Make sure it didn't mess up the heap.
+        assert_heap_ok();
+        if (ret != MP_OBJ_NULL) {
+            return ret;
+        }
+    }
+    if (value == MP_OBJ_NULL) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("object doesn't support item deletion"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("'%s' object doesn't support item deletion"), mp_obj_get_type_str(base));
+    #endif
+    } else if (value == MP_OBJ_SENTINEL) {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("object isn't subscriptable"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("'%s' object isn't subscriptable"), mp_obj_get_type_str(base));
+    #endif
+    } else {
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_raise_TypeError(MP_ERROR_TEXT("object doesn't support item assignment"));
+    #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("'%s' object doesn't support item assignment"), mp_obj_get_type_str(base));
+    #endif
+    }
+}
+
+// Return input argument. Useful as .getiter for objects which are
+// their own iterators, etc.
+// Identity function: returns its argument unchanged (see note above).
+mp_obj_t mp_identity(mp_obj_t self) {
+    return self;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_identity_obj, mp_identity);
+
+// getiter slot for objects that are their own iterator: returns self,
+// ignoring the caller-supplied iterator buffer.
+mp_obj_t mp_identity_getiter(mp_obj_t self, mp_obj_iter_buf_t *iter_buf) {
+    (void)iter_buf;
+    return self;
+}
+
+// State for the generic index-based iterator created by
+// mp_obj_new_generic_iterator(); sized to fit in an mp_obj_iter_buf_t.
+typedef struct {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext;  // polymorphic-iter protocol: next-item function
+    mp_obj_t obj;         // the sequence being iterated
+    mp_int_t cur;         // current index into obj
+} mp_obj_generic_it_t;
+
+// iternext for the generic iterator: re-queries the sequence's length each
+// step (so it tracks a shrinking/growing sequence) and loads obj[cur] via
+// the type's subscr slot with MP_OBJ_SENTINEL (load semantics).
+// NOTE(review): assumes MP_UNARY_OP_LEN returns a small int here - confirm.
+STATIC mp_obj_t generic_it_iternext(mp_obj_t self_in) {
+    mp_obj_generic_it_t *self = MP_OBJ_TO_PTR(self_in);
+    const mp_obj_full_type_t *type = mp_obj_get_full_type(self->obj);
+    mp_obj_t current_length = type->MP_TYPE_UNARY_OP(MP_UNARY_OP_LEN, self->obj);
+    if (self->cur < MP_OBJ_SMALL_INT_VALUE(current_length)) {
+        mp_obj_t o_out = type->ext[0].subscr(self->obj, MP_OBJ_NEW_SMALL_INT(self->cur), MP_OBJ_SENTINEL);
+        self->cur += 1;
+        return o_out;
+    } else {
+        return MP_OBJ_STOP_ITERATION;
+    }
+}
+
+// Build a generic index-based iterator over `obj` in the caller-supplied
+// iterator buffer (no heap allocation); starts at index 0.
+mp_obj_t mp_obj_new_generic_iterator(mp_obj_t obj, mp_obj_iter_buf_t *iter_buf) {
+    assert(sizeof(mp_obj_generic_it_t) <= sizeof(mp_obj_iter_buf_t));
+    mp_obj_generic_it_t *o = (mp_obj_generic_it_t *)iter_buf;
+    o->base.type = &mp_type_polymorph_iter;
+    o->iternext = generic_it_iternext;
+    o->obj = obj;
+    o->cur = 0;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Try to acquire the buffer protocol on obj, filling in *bufinfo.
+// Returns false if the type has no getbuffer slot or the slot reports
+// failure (non-zero); true on success.
+bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    const mp_obj_type_t *obj_type = mp_obj_get_type(obj);
+    const mp_getbuffer_fun_t getbuffer_fun = mp_type_get_getbuffer_slot(obj_type);
+    if (getbuffer_fun == NULL) {
+        return false;
+    }
+    return getbuffer_fun(obj, bufinfo, flags) == 0;
+}
+
+// Like mp_get_buffer() but raises TypeError instead of returning false.
+void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    if (!mp_get_buffer(obj, bufinfo, flags)) {
+        mp_raise_TypeError(MP_ERROR_TEXT("object with buffer protocol required"));
+    }
+}
+
+// Fallback unary_op slot: supports only MP_UNARY_OP_HASH (hash by object
+// word); returns MP_OBJ_NULL for every other op.
+mp_obj_t mp_generic_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+    switch (op) {
+        case MP_UNARY_OP_HASH:
+            return MP_OBJ_NEW_SMALL_INT((mp_uint_t)o_in);
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Return the type's call slot, or NULL if the type has no extended fields.
+mp_call_fun_t mp_type_get_call_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->ext[0].call;
+}
+
+// Return the type's unary_op slot, or NULL if the type has no extended fields.
+mp_unary_op_fun_t mp_type_get_unary_op_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->ext[0].unary_op;
+}
+
+
+// Return the type's binary_op slot, or NULL if the type has no extended fields.
+mp_binary_op_fun_t mp_type_get_binary_op_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->ext[0].binary_op;
+}
+
+
+// attr lives in the base type struct, so no extended-fields check is needed.
+mp_attr_fun_t mp_type_get_attr_slot(const mp_obj_type_t *type) {
+    return type->attr;
+}
+
+
+// Return the type's subscr slot, or NULL if the type has no extended fields.
+mp_subscr_fun_t mp_type_get_subscr_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->MP_TYPE_SUBSCR;
+}
+
+
+// Return the type's getiter slot, or NULL if the type has no extended fields.
+mp_getiter_fun_t mp_type_get_getiter_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->MP_TYPE_GETITER;
+}
+
+
+// Return the type's iternext slot, or NULL if the type has no extended fields.
+mp_fun_1_t mp_type_get_iternext_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->MP_TYPE_ITERNEXT;
+}
+
+
+// Return the type's getbuffer slot, or NULL if the type has no extended fields.
+mp_getbuffer_fun_t mp_type_get_getbuffer_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->MP_TYPE_GET_BUFFER;
+}
+
+
+// Return the type's protocol slot, or NULL if the type has no extended fields.
+const void *mp_type_get_protocol_slot(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return NULL;
+    }
+    return type->MP_TYPE_PROTOCOL;
+}
+
+
+// parent lives in the base type struct, so no extended-fields check is needed.
+const void *mp_type_get_parent_slot(const mp_obj_type_t *type) {
+    return type->parent;
+}
+
+// Return the in-memory size of a type struct: the full struct when the
+// MP_TYPE_FLAG_EXTENDED flag is set, otherwise the base struct only.
+size_t mp_type_size(const mp_obj_type_t *type) {
+    if (!(type->flags & MP_TYPE_FLAG_EXTENDED)) {
+        return sizeof(mp_obj_type_t);
+    }
+    return sizeof(mp_obj_full_type_t);
+}
diff --git a/circuitpython/py/obj.h b/circuitpython/py/obj.h
new file mode 100644
index 0000000..5f8f71b
--- /dev/null
+++ b/circuitpython/py/obj.h
@@ -0,0 +1,1141 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJ_H
+#define MICROPY_INCLUDED_PY_OBJ_H
+
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/qstr.h"
+#include "py/mpprint.h"
+#include "py/runtime0.h"
+
+#include "supervisor/shared/translate.h"
+
+// This is the definition of the opaque MicroPython object type.
+// All concrete objects have an encoding within this type and the
+// particular encoding is specified by MICROPY_OBJ_REPR.
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+typedef uint64_t mp_obj_t;
+typedef uint64_t mp_const_obj_t;
+#else
+typedef void *mp_obj_t;
+typedef const void *mp_const_obj_t;
+#endif
+
+// This mp_obj_type_t struct is a concrete MicroPython object which holds info
+// about a type. See below for actual definition of the struct.
+typedef struct _mp_obj_type_t mp_obj_type_t;
+typedef struct _mp_obj_full_type_t mp_obj_full_type_t;
+
+// Anything that wants to be a concrete MicroPython object must have mp_obj_base_t
+// as its first member (small ints, qstr objs and inline floats are not concrete).
+struct _mp_obj_base_t {
+ const mp_obj_type_t *type MICROPY_OBJ_BASE_ALIGNMENT;
+};
+typedef struct _mp_obj_base_t mp_obj_base_t;
+
+// These fake objects are used to indicate certain things in arguments or return
+// values, and should only be used when explicitly allowed.
+//
+// - MP_OBJ_NULL : used to indicate the absence of an object, or unsupported operation.
+// - MP_OBJ_STOP_ITERATION : used instead of throwing a StopIteration, for efficiency.
+// - MP_OBJ_SENTINEL : used for various internal purposes where one needs
+// an object which is unique from all other objects, including MP_OBJ_NULL.
+//
+// For debugging purposes they are all different. For non-debug mode, we alias
+// as many as we can to MP_OBJ_NULL because it's cheaper to load/compare 0.
+
+#if MICROPY_DEBUG_MP_OBJ_SENTINELS
+#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void *)0))
+#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void *)4))
+#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void *)8))
+#else
+#define MP_OBJ_NULL (MP_OBJ_FROM_PTR((void *)0))
+#define MP_OBJ_STOP_ITERATION (MP_OBJ_FROM_PTR((void *)0))
+#define MP_OBJ_SENTINEL (MP_OBJ_FROM_PTR((void *)4))
+#endif
+
+// These macros/inline functions operate on objects and depend on the
+// particular object representation. They are used to query, pack and
+// unpack small ints, qstrs and full object pointers.
+
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A
+
+static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 1) != 0;
+}
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1))
+
+static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 7) == 2;
+}
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 2))
+
+static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 7) == 6;
+}
+#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 3)
+#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 3) | 6))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
+#define mp_const_float_pi MP_ROM_PTR(&mp_const_float_pi_obj)
+extern const struct _mp_obj_float_t mp_const_float_e_obj;
+extern const struct _mp_obj_float_t mp_const_float_pi_obj;
+
+#define mp_obj_is_float(o) mp_obj_is_type((o), &mp_type_float)
+mp_float_t mp_obj_float_get(mp_obj_t self_in);
+mp_obj_t mp_obj_new_float(mp_float_t value);
+#endif
+
+static inline bool mp_obj_is_obj(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 3) == 0;
+}
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B
+
+static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 3) == 1;
+}
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 2)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 2) | 1))
+
+static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 7) == 3;
+}
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 3)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 3) | 3))
+
+static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 7) == 7;
+}
+#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 3)
+#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 3) | 7))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e MP_ROM_PTR(&mp_const_float_e_obj)
+#define mp_const_float_pi MP_ROM_PTR(&mp_const_float_pi_obj)
+extern const struct _mp_obj_float_t mp_const_float_e_obj;
+extern const struct _mp_obj_float_t mp_const_float_pi_obj;
+
+#define mp_obj_is_float(o) mp_obj_is_type((o), &mp_type_float)
+mp_float_t mp_obj_float_get(mp_obj_t self_in);
+mp_obj_t mp_obj_new_float(mp_float_t value);
+#endif
+
+static inline bool mp_obj_is_obj(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 1) == 0;
+}
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+
+static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 1) != 0;
+}
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)(o)) >> 1)
+#define MP_OBJ_NEW_SMALL_INT(small_int) ((mp_obj_t)((((mp_uint_t)(small_int)) << 1) | 1))
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#define mp_const_float_e MP_ROM_PTR((mp_obj_t)(((0x402df854 & ~3) | 2) + 0x80800000))
+#define mp_const_float_pi MP_ROM_PTR((mp_obj_t)(((0x40490fdb & ~3) | 2) + 0x80800000))
+
+static inline bool mp_obj_is_float(mp_const_obj_t o) {
+ return (((mp_uint_t)(o)) & 3) == 2 && (((mp_uint_t)(o)) & 0xff800007) != 0x00000006;
+}
+static inline mp_float_t mp_obj_float_get(mp_const_obj_t o) {
+ union {
+ mp_float_t f;
+ mp_uint_t u;
+ } num = {.u = ((mp_uint_t)o - 0x80800000) & ~3};
+ return num.f;
+}
+static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
+ union {
+ mp_float_t f;
+ mp_uint_t u;
+ } num = {.f = f};
+ return (mp_obj_t)(((num.u & ~0x3) | 2) + 0x80800000);
+}
+#endif
+
+static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
+ return (((mp_uint_t)(o)) & 0xff80000f) == 0x00000006;
+}
+#define MP_OBJ_QSTR_VALUE(o) (((mp_uint_t)(o)) >> 4)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)((((mp_uint_t)(qst)) << 4) | 0x00000006))
+
+static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
+ return (((mp_uint_t)(o)) & 0xff80000f) == 0x0000000e;
+}
+#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) (((mp_uint_t)(o)) >> 4)
+#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) ((mp_obj_t)(((val) << 4) | 0xe))
+
+static inline bool mp_obj_is_obj(mp_const_obj_t o) {
+ return (((mp_int_t)(o)) & 3) == 0;
+}
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+
+static inline bool mp_obj_is_small_int(mp_const_obj_t o) {
+ return (((uint64_t)(o)) & 0xffff000000000000) == 0x0001000000000000;
+}
+#define MP_OBJ_SMALL_INT_VALUE(o) (((mp_int_t)((o) << 16)) >> 17)
+#define MP_OBJ_NEW_SMALL_INT(small_int) (((((uint64_t)(small_int)) & 0x7fffffffffff) << 1) | 0x0001000000000001)
+
+static inline bool mp_obj_is_qstr(mp_const_obj_t o) {
+ return (((uint64_t)(o)) & 0xffff000000000000) == 0x0002000000000000;
+}
+#define MP_OBJ_QSTR_VALUE(o) ((((uint32_t)(o)) >> 1) & 0xffffffff)
+#define MP_OBJ_NEW_QSTR(qst) ((mp_obj_t)(((uint64_t)(((uint32_t)(qst)) << 1)) | 0x0002000000000001))
+
+// Immediate objects in repr D carry tag 0x0003 in the top 16 bits with the
+// 2-bit value stored at bit 46 (see MP_OBJ_NEW_IMMEDIATE_OBJ below).
+static inline bool mp_obj_is_immediate_obj(mp_const_obj_t o) {
+    return (((uint64_t)(o)) & 0xffff000000000000) == 0x0003000000000000;
+}
+// The shift must be performed on a 64-bit value: casting to uint32_t first
+// and shifting by 46 is undefined behaviour (shift count >= type width) and
+// would in any case discard the bits holding the value.
+#define MP_OBJ_IMMEDIATE_OBJ_VALUE(o) ((((uint64_t)(o)) >> 46) & 3)
+#define MP_OBJ_NEW_IMMEDIATE_OBJ(val) (((uint64_t)(val) << 46) | 0x0003000000000000)
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+#if MICROPY_FLOAT_IMPL != MICROPY_FLOAT_IMPL_DOUBLE
+#error MICROPY_OBJ_REPR_D requires MICROPY_FLOAT_IMPL_DOUBLE
+#endif
+
+#define mp_const_float_e {((mp_obj_t)((uint64_t)0x4005bf0a8b145769 + 0x8004000000000000))}
+#define mp_const_float_pi {((mp_obj_t)((uint64_t)0x400921fb54442d18 + 0x8004000000000000))}
+
+static inline bool mp_obj_is_float(mp_const_obj_t o) {
+ return ((uint64_t)(o) & 0xfffc000000000000) != 0;
+}
+static inline mp_float_t mp_obj_float_get(mp_const_obj_t o) {
+ union {
+ mp_float_t f;
+ uint64_t r;
+ } num = {.r = o - 0x8004000000000000};
+ return num.f;
+}
+static inline mp_obj_t mp_obj_new_float(mp_float_t f) {
+ union {
+ mp_float_t f;
+ uint64_t r;
+ } num = {.f = f};
+ return num.r + 0x8004000000000000;
+}
+#endif
+
+static inline bool mp_obj_is_obj(mp_const_obj_t o) {
+ return (((uint64_t)(o)) & 0xffff000000000000) == 0x0000000000000000;
+}
+#define MP_OBJ_TO_PTR(o) ((void *)(uintptr_t)(o))
+#define MP_OBJ_FROM_PTR(p) ((mp_obj_t)((uintptr_t)(p)))
+
+// rom object storage needs special handling to widen 32-bit pointer to 64-bits
+typedef union _mp_rom_obj_t { uint64_t u64;
+ struct { const void *lo, *hi;
+ } u32;
+} mp_rom_obj_t;
+#define MP_ROM_INT(i) {MP_OBJ_NEW_SMALL_INT(i)}
+#define MP_ROM_QSTR(q) {MP_OBJ_NEW_QSTR(q)}
+#if MP_ENDIANNESS_LITTLE
+#define MP_ROM_PTR(p) {.u32 = {.lo = (p), .hi = NULL}}
+#else
+#define MP_ROM_PTR(p) {.u32 = {.lo = NULL, .hi = (p)}}
+#endif
+
+#endif
+
+// Macros to convert between mp_obj_t and concrete object types.
+// These are identity operations in MicroPython, but ability to override
+// these operations are provided to experiment with other methods of
+// object representation and memory management.
+
+// Cast mp_obj_t to object pointer
+#ifndef MP_OBJ_TO_PTR
+#define MP_OBJ_TO_PTR(o) ((void *)o)
+#endif
+
+// Cast object pointer to mp_obj_t
+#ifndef MP_OBJ_FROM_PTR
+#define MP_OBJ_FROM_PTR(p) ((mp_obj_t)p)
+#endif
+
+// Macros to create objects that are stored in ROM.
+
+#ifndef MP_ROM_NONE
+#if MICROPY_OBJ_IMMEDIATE_OBJS
+#define MP_ROM_NONE mp_const_none
+#else
+#define MP_ROM_NONE MP_ROM_PTR(&mp_const_none_obj)
+#endif
+#endif
+
+#ifndef MP_ROM_FALSE
+#if MICROPY_OBJ_IMMEDIATE_OBJS
+#define MP_ROM_FALSE mp_const_false
+#define MP_ROM_TRUE mp_const_true
+#else
+#define MP_ROM_FALSE MP_ROM_PTR(&mp_const_false_obj)
+#define MP_ROM_TRUE MP_ROM_PTR(&mp_const_true_obj)
+#endif
+#endif
+
+#ifndef MP_ROM_INT
+typedef mp_const_obj_t mp_rom_obj_t;
+#define MP_ROM_INT(i) MP_OBJ_NEW_SMALL_INT(i)
+#define MP_ROM_QSTR(q) MP_OBJ_NEW_QSTR(q)
+#define MP_ROM_PTR(p) (p)
+/* for testing
+typedef struct _mp_rom_obj_t { mp_const_obj_t o; } mp_rom_obj_t;
+#define MP_ROM_INT(i) {MP_OBJ_NEW_SMALL_INT(i)}
+#define MP_ROM_QSTR(q) {MP_OBJ_NEW_QSTR(q)}
+#define MP_ROM_PTR(p) {.o = p}
+*/
+#endif
+
+// These macros are used to declare and define constant function objects
+// You can put "static" in front of the definitions to make them local
+
+#define MP_DECLARE_CONST_FUN_OBJ_0(obj_name) extern const mp_obj_fun_builtin_fixed_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_1(obj_name) extern const mp_obj_fun_builtin_fixed_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_2(obj_name) extern const mp_obj_fun_builtin_fixed_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_3(obj_name) extern const mp_obj_fun_builtin_fixed_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_VAR(obj_name) extern const mp_obj_fun_builtin_var_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name) extern const mp_obj_fun_builtin_var_t obj_name
+#define MP_DECLARE_CONST_FUN_OBJ_KW(obj_name) extern const mp_obj_fun_builtin_var_t obj_name
+
+#define MP_OBJ_FUN_ARGS_MAX (0xffff) // to set maximum value in n_args_max below
+#define MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, takes_kw) ((uint32_t)((((uint32_t)(n_args_min)) << 17) | (((uint32_t)(n_args_max)) << 1) | ((takes_kw) ? 1 : 0)))
+
+#define MP_DEFINE_CONST_FUN_OBJ_0(obj_name, fun_name) \
+ const mp_obj_fun_builtin_fixed_t obj_name = \
+ {{&mp_type_fun_builtin_0}, .fun._0 = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_1(obj_name, fun_name) \
+ const mp_obj_fun_builtin_fixed_t obj_name = \
+ {{&mp_type_fun_builtin_1}, .fun._1 = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_2(obj_name, fun_name) \
+ const mp_obj_fun_builtin_fixed_t obj_name = \
+ {{&mp_type_fun_builtin_2}, .fun._2 = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_3(obj_name, fun_name) \
+ const mp_obj_fun_builtin_fixed_t obj_name = \
+ {{&mp_type_fun_builtin_3}, .fun._3 = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_VAR(obj_name, n_args_min, fun_name) \
+ const mp_obj_fun_builtin_var_t obj_name = \
+ {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, false), .fun.var = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(obj_name, n_args_min, n_args_max, fun_name) \
+ const mp_obj_fun_builtin_var_t obj_name = \
+ {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, false), .fun.var = fun_name}
+#define MP_DEFINE_CONST_FUN_OBJ_KW(obj_name, n_args_min, fun_name) \
+ const mp_obj_fun_builtin_var_t obj_name = \
+ {{&mp_type_fun_builtin_var}, MP_OBJ_FUN_MAKE_SIG(n_args_min, MP_OBJ_FUN_ARGS_MAX, true), .fun.kw = fun_name}
+
+#define MP_DEFINE_CONST_PROP_GET(obj_name, fun_name) \
+ const mp_obj_fun_builtin_fixed_t fun_name##_obj = {{&mp_type_fun_builtin_1}, .fun._1 = fun_name}; \
+ MP_PROPERTY_GETTER(obj_name, (mp_obj_t)&fun_name##_obj);
+
+// These macros are used to define constant or mutable map/dict objects
+// You can put "static" in front of the definition to make it local
+
+#define MP_DEFINE_CONST_MAP(map_name, table_name) \
+ const mp_map_t map_name = { \
+ .all_keys_are_qstrs = 1, \
+ .is_fixed = 1, \
+ .is_ordered = 1, \
+ .used = MP_ARRAY_SIZE(table_name), \
+ .alloc = MP_ARRAY_SIZE(table_name), \
+ .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)table_name, \
+ }
+
+#define MP_DEFINE_CONST_DICT(dict_name, table_name) \
+ const mp_obj_dict_t dict_name = { \
+ .base = {&mp_type_dict}, \
+ .map = { \
+ .all_keys_are_qstrs = 1, \
+ .is_fixed = 1, \
+ .is_ordered = 1, \
+ .used = MP_ARRAY_SIZE(table_name), \
+ .alloc = MP_ARRAY_SIZE(table_name), \
+ .table = (mp_map_elem_t *)(mp_rom_map_elem_t *)table_name, \
+ }, \
+ }
+
+#define MP_DEFINE_MUTABLE_MAP(map_name, table_name) \
+ mp_map_t map_name = { \
+ .all_keys_are_qstrs = 1, \
+ .is_fixed = 1, \
+ .is_ordered = 1, \
+ .used = MP_ARRAY_SIZE(table_name), \
+ .alloc = MP_ARRAY_SIZE(table_name), \
+ .table = table_name, \
+ }
+
+#define MP_DEFINE_MUTABLE_DICT(dict_name, table_name) \
+ mp_obj_dict_t dict_name = { \
+ .base = {&mp_type_dict}, \
+ .map = { \
+ .all_keys_are_qstrs = 1, \
+ .is_fixed = 1, \
+ .is_ordered = 1, \
+ .used = MP_ARRAY_SIZE(table_name), \
+ .alloc = MP_ARRAY_SIZE(table_name), \
+ .table = table_name, \
+ }, \
+ }
+
+// These macros are used to declare and define constant staticmethod and classmethod objects
+// You can put "static" in front of the definitions to make them local
+
+#define MP_DECLARE_CONST_STATICMETHOD_OBJ(obj_name) extern const mp_rom_obj_static_class_method_t obj_name
+#define MP_DECLARE_CONST_CLASSMETHOD_OBJ(obj_name) extern const mp_rom_obj_static_class_method_t obj_name
+
+#define MP_DEFINE_CONST_STATICMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_staticmethod}, fun_name}
+#define MP_DEFINE_CONST_CLASSMETHOD_OBJ(obj_name, fun_name) const mp_rom_obj_static_class_method_t obj_name = {{&mp_type_classmethod}, fun_name}
+
+// Declare a module as a builtin, processed by makemoduledefs.py
+// param module_name: MP_QSTR_<module name>
+// param obj_module: mp_obj_module_t instance
+// param enabled_define: used as `#if (enabled_define)` around entry
+
+#define MP_REGISTER_MODULE(module_name, obj_module, enabled_define)
+
+// Underlying map/hash table implementation (not dict object or map function)
+
+typedef struct _mp_map_elem_t {
+ mp_obj_t key;
+ mp_obj_t value;
+} mp_map_elem_t;
+
+typedef struct _mp_rom_map_elem_t {
+ mp_rom_obj_t key;
+ mp_rom_obj_t value;
+} mp_rom_map_elem_t;
+
+typedef struct _mp_map_t {
+ size_t all_keys_are_qstrs : 1;
+ size_t is_fixed : 1; // a fixed array that can't be modified; must also be ordered
+ size_t is_ordered : 1; // an ordered array
+ size_t scanning : 1; // true if we're in the middle of scanning linked dictionaries,
+ // e.g., make_dict_long_lived()
+ size_t used : (8 * sizeof(size_t) - 4);
+ size_t alloc;
+ mp_map_elem_t *table;
+} mp_map_t;
+
+// mp_set_lookup requires these constants to have the values they do
+typedef enum _mp_map_lookup_kind_t {
+ MP_MAP_LOOKUP = 0,
+ MP_MAP_LOOKUP_ADD_IF_NOT_FOUND = 1,
+ MP_MAP_LOOKUP_REMOVE_IF_FOUND = 2,
+ MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND = 3, // only valid for mp_set_lookup
+} mp_map_lookup_kind_t;
+
+// A map slot is occupied when its key is neither empty (MP_OBJ_NULL)
+// nor a deleted-entry tombstone (MP_OBJ_SENTINEL).
+static inline bool mp_map_slot_is_filled(const mp_map_t *map, size_t pos) {
+    assert(pos < map->alloc);
+    return (map)->table[pos].key != MP_OBJ_NULL && (map)->table[pos].key != MP_OBJ_SENTINEL;
+}
+
+void mp_map_init(mp_map_t *map, size_t n);
+void mp_map_init_fixed_table(mp_map_t *map, size_t n, const mp_obj_t *table);
+mp_map_t *mp_map_new(size_t n);
+void mp_map_deinit(mp_map_t *map);
+void mp_map_free(mp_map_t *map);
+mp_map_elem_t *mp_map_lookup(mp_map_t *map, mp_obj_t index, mp_map_lookup_kind_t lookup_kind);
+void mp_map_clear(mp_map_t *map);
+void mp_map_dump(mp_map_t *map);
+
+// Underlying set implementation (not set object)
+
+typedef struct _mp_set_t {
+ size_t alloc;
+ size_t used;
+ mp_obj_t *table;
+} mp_set_t;
+
+// A set slot is occupied when it is neither empty (MP_OBJ_NULL)
+// nor a deleted-entry tombstone (MP_OBJ_SENTINEL).
+static inline bool mp_set_slot_is_filled(const mp_set_t *set, size_t pos) {
+    return (set)->table[pos] != MP_OBJ_NULL && (set)->table[pos] != MP_OBJ_SENTINEL;
+}
+
+void mp_set_init(mp_set_t *set, size_t n);
+mp_obj_t mp_set_lookup(mp_set_t *set, mp_obj_t index, mp_map_lookup_kind_t lookup_kind);
+mp_obj_t mp_set_remove_first(mp_set_t *set);
+void mp_set_clear(mp_set_t *set);
+
+// Type definitions for methods
+
+typedef mp_obj_t (*mp_fun_0_t)(void);
+typedef mp_obj_t (*mp_fun_1_t)(mp_obj_t);
+typedef mp_obj_t (*mp_fun_2_t)(mp_obj_t, mp_obj_t);
+typedef mp_obj_t (*mp_fun_3_t)(mp_obj_t, mp_obj_t, mp_obj_t);
+typedef mp_obj_t (*mp_fun_var_t)(size_t n, const mp_obj_t *);
+// mp_fun_kw_t takes mp_map_t* (and not const mp_map_t*) to ease passing
+// this arg to mp_map_lookup().
+typedef mp_obj_t (*mp_fun_kw_t)(size_t n, const mp_obj_t *, mp_map_t *);
+
+// Flags for type behaviour (mp_obj_type_t.flags)
+// If MP_TYPE_FLAG_EQ_NOT_REFLEXIVE is clear then __eq__ is reflexive (A==A returns True).
+// If MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE is clear then the type can't be equal to an
+// instance of any different class that also clears this flag. If this flag is set
+// then the type may check for equality against a different type.
+// If MP_TYPE_FLAG_EQ_HAS_NEQ_TEST is clear then the type only implements the __eq__
+// operator and not the __ne__ operator. If it's set then __ne__ may be implemented.
+// If MP_TYPE_FLAG_BINDS_SELF is set then the type as a method binds self as the first arg.
+// If MP_TYPE_FLAG_BUILTIN_FUN is set then the type is a built-in function type.
+#define MP_TYPE_FLAG_IS_SUBCLASSED (0x0001)
+#define MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS (0x0002)
+#define MP_TYPE_FLAG_EQ_NOT_REFLEXIVE (0x0004)
+#define MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE (0x0008)
+#define MP_TYPE_FLAG_EQ_HAS_NEQ_TEST (0x0010)
+#define MP_TYPE_FLAG_BINDS_SELF (0x0020)
+#define MP_TYPE_FLAG_BUILTIN_FUN (0x0040)
+#define MP_TYPE_FLAG_EXTENDED (0x0080) // contains the 'ext' fields
+
+// Output formats accepted by a type's print slot (mp_print_fun_t).
+typedef enum {
+    PRINT_STR = 0,
+    PRINT_REPR = 1,
+    PRINT_EXC = 2, // Special format for printing exception in unhandled exception message
+    PRINT_JSON = 3,
+    PRINT_RAW = 4, // Special format for printing bytes as an undecorated string
+    PRINT_EXC_SUBCLASS = 0x80, // Internal flag for printing exception subclasses
+} mp_print_kind_t;
+
+typedef struct _mp_obj_iter_buf_t {
+ mp_obj_base_t base;
+ mp_obj_t buf[3];
+} mp_obj_iter_buf_t;
+
+// The number of slots that an mp_obj_iter_buf_t needs on the Python value stack.
+// It's rounded up in case mp_obj_base_t is smaller than mp_obj_t (eg for OBJ_REPR_D).
+#define MP_OBJ_ITER_BUF_NSLOTS ((sizeof(mp_obj_iter_buf_t) + sizeof(mp_obj_t) - 1) / sizeof(mp_obj_t))
+
+struct _mp_buffer_info_t;
+
+typedef void (*mp_print_fun_t)(const mp_print_t *print, mp_obj_t o, mp_print_kind_t kind);
+typedef mp_obj_t (*mp_make_new_fun_t)(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args);
+typedef mp_obj_t (*mp_call_fun_t)(mp_obj_t fun, size_t n_args, size_t n_kw, const mp_obj_t *args);
+typedef mp_obj_t (*mp_unary_op_fun_t)(mp_unary_op_t op, mp_obj_t);
+typedef mp_obj_t (*mp_binary_op_fun_t)(mp_binary_op_t op, mp_obj_t, mp_obj_t);
+typedef void (*mp_attr_fun_t)(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+typedef mp_obj_t (*mp_subscr_fun_t)(mp_obj_t self_in, mp_obj_t index, mp_obj_t value);
+typedef mp_obj_t (*mp_getiter_fun_t)(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf);
+typedef mp_int_t (*mp_getbuffer_fun_t)(mp_obj_t obj, struct _mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
+// Buffer protocol
+typedef struct _mp_buffer_info_t {
+ void *buf; // can be NULL if len == 0
+ size_t len; // in bytes
+ int typecode; // as per binary.h
+} mp_buffer_info_t;
+#define MP_BUFFER_READ (1)
+#define MP_BUFFER_WRITE (2)
+#define MP_BUFFER_RW (MP_BUFFER_READ | MP_BUFFER_WRITE)
+typedef struct _mp_buffer_p_t {
+ mp_getbuffer_fun_t get_buffer;
+} mp_buffer_p_t;
+bool mp_get_buffer(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+void mp_get_buffer_raise(mp_obj_t obj, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
+struct _mp_obj_type_ext {
+    // Extended method slots of a type.  Not every type object carries these
+    // (the base struct ends in a flexible array member), so when a type may
+    // lack them use the mp_type_get_*_slot() accessors declared below rather
+    // than dereferencing ext[] directly.
+
+    // Corresponds to __call__ special method, ie T(...).
+    mp_call_fun_t call;
+
+    // Implements unary and binary operations.
+    // Can return MP_OBJ_NULL if the operation is not supported.
+    mp_unary_op_fun_t unary_op;
+    mp_binary_op_fun_t binary_op;
+
+    // Implements load, store and delete subscripting:
+    //  - value = MP_OBJ_SENTINEL means load
+    //  - value = MP_OBJ_NULL means delete
+    //  - all other values mean store the value
+    // Can return MP_OBJ_NULL if operation not supported.
+    mp_subscr_fun_t subscr;
+
+    // Corresponds to __iter__ special method.
+    // Can use the given mp_obj_iter_buf_t to store iterator object,
+    // otherwise can return a pointer to an object on the heap.
+    mp_getiter_fun_t getiter;
+
+    // Corresponds to __next__ special method.  May return MP_OBJ_STOP_ITERATION
+    // as an optimisation instead of raising StopIteration() with no args.
+    mp_fun_1_t iternext;
+
+    // Implements the buffer protocol if supported by this type.
+    mp_buffer_p_t buffer_p;
+
+    // One of disjoint protocols (interfaces), like mp_stream_p_t, etc.
+    const void *protocol;
+};
+
+struct _mp_obj_type_t {
+    // A type is an object so must start with this entry, which points to mp_type_type.
+    mp_obj_base_t base;
+
+    // Flags associated with this type.
+    uint16_t flags;
+    // The name of this type, a qstr (stored in 16 bits).
+    uint16_t name;
+
+    // A dict mapping qstrs to objects local methods/constants/etc.
+    struct _mp_obj_dict_t *locals_dict;
+
+    // Corresponds to __new__ and __init__ special methods, to make an instance of the type.
+    mp_make_new_fun_t make_new;
+
+    // Corresponds to __repr__ and __str__ special methods.
+    mp_print_fun_t print;
+
+    // Implements load, store and delete attribute.
+    //
+    // dest[0] = MP_OBJ_NULL means load
+    //  return: for fail, do nothing
+    //          for fail but continue lookup in locals_dict, dest[1] = MP_OBJ_SENTINEL
+    //          for attr, dest[0] = value
+    //          for method, dest[0] = method, dest[1] = self
+    //
+    // dest[0,1] = {MP_OBJ_SENTINEL, MP_OBJ_NULL} means delete
+    // dest[0,1] = {MP_OBJ_SENTINEL, object} means store
+    //  return: for fail, do nothing
+    //          for success set dest[0] = MP_OBJ_NULL
+    mp_attr_fun_t attr;
+
+    // A pointer to the parents of this type:
+    //  - 0 parents: pointer is NULL (object is implicitly the single parent)
+    //  - 1 parent: a pointer to the type of that parent
+    //  - 2 or more parents: pointer to a tuple object containing the parent types
+    const void *parent;
+
+    // Optional extended slots (see struct _mp_obj_type_ext above).  This is a
+    // C99 flexible array member, so plain type objects need not allocate it;
+    // initialisers populate it via the MP_TYPE_EXTENDED_FIELDS macro.
+#define MP_TYPE_EXTENDED_FIELDS(...) .ext = {{ __VA_ARGS__ }}
+    struct _mp_obj_type_ext ext[];
+};
+
+// _mp_obj_full_type_t must match _mp_obj_type_t exactly, except that the `ext` field
+// is a 1-element array rather than a flexible array member.
+struct _mp_obj_full_type_t {
+    mp_obj_base_t base;                    // mirrors _mp_obj_type_t.base
+    uint16_t flags;                        // mirrors _mp_obj_type_t.flags
+    uint16_t name;                         // mirrors _mp_obj_type_t.name (a qstr)
+    struct _mp_obj_dict_t *locals_dict;    // mirrors _mp_obj_type_t.locals_dict
+    mp_make_new_fun_t make_new;            // mirrors _mp_obj_type_t.make_new
+    mp_print_fun_t print;                  // mirrors _mp_obj_type_t.print
+    mp_attr_fun_t attr;                    // mirrors _mp_obj_type_t.attr
+    const void *parent;                    // mirrors _mp_obj_type_t.parent
+    struct _mp_obj_type_ext ext[1];        // fixed-size stand-in for the flexible array
+};
+
+
+// If the type object in question is known to have the extended fields, you can
+// refer to type->MP_TYPE_CALL. Otherwise, you have to use mp_type_get_call_slot(type)
+// The same goes for other fields within the extended region.
+#define MP_TYPE_CALL ext[0].call
+#define MP_TYPE_UNARY_OP ext[0].unary_op
+#define MP_TYPE_BINARY_OP ext[0].binary_op
+#define MP_TYPE_SUBSCR ext[0].subscr
+#define MP_TYPE_GETITER ext[0].getiter
+#define MP_TYPE_ITERNEXT ext[0].iternext
+#define MP_TYPE_GET_BUFFER ext[0].buffer_p.get_buffer
+#define MP_TYPE_PROTOCOL ext[0].protocol
+extern mp_call_fun_t mp_type_get_call_slot(const mp_obj_type_t *);
+extern mp_unary_op_fun_t mp_type_get_unary_op_slot(const mp_obj_type_t *);
+extern mp_binary_op_fun_t mp_type_get_binary_op_slot(const mp_obj_type_t *);
+extern mp_subscr_fun_t mp_type_get_subscr_slot(const mp_obj_type_t *);
+extern mp_getiter_fun_t mp_type_get_getiter_slot(const mp_obj_type_t *);
+extern mp_fun_1_t mp_type_get_iternext_slot(const mp_obj_type_t *);
+extern mp_getbuffer_fun_t mp_type_get_getbuffer_slot(const mp_obj_type_t *);
+extern const void *mp_type_get_protocol_slot(const mp_obj_type_t *);
+
+// These fields ended up not being placed in the extended area, but accessors
+// were created for them anyway.
+extern mp_attr_fun_t mp_type_get_attr_slot(const mp_obj_type_t *);
+extern const void *mp_type_get_parent_slot(const mp_obj_type_t *);
+
+// Return the size of a type object, which can be one of two lengths depending whether it has
+// the extended fields or not.
+extern size_t mp_type_size(const mp_obj_type_t *);
+
+// Constant types, globally accessible
+extern const mp_obj_type_t mp_type_type;
+extern const mp_obj_type_t mp_type_object;
+extern const mp_obj_type_t mp_type_NoneType;
+extern const mp_obj_type_t mp_type_bool;
+extern const mp_obj_type_t mp_type_int;
+extern const mp_obj_type_t mp_type_str;
+extern const mp_obj_type_t mp_type_bytes;
+extern const mp_obj_type_t mp_type_bytearray;
+extern const mp_obj_type_t mp_type_memoryview;
+extern const mp_obj_type_t mp_type_float;
+extern const mp_obj_type_t mp_type_complex;
+extern const mp_obj_type_t mp_type_traceback;
+extern const mp_obj_type_t mp_type_tuple;
+extern const mp_obj_type_t mp_type_list;
+extern const mp_obj_type_t mp_type_map; // map (the python builtin, not the dict implementation detail)
+extern const mp_obj_type_t mp_type_enumerate;
+extern const mp_obj_type_t mp_type_filter;
+extern const mp_obj_type_t mp_type_deque;
+extern const mp_obj_type_t mp_type_dict;
+extern const mp_obj_type_t mp_type_ordereddict;
+extern const mp_obj_type_t mp_type_range;
+extern const mp_obj_type_t mp_type_set;
+extern const mp_obj_type_t mp_type_frozenset;
+extern const mp_obj_type_t mp_type_slice;
+extern const mp_obj_type_t mp_type_zip;
+extern const mp_obj_type_t mp_type_array;
+extern const mp_obj_type_t mp_type_super;
+extern const mp_obj_type_t mp_type_gen_wrap;
+extern const mp_obj_type_t mp_type_native_gen_wrap;
+extern const mp_obj_type_t mp_type_gen_instance;
+extern const mp_obj_type_t mp_type_fun_builtin_0;
+extern const mp_obj_type_t mp_type_fun_builtin_1;
+extern const mp_obj_type_t mp_type_fun_builtin_2;
+extern const mp_obj_type_t mp_type_fun_builtin_3;
+extern const mp_obj_type_t mp_type_fun_builtin_var;
+extern const mp_obj_type_t mp_type_fun_bc;
+#if MICROPY_EMIT_NATIVE
+extern const mp_obj_type_t mp_type_fun_native;
+#endif
+extern const mp_obj_type_t mp_type_module;
+extern const mp_obj_type_t mp_type_staticmethod;
+extern const mp_obj_type_t mp_type_classmethod;
+extern const mp_obj_type_t mp_type_property;
+extern const mp_obj_type_t mp_type_stringio;
+extern const mp_obj_type_t mp_type_bytesio;
+extern const mp_obj_type_t mp_type_reversed;
+extern const mp_obj_type_t mp_type_polymorph_iter;
+
+// Exceptions
+extern const mp_obj_type_t mp_type_BaseException;
+extern const mp_obj_type_t mp_type_ArithmeticError;
+extern const mp_obj_type_t mp_type_AssertionError;
+extern const mp_obj_type_t mp_type_AttributeError;
+extern const mp_obj_type_t mp_type_EOFError;
+extern const mp_obj_type_t mp_type_Exception;
+extern const mp_obj_type_t mp_type_GeneratorExit;
+extern const mp_obj_type_t mp_type_ImportError;
+extern const mp_obj_type_t mp_type_IndentationError;
+extern const mp_obj_type_t mp_type_IndexError;
+extern const mp_obj_type_t mp_type_KeyboardInterrupt;
+extern const mp_obj_type_t mp_type_ReloadException;
+extern const mp_obj_type_t mp_type_KeyError;
+extern const mp_obj_type_t mp_type_LookupError;
+extern const mp_obj_type_t mp_type_MemoryError;
+extern const mp_obj_type_t mp_type_MpyError;
+extern const mp_obj_type_t mp_type_NameError;
+extern const mp_obj_type_t mp_type_NotImplementedError;
+extern const mp_obj_type_t mp_type_OSError;
+extern const mp_obj_type_t mp_type_TimeoutError;
+extern const mp_obj_type_t mp_type_ConnectionError;
+extern const mp_obj_type_t mp_type_BrokenPipeError;
+extern const mp_obj_type_t mp_type_OverflowError;
+extern const mp_obj_type_t mp_type_RuntimeError;
+extern const mp_obj_type_t mp_type_StopAsyncIteration;
+extern const mp_obj_type_t mp_type_StopIteration;
+extern const mp_obj_type_t mp_type_SyntaxError;
+extern const mp_obj_type_t mp_type_SystemExit;
+extern const mp_obj_type_t mp_type_TypeError;
+extern const mp_obj_type_t mp_type_UnicodeError;
+extern const mp_obj_type_t mp_type_ValueError;
+extern const mp_obj_type_t mp_type_ViperTypeError;
+extern const mp_obj_type_t mp_type_ZeroDivisionError;
+#if CIRCUITPY_ALARM
+extern const mp_obj_type_t mp_type_DeepSleepRequest;
+#endif
+
+
+// Constant objects, globally accessible: None, False, True
+// These should always be accessed via the below macros.
+#if MICROPY_OBJ_IMMEDIATE_OBJS
+// None is even while False/True are odd so their types can be distinguished with 1 bit.
+#define mp_const_none MP_OBJ_NEW_IMMEDIATE_OBJ(0)
+#define mp_const_false MP_OBJ_NEW_IMMEDIATE_OBJ(1)
+#define mp_const_true MP_OBJ_NEW_IMMEDIATE_OBJ(3)
+#else
+#define mp_const_none (MP_OBJ_FROM_PTR(&mp_const_none_obj))
+#define mp_const_false (MP_OBJ_FROM_PTR(&mp_const_false_obj))
+#define mp_const_true (MP_OBJ_FROM_PTR(&mp_const_true_obj))
+extern const struct _mp_obj_none_t mp_const_none_obj;
+extern const struct _mp_obj_bool_t mp_const_false_obj;
+extern const struct _mp_obj_bool_t mp_const_true_obj;
+#endif
+
+// Constant objects, globally accessible: b'', (), {}, Ellipsis, NotImplemented, GeneratorExit()
+// The below macros are for convenience only.
+#define mp_const_empty_bytes (MP_OBJ_FROM_PTR(&mp_const_empty_bytes_obj))
+#define mp_const_empty_tuple (MP_OBJ_FROM_PTR(&mp_const_empty_tuple_obj))
+#define mp_const_notimplemented (MP_OBJ_FROM_PTR(&mp_const_notimplemented_obj))
+extern const struct _mp_obj_str_t mp_const_empty_bytes_obj;
+extern const struct _mp_obj_tuple_t mp_const_empty_tuple_obj;
+extern const struct _mp_obj_dict_t mp_const_empty_dict_obj;
+extern const struct _mp_obj_traceback_t mp_const_empty_traceback_obj;
+extern const struct _mp_obj_singleton_t mp_const_ellipsis_obj;
+extern const struct _mp_obj_singleton_t mp_const_notimplemented_obj;
+extern const struct _mp_obj_exception_t mp_const_GeneratorExit_obj;
+
+// Fixed empty map. Useful when calling keyword-receiving functions
+// without any keywords from C, etc.
+#define mp_const_empty_map (mp_const_empty_dict_obj.map)
+
+// General API for objects
+
+// These macros are derived from more primitive ones and are used to
+// check for more specific object types.
+// Note: these are kept as macros because inline functions sometimes use much
+// more code space than the equivalent macros, depending on the compiler.
+#define mp_obj_is_type(o, t) (mp_obj_is_obj(o) && (&(((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->name) == &((t)->name))) // this does not work for checking int, str or fun; use below macros for that
+#if MICROPY_OBJ_IMMEDIATE_OBJS
+// bool's are immediates, not real objects, so test for the 2 possible values.
+#define mp_obj_is_bool(o) ((o) == mp_const_false || (o) == mp_const_true)
+#else
+#define mp_obj_is_bool(o) mp_obj_is_type(o, &mp_type_bool)
+#endif
+#define mp_obj_is_int(o) (mp_obj_is_small_int(o) || mp_obj_is_type(o, &mp_type_int))
+#define mp_obj_is_str(o) (mp_obj_is_qstr(o) || mp_obj_is_type(o, &mp_type_str))
+#define mp_obj_is_str_or_bytes(o) (mp_obj_is_qstr(o) || (mp_obj_is_obj(o) && mp_type_get_binary_op_slot(((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type) == mp_obj_str_binary_op))
+#define mp_obj_is_dict_or_ordereddict(o) (mp_obj_is_obj(o) && ((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->make_new == mp_obj_dict_make_new)
+#define mp_obj_is_fun(o) (mp_obj_is_obj(o) && (((mp_obj_base_t *)MP_OBJ_TO_PTR(o))->type->name == MP_QSTR_function))
+// type check is done on getiter method to allow tuple, namedtuple, attrtuple
+#define mp_obj_is_tuple_compatible(o) (mp_type_get_getiter_slot(mp_obj_get_type(o)) == mp_obj_tuple_getiter)
+
+mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict);
+static inline mp_obj_t mp_obj_new_bool(mp_int_t x) {
+    // Map any C truth value onto the canonical True/False singletons.
+    if (x) {
+        return mp_const_true;
+    }
+    return mp_const_false;
+}
+mp_obj_t mp_obj_new_cell(mp_obj_t obj);
+mp_obj_t mp_obj_new_int(mp_int_t value);
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value);
+mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, unsigned int base);
+mp_obj_t mp_obj_new_int_from_ll(long long val); // this must return a multi-precision integer object (or raise an overflow exception)
+mp_obj_t mp_obj_new_int_from_ull(unsigned long long val); // this must return a multi-precision integer object (or raise an overflow exception)
+mp_obj_t mp_obj_new_str(const char *data, size_t len);
+mp_obj_t mp_obj_new_str_via_qstr(const char *data, size_t len);
+mp_obj_t mp_obj_new_str_from_vstr(const mp_obj_type_t *type, vstr_t *vstr);
+mp_obj_t mp_obj_new_bytes(const byte *data, size_t len);
+mp_obj_t mp_obj_new_bytes_of_zeros(size_t len);
+mp_obj_t mp_obj_new_bytearray(size_t n, void *items);
+mp_obj_t mp_obj_new_bytearray_of_zeros(size_t n);
+mp_obj_t mp_obj_new_bytearray_by_ref(size_t n, void *items);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val);
+mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag);
+extern mp_float_t uint64_to_float(uint64_t ui64);
+extern uint64_t float_to_uint64(float f);
+#endif
+mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type);
+mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, size_t n_args, const mp_obj_t *args);
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
+#define mp_obj_new_exception_msg(exc_type, msg) mp_obj_new_exception(exc_type)
+#define mp_obj_new_exception_msg_varg(exc_type, ...) mp_obj_new_exception(exc_type)
+#else
+mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const compressed_string_t *msg);
+mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, ...); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
+#endif
+#ifdef va_start
+mp_obj_t mp_obj_new_exception_msg_vlist(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, va_list ap); // counts args by number of % symbols in fmt, excluding %%; can only handle void* sizes (ie no float/double!)
+#endif
+// Only use this string version from native MPY files with static error strings.
+mp_obj_t mp_obj_new_exception_msg_str(const mp_obj_type_t *exc_type, const char *msg);
+mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table);
+mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table);
+mp_obj_t mp_obj_new_fun_viper(size_t n_args, const void *fun_data, mp_uint_t type_sig);
+mp_obj_t mp_obj_new_fun_asm(size_t n_args, const void *fun_data, mp_uint_t type_sig);
+mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun, bool is_coroutine);
+mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed, const mp_obj_t *closed);
+mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items);
+mp_obj_t mp_obj_new_list(size_t n, mp_obj_t *items);
+mp_obj_t mp_obj_new_list_from_iter(mp_obj_t iterable);
+mp_obj_t mp_obj_new_dict(size_t n_args);
+mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items);
+mp_obj_t mp_obj_new_slice(mp_obj_t start, mp_obj_t stop, mp_obj_t step);
+mp_obj_t mp_obj_new_bound_meth(mp_obj_t meth, mp_obj_t self);
+mp_obj_t mp_obj_new_getitem_iter(mp_obj_t *args, mp_obj_iter_buf_t *iter_buf);
+mp_obj_t mp_obj_new_module(qstr module_name);
+mp_obj_t mp_obj_new_memoryview(byte typecode, size_t nitems, void *items);
+
+const mp_obj_type_t *mp_obj_get_type(mp_const_obj_t o_in);
+const mp_obj_full_type_t *mp_obj_get_full_type(mp_const_obj_t o_in);
+const char *mp_obj_get_type_str(mp_const_obj_t o_in);
+#define mp_obj_get_type_qstr(o_in) (mp_obj_get_type((o_in))->name)
+bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo); // arguments should be type objects
+mp_obj_t mp_obj_cast_to_native_base(mp_obj_t self_in, mp_const_obj_t native_type);
+
+void mp_obj_print_helper(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+void mp_obj_print(mp_obj_t o, mp_print_kind_t kind);
+void mp_obj_print_exception(const mp_print_t *print, mp_obj_t exc);
+void mp_obj_print_exception_with_limit(const mp_print_t *print, mp_obj_t exc, mp_int_t limit);
+
+bool mp_obj_is_true(mp_obj_t arg);
+bool mp_obj_is_callable(mp_obj_t o_in);
+mp_obj_t mp_obj_equal_not_equal(mp_binary_op_t op, mp_obj_t o1, mp_obj_t o2);
+bool mp_obj_equal(mp_obj_t o1, mp_obj_t o2);
+
+// returns true if o is bool, small int or long int
+static inline bool mp_obj_is_integer(mp_const_obj_t o) {
+    // True for bool as well as small and long ints.  Both operands are pure
+    // predicates (macros over tag/type checks), so the evaluation order of the
+    // short-circuit does not affect the result.
+    return mp_obj_is_bool(o) || mp_obj_is_int(o);
+}
+
+mp_int_t mp_obj_get_int(mp_const_obj_t arg);
+mp_int_t mp_obj_get_int_truncated(mp_const_obj_t arg);
+bool mp_obj_get_int_maybe(mp_const_obj_t arg, mp_int_t *value);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_obj_get_float(mp_obj_t self_in);
+bool mp_obj_get_float_maybe(mp_obj_t arg, mp_float_t *value);
+void mp_obj_get_complex(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
+bool mp_obj_get_complex_maybe(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
+#endif
+void mp_obj_get_array(mp_obj_t o, size_t *len, mp_obj_t **items); // *items may point inside a GC block
+void mp_obj_get_array_fixed_n(mp_obj_t o, size_t len, mp_obj_t **items); // *items may point inside a GC block
+size_t mp_get_index(const mp_obj_type_t *type, size_t len, mp_obj_t index, bool is_slice);
+mp_obj_t mp_obj_id(mp_obj_t o_in);
+mp_obj_t mp_obj_len(mp_obj_t o_in);
+mp_obj_t mp_obj_len_maybe(mp_obj_t o_in); // may return MP_OBJ_NULL
+mp_obj_t mp_obj_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t val);
+mp_obj_t mp_generic_unary_op(mp_unary_op_t op, mp_obj_t o_in);
+
+// cell
+mp_obj_t mp_obj_cell_get(mp_obj_t self_in);
+void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj);
+
+// int
+// For long int, returns value truncated to mp_int_t
+mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in);
+// Will raise exception if value doesn't fit into mp_int_t
+mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in);
+// Will raise exception if value is negative or doesn't fit into mp_uint_t
+mp_uint_t mp_obj_int_get_uint_checked(mp_const_obj_t self_in);
+
+// exception
+#define mp_obj_is_native_exception_instance(o) (mp_obj_get_type(o)->make_new == mp_obj_exception_make_new)
+bool mp_obj_is_exception_type(mp_obj_t self_in);
+bool mp_obj_is_exception_instance(mp_obj_t self_in);
+bool mp_obj_exception_match(mp_obj_t exc, mp_const_obj_t exc_type);
+void mp_obj_exception_clear_traceback(mp_obj_t self_in);
+void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qstr block);
+void mp_obj_exception_get_traceback(mp_obj_t self_in, size_t *n, size_t **values);
+mp_obj_t mp_obj_exception_get_traceback_obj(mp_obj_t self_in);
+mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in);
+mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in);
+void mp_init_emergency_exception_buf(void);
+// Fast path for creating an exception instance with exactly one positional
+// argument.  Only valid for native exception types whose make_new slot is
+// mp_obj_exception_make_new (enforced by the assert).
+static inline mp_obj_t mp_obj_new_exception_arg1(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+    return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
+}
+
+// str
+bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2);
+qstr mp_obj_str_get_qstr(mp_obj_t self_in); // use this if you will anyway convert the string to a qstr
+const char *mp_obj_str_get_str(mp_obj_t self_in); // use this only if you need the string to be null terminated
+const char *mp_obj_str_get_data(mp_obj_t self_in, size_t *len);
+mp_obj_t mp_obj_str_intern(mp_obj_t str);
+mp_obj_t mp_obj_str_intern_checked(mp_obj_t obj);
+void mp_str_print_quoted(const mp_print_t *print, const byte *str_data, size_t str_len, bool is_bytes);
+
+#if MICROPY_PY_BUILTINS_FLOAT
+// float
+#if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+// mp_float_t is float: converting to float is a no-op, to double is a
+// lossless widening; constructing from double narrows.
+static inline float mp_obj_get_float_to_f(mp_obj_t o) {
+    return mp_obj_get_float(o);
+}
+
+static inline double mp_obj_get_float_to_d(mp_obj_t o) {
+    return (double)mp_obj_get_float(o);
+}
+
+static inline mp_obj_t mp_obj_new_float_from_f(float o) {
+    return mp_obj_new_float(o);
+}
+
+static inline mp_obj_t mp_obj_new_float_from_d(double o) {
+    // Narrowing cast: may lose precision when mp_float_t is float.
+    return mp_obj_new_float((mp_float_t)o);
+}
+#elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+// mp_float_t is double: converting to float narrows; everything else is exact.
+static inline float mp_obj_get_float_to_f(mp_obj_t o) {
+    return (float)mp_obj_get_float(o);
+}
+
+static inline double mp_obj_get_float_to_d(mp_obj_t o) {
+    return mp_obj_get_float(o);
+}
+
+static inline mp_obj_t mp_obj_new_float_from_f(float o) {
+    return mp_obj_new_float((mp_float_t)o);
+}
+
+static inline mp_obj_t mp_obj_new_float_from_d(double o) {
+    return mp_obj_new_float(o);
+}
+#endif
+#if MICROPY_FLOAT_HIGH_QUALITY_HASH
+mp_int_t mp_float_hash(mp_float_t val);
+#else
+// Cheap fallback hash: truncate toward zero, so all floats sharing an
+// integer part hash equally.
+static inline mp_int_t mp_float_hash(mp_float_t val) {
+    return (mp_int_t)val;
+}
+#endif
+mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t rhs); // can return MP_OBJ_NULL if op not supported
+
+// complex
+void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag);
+mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_float_t lhs_imag, mp_obj_t rhs_in); // can return MP_OBJ_NULL if op not supported
+#else
+#define mp_obj_is_float(o) (false)
+#endif
+
+// tuple
+void mp_obj_tuple_get(mp_obj_t self_in, size_t *len, mp_obj_t **items);
+void mp_obj_tuple_del(mp_obj_t self_in);
+mp_int_t mp_obj_tuple_hash(mp_obj_t self_in);
+
+// list
+mp_obj_t mp_obj_list_clear(mp_obj_t self_in);
+mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg);
+mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value);
+void mp_obj_list_get(mp_obj_t self_in, size_t *len, mp_obj_t **items);
+void mp_obj_list_set_len(mp_obj_t self_in, size_t len);
+void mp_obj_list_store(mp_obj_t self_in, mp_obj_t index, mp_obj_t value);
+mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
+// dict
+typedef struct _mp_obj_dict_t {
+    mp_obj_base_t base;    // object header (type pointer)
+    mp_map_t map;          // underlying hash map holding the entries
+} mp_obj_dict_t;
+mp_obj_t mp_obj_dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_obj_dict_init(mp_obj_dict_t *dict, size_t n_args);
+size_t mp_obj_dict_len(mp_obj_t self_in);
+mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index);
+mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value);
+mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key);
+mp_obj_t mp_obj_dict_copy(mp_obj_t self_in);
+static inline mp_map_t *mp_obj_dict_get_map(mp_obj_t dict) {
+    // Borrow a pointer into the dict object's embedded map; no copy is made,
+    // so the pointer is only valid while the dict itself is alive.
+    mp_obj_dict_t *self = (mp_obj_dict_t *)MP_OBJ_TO_PTR(dict);
+    return &self->map;
+}
+
+// set
+void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item);
+
+// slice indexes resolved to particular sequence
+typedef struct {
+    mp_int_t start;    // resolved start index
+    mp_int_t stop;     // resolved stop index
+    mp_int_t step;     // resolved step (non-zero)
+} mp_bound_slice_t;
+
+// slice
+// Unresolved slice object: start/stop/step are kept as Python objects until
+// resolved against a concrete length (see mp_obj_slice_indices below).
+typedef struct _mp_obj_slice_t {
+    mp_obj_base_t base;
+    mp_obj_t start;
+    mp_obj_t stop;
+    mp_obj_t step;
+} mp_obj_slice_t;
+void mp_obj_slice_indices(mp_obj_t self_in, mp_int_t length, mp_bound_slice_t *result);
+
+// functions
+
+// Builtin function taking a fixed number (0..3) of positional arguments.
+// One union member per arity; which member is active presumably depends on
+// the object's type (mp_type_fun_builtin_0..3 declared above) -- confirm in objfun.c.
+typedef struct _mp_obj_fun_builtin_fixed_t {
+    mp_obj_base_t base;
+    union {
+        mp_fun_0_t _0;
+        mp_fun_1_t _1;
+        mp_fun_2_t _2;
+        mp_fun_3_t _3;
+    } fun;
+} mp_obj_fun_builtin_fixed_t;
+
+// Builtin function taking a variable number of arguments, with or without
+// keyword support (the union distinguishes the two calling conventions).
+typedef struct _mp_obj_fun_builtin_var_t {
+    mp_obj_base_t base;
+    uint32_t sig;    // encoded arity/kwarg info; see MP_OBJ_FUN_MAKE_SIG
+    union {
+        mp_fun_var_t var;
+        mp_fun_kw_t kw;
+    } fun;
+} mp_obj_fun_builtin_var_t;
+
+qstr mp_obj_fun_get_name(mp_const_obj_t fun);
+qstr mp_obj_code_get_name(const byte *code_info);
+
+mp_obj_t mp_identity(mp_obj_t self);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_identity_obj);
+mp_obj_t mp_identity_getiter(mp_obj_t self, mp_obj_iter_buf_t *iter_buf);
+
+// Generic iterator that uses unary op and subscr to iterate over a native type. It will be slower
+// than a custom iterator but applies broadly.
+mp_obj_t mp_obj_new_generic_iterator(mp_obj_t self, mp_obj_iter_buf_t *iter_buf);
+
+// module
+typedef struct _mp_obj_module_t {
+    mp_obj_base_t base;        // object header (type pointer)
+    mp_obj_dict_t *globals;    // dict holding the module's global names
+} mp_obj_module_t;
+mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in);
+void mp_obj_module_set_globals(mp_obj_t self_in, mp_obj_dict_t *globals);
+// check if given module object is a package
+bool mp_obj_is_package(mp_obj_t module);
+
+// staticmethod and classmethod types; defined here so we can make const versions
+// this structure is used for instances of both staticmethod and classmethod
+typedef struct _mp_obj_static_class_method_t {
+    mp_obj_base_t base;    // type distinguishes staticmethod vs classmethod
+    mp_obj_t fun;          // the wrapped callable
+} mp_obj_static_class_method_t;
+// ROM variant of the above, for const tables (fun is a ROM object reference).
+typedef struct _mp_rom_obj_static_class_method_t {
+    mp_obj_base_t base;
+    mp_rom_obj_t fun;
+} mp_rom_obj_static_class_method_t;
+
+// property
+const mp_obj_t *mp_obj_property_get(mp_obj_t self_in, size_t *n_proxy);
+
+// sequence helpers
+
+// Compute the new length of a sequence and ensure an exception is thrown on overflow.
+size_t mp_seq_multiply_len(size_t item_sz, size_t len);
+void mp_seq_multiply(const void *items, size_t item_sz, size_t len, size_t times, void *dest);
+#if MICROPY_PY_BUILTINS_SLICE
+bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice_t *indexes);
+#endif
+#define mp_seq_copy(dest, src, len, item_t) memcpy(dest, src, len * sizeof(item_t))
+#define mp_seq_cat(dest, src1, len1, src2, len2, item_t) { memcpy(dest, src1, (len1) * sizeof(item_t)); memcpy(dest + (len1), src2, (len2) * sizeof(item_t)); }
+bool mp_seq_cmp_bytes(mp_uint_t op, const byte *data1, size_t len1, const byte *data2, size_t len2);
+bool mp_seq_cmp_objs(mp_uint_t op, const mp_obj_t *items1, size_t len1, const mp_obj_t *items2, size_t len2);
+mp_obj_t mp_seq_index_obj(const mp_obj_t *items, size_t len, size_t n_args, const mp_obj_t *args);
+mp_obj_t mp_seq_count_obj(const mp_obj_t *items, size_t len, mp_obj_t value);
+mp_obj_t mp_seq_extract_slice(size_t len, const mp_obj_t *seq, mp_bound_slice_t *indexes);
+
+// Helper to clear stale pointers from allocated, but unused memory, to preclude GC problems
+#define mp_seq_clear(start, len, alloc_len, item_sz) memset((byte *)(start) + (len) * (item_sz), 0, ((alloc_len) - (len)) * (item_sz))
+
+// Note: dest and slice regions may overlap
+// NOTE(review): this macro expands to two statements with no do { ... } while (0)
+// wrapper, so it is not safe as the unbraced body of an if/for/while.
+#define mp_seq_replace_slice_no_grow(dest, dest_len, beg, end, slice, slice_len, item_sz) \
+    memmove(((char *)dest) + (beg) * (item_sz), slice, slice_len * (item_sz)); \
+    memmove(((char *)dest) + (beg + slice_len) * (item_sz), ((char *)dest) + (end) * (item_sz), (dest_len - end) * (item_sz));
+
+// Note: dest and slice regions may overlap
+// NOTE(review): same multi-statement caveat as mp_seq_replace_slice_no_grow.
+#define mp_seq_replace_slice_grow_inplace(dest, dest_len, beg, end, slice, slice_len, len_adj, item_sz) \
+    memmove(((char *)dest) + (beg + slice_len) * (item_sz), ((char *)dest) + (end) * (item_sz), ((dest_len) + (len_adj) - ((beg) + (slice_len))) * (item_sz)); \
+    memmove(((char *)dest) + (beg) * (item_sz), slice, slice_len * (item_sz));
+
+#endif // MICROPY_INCLUDED_PY_OBJ_H
diff --git a/circuitpython/py/objarray.c b/circuitpython/py/objarray.c
new file mode 100644
index 0000000..b71687f
--- /dev/null
+++ b/circuitpython/py/objarray.c
@@ -0,0 +1,887 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <stdint.h>
+
+#include "py/runtime.h"
+#include "py/binary.h"
+#include "py/objstr.h"
+#include "py/objarray.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_ARRAY || MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_BUILTINS_MEMORYVIEW
+
+// About memoryview object: We want to reuse as much code as possible from
+// array, and keep the memoryview object 4 words in size so it fits in 1 GC
+// block. Also, memoryview must keep a pointer to the base of the buffer so
+// that the buffer is not GC'd if the original parent object is no longer
+// around (we are assuming that all memoryview'able objects return a pointer
+// which points to the start of a GC chunk). Given the above constraints we
+// do the following:
+// - typecode high bit is set if the buffer is read-write (else read-only)
+// - free is the offset in elements to the first item in the memoryview
+// - len is the length in elements
+// - items points to the start of the original buffer
+// Note that we don't handle the case where the original buffer might change
+// size due to a resize of the original parent object.
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+#define TYPECODE_MASK (0x7f)
+#define memview_offset free
+#else
+// make (& TYPECODE_MASK) a null operation if memorview not enabled
+#define TYPECODE_MASK (~(size_t)0)
+// memview_offset should not be accessed if memoryview is not enabled,
+// so not defined to catch errors
+#endif
+
+STATIC mp_obj_t array_iterator_new(mp_obj_t array_in, mp_obj_iter_buf_t *iter_buf);
+STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg);
+STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in);
+STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+#if MICROPY_CPYTHON_COMPAT
+STATIC mp_obj_t array_decode(size_t n_args, const mp_obj_t *args);
+#endif
+
+
+/******************************************************************************/
+// array
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Print an array/bytearray in repr form: bytearray(b'...') or
+// array('c', [item, ...]); the element list is omitted when empty.
+STATIC void array_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
+    if (o->typecode == BYTEARRAY_TYPECODE) {
+        // bytearrays print their raw byte content as a quoted bytes literal
+        mp_print_str(print, "bytearray(b");
+        mp_str_print_quoted(print, o->items, o->len, true);
+    } else {
+        mp_printf(print, "array('%c'", o->typecode);
+        if (o->len > 0) {
+            mp_print_str(print, ", [");
+            for (size_t i = 0; i < o->len; i++) {
+                if (i > 0) {
+                    mp_print_str(print, ", ");
+                }
+                // each element is decoded from the packed buffer and printed as repr
+                mp_obj_print_helper(print, mp_binary_get_val_array(o->typecode, o->items, i), PRINT_REPR);
+            }
+            mp_print_str(print, "]");
+        }
+    }
+    mp_print_str(print, ")");
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Allocate a new array object holding n items of the given typecode.
+// The typecode 'x' is explicitly rejected with ValueError.
+STATIC mp_obj_array_t *array_new(char typecode, size_t n) {
+    if (typecode == 'x') {
+        mp_raise_ValueError(MP_ERROR_TEXT("bad typecode"));
+    }
+    // size in bytes of one element of this typecode
+    int item_sz = mp_binary_get_size('@', typecode, NULL);
+    mp_obj_array_t *self = m_new_obj(mp_obj_array_t);
+    #if MICROPY_PY_BUILTINS_BYTEARRAY && MICROPY_PY_ARRAY
+    if (typecode == BYTEARRAY_TYPECODE) {
+        self->base.type = &mp_type_bytearray;
+    } else {
+        self->base.type = &mp_type_array;
+    }
+    #elif MICROPY_PY_BUILTINS_BYTEARRAY
+    self->base.type = &mp_type_bytearray;
+    #else
+    self->base.type = &mp_type_array;
+    #endif
+    self->typecode = typecode;
+    self->free = 0;
+    self->len = n;
+    self->items = m_new(byte, item_sz * self->len);
+    return self;
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Build an array/bytearray of the given typecode from an initializer object.
+// Fast path: raw byte copy when the initializer supports the buffer protocol
+// (any buffer for bytearray; only bytes/bytearray for other typecodes).
+// Slow path: iterate the initializer and pack each item individually.
+STATIC mp_obj_t array_construct(char typecode, mp_obj_t initializer) {
+    // bytearrays can be raw-initialised from anything with the buffer protocol
+    // other arrays can only be raw-initialised from bytes and bytearray objects
+    mp_buffer_info_t bufinfo;
+    if (((MICROPY_PY_BUILTINS_BYTEARRAY
+          && typecode == BYTEARRAY_TYPECODE)
+         || (MICROPY_PY_ARRAY
+             && (mp_obj_is_type(initializer, &mp_type_bytes)
+                 || (MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(initializer, &mp_type_bytearray)))))
+        && mp_get_buffer(initializer, &bufinfo, MP_BUFFER_READ)) {
+        // construct array from raw bytes
+        size_t sz = mp_binary_get_size('@', typecode, NULL);
+        if (bufinfo.len % sz) {
+            mp_raise_ValueError(MP_ERROR_TEXT("bytes length not a multiple of item size"));
+        }
+        size_t len = bufinfo.len / sz;
+        mp_obj_array_t *o = array_new(typecode, len);
+        memcpy(o->items, bufinfo.buf, len * sz);
+        return MP_OBJ_FROM_PTR(o);
+    }
+
+    size_t len;
+    // Try to create array of exact len if initializer len is known
+    mp_obj_t len_in = mp_obj_len_maybe(initializer);
+    if (len_in == MP_OBJ_NULL) {
+        len = 0;
+    } else {
+        // NOTE(review): assumes a known length is always a small int — confirm
+        len = MP_OBJ_SMALL_INT_VALUE(len_in);
+    }
+
+    mp_obj_array_t *array = array_new(typecode, len);
+
+    mp_obj_t iterable = mp_getiter(initializer, NULL);
+    mp_obj_t item;
+    size_t i = 0;
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        if (len == 0) {
+            // length was unknown up-front: grow one item at a time
+            array_append(MP_OBJ_FROM_PTR(array), item);
+        } else {
+            // length known: storage pre-allocated, pack in place
+            mp_binary_set_val_array(typecode, array->items, i++, item);
+        }
+    }
+
+    return MP_OBJ_FROM_PTR(array);
+}
+#endif
+
+#if MICROPY_PY_ARRAY
+// array.array(typecode[, initializer]) constructor.
+STATIC mp_obj_t array_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 1, 2, false);
+
+    // first argument is the single-character typecode string
+    const char *typecode_str = mp_obj_str_get_str(args[0]);
+
+    if (n_args != 1) {
+        // typecode plus an initializer: build from that object
+        return array_construct(*typecode_str, args[1]);
+    }
+    // typecode alone: make an empty array
+    return MP_OBJ_FROM_PTR(array_new(*typecode_str, 0));
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// bytearray([source]) constructor: no args gives an empty buffer, an int
+// gives that many zero bytes, anything else is used as an initializer.
+STATIC mp_obj_t bytearray_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 3, false);
+
+    if (n_args == 0) {
+        // bytearray(): empty bytearray
+        return MP_OBJ_FROM_PTR(array_new(BYTEARRAY_TYPECODE, 0));
+    }
+    if (!mp_obj_is_int(args[0])) {
+        // bytearray(source): construct from the given object
+        return array_construct(BYTEARRAY_TYPECODE, args[0]);
+    }
+    if (n_args > 1) {
+        // an integer length does not combine with further arguments
+        mp_raise_TypeError(MP_ERROR_TEXT("wrong number of arguments"));
+    }
+    // bytearray(n): blank bytearray of n zero bytes
+    mp_uint_t len = mp_obj_get_int(args[0]);
+    mp_obj_array_t *res = array_new(BYTEARRAY_TYPECODE, len);
+    memset(res->items, 0, len);
+    return MP_OBJ_FROM_PTR(res);
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+
+// Create a memoryview of nitems elements of the given typecode over the
+// buffer at items; the element offset (stored in 'free') starts at 0.
+mp_obj_t mp_obj_new_memoryview(byte typecode, size_t nitems, void *items) {
+    mp_obj_array_t *self = m_new_obj(mp_obj_array_t);
+    mp_obj_memoryview_init(self, typecode, 0, nitems, items);
+    return MP_OBJ_FROM_PTR(self);
+}
+
+// memoryview(obj) constructor: wraps any object supporting the buffer
+// protocol without copying its data.
+STATIC mp_obj_t memoryview_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+
+    // TODO possibly allow memoryview constructor to take start/stop so that one
+    // can do memoryview(b, 4, 8) instead of memoryview(b)[4:8] (uses less RAM)
+
+    mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(mp_obj_new_memoryview(bufinfo.typecode,
+        bufinfo.len / mp_binary_get_size('@', bufinfo.typecode, NULL),
+        bufinfo.buf));
+
+    // If the input object is a memoryview then need to point the items of the
+    // new memoryview to the start of the buffer so the GC can trace it.
+    if (mp_obj_get_type(args[0]) == &mp_type_memoryview) {
+        mp_obj_array_t *other = MP_OBJ_TO_PTR(args[0]);
+        self->memview_offset = other->memview_offset;
+        self->items = other->items;
+    }
+
+    // test if the object can be written to
+    if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_RW)) {
+        self->typecode |= MP_OBJ_ARRAY_TYPECODE_FLAG_RW; // indicate writable buffer
+    }
+
+    return MP_OBJ_FROM_PTR(self);
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// memoryview.cast(typecode): reinterpret the same underlying buffer with a
+// new element typecode.  Raises TypeError if the byte length is not a
+// multiple of the new element size.  The R/W flag is carried over.
+STATIC mp_obj_t memoryview_cast(const mp_obj_t self_in, const mp_obj_t typecode_in) {
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+    const char *typecode = mp_obj_str_get_str(typecode_in);
+    size_t element_size = mp_binary_get_size('@', typecode[0], NULL);
+    // total byte length = current element count * current element size
+    // (mask out the R/W flag bit before looking up the element size)
+    size_t bytelen = self->len * mp_binary_get_size('@', self->typecode & ~MP_OBJ_ARRAY_TYPECODE_FLAG_RW, NULL);
+    if (bytelen % element_size != 0) {
+        mp_raise_TypeError(MP_ERROR_TEXT("memoryview: length is not a multiple of itemsize"));
+    }
+    mp_obj_array_t *result = MP_OBJ_TO_PTR(mp_obj_new_memoryview(*typecode, bytelen / element_size, self->items));
+
+    // test if the object can be written to
+    if (self->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW) {
+        result->typecode |= MP_OBJ_ARRAY_TYPECODE_FLAG_RW; // indicate writable buffer
+    }
+    return MP_OBJ_FROM_PTR(result);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(memoryview_cast_obj, memoryview_cast);
+#endif
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE
+// Attribute handler providing the read-only 'itemsize' attribute
+// (element size in bytes for this view's typecode).
+STATIC void memoryview_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // store/delete of attributes is not supported
+        return;
+    }
+    if (attr == MP_QSTR_itemsize) {
+        mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+        dest[0] = MP_OBJ_NEW_SMALL_INT(mp_binary_get_size('@', self->typecode & TYPECODE_MASK, NULL));
+    }
+}
+#endif
+
+#endif
+
+// Unary operators shared by array/bytearray/memoryview: bool() and len().
+// Any other unary op is reported as unsupported.
+STATIC mp_obj_t array_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(o_in);
+    if (op == MP_UNARY_OP_BOOL) {
+        // truthy iff non-empty
+        return mp_obj_new_bool(self->len != 0);
+    }
+    if (op == MP_UNARY_OP_LEN) {
+        return MP_OBJ_NEW_SMALL_INT(self->len);
+    }
+    return MP_OBJ_NULL; // op not supported
+}
+
+// Canonicalise a typecode for buffer comparison: bytearray's own typecode
+// is treated as 'B', and uppercase (unsigned) codes are folded to lowercase
+// with *is_unsigned set.  Lowercase (signed) codes pass through unchanged.
+STATIC int typecode_for_comparison(int typecode, bool *is_unsigned) {
+    int code = (typecode == BYTEARRAY_TYPECODE) ? 'B' : typecode;
+    if (code <= 'Z') {
+        code += 'a' - 'A'; // fold to lowercase
+        *is_unsigned = true;
+    }
+    return code;
+}
+
+// Binary operators for array/bytearray/memoryview.  Returns MP_OBJ_NULL
+// for unsupported op/operand combinations so the runtime can fall back.
+STATIC mp_obj_t array_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_array_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+    switch (op) {
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY: {
+            if (!mp_obj_is_int(rhs_in)) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            // NOTE(review): a negative repeat becomes a huge unsigned value
+            // here — presumably only non-negative counts reach this; confirm
+            mp_uint_t repeat = mp_obj_get_int(rhs_in);
+            bool inplace = (op == MP_BINARY_OP_INPLACE_MULTIPLY);
+            mp_buffer_info_t lhs_bufinfo;
+            array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+            mp_obj_array_t *res;
+            byte *ptr;
+            size_t orig_lhs_bufinfo_len = lhs_bufinfo.len;
+            if (inplace) {
+                // grow lhs's own storage to hold len*repeat elements
+                res = lhs;
+                size_t item_sz = mp_binary_get_size('@', lhs->typecode, NULL);
+                lhs->items = m_renew(byte, lhs->items, (lhs->len + lhs->free) * item_sz, lhs->len * repeat * item_sz);
+                lhs->len = lhs->len * repeat;
+                lhs->free = 0;
+                if (!repeat) {
+                    // *= 0 empties the array; nothing to copy
+                    return MP_OBJ_FROM_PTR(res);
+                }
+                // the first copy of the data is already in place
+                repeat--;
+                ptr = (byte *)res->items + orig_lhs_bufinfo_len;
+            } else {
+                res = array_new(lhs_bufinfo.typecode, lhs->len * repeat);
+                ptr = (byte *)res->items;
+            }
+            if (orig_lhs_bufinfo_len) {
+                // replicate the original byte content 'repeat' more times
+                for (; repeat--; ptr += orig_lhs_bufinfo_len) {
+                    memcpy(ptr, lhs_bufinfo.buf, orig_lhs_bufinfo_len);
+                }
+            }
+            return MP_OBJ_FROM_PTR(res);
+        }
+        case MP_BINARY_OP_ADD: {
+            // allow to add anything that has the buffer protocol (extension to CPython)
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+            array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+            mp_get_buffer_raise(rhs_in, &rhs_bufinfo, MP_BUFFER_READ);
+
+            size_t sz = mp_binary_get_size('@', lhs_bufinfo.typecode, NULL);
+
+            // convert byte count to element count (in case rhs is not multiple of sz)
+            size_t rhs_len = rhs_bufinfo.len / sz;
+
+            // note: lhs->len is element count of lhs, lhs_bufinfo.len is byte count
+            mp_obj_array_t *res = array_new(lhs_bufinfo.typecode, lhs->len + rhs_len);
+            mp_seq_cat((byte *)res->items, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_len * sz, byte);
+            return MP_OBJ_FROM_PTR(res);
+        }
+
+        case MP_BINARY_OP_INPLACE_ADD: {
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            // memoryviews are fixed-size views and cannot be extended
+            if (lhs->base.type == &mp_type_memoryview) {
+                return MP_OBJ_NULL; // op not supported
+            }
+            #endif
+            array_extend(lhs_in, rhs_in);
+            return lhs_in;
+        }
+
+        case MP_BINARY_OP_CONTAINS: {
+            #if MICROPY_PY_BUILTINS_BYTEARRAY
+            // Can search string only in bytearray
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+            if (mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) {
+                if (!mp_obj_is_type(lhs_in, &mp_type_bytearray)) {
+                    return mp_const_false;
+                }
+                array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+                return mp_obj_new_bool(
+                    find_subbytes(lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len, 1) != NULL);
+            }
+            #endif
+
+            // Otherwise, can only look for a scalar numeric value in an array
+            if (mp_obj_is_int(rhs_in) || mp_obj_is_float(rhs_in)) {
+                mp_raise_NotImplementedError(NULL);
+            }
+
+            return mp_const_false;
+        }
+
+        case MP_BINARY_OP_EQUAL:
+        case MP_BINARY_OP_LESS:
+        case MP_BINARY_OP_LESS_EQUAL:
+        case MP_BINARY_OP_MORE:
+        case MP_BINARY_OP_MORE_EQUAL: {
+            mp_buffer_info_t lhs_bufinfo;
+            mp_buffer_info_t rhs_bufinfo;
+            array_get_buffer(lhs_in, &lhs_bufinfo, MP_BUFFER_READ);
+            if (!mp_get_buffer(rhs_in, &rhs_bufinfo, MP_BUFFER_READ)) {
+                return mp_const_false;
+            }
+            // mp_seq_cmp_bytes is used so only compatible representations can be correctly compared.
+            // The type doesn't matter: array/bytearray/str/bytes all have the same buffer layout, so
+            // just check if the typecodes are compatible; for testing equality the types should have the
+            // same code except for signedness, and not be floating point because nan never equals nan.
+            // For > and < the types should be the same and unsigned.
+            // Note that typecode_for_comparison always returns lowercase letters to save code size.
+            // No need for (& TYPECODE_MASK) here: xxx_get_buffer already takes care of that.
+            bool is_unsigned = false;
+            const int lhs_code = typecode_for_comparison(lhs_bufinfo.typecode, &is_unsigned);
+            const int rhs_code = typecode_for_comparison(rhs_bufinfo.typecode, &is_unsigned);
+            if (lhs_code == rhs_code && lhs_code != 'f' && lhs_code != 'd' && (op == MP_BINARY_OP_EQUAL || is_unsigned)) {
+                return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_bufinfo.buf, lhs_bufinfo.len, rhs_bufinfo.buf, rhs_bufinfo.len));
+            }
+            // mp_obj_equal_not_equal treats returning MP_OBJ_NULL as 'fall back to pointer comparison'
+            // for MP_BINARY_OP_EQUAL but that is incompatible with CPython.
+            mp_raise_NotImplementedError(NULL);
+        }
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_ARRAY
+// Append one item to an array/bytearray, growing the backing store in
+// chunks of 8 spare elements when it is full.
+STATIC mp_obj_t array_append(mp_obj_t self_in, mp_obj_t arg) {
+    // self is not a memoryview, so we don't need to use (& TYPECODE_MASK)
+    assert((MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(self_in, &mp_type_bytearray))
+        || (MICROPY_PY_ARRAY && mp_obj_is_type(self_in, &mp_type_array)));
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (self->free == 0) {
+        size_t item_sz = mp_binary_get_size('@', self->typecode, NULL);
+        // TODO: alloc policy
+        self->free = 8;
+        self->items = m_renew(byte, self->items, item_sz * self->len, item_sz * (self->len + self->free));
+        // clear the new spare slots so the GC sees no stale pointers; slot
+        // 'len' is skipped because it is written immediately below
+        mp_seq_clear(self->items, self->len + 1, self->len + self->free, item_sz);
+    }
+    mp_binary_set_val_array(self->typecode, self->items, self->len, arg);
+    // only update length/free if set succeeded
+    self->len++;
+    self->free--;
+    return mp_const_none; // return None, as per CPython
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(array_append_obj, array_append);
+
+// Extend an array/bytearray with the contents of any buffer-protocol
+// object (extension to CPython, which is stricter about the rhs type).
+STATIC mp_obj_t array_extend(mp_obj_t self_in, mp_obj_t arg_in) {
+    // self is not a memoryview, so we don't need to use (& TYPECODE_MASK)
+    assert((MICROPY_PY_BUILTINS_BYTEARRAY && mp_obj_is_type(self_in, &mp_type_bytearray))
+        || (MICROPY_PY_ARRAY && mp_obj_is_type(self_in, &mp_type_array)));
+    mp_obj_array_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // allow to extend by anything that has the buffer protocol (extension to CPython)
+    mp_buffer_info_t arg_bufinfo;
+    mp_get_buffer_raise(arg_in, &arg_bufinfo, MP_BUFFER_READ);
+
+    size_t sz = mp_binary_get_size('@', self->typecode, NULL);
+
+    // convert byte count to element count
+    size_t len = arg_bufinfo.len / sz;
+
+    // make sure we have enough room to extend
+    // TODO: alloc policy; at the moment we go conservative
+    if (self->free < len) {
+        // reallocate to the exact size needed, leaving no spare room
+        self->items = m_renew(byte, self->items, (self->len + self->free) * sz, (self->len + len) * sz);
+        self->free = 0;
+    } else {
+        self->free -= len;
+    }
+
+    // extend
+    mp_seq_copy((byte *)self->items + self->len * sz, arg_bufinfo.buf, len * sz, byte);
+    self->len += len;
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(array_extend_obj, array_extend);
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY && MICROPY_CPYTHON_COMPAT
+// Common implementation of bytearray find/rfind/index/rindex.
+// args: [haystack, needle, start?, end?].  direction is +1 for a forward
+// search, -1 for reverse.  is_index selects ValueError-on-miss (index/
+// rindex) versus returning -1 (find/rfind).
+STATIC mp_obj_t buffer_finder(size_t n_args, const mp_obj_t *args, int direction, bool is_index) {
+    mp_check_self(mp_obj_is_type(args[0], &mp_type_bytearray));
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+
+    mp_buffer_info_t haystack_bufinfo;
+    mp_get_buffer_raise(args[0], &haystack_bufinfo, MP_BUFFER_READ);
+
+    mp_buffer_info_t needle_bufinfo;
+    mp_get_buffer_raise(args[1], &needle_bufinfo, MP_BUFFER_READ);
+
+    // only byte-sized needles make sense for a byte search
+    if (mp_binary_get_size('@', needle_bufinfo.typecode, NULL) != 1) {
+        mp_raise_TypeError(MP_ERROR_TEXT("a bytes-like object is required"));
+    }
+
+    // resolve optional start/end arguments into a [start, end) byte range
+    // (mp_get_index handles negative indices and bounds checking)
+    const byte *start = haystack_bufinfo.buf;
+    const byte *end = ((const byte *)haystack_bufinfo.buf) + haystack_bufinfo.len;
+    if (n_args >= 3 && args[2] != mp_const_none) {
+        start += mp_get_index(self_type, haystack_bufinfo.len, args[2], true);
+    }
+    if (n_args >= 4 && args[3] != mp_const_none) {
+        end = ((const byte *)haystack_bufinfo.buf) + mp_get_index(self_type, haystack_bufinfo.len, args[3], true);
+    }
+
+    const byte *p = NULL;
+    if (end >= start) {
+        // an inverted range (start > end) simply finds nothing
+        p = find_subbytes(start, end - start, needle_bufinfo.buf, needle_bufinfo.len, direction);
+    }
+
+    if (p == NULL) {
+        if (is_index) {
+            mp_raise_ValueError(MP_ERROR_TEXT("substring not found"));
+        } else {
+            return MP_OBJ_NEW_SMALL_INT(-1);
+        }
+    }
+    // return the offset relative to the start of the haystack buffer
+    return MP_OBJ_NEW_SMALL_INT(p - (const byte *)haystack_bufinfo.buf);
+}
+
+// bytearray.find(sub[, start[, end]]) -> index or -1
+STATIC mp_obj_t buffer_find(size_t n_args, const mp_obj_t *args) {
+    return buffer_finder(n_args, args, 1, false);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(buffer_find_obj, 2, 4, buffer_find);
+
+// bytearray.rfind(sub[, start[, end]]) -> index or -1, searching backwards
+STATIC mp_obj_t buffer_rfind(size_t n_args, const mp_obj_t *args) {
+    return buffer_finder(n_args, args, -1, false);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(buffer_rfind_obj, 2, 4, buffer_rfind);
+
+// bytearray.index(sub[, start[, end]]) -> index, raises ValueError on miss
+STATIC mp_obj_t buffer_index(size_t n_args, const mp_obj_t *args) {
+    return buffer_finder(n_args, args, 1, true);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(buffer_index_obj, 2, 4, buffer_index);
+
+// bytearray.rindex(sub[, start[, end]]) -> index, searching backwards,
+// raises ValueError on miss
+STATIC mp_obj_t buffer_rindex(size_t n_args, const mp_obj_t *args) {
+    return buffer_finder(n_args, args, -1, true);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(buffer_rindex_obj, 2, 4, buffer_rindex);
+#endif
+
+// Subscript handler for array/bytearray/memoryview: load (value ==
+// MP_OBJ_SENTINEL), store (other value), or delete (value == MP_OBJ_NULL,
+// unsupported).  Handles both integer indices and step-1 slices.
+STATIC mp_obj_t array_subscr(mp_obj_t self_in, mp_obj_t index_in, mp_obj_t value) {
+    if (value == MP_OBJ_NULL) {
+        // delete item
+        // TODO implement
+        // TODO: confirmed that both bytearray and array.array support
+        // slice deletion
+        return MP_OBJ_NULL; // op not supported
+    } else {
+        mp_obj_array_t *o = MP_OBJ_TO_PTR(self_in);
+        #if MICROPY_PY_BUILTINS_SLICE
+        if (mp_obj_is_type(index_in, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(o->len, index_in, &slice)) {
+                mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
+            }
+            if (value != MP_OBJ_SENTINEL) {
+                #if MICROPY_PY_ARRAY_SLICE_ASSIGN
+                // Assign: determine the source items/length from the rhs
+                size_t src_len;
+                void *src_items;
+                size_t item_sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+                if (mp_obj_is_obj(value) && mp_type_get_subscr_slot(((mp_obj_base_t *)MP_OBJ_TO_PTR(value))->type) == array_subscr) {
+                    // value is array, bytearray or memoryview
+                    mp_obj_array_t *src_slice = MP_OBJ_TO_PTR(value);
+                    if (item_sz != mp_binary_get_size('@', src_slice->typecode & TYPECODE_MASK, NULL)) {
+                    compat_error:
+                        mp_raise_ValueError(MP_ERROR_TEXT("lhs and rhs should be compatible"));
+                    }
+                    src_len = src_slice->len;
+                    src_items = src_slice->items;
+                    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+                    if (mp_obj_is_type(value, &mp_type_memoryview)) {
+                        // memoryview data starts at its element offset
+                        src_items = (uint8_t *)src_items + (src_slice->memview_offset * item_sz);
+                    }
+                    #endif
+                } else if (mp_obj_is_type(value, &mp_type_bytes)) {
+                    if (item_sz != 1) {
+                        goto compat_error;
+                    }
+                    mp_buffer_info_t bufinfo;
+                    mp_get_buffer_raise(value, &bufinfo, MP_BUFFER_READ);
+                    src_len = bufinfo.len;
+                    src_items = bufinfo.buf;
+                } else {
+                    mp_raise_NotImplementedError(MP_ERROR_TEXT("array/bytes required on right side"));
+                }
+
+                // TODO: check src/dst compat
+                // len_adj = how many elements the destination grows (+) or shrinks (-)
+                mp_int_t len_adj = src_len - (slice.stop - slice.start);
+                uint8_t *dest_items = o->items;
+                #if MICROPY_PY_BUILTINS_MEMORYVIEW
+                if (o->base.type == &mp_type_memoryview) {
+                    if (!(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW)) {
+                        // store to read-only memoryview not allowed
+                        return MP_OBJ_NULL;
+                    }
+                    if (len_adj != 0) {
+                        // a view cannot be resized
+                        goto compat_error;
+                    }
+                    dest_items += o->memview_offset * item_sz;
+                }
+                #endif
+                if (len_adj > 0) {
+                    if ((size_t)len_adj > o->free) {
+                        // TODO: alloc policy; at the moment we go conservative
+                        o->items = m_renew(byte, o->items, (o->len + o->free) * item_sz, (o->len + len_adj) * item_sz);
+                        o->free = len_adj;
+                        dest_items = o->items;
+                    }
+                    mp_seq_replace_slice_grow_inplace(dest_items, o->len,
+                        slice.start, slice.stop, src_items, src_len, len_adj, item_sz);
+                } else {
+                    mp_seq_replace_slice_no_grow(dest_items, o->len,
+                        slice.start, slice.stop, src_items, src_len, item_sz);
+                    #if MICROPY_NONSTANDARD_TYPECODES
+                    // Clear "freed" elements at the end of list
+                    // TODO: This is actually only needed for typecode=='O'
+                    mp_seq_clear(dest_items, o->len + len_adj, o->len, item_sz);
+                    #endif
+                    // TODO: alloc policy after shrinking
+                }
+                o->free -= len_adj;
+                o->len += len_adj;
+                return mp_const_none;
+                #else
+                return MP_OBJ_NULL; // op not supported
+                #endif
+            }
+
+            // Slice load: build a new object covering [start, stop)
+            mp_obj_array_t *res;
+            size_t sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+            assert(sz > 0);
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            if (o->base.type == &mp_type_memoryview) {
+                // memoryview slicing creates a new view, not a copy
+                res = m_new_obj(mp_obj_array_t);
+                *res = *o;
+                res->memview_offset += slice.start;
+                res->len = slice.stop - slice.start;
+            } else
+            #endif
+            {
+                res = array_new(o->typecode, slice.stop - slice.start);
+                memcpy(res->items, (uint8_t *)o->items + slice.start * sz, (slice.stop - slice.start) * sz);
+            }
+            return MP_OBJ_FROM_PTR(res);
+        } else
+        #endif
+        {
+            // Integer index: load or store a single element
+            size_t index = mp_get_index(o->base.type, o->len, index_in, false);
+            #if MICROPY_PY_BUILTINS_MEMORYVIEW
+            if (o->base.type == &mp_type_memoryview) {
+                index += o->memview_offset;
+                if (value != MP_OBJ_SENTINEL && !(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW)) {
+                    // store to read-only memoryview
+                    return MP_OBJ_NULL;
+                }
+            }
+            #endif
+            if (value == MP_OBJ_SENTINEL) {
+                // load
+                return mp_binary_get_val_array(o->typecode & TYPECODE_MASK, o->items, index);
+            } else {
+                // store
+                mp_binary_set_val_array(o->typecode & TYPECODE_MASK, o->items, index, value);
+                return mp_const_none;
+            }
+        }
+    }
+}
+
+// Buffer protocol implementation for array/bytearray/memoryview.
+// Fills bufinfo with the raw byte buffer; returns 0 on success, 1 when a
+// write buffer is requested on a read-only memoryview.
+STATIC mp_int_t array_get_buffer(mp_obj_t o_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+    mp_obj_array_t *o = MP_OBJ_TO_PTR(o_in);
+    size_t sz = mp_binary_get_size('@', o->typecode & TYPECODE_MASK, NULL);
+    bufinfo->buf = o->items;
+    bufinfo->len = o->len * sz;
+    bufinfo->typecode = o->typecode & TYPECODE_MASK;
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    if (o->base.type == &mp_type_memoryview) {
+        if (!(o->typecode & MP_OBJ_ARRAY_TYPECODE_FLAG_RW) && (flags & MP_BUFFER_WRITE)) {
+            // read-only memoryview
+            return 1;
+        }
+        // a memoryview's data starts at its element offset into the buffer
+        bufinfo->buf = (uint8_t *)bufinfo->buf + (size_t)o->memview_offset * sz;
+    }
+    #else
+    (void)flags;
+    #endif
+    return 0;
+}
+
+
+#if MICROPY_CPYTHON_COMPAT && MICROPY_PY_BUILTINS_BYTEARRAY
+// Directly lifted from objstr.c
+// bytearray.decode([encoding[, errors]]): delegate to str construction,
+// defaulting the encoding argument to "utf-8" when absent.
+STATIC mp_obj_t array_decode(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t new_args[2];
+    if (n_args == 1) {
+        new_args[0] = args[0];
+        new_args[1] = MP_OBJ_NEW_QSTR(MP_QSTR_utf_hyphen_8);
+        args = new_args;
+        n_args++;
+    }
+    return mp_obj_str_make_new(&mp_type_str, n_args, 0, args);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(array_decode_obj, 1, 3, array_decode);
+#endif
+
+
+#if MICROPY_PY_ARRAY
+// Method table for array.array instances.
+STATIC const mp_rom_map_elem_t array_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&array_append_obj) },
+    { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&array_extend_obj) },
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR_decode), MP_ROM_PTR(&bytes_decode_obj) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(array_locals_dict, array_locals_dict_table);
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// Method table for bytearray instances.
+STATIC const mp_rom_map_elem_t bytearray_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&array_append_obj) },
+    { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&array_extend_obj) },
+
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR_find), MP_ROM_PTR(&buffer_find_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rfind), MP_ROM_PTR(&buffer_rfind_obj) },
+    { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&buffer_index_obj) },
+    { MP_ROM_QSTR(MP_QSTR_rindex), MP_ROM_PTR(&buffer_rindex_obj) },
+
+    { MP_ROM_QSTR(MP_QSTR_decode), MP_ROM_PTR(&array_decode_obj) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(bytearray_locals_dict, bytearray_locals_dict_table);
+#endif
+
+
+#if MICROPY_PY_ARRAY
+// Type object for array.array.
+const mp_obj_type_t mp_type_array = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_array,
+    .print = array_print,
+    .make_new = array_make_new,
+    .locals_dict = (mp_obj_dict_t *)&array_locals_dict,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = array_iterator_new,
+        .unary_op = array_unary_op,
+        .binary_op = array_binary_op,
+        .subscr = array_subscr,
+        .buffer_p = { .get_buffer = array_get_buffer },
+        ),
+};
+#endif
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// Type object for bytearray.
+const mp_obj_type_t mp_type_bytearray = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_bytearray,
+    .print = array_print,
+    .make_new = bytearray_make_new,
+    .locals_dict = (mp_obj_dict_t *)&bytearray_locals_dict,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = array_iterator_new,
+        .unary_op = array_unary_op,
+        .binary_op = array_binary_op,
+        .subscr = array_subscr,
+        .buffer_p = { .get_buffer = array_get_buffer },
+        ),
+};
+#endif
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+
+#if MICROPY_CPYTHON_COMPAT
+// Method table for memoryview instances (only 'cast' is provided).
+STATIC const mp_rom_map_elem_t memoryview_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_cast), MP_ROM_PTR(&memoryview_cast_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(memoryview_locals_dict, memoryview_locals_dict_table);
+#endif
+
+// Type object for memoryview; note it has no .print (default repr is used).
+const mp_obj_type_t mp_type_memoryview = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_memoryview,
+    .make_new = memoryview_make_new,
+    #if MICROPY_CPYTHON_COMPAT
+    .locals_dict = (mp_obj_dict_t *)&memoryview_locals_dict,
+    #endif
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW_ITEMSIZE
+    .attr = memoryview_attr,
+    #endif
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = array_iterator_new,
+        .unary_op = array_unary_op,
+        .binary_op = array_binary_op,
+        .subscr = array_subscr,
+        .buffer_p = { .get_buffer = array_get_buffer },
+        ),
+};
+#endif
+
+/* unused
+size_t mp_obj_array_len(mp_obj_t self_in) {
+ return ((mp_obj_array_t *)self_in)->len;
+}
+*/
+
+#if MICROPY_PY_BUILTINS_BYTEARRAY
+// Create a bytearray of length n, copying n bytes from items.
+mp_obj_t mp_obj_new_bytearray(size_t n, void *items) {
+    mp_obj_array_t *o = array_new(BYTEARRAY_TYPECODE, n);
+    memcpy(o->items, items, n);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Create a bytearray of length n filled with zero bytes.
+mp_obj_t mp_obj_new_bytearray_of_zeros(size_t n) {
+    mp_obj_array_t *o = array_new(BYTEARRAY_TYPECODE, n);
+    memset(o->items, 0, n);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Create bytearray which references specified memory area
+// (no copy is made; the caller retains ownership of the memory)
+mp_obj_t mp_obj_new_bytearray_by_ref(size_t n, void *items) {
+    mp_obj_array_t *o = m_new_obj(mp_obj_array_t);
+    o->base.type = &mp_type_bytearray;
+    o->typecode = BYTEARRAY_TYPECODE;
+    o->free = 0;
+    o->len = n;
+    o->items = items;
+    return MP_OBJ_FROM_PTR(o);
+}
+#endif
+
+/******************************************************************************/
+// array iterator
+
+// Iterator state for array/bytearray/memoryview iteration.
+typedef struct _mp_obj_array_it_t {
+    mp_obj_base_t base;
+    mp_obj_array_t *array; // object being iterated
+    size_t offset;         // element offset into the buffer (memoryview only)
+    size_t cur;            // next element index to yield
+} mp_obj_array_it_t;
+
+// Yield the next element, or MP_OBJ_STOP_ITERATION when exhausted.
+STATIC mp_obj_t array_it_iternext(mp_obj_t self_in) {
+    mp_obj_array_it_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->cur < self->array->len) {
+        return mp_binary_get_val_array(self->array->typecode & TYPECODE_MASK, self->array->items, self->offset + self->cur++);
+    } else {
+        return MP_OBJ_STOP_ITERATION;
+    }
+}
+
+// Iterator type shared by all three container types.
+STATIC const mp_obj_type_t mp_type_array_it = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_iterator,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = mp_identity_getiter,
+        .iternext = array_it_iternext,
+        ),
+};
+
+// Build an iterator in the caller-provided iter_buf (no heap allocation;
+// the assert checks the iterator state fits in the generic buffer).
+STATIC mp_obj_t array_iterator_new(mp_obj_t array_in, mp_obj_iter_buf_t *iter_buf) {
+    assert(sizeof(mp_obj_array_t) <= sizeof(mp_obj_iter_buf_t));
+    mp_obj_array_t *array = MP_OBJ_TO_PTR(array_in);
+    mp_obj_array_it_t *o = (mp_obj_array_it_t *)iter_buf;
+    o->base.type = &mp_type_array_it;
+    o->array = array;
+    o->offset = 0;
+    o->cur = 0;
+    #if MICROPY_PY_BUILTINS_MEMORYVIEW
+    if (array->base.type == &mp_type_memoryview) {
+        // a memoryview iterates starting from its element offset
+        o->offset = array->memview_offset;
+    }
+    #endif
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_PY_ARRAY || MICROPY_PY_BUILTINS_BYTEARRAY || MICROPY_PY_BUILTINS_MEMORYVIEW
diff --git a/circuitpython/py/objarray.h b/circuitpython/py/objarray.h
new file mode 100644
index 0000000..a1bf6ab
--- /dev/null
+++ b/circuitpython/py/objarray.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJARRAY_H
+#define MICROPY_INCLUDED_PY_OBJARRAY_H
+
+#include "py/obj.h"
+
+// Used only for memoryview types, set in "typecode" to indicate a writable memoryview
+#define MP_OBJ_ARRAY_TYPECODE_FLAG_RW (0x80)
+
+// This structure is used for all of bytearray, array.array, memoryview
+// objects. Note that memoryview has different meaning for some fields,
+// see comment at the beginning of objarray.c.
+typedef struct _mp_obj_array_t {
+    mp_obj_base_t base;
+    size_t typecode : 8;
+    // free is number of unused elements after len used elements
+    // alloc size = len + free
+    // But for memoryview, 'free' is reused as offset (in elements) into the
+    // parent object. (Union is not used to not go into a complication of
+    // union-of-bitfields with different toolchains). See comments in
+    // objarray.c.
+    size_t free : (8 * sizeof(size_t) - 8);
+    size_t len; // in elements
+    void *items;
+} mp_obj_array_t;
+
+#if MICROPY_PY_BUILTINS_MEMORYVIEW
+// Initialise an already-allocated mp_obj_array_t as a memoryview over
+// `items`; `offset` is stored in the 'free' bitfield (see note above).
+static inline void mp_obj_memoryview_init(mp_obj_array_t *self, size_t typecode, size_t offset, size_t len, void *items) {
+    self->base.type = &mp_type_memoryview;
+    self->typecode = typecode;
+    self->free = offset;
+    self->len = len;
+    self->items = items;
+}
+#endif
+
+#endif // MICROPY_INCLUDED_PY_OBJARRAY_H
diff --git a/circuitpython/py/objattrtuple.c b/circuitpython/py/objattrtuple.c
new file mode 100644
index 0000000..7b664dd
--- /dev/null
+++ b/circuitpython/py/objattrtuple.c
@@ -0,0 +1,98 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/objtuple.h"
+
+#if MICROPY_PY_ATTRTUPLE || MICROPY_PY_COLLECTIONS
+
+// this helper function is used by collections.namedtuple
+#if !MICROPY_PY_COLLECTIONS
+STATIC
+#endif
+// Print a tuple as "(name1=value1, name2=value2, ...)" using the given
+// qstr field names (one name per element of o).
+void mp_obj_attrtuple_print_helper(const mp_print_t *print, const qstr *fields, mp_obj_tuple_t *o) {
+    mp_print_str(print, "(");
+    for (size_t i = 0; i < o->len; i++) {
+        if (i > 0) {
+            mp_print_str(print, ", ");
+        }
+        mp_printf(print, "%q=", fields[i]);
+        mp_obj_print_helper(print, o->items[i], PRINT_REPR);
+    }
+    mp_print_str(print, ")");
+}
+
+#endif
+
+#if MICROPY_PY_ATTRTUPLE
+
+STATIC void mp_obj_attrtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_tuple_t *o = MP_OBJ_TO_PTR(o_in);
+    // the field-name array pointer is stashed one slot past the last item
+    const qstr *fields = (const qstr *)MP_OBJ_TO_PTR(o->items[o->len]);
+    mp_obj_attrtuple_print_helper(print, fields, o);
+}
+
+// Attribute handler: load-only (attrtuples are immutable).  Looks the
+// attribute name up in the field array and returns the matching item;
+// a miss leaves dest untouched so the caller raises AttributeError.
+STATIC void mp_obj_attrtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] == MP_OBJ_NULL) {
+        // load attribute
+        mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+        size_t len = self->len;
+        const qstr *fields = (const qstr *)MP_OBJ_TO_PTR(self->items[len]);
+        for (size_t i = 0; i < len; i++) {
+            if (fields[i] == attr) {
+                dest[0] = self->items[i];
+                return;
+            }
+        }
+    }
+}
+
+// Create an attrtuple with n items; `fields` must point to a long-lived
+// array of n qstrs -- its pointer is stored in the extra slot items[n].
+mp_obj_t mp_obj_new_attrtuple(const qstr *fields, size_t n, const mp_obj_t *items) {
+    mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n + 1);
+    o->base.type = &mp_type_attrtuple;
+    o->len = n;
+    for (size_t i = 0; i < n; i++) {
+        o->items[i] = items[i];
+    }
+    o->items[n] = MP_OBJ_FROM_PTR(fields);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+const mp_obj_type_t mp_type_attrtuple = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_tuple, // reuse tuple to save on a qstr
+    .print = mp_obj_attrtuple_print,
+    .attr = mp_obj_attrtuple_attr,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = mp_obj_tuple_unary_op,
+        .binary_op = mp_obj_tuple_binary_op,
+        .subscr = mp_obj_tuple_subscr,
+        .getiter = mp_obj_tuple_getiter,
+    ),
+};
+
+#endif // MICROPY_PY_ATTRTUPLE
diff --git a/circuitpython/py/objbool.c b/circuitpython/py/objbool.c
new file mode 100644
index 0000000..13d10ff
--- /dev/null
+++ b/circuitpython/py/objbool.c
@@ -0,0 +1,102 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_OBJ_IMMEDIATE_OBJS
+
+// With immediate objects, True/False are tagged constants: just compare.
+#define BOOL_VALUE(o) ((o) == mp_const_false ? 0 : 1)
+
+#else
+
+// Boxed representation: dereference the heap object to read the value.
+#define BOOL_VALUE(o) (((mp_obj_bool_t *)MP_OBJ_TO_PTR(o))->value)
+
+typedef struct _mp_obj_bool_t {
+    mp_obj_base_t base;
+    bool value;
+} mp_obj_bool_t;
+
+#endif
+
+// Print "True"/"False", or lowercase "true"/"false" for JSON output.
+STATIC void bool_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    bool value = BOOL_VALUE(self_in);
+    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+        if (value) {
+            mp_print_str(print, "true");
+        } else {
+            mp_print_str(print, "false");
+        }
+    } else {
+        if (value) {
+            mp_print_str(print, "True");
+        } else {
+            mp_print_str(print, "False");
+        }
+    }
+}
+
+// bool() -> False; bool(x) -> truth value of x.
+STATIC mp_obj_t bool_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    if (n_args == 0) {
+        return mp_const_false;
+    } else {
+        return mp_obj_new_bool(mp_obj_is_true(args[0]));
+    }
+}
+
+// Unary ops are delegated to the small int 0/1; len() is unsupported.
+STATIC mp_obj_t bool_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+    if (op == MP_UNARY_OP_LEN) {
+        return MP_OBJ_NULL;
+    }
+    bool value = BOOL_VALUE(o_in);
+    return mp_unary_op(op, MP_OBJ_NEW_SMALL_INT(value));
+}
+
+// Binary ops are delegated to the small int 0/1 (so True + 1 == 2, etc).
+STATIC mp_obj_t bool_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    bool value = BOOL_VALUE(lhs_in);
+    return mp_binary_op(op, MP_OBJ_NEW_SMALL_INT(value), rhs_in);
+}
+
+const mp_obj_type_t mp_type_bool = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED, // can match all numeric types
+    .name = MP_QSTR_bool,
+    .print = bool_print,
+    .make_new = bool_make_new,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = bool_unary_op,
+        .binary_op = bool_binary_op,
+    ),
+};
+
+#if !MICROPY_OBJ_IMMEDIATE_OBJS
+const mp_obj_bool_t mp_const_false_obj = {{&mp_type_bool}, false};
+const mp_obj_bool_t mp_const_true_obj = {{&mp_type_bool}, true};
+#endif
diff --git a/circuitpython/py/objboundmeth.c b/circuitpython/py/objboundmeth.c
new file mode 100644
index 0000000..c460626
--- /dev/null
+++ b/circuitpython/py/objboundmeth.c
@@ -0,0 +1,119 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+// A bound method pairs a callable with the instance it was loaded from;
+// calling it re-inserts `self` as the first positional argument.
+typedef struct _mp_obj_bound_meth_t {
+    mp_obj_base_t base;
+    mp_obj_t meth;
+    mp_obj_t self;
+} mp_obj_bound_meth_t;
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+STATIC void bound_meth_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_bound_meth_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<bound_method %p ", o);
+    mp_obj_print_helper(print, o->self, PRINT_REPR);
+    mp_print_str(print, ".");
+    mp_obj_print_helper(print, o->meth, PRINT_REPR);
+    mp_print_str(print, ">");
+}
+#endif
+
+// Call `meth` with `self` prepended to the given args.  `args` holds
+// n_args positional values followed by n_kw (name, value) pairs, hence
+// the n_args + 2 * n_kw sizing below.
+mp_obj_t mp_call_method_self_n_kw(mp_obj_t meth, mp_obj_t self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    // need to insert self before all other args and then call meth
+    size_t n_total = n_args + 2 * n_kw;
+    mp_obj_t *args2 = NULL;
+    #if MICROPY_ENABLE_PYSTACK
+    args2 = mp_pystack_alloc(sizeof(mp_obj_t) * (1 + n_total));
+    #else
+    mp_obj_t *free_args2 = NULL;
+    if (n_total > 4) {
+        // try to use heap to allocate temporary args array
+        args2 = m_new_maybe(mp_obj_t, 1 + n_total);
+        free_args2 = args2;
+    }
+    if (args2 == NULL) {
+        // (fallback to) use stack to allocate temporary args array
+        args2 = alloca(sizeof(mp_obj_t) * (1 + n_total));
+    }
+    #endif
+    args2[0] = self;
+    memcpy(args2 + 1, args, n_total * sizeof(mp_obj_t));
+    mp_obj_t res = mp_call_function_n_kw(meth, n_args + 1, n_kw, args2);
+    #if MICROPY_ENABLE_PYSTACK
+    mp_pystack_free(args2);
+    #else
+    if (free_args2 != NULL) {
+        m_del(mp_obj_t, free_args2, 1 + n_total);
+    }
+    #endif
+    return res;
+}
+
+STATIC mp_obj_t bound_meth_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in);
+    return mp_call_method_self_n_kw(self->meth, self->self, n_args, n_kw, args);
+}
+
+#if MICROPY_PY_FUNCTION_ATTRS
+// Attribute loads (e.g. __name__) are delegated to the wrapped method.
+STATIC void bound_meth_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    // Delegate the load to the method object
+    mp_obj_bound_meth_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_load_method_maybe(self->meth, attr, dest);
+}
+#endif
+
+STATIC const mp_obj_type_t mp_type_bound_meth = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_bound_method,
+    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = bound_meth_print,
+    #endif
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = bound_meth_call,
+    ),
+    #if MICROPY_PY_FUNCTION_ATTRS
+    .attr = bound_meth_attr,
+    #endif
+};
+
+// Create a bound method object pairing meth with self.
+mp_obj_t mp_obj_new_bound_meth(mp_obj_t meth, mp_obj_t self) {
+    mp_obj_bound_meth_t *o = m_new_obj(mp_obj_bound_meth_t);
+    o->base.type = &mp_type_bound_meth;
+    o->meth = meth;
+    o->self = self;
+    return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objcell.c b/circuitpython/py/objcell.c
new file mode 100644
index 0000000..2e15e68
--- /dev/null
+++ b/circuitpython/py/objcell.c
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/obj.h"
+
+// A cell is a heap box holding a single value so that it can be read and
+// written through the get/set functions below from more than one place
+// (presumably the closed-over variable slots of closures -- see objclosure.c).
+typedef struct _mp_obj_cell_t {
+    mp_obj_base_t base;
+    mp_obj_t obj;
+} mp_obj_cell_t;
+
+// Return the value currently stored in the cell.
+mp_obj_t mp_obj_cell_get(mp_obj_t self_in) {
+    mp_obj_cell_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->obj;
+}
+
+// Store a new value in the cell.
+void mp_obj_cell_set(mp_obj_t self_in, mp_obj_t obj) {
+    mp_obj_cell_t *self = MP_OBJ_TO_PTR(self_in);
+    self->obj = obj;
+}
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+STATIC void cell_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_cell_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<cell %p ", o->obj);
+    if (o->obj == MP_OBJ_NULL) {
+        // empty cell: no value has been stored yet
+        mp_print_str(print, "(nil)");
+    } else {
+        mp_obj_print_helper(print, o->obj, PRINT_REPR);
+    }
+    mp_print_str(print, ">");
+}
+#endif
+
+STATIC const mp_obj_type_t mp_type_cell = {
+    { &mp_type_type },
+    .name = MP_QSTR_, // cell representation is just value in < >
+    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = cell_print,
+    #endif
+};
+
+// Create a new cell holding obj.
+mp_obj_t mp_obj_new_cell(mp_obj_t obj) {
+    mp_obj_cell_t *o = m_new_obj(mp_obj_cell_t);
+    o->base.type = &mp_type_cell;
+    o->obj = obj;
+    return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objclosure.c b/circuitpython/py/objclosure.c
new file mode 100644
index 0000000..f5dbb70
--- /dev/null
+++ b/circuitpython/py/objclosure.c
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+// A closure: a function plus its closed-over values, which are passed to
+// the function as extra leading arguments when it is called.
+typedef struct _mp_obj_closure_t {
+    mp_obj_base_t base;
+    mp_obj_t fun;
+    size_t n_closed;
+    mp_obj_t closed[]; // flexible array of n_closed closed-over values
+} mp_obj_closure_t;
+
+// Call the wrapped function with the closed-over values prepended to the
+// caller's args (n_args positional then n_kw name/value pairs).
+STATIC mp_obj_t closure_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_closure_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // need to concatenate closed-over-vars and args
+
+    size_t n_total = self->n_closed + n_args + 2 * n_kw;
+    if (n_total <= 5) {
+        // use stack to allocate temporary args array
+        mp_obj_t args2[5];
+        memcpy(args2, self->closed, self->n_closed * sizeof(mp_obj_t));
+        memcpy(args2 + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+        return mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, args2);
+    } else {
+        // use heap to allocate temporary args array
+        mp_obj_t *args2 = m_new(mp_obj_t, n_total);
+        memcpy(args2, self->closed, self->n_closed * sizeof(mp_obj_t));
+        memcpy(args2 + self->n_closed, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+        mp_obj_t res = mp_call_function_n_kw(self->fun, self->n_closed + n_args, n_kw, args2);
+        m_del(mp_obj_t, args2, n_total);
+        return res;
+    }
+}
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+STATIC void closure_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_closure_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_print_str(print, "<closure ");
+    mp_obj_print_helper(print, o->fun, PRINT_REPR);
+    // NOTE(review): "%u" is fed an (int)-cast argument; same width, but
+    // strictly the cast should match the unsigned specifier -- confirm upstream.
+    mp_printf(print, " at %p, n_closed=%u ", o, (int)o->n_closed);
+    for (size_t i = 0; i < o->n_closed; i++) {
+        if (o->closed[i] == MP_OBJ_NULL) {
+            mp_print_str(print, "(nil)");
+        } else {
+            mp_obj_print_helper(print, o->closed[i], PRINT_REPR);
+        }
+        mp_print_str(print, " ");
+    }
+    mp_print_str(print, ">");
+}
+#endif
+
+const mp_obj_type_t mp_type_closure = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_closure,
+    #if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_DETAILED
+    .print = closure_print,
+    #endif
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = closure_call,
+    )
+};
+
+// Create a closure over fun, copying the n_closed_over values into the
+// object's trailing array.
+mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed_over, const mp_obj_t *closed) {
+    mp_obj_closure_t *o = m_new_obj_var(mp_obj_closure_t, mp_obj_t, n_closed_over);
+    o->base.type = &mp_type_closure;
+    o->fun = fun;
+    o->n_closed = n_closed_over;
+    memcpy(o->closed, closed, n_closed_over * sizeof(mp_obj_t));
+    return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objcomplex.c b/circuitpython/py/objcomplex.c
new file mode 100644
index 0000000..b37b059
--- /dev/null
+++ b/circuitpython/py/objcomplex.c
@@ -0,0 +1,273 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/parsenum.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_COMPLEX
+
+#include <math.h>
+#include "py/formatfloat.h"
+
+// Exact float comparisons below (e.g. rhs_imag == 0) are intentional.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+
+typedef struct _mp_obj_complex_t {
+    mp_obj_base_t base;
+    mp_float_t real;
+    mp_float_t imag;
+} mp_obj_complex_t;
+
+// Print as "Ij" when the real part is zero, else "(R+Ij)".
+STATIC void complex_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_complex_t *o = MP_OBJ_TO_PTR(o_in);
+    #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+    char buf[16];
+    #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+    const int precision = 6;
+    #else
+    const int precision = 7;
+    #endif
+    #else
+    char buf[32];
+    const int precision = 16;
+    #endif
+    if (o->real == 0) {
+        mp_format_float(o->imag, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "%sj", buf);
+    } else {
+        mp_format_float(o->real, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "(%s", buf);
+        if (o->imag >= 0 || isnan(o->imag)) {
+            // explicit '+' sign (negative values bring their own '-')
+            mp_print_str(print, "+");
+        }
+        mp_format_float(o->imag, buf, sizeof(buf), 'g', precision, '\0');
+        mp_printf(print, "%sj)", buf);
+    }
+}
+
+// complex(), complex(x), complex(str), or complex(re, im); the two-arg
+// form computes re + im*1j where either argument may itself be complex.
+STATIC mp_obj_t complex_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 2, false);
+
+    switch (n_args) {
+        case 0:
+            return mp_obj_new_complex(0, 0);
+
+        case 1:
+            if (mp_obj_is_str(args[0])) {
+                // a string, parse it
+                size_t l;
+                const char *s = mp_obj_str_get_data(args[0], &l);
+                return mp_parse_num_decimal(s, l, true, true, NULL);
+            } else if (mp_obj_is_type(args[0], &mp_type_complex)) {
+                // a complex, just return it
+                return args[0];
+            } else {
+                // something else, try to cast it to a complex
+                return mp_obj_new_complex(mp_obj_get_float(args[0]), 0);
+            }
+
+        case 2:
+        default: {
+            mp_float_t real, imag;
+            if (mp_obj_is_type(args[0], &mp_type_complex)) {
+                mp_obj_complex_get(args[0], &real, &imag);
+            } else {
+                real = mp_obj_get_float(args[0]);
+                imag = 0;
+            }
+            if (mp_obj_is_type(args[1], &mp_type_complex)) {
+                // args[1]*1j == -args[1].imag + args[1].real*1j
+                mp_float_t real2, imag2;
+                mp_obj_complex_get(args[1], &real2, &imag2);
+                real -= imag2;
+                imag += real2;
+            } else {
+                imag += mp_obj_get_float(args[1]);
+            }
+            return mp_obj_new_complex(real, imag);
+        }
+    }
+}
+
+STATIC mp_obj_t complex_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+    mp_obj_complex_t *o = MP_OBJ_TO_PTR(o_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            return mp_obj_new_bool(o->real != 0 || o->imag != 0);
+        case MP_UNARY_OP_HASH:
+            return MP_OBJ_NEW_SMALL_INT(mp_float_hash(o->real) ^ mp_float_hash(o->imag));
+        case MP_UNARY_OP_POSITIVE:
+            return o_in;
+        case MP_UNARY_OP_NEGATIVE:
+            return mp_obj_new_complex(-o->real, -o->imag);
+        case MP_UNARY_OP_ABS:
+            // abs(z) is the modulus sqrt(re^2 + im^2)
+            return mp_obj_new_float(MICROPY_FLOAT_C_FUN(sqrt)(o->real * o->real + o->imag * o->imag));
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+STATIC mp_obj_t complex_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_complex_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+    return mp_obj_complex_binary_op(op, lhs->real, lhs->imag, rhs_in);
+}
+
+// Load-only attributes: .real and .imag.
+STATIC void complex_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    mp_obj_complex_t *self = MP_OBJ_TO_PTR(self_in);
+    if (attr == MP_QSTR_real) {
+        dest[0] = mp_obj_new_float(self->real);
+    } else if (attr == MP_QSTR_imag) {
+        dest[0] = mp_obj_new_float(self->imag);
+    }
+}
+
+const mp_obj_type_t mp_type_complex = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EQ_NOT_REFLEXIVE | MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_complex,
+    .print = complex_print,
+    .make_new = complex_make_new,
+    .attr = complex_attr,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = complex_unary_op,
+        .binary_op = complex_binary_op,
+    ),
+};
+
+mp_obj_t mp_obj_new_complex(mp_float_t real, mp_float_t imag) {
+    mp_obj_complex_t *o = m_new_obj(mp_obj_complex_t);
+    o->base.type = &mp_type_complex;
+    o->real = real;
+    o->imag = imag;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Extract real/imag parts; self_in must already be a complex object.
+void mp_obj_complex_get(mp_obj_t self_in, mp_float_t *real, mp_float_t *imag) {
+    assert(mp_obj_is_type(self_in, &mp_type_complex));
+    mp_obj_complex_t *self = MP_OBJ_TO_PTR(self_in);
+    *real = self->real;
+    *imag = self->imag;
+}
+
+// Core complex arithmetic.  Returns MP_OBJ_NULL when rhs cannot be
+// converted to complex or the op is unsupported, letting the runtime
+// try other dispatch paths.
+mp_obj_t mp_obj_complex_binary_op(mp_binary_op_t op, mp_float_t lhs_real, mp_float_t lhs_imag, mp_obj_t rhs_in) {
+    mp_float_t rhs_real, rhs_imag;
+    if (!mp_obj_get_complex_maybe(rhs_in, &rhs_real, &rhs_imag)) {
+        return MP_OBJ_NULL; // op not supported
+    }
+
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD:
+            lhs_real += rhs_real;
+            lhs_imag += rhs_imag;
+            break;
+        case MP_BINARY_OP_SUBTRACT:
+        case MP_BINARY_OP_INPLACE_SUBTRACT:
+            lhs_real -= rhs_real;
+            lhs_imag -= rhs_imag;
+            break;
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY: {
+            mp_float_t real;
+        multiply:
+            // (a+bi)(c+di) = (ac-bd) + (ad+bc)i; also the tail of division below
+            real = lhs_real * rhs_real - lhs_imag * rhs_imag;
+            lhs_imag = lhs_real * rhs_imag + lhs_imag * rhs_real;
+            lhs_real = real;
+            break;
+        }
+        case MP_BINARY_OP_FLOOR_DIVIDE:
+        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+            mp_raise_TypeError(MP_ERROR_TEXT("can't do truncated division of a complex number"));
+
+        case MP_BINARY_OP_TRUE_DIVIDE:
+        case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+            if (rhs_imag == 0) {
+                if (rhs_real == 0) {
+                    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("complex division by zero"));
+                }
+                // divide by a purely real rhs
+                lhs_real /= rhs_real;
+                lhs_imag /= rhs_real;
+            } else if (rhs_real == 0) {
+                // divide by a purely imaginary rhs
+                mp_float_t real = lhs_imag / rhs_imag;
+                lhs_imag = -lhs_real / rhs_imag;
+                lhs_real = real;
+            } else {
+                // general case: multiply by conjugate(rhs) / |rhs|^2
+                mp_float_t rhs_len_sq = rhs_real * rhs_real + rhs_imag * rhs_imag;
+                rhs_real /= rhs_len_sq;
+                rhs_imag /= -rhs_len_sq;
+                goto multiply;
+            }
+            break;
+
+        case MP_BINARY_OP_POWER:
+        case MP_BINARY_OP_INPLACE_POWER: {
+            // z1**z2 = exp(z2*ln(z1))
+            // = exp(z2*(ln(|z1|)+i*arg(z1)))
+            // = exp( (x2*ln1 - y2*arg1) + i*(y2*ln1 + x2*arg1) )
+            // = exp(x3 + i*y3)
+            // = exp(x3)*(cos(y3) + i*sin(y3))
+            mp_float_t abs1 = MICROPY_FLOAT_C_FUN(sqrt)(lhs_real * lhs_real + lhs_imag * lhs_imag);
+            if (abs1 == 0) {
+                if (rhs_imag == 0 && rhs_real >= 0) {
+                    // 0**0 == 1; 0**(positive real) == 0
+                    lhs_real = (rhs_real == 0);
+                } else {
+                    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("0.0 to a complex power"));
+                }
+            } else {
+                mp_float_t ln1 = MICROPY_FLOAT_C_FUN(log)(abs1);
+                mp_float_t arg1 = MICROPY_FLOAT_C_FUN(atan2)(lhs_imag, lhs_real);
+                mp_float_t x3 = rhs_real * ln1 - rhs_imag * arg1;
+                mp_float_t y3 = rhs_imag * ln1 + rhs_real * arg1;
+                mp_float_t exp_x3 = MICROPY_FLOAT_C_FUN(exp)(x3);
+                lhs_real = exp_x3 * MICROPY_FLOAT_C_FUN(cos)(y3);
+                lhs_imag = exp_x3 * MICROPY_FLOAT_C_FUN(sin)(y3);
+            }
+            break;
+        }
+
+        case MP_BINARY_OP_EQUAL:
+            return mp_obj_new_bool(lhs_real == rhs_real && lhs_imag == rhs_imag);
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+    return mp_obj_new_complex(lhs_real, lhs_imag);
+}
+
+#pragma GCC diagnostic pop
+
+#endif
diff --git a/circuitpython/py/objdeque.c b/circuitpython/py/objdeque.c
new file mode 100644
index 0000000..2d29d71
--- /dev/null
+++ b/circuitpython/py/objdeque.c
@@ -0,0 +1,172 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2018 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <unistd.h> // for ssize_t
+#include <string.h>
+
+#include "py/mpconfig.h"
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_COLLECTIONS_DEQUE
+
+#include "py/runtime.h"
+
+// Fixed-capacity ring buffer: alloc = maxlen + 1 slots, one slot kept
+// empty so that i_get == i_put unambiguously means "empty".
+typedef struct _mp_obj_deque_t {
+    mp_obj_base_t base;
+    size_t alloc; // number of slots in items
+    size_t i_get; // index of the oldest element (next to pop)
+    size_t i_put; // index of the next free slot (next append goes here)
+    mp_obj_t *items;
+    uint32_t flags;
+    #define FLAG_CHECK_OVERFLOW 1
+} mp_obj_deque_t;
+
+// deque(iterable, maxlen[, flags]); only the empty tuple is accepted as
+// the initial iterable.  The optional flags argument enables
+// FLAG_CHECK_OVERFLOW (raise instead of dropping the oldest on append).
+STATIC mp_obj_t deque_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, 3, false);
+
+    /* Initialization from existing sequence is not supported, so an empty
+       tuple must be passed as such. */
+    if (args[0] != mp_const_empty_tuple) {
+        mp_raise_ValueError(NULL);
+    }
+
+    // Protect against -1 leading to zero-length allocation and bad array access
+    mp_int_t maxlen = mp_obj_get_int(args[1]);
+    if (maxlen < 0) {
+        mp_raise_ValueError(NULL);
+    }
+
+    mp_obj_deque_t *o = m_new_obj(mp_obj_deque_t);
+    o->base.type = type;
+    o->alloc = maxlen + 1;
+    o->i_get = o->i_put = 0;
+    o->items = m_new0(mp_obj_t, o->alloc);
+
+    if (n_args > 2) {
+        o->flags = mp_obj_get_int(args[2]);
+    }
+
+    return MP_OBJ_FROM_PTR(o);
+}
+
+STATIC mp_obj_t deque_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+    mp_obj_deque_t *self = MP_OBJ_TO_PTR(self_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            return mp_obj_new_bool(self->i_get != self->i_put);
+        case MP_UNARY_OP_LEN: {
+            // indices may have wrapped around; normalise the difference
+            ssize_t len = self->i_put - self->i_get;
+            if (len < 0) {
+                len += self->alloc;
+            }
+            return MP_OBJ_NEW_SMALL_INT(len);
+        }
+        #if MICROPY_PY_SYS_GETSIZEOF
+        case MP_UNARY_OP_SIZEOF: {
+            size_t sz = sizeof(*self) + sizeof(mp_obj_t) * self->alloc;
+            return MP_OBJ_NEW_SMALL_INT(sz);
+        }
+        #endif
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Append to the right.  When full: raises IndexError("full") if
+// FLAG_CHECK_OVERFLOW is set, otherwise silently drops the oldest element.
+STATIC mp_obj_t mp_obj_deque_append(mp_obj_t self_in, mp_obj_t arg) {
+    mp_obj_deque_t *self = MP_OBJ_TO_PTR(self_in);
+
+    size_t new_i_put = self->i_put + 1;
+    if (new_i_put == self->alloc) {
+        new_i_put = 0;
+    }
+
+    if (self->flags & FLAG_CHECK_OVERFLOW && new_i_put == self->i_get) {
+        mp_raise_msg(&mp_type_IndexError, MP_ERROR_TEXT("full"));
+    }
+
+    self->items[self->i_put] = arg;
+    self->i_put = new_i_put;
+
+    if (self->i_get == new_i_put) {
+        // buffer was full: advance i_get to discard the oldest element
+        if (++self->i_get == self->alloc) {
+            self->i_get = 0;
+        }
+    }
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(deque_append_obj, mp_obj_deque_append);
+
+// Remove and return the leftmost (oldest) element; IndexError("empty") if none.
+STATIC mp_obj_t deque_popleft(mp_obj_t self_in) {
+    mp_obj_deque_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (self->i_get == self->i_put) {
+        mp_raise_msg(&mp_type_IndexError, MP_ERROR_TEXT("empty"));
+    }
+
+    mp_obj_t ret = self->items[self->i_get];
+    self->items[self->i_get] = MP_OBJ_NULL; // clear slot so the value can be reclaimed
+
+    if (++self->i_get == self->alloc) {
+        self->i_get = 0;
+    }
+
+    return ret;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(deque_popleft_obj, deque_popleft);
+
+#if 0
+STATIC mp_obj_t deque_clear(mp_obj_t self_in) {
+    mp_obj_deque_t *self = MP_OBJ_TO_PTR(self_in);
+    self->i_get = self->i_put = 0;
+    mp_seq_clear(self->items, 0, self->alloc, sizeof(*self->items));
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(deque_clear_obj, deque_clear);
+#endif
+
+STATIC const mp_rom_map_elem_t deque_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&deque_append_obj) },
+    #if 0
+    { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&deque_clear_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_popleft), MP_ROM_PTR(&deque_popleft_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(deque_locals_dict, deque_locals_dict_table);
+
+const mp_obj_type_t mp_type_deque = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_deque,
+    .make_new = deque_make_new,
+    .locals_dict = (mp_obj_dict_t *)&deque_locals_dict,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = deque_unary_op,
+    ),
+};
+
+#endif // MICROPY_PY_COLLECTIONS_DEQUE
diff --git a/circuitpython/py/objdict.c b/circuitpython/py/objdict.c
new file mode 100644
index 0000000..5f23374
--- /dev/null
+++ b/circuitpython/py/objdict.c
@@ -0,0 +1,658 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/objtype.h"
+#include "py/objstr.h"
+
+#include "supervisor/linker.h"
+#include "supervisor/shared/translate.h"
+
+// Shared immutable empty dict: fixed (read-only) with no table, so lookups
+// always miss and stores raise TypeError via mp_ensure_not_fixed().
+const mp_obj_dict_t mp_const_empty_dict_obj = {
+    .base = { .type = &mp_type_dict },
+    .map = {
+        .all_keys_are_qstrs = 0,
+        .is_fixed = 1,
+        .is_ordered = 1,
+        .used = 0,
+        .alloc = 0,
+        .table = NULL,
+    }
+};
+
+STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+
+// This is a helper function to iterate through a dictionary. The state of
+// the iteration is held in *cur and should be initialised with zero for the
+// first call. Will return NULL when no more elements are available.
+STATIC mp_map_elem_t *dict_iter_next(mp_obj_dict_t *dict, size_t *cur) {
+    size_t max = dict->map.alloc;
+    mp_map_t *map = &dict->map;
+
+    // Scan forward from *cur for the next filled slot, skipping empty and
+    // deleted (sentinel) entries in the hash table.
+    size_t i = *cur;
+    for (; i < max; i++) {
+        if (mp_map_slot_is_filled(map, i)) {
+            *cur = i + 1;
+            return &(map->table[i]);
+        }
+    }
+
+    assert(map->used == 0 || i == max);
+    return NULL;
+}
+
+// Print a dict either as repr ("{k: v, ...}", wrapped in "Type(...)" for
+// OrderedDict subclasses) or, when MICROPY_PY_UJSON is enabled and
+// kind == PRINT_JSON, as a JSON object with non-string keys quoted.
+STATIC void dict_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    bool first = true;
+    const char *item_separator = ", ";
+    const char *key_separator = ": ";
+    if (!(MICROPY_PY_UJSON && kind == PRINT_JSON)) {
+        kind = PRINT_REPR;
+    } else {
+        #if MICROPY_PY_UJSON_SEPARATORS
+        // json.dump(s) may override the separators via the extended print.
+        item_separator = MP_PRINT_GET_EXT(print)->item_separator;
+        key_separator = MP_PRINT_GET_EXT(print)->key_separator;
+        #endif
+    }
+    if (MICROPY_PY_COLLECTIONS_ORDEREDDICT && self->base.type != &mp_type_dict && kind != PRINT_JSON) {
+        mp_printf(print, "%q(", self->base.type->name);
+    }
+    mp_print_str(print, "{");
+    size_t cur = 0;
+    mp_map_elem_t *next = NULL;
+    while ((next = dict_iter_next(self, &cur)) != NULL) {
+        if (!first) {
+            mp_print_str(print, item_separator);
+        }
+        first = false;
+        // JSON requires string keys: quote anything that isn't already str/bytes.
+        bool add_quote = MICROPY_PY_UJSON && kind == PRINT_JSON && !mp_obj_is_str_or_bytes(next->key);
+        if (add_quote) {
+            mp_print_str(print, "\"");
+        }
+        mp_obj_print_helper(print, next->key, kind);
+        if (add_quote) {
+            mp_print_str(print, "\"");
+        }
+        mp_print_str(print, key_separator);
+        mp_obj_print_helper(print, next->value, kind);
+    }
+    mp_print_str(print, "}");
+    if (MICROPY_PY_COLLECTIONS_ORDEREDDICT && self->base.type != &mp_type_dict && kind != PRINT_JSON) {
+        mp_print_str(print, ")");
+    }
+}
+
+// Constructor for dict (and OrderedDict): creates an empty dict of the given
+// type, then populates it from the optional positional iterable/mapping and
+// any keyword arguments by delegating to dict_update().
+mp_obj_t mp_obj_dict_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_t dict_out = mp_obj_new_dict(0);
+    mp_obj_dict_t *dict = MP_OBJ_TO_PTR(dict_out);
+    dict->base.type = type;
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    if (type == &mp_type_ordereddict) {
+        dict->map.is_ordered = 1;
+    }
+    #endif
+    if (n_args > 0 || n_kw > 0) {
+        mp_obj_t args2[2] = {dict_out, args[0]}; // args[0] is always valid, even if it's not a positional arg
+        mp_map_t kwargs;
+        mp_map_init_fixed_table(&kwargs, n_kw, args + n_args);
+        dict_update(n_args + 1, args2, &kwargs); // dict_update will check that n_args + 1 == 1 or 2
+    }
+    return dict_out;
+}
+
+// Unary ops on dict: bool() (non-empty test), len(), and optionally
+// sys.getsizeof() (object plus hash-table allocation).
+STATIC mp_obj_t dict_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            return mp_obj_new_bool(self->map.used != 0);
+        case MP_UNARY_OP_LEN:
+            return MP_OBJ_NEW_SMALL_INT(self->map.used);
+        #if MICROPY_PY_SYS_GETSIZEOF
+        case MP_UNARY_OP_SIZEOF: {
+            size_t sz = sizeof(*self) + sizeof(*self->map.table) * self->map.alloc;
+            return MP_OBJ_NEW_SMALL_INT(sz);
+        }
+        #endif
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary ops on dict: "in" (key containment) and "==".
+// Equality of two OrderedDicts is order-sensitive (pairwise walk); plain
+// dict equality compares sizes then looks up each lhs key in rhs.
+STATIC mp_obj_t dict_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_obj_dict_t *o = MP_OBJ_TO_PTR(lhs_in);
+    switch (op) {
+        case MP_BINARY_OP_CONTAINS: {
+            mp_map_elem_t *elem = mp_map_lookup(&o->map, rhs_in, MP_MAP_LOOKUP);
+            return mp_obj_new_bool(elem != NULL);
+        }
+        case MP_BINARY_OP_EQUAL: {
+            #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+            if (MP_UNLIKELY(mp_obj_is_type(lhs_in, &mp_type_ordereddict) && mp_obj_is_type(rhs_in, &mp_type_ordereddict))) {
+                // Iterate through both dictionaries simultaneously and compare keys and values.
+                mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in);
+                size_t c1 = 0, c2 = 0;
+                mp_map_elem_t *e1 = dict_iter_next(o, &c1), *e2 = dict_iter_next(rhs, &c2);
+                for (; e1 != NULL && e2 != NULL; e1 = dict_iter_next(o, &c1), e2 = dict_iter_next(rhs, &c2)) {
+                    if (!mp_obj_equal(e1->key, e2->key) || !mp_obj_equal(e1->value, e2->value)) {
+                        return mp_const_false;
+                    }
+                }
+                // Equal only if both iterations were exhausted together.
+                return e1 == NULL && e2 == NULL ? mp_const_true : mp_const_false;
+            }
+            #endif
+            if (mp_obj_is_type(rhs_in, &mp_type_dict)) {
+                mp_obj_dict_t *rhs = MP_OBJ_TO_PTR(rhs_in);
+                if (o->map.used != rhs->map.used) {
+                    return mp_const_false;
+                }
+
+                size_t cur = 0;
+                mp_map_elem_t *next = NULL;
+                while ((next = dict_iter_next(o, &cur)) != NULL) {
+                    mp_map_elem_t *elem = mp_map_lookup(&rhs->map, next->key, MP_MAP_LOOKUP);
+                    if (elem == NULL || !mp_obj_equal(next->value, elem->value)) {
+                        return mp_const_false;
+                    }
+                }
+                return mp_const_true;
+            } else {
+                // dict is not equal to instance of any other type
+                return mp_const_false;
+            }
+        }
+        default:
+            // op not supported
+            return MP_OBJ_NULL;
+    }
+}
+
+// Note: Make sure this is inlined in load part of dict_subscr() below.
+// Public C API: d[index] lookup; raises KeyError if the key is absent.
+mp_obj_t mp_obj_dict_get(mp_obj_t self_in, mp_obj_t index) {
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_map_elem_t *elem = mp_map_lookup(&self->map, index, MP_MAP_LOOKUP);
+    if (elem == NULL) {
+        mp_raise_type_arg(&mp_type_KeyError, index);
+    } else {
+        return elem->value;
+    }
+}
+
+// Subscript handler: value == MP_OBJ_NULL means delete (del d[k]),
+// MP_OBJ_SENTINEL means load (d[k]), anything else means store (d[k] = v).
+STATIC mp_obj_t dict_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    if (value == MP_OBJ_NULL) {
+        // delete
+        mp_obj_dict_delete(self_in, index);
+        return mp_const_none;
+    } else if (value == MP_OBJ_SENTINEL) {
+        // load
+        mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+        mp_map_elem_t *elem = mp_map_lookup(&self->map, index, MP_MAP_LOOKUP);
+        if (elem == NULL) {
+            mp_raise_type_arg(&mp_type_KeyError, index);
+        } else {
+            return elem->value;
+        }
+    } else {
+        // store
+        mp_obj_dict_store(self_in, index, value);
+        return mp_const_none;
+    }
+}
+
+/******************************************************************************/
+/* dict methods */
+
+// Guard: mutating a fixed (ROM/read-only) dict raises TypeError.
+STATIC void mp_ensure_not_fixed(const mp_obj_dict_t *dict) {
+    if (dict->map.is_fixed) {
+        mp_raise_TypeError(NULL);
+    }
+}
+
+// dict.clear(): remove all items from a mutable dict.
+STATIC mp_obj_t dict_clear(mp_obj_t self_in) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_ensure_not_fixed(self);
+
+    mp_map_clear(&self->map);
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_clear_obj, dict_clear);
+
+// dict.copy(): shallow copy. Duplicates the hash table with memcpy (keys and
+// values are shared object references); the copy is always mutable
+// (is_fixed = 0) even if the source was fixed.
+mp_obj_t mp_obj_dict_copy(mp_obj_t self_in) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t other_out = mp_obj_new_dict(self->map.alloc);
+    mp_obj_dict_t *other = MP_OBJ_TO_PTR(other_out);
+    other->base.type = self->base.type;
+    other->map.used = self->map.used;
+    other->map.all_keys_are_qstrs = self->map.all_keys_are_qstrs;
+    other->map.is_fixed = 0;
+    other->map.is_ordered = self->map.is_ordered;
+    memcpy(other->map.table, self->map.table, self->map.alloc * sizeof(mp_map_elem_t));
+    return other_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_copy_obj, mp_obj_dict_copy);
+
+#if MICROPY_PY_BUILTINS_DICT_FROMKEYS
+// this is a classmethod
+// dict.fromkeys(iterable[, value]): build a new dict with each element of
+// the iterable as a key, all mapped to the same value (default None).
+STATIC mp_obj_t dict_fromkeys(size_t n_args, const mp_obj_t *args) {
+    mp_obj_t iter = mp_getiter(args[1], NULL);
+    mp_obj_t value = mp_const_none;
+    mp_obj_t next = MP_OBJ_NULL;
+
+    if (n_args > 2) {
+        value = args[2];
+    }
+
+    // optimisation to allocate result based on len of argument
+    mp_obj_t self_out;
+    mp_obj_t len = mp_obj_len_maybe(args[1]);
+    if (len == MP_OBJ_NULL) {
+        /* object's type doesn't have a __len__ slot */
+        self_out = mp_obj_new_dict(0);
+    } else {
+        self_out = mp_obj_new_dict(MP_OBJ_SMALL_INT_VALUE(len));
+    }
+
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_out);
+    while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+        mp_map_lookup(&self->map, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+    }
+
+    return self_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_fromkeys_fun_obj, 2, 3, dict_fromkeys);
+STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(dict_fromkeys_obj, MP_ROM_PTR(&dict_fromkeys_fun_obj));
+#endif
+
+// Shared implementation for get()/pop()/setdefault(), selected by lookup_kind:
+//   MP_MAP_LOOKUP                  -> get(key[, default])
+//   MP_MAP_LOOKUP_REMOVE_IF_FOUND  -> pop(key[, default]); KeyError if missing
+//                                     and no default given
+//   MP_MAP_LOOKUP_ADD_IF_NOT_FOUND -> setdefault(key[, default])
+// args[0] is the dict, args[1] the key, optional args[2] the default.
+STATIC mp_obj_t dict_get_helper(size_t n_args, const mp_obj_t *args, mp_map_lookup_kind_t lookup_kind) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(args[0]));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]);
+    if (lookup_kind != MP_MAP_LOOKUP) {
+        // pop/setdefault mutate the dict, so it must not be fixed.
+        mp_ensure_not_fixed(self);
+    }
+    mp_map_elem_t *elem = mp_map_lookup(&self->map, args[1], lookup_kind);
+    mp_obj_t value;
+    if (elem == NULL || elem->value == MP_OBJ_NULL) {
+        if (n_args == 2) {
+            if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
+                mp_raise_type_arg(&mp_type_KeyError, args[1]);
+            } else {
+                value = mp_const_none;
+            }
+        } else {
+            value = args[2];
+        }
+        if (lookup_kind == MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
+            // setdefault: the slot was just added; fill in its value.
+            elem->value = value;
+        }
+    } else {
+        value = elem->value;
+        if (lookup_kind == MP_MAP_LOOKUP_REMOVE_IF_FOUND) {
+            elem->value = MP_OBJ_NULL; // so that GC can collect the deleted value
+        }
+    }
+    return value;
+}
+
+// dict.get(key[, default]) — plain lookup, never mutates.
+STATIC mp_obj_t dict_get(size_t n_args, const mp_obj_t *args) {
+    return dict_get_helper(n_args, args, MP_MAP_LOOKUP);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_get_obj, 2, 3, dict_get);
+
+// dict.pop(key[, default]) — remove and return; KeyError if absent and no default.
+STATIC mp_obj_t dict_pop(size_t n_args, const mp_obj_t *args) {
+    return dict_get_helper(n_args, args, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_pop_obj, 2, 3, dict_pop);
+
+// dict.setdefault(key[, default]) — insert default if key absent, return value.
+STATIC mp_obj_t dict_setdefault(size_t n_args, const mp_obj_t *args) {
+    return dict_get_helper(n_args, args, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(dict_setdefault_obj, 2, 3, dict_setdefault);
+
+// dict.popitem(): remove and return an arbitrary (key, value) tuple.
+// For an ordered dict the last item is taken (LIFO, like CPython);
+// raises KeyError if the dict is empty.
+STATIC mp_obj_t dict_popitem(mp_obj_t self_in) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_ensure_not_fixed(self);
+    if (self->map.used == 0) {
+        mp_raise_msg_varg(&mp_type_KeyError, MP_ERROR_TEXT("pop from empty %q"), MP_QSTR_dict);
+    }
+    size_t cur = 0;
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    if (self->map.is_ordered) {
+        // Ordered maps are stored contiguously, so the last item is at used-1.
+        cur = self->map.used - 1;
+    }
+    #endif
+    mp_map_elem_t *next = dict_iter_next(self, &cur);
+    assert(next);
+    self->map.used--;
+    mp_obj_t items[] = {next->key, next->value};
+    next->key = MP_OBJ_SENTINEL; // must mark key as sentinel to indicate that it was deleted
+    next->value = MP_OBJ_NULL;
+    mp_obj_t tuple = mp_obj_new_tuple(2, items);
+
+    return tuple;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_popitem_obj, dict_popitem);
+
+// dict.update([other], **kwargs): merge entries from another dict, from an
+// iterable of (key, value) pairs, and/or from keyword arguments. Also used
+// by mp_obj_dict_make_new() to implement the dict() constructor.
+STATIC mp_obj_t dict_update(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(args[0]));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(args[0]);
+    mp_ensure_not_fixed(self);
+
+    mp_arg_check_num(n_args, kwargs->used, 1, 2, true);
+
+    if (n_args == 2) {
+        // given a positional argument
+
+        if (mp_obj_is_dict_or_ordereddict(args[1])) {
+            // update from other dictionary (make sure other is not self)
+            if (args[1] != args[0]) {
+                size_t cur = 0;
+                mp_map_elem_t *elem = NULL;
+                while ((elem = dict_iter_next((mp_obj_dict_t *)MP_OBJ_TO_PTR(args[1]), &cur)) != NULL) {
+                    mp_map_lookup(&self->map, elem->key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = elem->value;
+                }
+            }
+        } else {
+            // update from a generic iterable of pairs
+            mp_obj_t iter = mp_getiter(args[1], NULL);
+            mp_obj_t next = MP_OBJ_NULL;
+            while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+                // Each element must itself iterate to exactly two items.
+                mp_obj_t inneriter = mp_getiter(next, NULL);
+                mp_obj_t key = mp_iternext(inneriter);
+                mp_obj_t value = mp_iternext(inneriter);
+                mp_obj_t stop = mp_iternext(inneriter);
+                if (key == MP_OBJ_STOP_ITERATION
+                    || value == MP_OBJ_STOP_ITERATION
+                    || stop != MP_OBJ_STOP_ITERATION) {
+                    mp_raise_ValueError(MP_ERROR_TEXT("dict update sequence has wrong length"));
+                } else {
+                    mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+                }
+            }
+        }
+    }
+
+    // update the dict with any keyword args
+    for (size_t i = 0; i < kwargs->alloc; i++) {
+        if (mp_map_slot_is_filled(kwargs, i)) {
+            mp_map_lookup(&self->map, kwargs->table[i].key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = kwargs->table[i].value;
+        }
+    }
+
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(dict_update_obj, 1, dict_update);
+
+
+/******************************************************************************/
+/* dict views */
+
+// Forward declarations for the view and view-iterator types defined below.
+STATIC const mp_obj_type_t mp_type_dict_view;
+STATIC const mp_obj_type_t mp_type_dict_view_it;
+
+// Which flavour of view: items() yields (k, v) tuples, keys() yields keys,
+// values() yields values.
+typedef enum _mp_dict_view_kind_t {
+    MP_DICT_VIEW_ITEMS,
+    MP_DICT_VIEW_KEYS,
+    MP_DICT_VIEW_VALUES,
+} mp_dict_view_kind_t;
+
+// Display names, indexed by mp_dict_view_kind_t.
+STATIC const char *const mp_dict_view_names[] = {"dict_items", "dict_keys", "dict_values"};
+
+// Iterator over a dict view: holds the view kind, the source dict, and the
+// current hash-table slot index (cursor for dict_iter_next).
+typedef struct _mp_obj_dict_view_it_t {
+    mp_obj_base_t base;
+    mp_dict_view_kind_t kind;
+    mp_obj_t dict;
+    size_t cur;
+} mp_obj_dict_view_it_t;
+
+// A dict view object: a lazy reference to the source dict plus the kind.
+typedef struct _mp_obj_dict_view_t {
+    mp_obj_base_t base;
+    mp_obj_t dict;
+    mp_dict_view_kind_t kind;
+} mp_obj_dict_view_t;
+
+// Advance a dict-view iterator: yields the next key, value, or (key, value)
+// tuple depending on the view kind; MP_OBJ_STOP_ITERATION when exhausted.
+STATIC mp_obj_t dict_view_it_iternext(mp_obj_t self_in) {
+    mp_check_self(mp_obj_is_type(self_in, &mp_type_dict_view_it));
+    mp_obj_dict_view_it_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_map_elem_t *next = dict_iter_next(MP_OBJ_TO_PTR(self->dict), &self->cur);
+
+    if (next == NULL) {
+        return MP_OBJ_STOP_ITERATION;
+    } else {
+        switch (self->kind) {
+            case MP_DICT_VIEW_ITEMS:
+            default: {
+                mp_obj_t items[] = {next->key, next->value};
+                return mp_obj_new_tuple(2, items);
+            }
+            case MP_DICT_VIEW_KEYS:
+                return next->key;
+            case MP_DICT_VIEW_VALUES:
+                return next->value;
+        }
+    }
+}
+
+// Type object for the dict-view iterator (self-iterating).
+STATIC const mp_obj_type_t mp_type_dict_view_it = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_iterator,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = mp_identity_getiter,
+        .iternext = dict_view_it_iternext,
+        ),
+};
+
+// Create an iterator for a dict view, constructed in-place in the
+// caller-provided iter_buf (no heap allocation).
+STATIC mp_obj_t dict_view_getiter(mp_obj_t view_in, mp_obj_iter_buf_t *iter_buf) {
+    assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t));
+    mp_check_self(mp_obj_is_type(view_in, &mp_type_dict_view));
+    mp_obj_dict_view_t *view = MP_OBJ_TO_PTR(view_in);
+    mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t *)iter_buf;
+    o->base.type = &mp_type_dict_view_it;
+    o->kind = view->kind;
+    o->dict = view->dict;
+    o->cur = 0;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Print a dict view as e.g. "dict_keys([k1, k2, ...])" by iterating it.
+STATIC void dict_view_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_check_self(mp_obj_is_type(self_in, &mp_type_dict_view));
+    mp_obj_dict_view_t *self = MP_OBJ_TO_PTR(self_in);
+    bool first = true;
+    mp_print_str(print, mp_dict_view_names[self->kind]);
+    mp_print_str(print, "([");
+    mp_obj_iter_buf_t iter_buf;
+    mp_obj_t self_iter = dict_view_getiter(self_in, &iter_buf);
+    mp_obj_t next = MP_OBJ_NULL;
+    while ((next = dict_view_it_iternext(self_iter)) != MP_OBJ_STOP_ITERATION) {
+        if (!first) {
+            mp_print_str(print, ", ");
+        }
+        first = false;
+        mp_obj_print_helper(print, next, PRINT_REPR);
+    }
+    mp_print_str(print, "])");
+}
+
+// Binary ops on a view: only "key in d.keys()" is supported, delegated to
+// the dict's own containment check.
+STATIC mp_obj_t dict_view_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    // only supported for the 'keys' kind until sets and dicts are refactored
+    mp_obj_dict_view_t *o = MP_OBJ_TO_PTR(lhs_in);
+    if (o->kind != MP_DICT_VIEW_KEYS) {
+        return MP_OBJ_NULL; // op not supported
+    }
+    if (op != MP_BINARY_OP_CONTAINS) {
+        return MP_OBJ_NULL; // op not supported
+    }
+    return dict_binary_op(op, o->dict, rhs_in);
+}
+
+// Type object for dict views (shared by items/keys/values views).
+STATIC const mp_obj_type_t mp_type_dict_view = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_dict_view,
+    .print = dict_view_print,
+    MP_TYPE_EXTENDED_FIELDS(
+        .binary_op = dict_view_binary_op,
+        .getiter = dict_view_getiter,
+        ),
+};
+
+// Allocate a new view object referencing the given dict.
+STATIC mp_obj_t mp_obj_new_dict_view(mp_obj_t dict, mp_dict_view_kind_t kind) {
+    mp_obj_dict_view_t *o = m_new_obj(mp_obj_dict_view_t);
+    o->base.type = &mp_type_dict_view;
+    o->dict = dict;
+    o->kind = kind;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Common helper for items()/keys()/values().
+STATIC mp_obj_t dict_view(mp_obj_t self_in, mp_dict_view_kind_t kind) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    return mp_obj_new_dict_view(self_in, kind);
+}
+
+// dict.items()
+STATIC mp_obj_t dict_items(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_ITEMS);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_items_obj, dict_items);
+
+// dict.keys()
+STATIC mp_obj_t dict_keys(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_KEYS);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_keys_obj, dict_keys);
+
+// dict.values()
+STATIC mp_obj_t dict_values(mp_obj_t self_in) {
+    return dict_view(self_in, MP_DICT_VIEW_VALUES);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(dict_values_obj, dict_values);
+
+/******************************************************************************/
+/* dict iterator */
+
+// iter(dict): iterating a dict directly yields its keys, implemented by
+// building a keys-view iterator in the caller-provided buffer.
+STATIC mp_obj_t dict_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf) {
+    assert(sizeof(mp_obj_dict_view_it_t) <= sizeof(mp_obj_iter_buf_t));
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    mp_obj_dict_view_it_t *o = (mp_obj_dict_view_it_t *)iter_buf;
+    o->base.type = &mp_type_dict_view_it;
+    o->kind = MP_DICT_VIEW_KEYS;
+    o->dict = self_in;
+    o->cur = 0;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* dict constructors & public C API */
+
+// Method table shared by dict and OrderedDict.
+STATIC const mp_rom_map_elem_t dict_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&dict_clear_obj) },
+    { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&dict_copy_obj) },
+    #if MICROPY_PY_BUILTINS_DICT_FROMKEYS
+    { MP_ROM_QSTR(MP_QSTR_fromkeys), MP_ROM_PTR(&dict_fromkeys_obj) },
+    #endif
+    { MP_ROM_QSTR(MP_QSTR_get), MP_ROM_PTR(&dict_get_obj) },
+    { MP_ROM_QSTR(MP_QSTR_items), MP_ROM_PTR(&dict_items_obj) },
+    { MP_ROM_QSTR(MP_QSTR_keys), MP_ROM_PTR(&dict_keys_obj) },
+    { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&dict_pop_obj) },
+    { MP_ROM_QSTR(MP_QSTR_popitem), MP_ROM_PTR(&dict_popitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setdefault), MP_ROM_PTR(&dict_setdefault_obj) },
+    { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&dict_update_obj) },
+    { MP_ROM_QSTR(MP_QSTR_values), MP_ROM_PTR(&dict_values_obj) },
+    { MP_ROM_QSTR(MP_QSTR___getitem__), MP_ROM_PTR(&mp_op_getitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR___setitem__), MP_ROM_PTR(&mp_op_setitem_obj) },
+    { MP_ROM_QSTR(MP_QSTR___delitem__), MP_ROM_PTR(&mp_op_delitem_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(dict_locals_dict, dict_locals_dict_table);
+
+// Type object for the builtin dict.
+const mp_obj_type_t mp_type_dict = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_dict,
+    .print = dict_print,
+    .make_new = mp_obj_dict_make_new,
+    .locals_dict = (mp_obj_dict_t *)&dict_locals_dict,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = dict_unary_op,
+        .binary_op = dict_binary_op,
+        .subscr = dict_subscr,
+        .getiter = dict_getiter,
+        ),
+};
+
+#if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+// Type object for collections.OrderedDict: identical slots to dict, with
+// dict set as its parent so isinstance(od, dict) holds.
+const mp_obj_type_t mp_type_ordereddict = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_OrderedDict,
+    .print = dict_print,
+    .make_new = mp_obj_dict_make_new,
+    .parent = &mp_type_dict,
+    .locals_dict = (mp_obj_dict_t *)&dict_locals_dict,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = dict_unary_op,
+        .binary_op = dict_binary_op,
+        .subscr = dict_subscr,
+        .getiter = dict_getiter,
+        ),
+};
+#endif
+
+// Initialise an already-allocated dict struct with capacity for n_args items.
+void mp_obj_dict_init(mp_obj_dict_t *dict, size_t n_args) {
+    dict->base.type = &mp_type_dict;
+    mp_map_init(&dict->map, n_args);
+}
+
+// Allocate and initialise a new dict with capacity for n_args items.
+mp_obj_t mp_obj_new_dict(size_t n_args) {
+    mp_obj_dict_t *o = m_new_obj(mp_obj_dict_t);
+    mp_obj_dict_init(o, n_args);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Number of items currently stored in the dict.
+size_t mp_obj_dict_len(mp_obj_t self_in) {
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->map.used;
+}
+
+// Public C API: d[key] = value. Returns the dict to allow call chaining.
+mp_obj_t mp_obj_dict_store(mp_obj_t self_in, mp_obj_t key, mp_obj_t value) {
+    mp_check_self(mp_obj_is_dict_or_ordereddict(self_in));
+    mp_obj_dict_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_ensure_not_fixed(self);
+    mp_map_lookup(&self->map, key, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+    return self_in;
+}
+
+// Public C API: del d[key]; raises KeyError (via dict_get_helper) if absent.
+mp_obj_t mp_obj_dict_delete(mp_obj_t self_in, mp_obj_t key) {
+    mp_obj_t args[2] = {self_in, key};
+    dict_get_helper(2, args, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+    return self_in;
+}
diff --git a/circuitpython/py/objenumerate.c b/circuitpython/py/objenumerate.c
new file mode 100644
index 0000000..c18ac1d
--- /dev/null
+++ b/circuitpython/py/objenumerate.c
@@ -0,0 +1,95 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_ENUMERATE
+
+// enumerate object: wraps an iterator and the current running index.
+typedef struct _mp_obj_enumerate_t {
+    mp_obj_base_t base;
+    mp_obj_t iter;  // underlying iterator being enumerated
+    mp_int_t cur;   // index to pair with the next yielded item
+} mp_obj_enumerate_t;
+
+STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in);
+
+// Constructor for enumerate(iterable, start=0). With MICROPY_CPYTHON_COMPAT
+// the arguments are parsed by keyword (like CPython); otherwise only
+// positional args are accepted.
+STATIC mp_obj_t enumerate_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    #if MICROPY_CPYTHON_COMPAT
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_iterable, MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} },
+        { MP_QSTR_start, MP_ARG_INT, {.u_int = 0} },
+    };
+
+    // parse args
+    struct {
+        mp_arg_val_t iterable, start;
+    } arg_vals;
+    mp_arg_parse_all_kw_array(n_args, n_kw, args,
+        MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t *)&arg_vals);
+
+    // create enumerate object
+    mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
+    o->base.type = type;
+    o->iter = mp_getiter(arg_vals.iterable.u_obj, NULL);
+    o->cur = arg_vals.start.u_int;
+    #else
+    (void)n_kw;
+    mp_obj_enumerate_t *o = m_new_obj(mp_obj_enumerate_t);
+    o->base.type = type;
+    o->iter = mp_getiter(args[0], NULL);
+    o->cur = n_args > 1 ? mp_obj_get_int(args[1]) : 0;
+    #endif
+
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Type object for the builtin enumerate (self-iterating).
+const mp_obj_type_t mp_type_enumerate = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_enumerate,
+    .make_new = enumerate_make_new,
+    MP_TYPE_EXTENDED_FIELDS(
+        .iternext = enumerate_iternext,
+        .getiter = mp_identity_getiter,
+        )
+};
+
+// Yield the next (index, item) tuple, advancing the index, or propagate
+// exhaustion of the underlying iterator.
+STATIC mp_obj_t enumerate_iternext(mp_obj_t self_in) {
+    assert(mp_obj_is_type(self_in, &mp_type_enumerate));
+    mp_obj_enumerate_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_t next = mp_iternext(self->iter);
+    if (next == MP_OBJ_STOP_ITERATION) {
+        return MP_OBJ_STOP_ITERATION;
+    } else {
+        mp_obj_t items[] = {MP_OBJ_NEW_SMALL_INT(self->cur++), next};
+        return mp_obj_new_tuple(2, items);
+    }
+}
+
+#endif // MICROPY_PY_BUILTINS_ENUMERATE
diff --git a/circuitpython/py/objexcept.c b/circuitpython/py/objexcept.c
new file mode 100644
index 0000000..fc07876
--- /dev/null
+++ b/circuitpython/py/objexcept.c
@@ -0,0 +1,793 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <stdio.h>
+
+#include "py/objlist.h"
+#include "py/objnamedtuple.h"
+#include "py/objstr.h"
+#include "py/objtuple.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/gc.h"
+#include "py/mperrno.h"
+
+#include "supervisor/shared/translate.h"
+
+// Number of items per traceback entry (file, line, block)
+#define TRACEBACK_ENTRY_LEN (3)
+
+// Optionally allocated buffer for storing some traceback, the tuple argument,
+// and possible string object and data, for when the heap is locked.
+#if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+
+// When used the layout of the emergency exception buffer is:
+// - traceback entry (file, line, block)
+// - traceback entry (file, line, block)
+// - mp_obj_tuple_t object
+// - n_args * mp_obj_t for tuple
+// - mp_obj_str_t object
+// - string data
+#define EMG_BUF_TRACEBACK_OFFSET (0)
+#define EMG_BUF_TRACEBACK_SIZE (2 * TRACEBACK_ENTRY_LEN * sizeof(size_t))
+#define EMG_BUF_TUPLE_OFFSET (EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE)
+#define EMG_BUF_TUPLE_SIZE(n_args) (sizeof(mp_obj_tuple_t) + n_args * sizeof(mp_obj_t))
+#define EMG_BUF_STR_OFFSET (EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(1))
+#define EMG_BUF_STR_BUF_OFFSET (EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t))
+
+#if MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE > 0
+// Buffer size is fixed at build time (buffer itself is declared statically).
+#define mp_emergency_exception_buf_size MICROPY_EMERGENCY_EXCEPTION_BUF_SIZE
+
+void mp_init_emergency_exception_buf(void) {
+    // Nothing to do since the buffer was declared statically. We put this
+    // definition here so that the calling code can call this function
+    // regardless of how its configured (makes the calling code a bit cleaner).
+}
+
+#else
+// Buffer is allocated at runtime; its size lives in VM state.
+#define mp_emergency_exception_buf_size MP_STATE_VM(mp_emergency_exception_buf_size)
+
+void mp_init_emergency_exception_buf(void) {
+    mp_emergency_exception_buf_size = 0;
+    MP_STATE_VM(mp_emergency_exception_buf) = NULL;
+}
+
+// Bound to micropython.alloc_emergency_exception_buf(size): (re)allocate the
+// buffer used to build exception data when the heap is exhausted or locked.
+// A size of 0 (or less) disables the buffer. Returns None.
+mp_obj_t mp_alloc_emergency_exception_buf(mp_obj_t size_in) {
+    mp_int_t size = mp_obj_get_int(size_in);
+    void *buf = NULL;
+    if (size > 0) {
+        buf = m_new(byte, size);
+    }
+
+    int old_size = mp_emergency_exception_buf_size;
+    void *old_buf = MP_STATE_VM(mp_emergency_exception_buf);
+
+    // Update the 2 variables atomically so that an interrupt can't occur
+    // between the assignments.
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    mp_emergency_exception_buf_size = size;
+    MP_STATE_VM(mp_emergency_exception_buf) = buf;
+    MICROPY_END_ATOMIC_SECTION(atomic_state);
+
+    if (old_buf != NULL) {
+        // Free the previous buffer only after the new one is installed.
+        m_del(byte, old_buf, old_size);
+    }
+    return mp_const_none;
+}
+#endif
+#endif // MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+
+// Map an exception instance (native or user subclass) to its native struct.
+mp_obj_exception_t *mp_obj_exception_get_native(mp_obj_t self_in) {
+    assert(mp_obj_is_exception_instance(self_in));
+    if (!mp_obj_is_native_exception_instance(self_in)) {
+        // User subclass: the embedded native exception lives in subobj[0].
+        mp_obj_instance_t *inst = MP_OBJ_TO_PTR(self_in);
+        return MP_OBJ_TO_PTR(inst->subobj[0]);
+    }
+    return MP_OBJ_TO_PTR(self_in);
+}
+
+// Print an exception. Repr/exc kinds print the type name first; str/exc
+// kinds then print the args (with a friendly special case for OSError when
+// uerrno support is enabled).
+void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    mp_obj_exception_t *o = MP_OBJ_TO_PTR(o_in);
+    // PRINT_EXC_SUBCLASS flags that the caller already printed a subclass name.
+    mp_print_kind_t k = kind & ~PRINT_EXC_SUBCLASS;
+    bool is_subclass = kind & PRINT_EXC_SUBCLASS;
+    if (!is_subclass && (k == PRINT_REPR || k == PRINT_EXC)) {
+        mp_print_str(print, qstr_str(o->base.type->name));
+    }
+
+    if (k == PRINT_EXC) {
+        mp_print_str(print, ": ");
+    }
+
+    if (k == PRINT_STR || k == PRINT_EXC) {
+        if (o->args == NULL || o->args->len == 0) {
+            mp_print_str(print, "");
+            return;
+        }
+        #if MICROPY_PY_UERRNO
+        // try to provide a nice OSError error message
+        if (o->base.type == &mp_type_OSError && o->args->len > 0 && o->args->len < 3 && mp_obj_is_small_int(o->args->items[0])) {
+            char decompressed[50];
+            const char *msg = mp_common_errno_to_str(o->args->items[0], decompressed, sizeof(decompressed));
+            if (msg != NULL) {
+                mp_printf(print, "[Errno " INT_FMT "] %s", MP_OBJ_SMALL_INT_VALUE(o->args->items[0]), msg);
+                // if there's a second arg, print it after the errno message
+                if (o->args->len > 1) {
+                    mp_print_str(print, ": ");
+                    mp_obj_print_helper(print, o->args->items[1], PRINT_STR);
+                }
+                return;
+            }
+        }
+        #endif
+
+        // a single argument prints bare, not as a 1-tuple
+        if (o->args->len == 1) {
+            mp_obj_print_helper(print, o->args->items[0], PRINT_STR);
+            return;
+        }
+    }
+
+    // otherwise print the whole args tuple
+    mp_obj_tuple_print(print, MP_OBJ_FROM_PTR(o->args), kind);
+}
+
+// Create a new exception instance of the given type with the given args
+// stored as a tuple. Falls back to statically-allocated emergency objects
+// (and the emergency exception buffer, if configured) when the heap cannot
+// supply memory, so raising still works with a locked or exhausted heap.
+mp_obj_t mp_obj_exception_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);
+
+    // Try to allocate memory for the exception, with fallback to emergency exception object
+    mp_obj_exception_t *o_exc = m_new_obj_maybe(mp_obj_exception_t);
+    if (o_exc == NULL) {
+        o_exc = &MP_STATE_VM(mp_emergency_exception_obj);
+    }
+
+    // Populate the exception object
+    o_exc->base.type = type;
+    o_exc->traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+
+    mp_obj_tuple_t *o_tuple;
+    if (n_args == 0) {
+        // No args, can use the empty tuple straightaway
+        o_tuple = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
+    } else {
+        // Try to allocate memory for the tuple containing the args
+        o_tuple = m_new_obj_var_maybe(mp_obj_tuple_t, mp_obj_t, n_args);
+
+        #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+        // If we are called by mp_obj_new_exception_msg_varg then it will have
+        // reserved room (after the traceback data) for a tuple with 1 element.
+        // Otherwise we are free to use the whole buffer after the traceback data.
+        if (o_tuple == NULL && mp_emergency_exception_buf_size >=
+            (mp_int_t)(EMG_BUF_TUPLE_OFFSET + EMG_BUF_TUPLE_SIZE(n_args))) {
+            o_tuple = (mp_obj_tuple_t *)
+                ((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf) + EMG_BUF_TUPLE_OFFSET);
+        }
+        #endif
+
+        if (o_tuple == NULL) {
+            // No memory for a tuple, fallback to an empty tuple
+            o_tuple = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
+        } else {
+            // Have memory for a tuple so populate it
+            o_tuple->base.type = &mp_type_tuple;
+            o_tuple->len = n_args;
+            memcpy(o_tuple->items, args, n_args * sizeof(mp_obj_t));
+        }
+    }
+
+    // Store the tuple of args in the exception object
+    o_exc->args = o_tuple;
+
+    return MP_OBJ_FROM_PTR(o_exc);
+}
+
+// Get exception "value" - that is, first argument, or None
+mp_obj_t mp_obj_exception_get_value(mp_obj_t self_in) {
+    mp_obj_exception_t *exc = mp_obj_exception_get_native(self_in);
+    return (exc->args->len == 0) ? mp_const_none : exc->args->items[0];
+}
+
+// Load/store/delete handler for exception attributes: args, __traceback__,
+// StopIteration.value, plus CPython-compat OSError fields (errno, strerror,
+// filename, filename2).
+void mp_obj_exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    mp_obj_exception_t *self = MP_OBJ_TO_PTR(self_in);
+    if (dest[0] != MP_OBJ_NULL) {
+        // store/delete attribute
+        if (attr == MP_QSTR___traceback__) {
+            if (dest[1] == mp_const_none) {
+                // 'exc.__traceback__ = None' resets to the empty traceback
+                self->traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+            } else {
+                if (!mp_obj_is_type(dest[1], &mp_type_traceback)) {
+                    mp_raise_TypeError(MP_ERROR_TEXT("invalid traceback"));
+                }
+                self->traceback = MP_OBJ_TO_PTR(dest[1]);
+            }
+            dest[0] = MP_OBJ_NULL; // indicate success
+        }
+        return;
+    }
+    if (attr == MP_QSTR_args) {
+        dest[0] = MP_OBJ_FROM_PTR(self->args);
+    } else if (attr == MP_QSTR_value && self->base.type == &mp_type_StopIteration) {
+        dest[0] = mp_obj_exception_get_value(self_in);
+    } else if (attr == MP_QSTR___traceback__) {
+        dest[0] = (self->traceback) ? MP_OBJ_FROM_PTR(self->traceback) : mp_const_none;
+    #if MICROPY_CPYTHON_COMPAT
+    } else if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(self->base.type), MP_OBJ_FROM_PTR(&mp_type_OSError))) {
+        if (attr == MP_QSTR_errno) {
+            dest[0] = mp_obj_exception_get_value(self_in);
+        } else if (attr == MP_QSTR_strerror) {
+            if (self->args->len > 1) {
+                // an explicit message was supplied as the second arg
+                dest[0] = self->args->items[1];
+            } else if (self->args->len > 0) {
+                // derive the message from the errno value
+                char decompressed[50];
+                const char *msg = mp_common_errno_to_str(self->args->items[0], decompressed, sizeof(decompressed));
+                if (msg != NULL) {
+                    dest[0] = mp_obj_new_str(msg, strlen(msg));
+                } else {
+                    dest[0] = mp_const_none;
+                }
+            } else {
+                dest[0] = mp_const_none;
+            }
+        } else if (attr == MP_QSTR_filename) {
+            dest[0] = self->args->len > 2 ? self->args->items[2] : mp_const_none;
+            // skip winerror
+        } else if (attr == MP_QSTR_filename2) {
+            dest[0] = self->args->len > 4 ? self->args->items[4] : mp_const_none;
+        }
+    #endif
+    }
+}
+
+// Root of the builtin exception hierarchy; all other exception types are
+// generated from it via MP_DEFINE_EXCEPTION below.
+const mp_obj_type_t mp_type_BaseException = {
+    { &mp_type_type },
+    .name = MP_QSTR_BaseException,
+    .print = mp_obj_exception_print,
+    .make_new = mp_obj_exception_make_new,
+    .attr = mp_obj_exception_attr,
+};
+
+// *FORMAT-OFF*
+
+// List of all exceptions, arranged as in the table at:
+// http://docs.python.org/3/library/exceptions.html
+// Each MP_DEFINE_EXCEPTION(name, base) expands to a full mp_obj_type_t
+// definition (see objexcept.h). Commented-out entries are CPython exceptions
+// deliberately not provided here.
+MP_DEFINE_EXCEPTION(SystemExit, BaseException)
+MP_DEFINE_EXCEPTION(KeyboardInterrupt, BaseException)
+MP_DEFINE_EXCEPTION(ReloadException, BaseException)
+MP_DEFINE_EXCEPTION(GeneratorExit, BaseException)
+MP_DEFINE_EXCEPTION(Exception, BaseException)
+    #if MICROPY_PY_ASYNC_AWAIT
+MP_DEFINE_EXCEPTION(StopAsyncIteration, Exception)
+    #endif
+MP_DEFINE_EXCEPTION(StopIteration, Exception)
+MP_DEFINE_EXCEPTION(ArithmeticError, Exception)
+// MP_DEFINE_EXCEPTION(FloatingPointError, ArithmeticError)
+MP_DEFINE_EXCEPTION(OverflowError, ArithmeticError)
+MP_DEFINE_EXCEPTION(ZeroDivisionError, ArithmeticError)
+MP_DEFINE_EXCEPTION(AssertionError, Exception)
+MP_DEFINE_EXCEPTION(AttributeError, Exception)
+// MP_DEFINE_EXCEPTION(BufferError, Exception)
+// MP_DEFINE_EXCEPTION(EnvironmentError, Exception) use OSError instead
+MP_DEFINE_EXCEPTION(EOFError, Exception)
+MP_DEFINE_EXCEPTION(ImportError, Exception)
+// MP_DEFINE_EXCEPTION(IOError, Exception) use OSError instead
+MP_DEFINE_EXCEPTION(LookupError, Exception)
+MP_DEFINE_EXCEPTION(IndexError, LookupError)
+MP_DEFINE_EXCEPTION(KeyError, LookupError)
+MP_DEFINE_EXCEPTION(MemoryError, Exception)
+MP_DEFINE_EXCEPTION(NameError, Exception)
+/*
+    MP_DEFINE_EXCEPTION(UnboundLocalError, NameError)
+ */
+MP_DEFINE_EXCEPTION(OSError, Exception)
+MP_DEFINE_EXCEPTION(TimeoutError, OSError)
+MP_DEFINE_EXCEPTION(ConnectionError, OSError)
+MP_DEFINE_EXCEPTION(BrokenPipeError, ConnectionError)
+/*
+    MP_DEFINE_EXCEPTION(ConnectionAbortedError, ConnectionError)
+    MP_DEFINE_EXCEPTION(ConnectionRefusedError, ConnectionError)
+    MP_DEFINE_EXCEPTION(ConnectionResetError, ConnectionError)
+ */
+/*
+    MP_DEFINE_EXCEPTION(BlockingIOError, OSError)
+    MP_DEFINE_EXCEPTION(ChildProcessError, OSError)
+    MP_DEFINE_EXCEPTION(InterruptedError, OSError)
+    MP_DEFINE_EXCEPTION(IsADirectoryError, OSError)
+    MP_DEFINE_EXCEPTION(NotADirectoryError, OSError)
+    MP_DEFINE_EXCEPTION(PermissionError, OSError)
+    MP_DEFINE_EXCEPTION(ProcessLookupError, OSError)
+    MP_DEFINE_EXCEPTION(TimeoutError, OSError)
+    MP_DEFINE_EXCEPTION(FileExistsError, OSError)
+    MP_DEFINE_EXCEPTION(FileNotFoundError, OSError)
+    MP_DEFINE_EXCEPTION(ReferenceError, Exception)
+ */
+MP_DEFINE_EXCEPTION(RuntimeError, Exception)
+MP_DEFINE_EXCEPTION(NotImplementedError, RuntimeError)
+MP_DEFINE_EXCEPTION(SyntaxError, Exception)
+MP_DEFINE_EXCEPTION(IndentationError, SyntaxError)
+/*
+    MP_DEFINE_EXCEPTION(TabError, IndentationError)
+ */
+// MP_DEFINE_EXCEPTION(SystemError, Exception)
+MP_DEFINE_EXCEPTION(TypeError, Exception)
+#if MICROPY_EMIT_NATIVE
+MP_DEFINE_EXCEPTION(ViperTypeError, TypeError)
+#endif
+MP_DEFINE_EXCEPTION(ValueError, Exception)
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+MP_DEFINE_EXCEPTION(UnicodeError, ValueError)
+// TODO: Implement more UnicodeError subclasses which take arguments
+#endif
+#if CIRCUITPY_ALARM
+MP_DEFINE_EXCEPTION(DeepSleepRequest, BaseException)
+#endif
+MP_DEFINE_EXCEPTION(MpyError, ValueError)
+/*
+    MP_DEFINE_EXCEPTION(Warning, Exception)
+    MP_DEFINE_EXCEPTION(DeprecationWarning, Warning)
+    MP_DEFINE_EXCEPTION(PendingDeprecationWarning, Warning)
+    MP_DEFINE_EXCEPTION(RuntimeWarning, Warning)
+    MP_DEFINE_EXCEPTION(SyntaxWarning, Warning)
+    MP_DEFINE_EXCEPTION(UserWarning, Warning)
+    MP_DEFINE_EXCEPTION(FutureWarning, Warning)
+    MP_DEFINE_EXCEPTION(ImportWarning, Warning)
+    MP_DEFINE_EXCEPTION(UnicodeWarning, Warning)
+    MP_DEFINE_EXCEPTION(BytesWarning, Warning)
+    MP_DEFINE_EXCEPTION(ResourceWarning, Warning)
+ */
+
+// *FORMAT-ON*
+
+// Create an exception instance of the given builtin type with no arguments.
+mp_obj_t mp_obj_new_exception(const mp_obj_type_t *exc_type) {
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+    return mp_obj_exception_make_new(exc_type, 0, 0, NULL);
+}
+
+// Create an exception instance of the given builtin type with n_args
+// positional arguments.
+mp_obj_t mp_obj_new_exception_args(const mp_obj_type_t *exc_type, size_t n_args, const mp_obj_t *args) {
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+    // Call the maker directly (the assert guarantees it is the right one);
+    // consistent with mp_obj_new_exception above, and avoids an indirect call.
+    return mp_obj_exception_make_new(exc_type, n_args, 0, args);
+}
+
+#if MICROPY_ERROR_REPORTING != MICROPY_ERROR_REPORTING_NONE
+// Create an exception whose single arg is a compressed constant message.
+mp_obj_t mp_obj_new_exception_msg(const mp_obj_type_t *exc_type, const compressed_string_t *msg) {
+    // msg is a plain message with no format specifiers, so no varargs follow.
+    return mp_obj_new_exception_msg_varg(exc_type, msg);
+}
+#endif
+
+// The following struct and function implement a simple printer that conservatively
+// allocates memory and truncates the output data if no more memory can be obtained.
+// It leaves room for a null byte at the end of the buffer.
+
+struct _exc_printer_t {
+    bool allow_realloc; // cleared once the buffer must no longer move or grow
+    size_t alloc;       // bytes allocated in buf
+    size_t len;         // bytes used so far
+    byte *buf;
+};
+
+// mp_print_t callback: append len bytes of str to the printer's buffer,
+// growing the buffer if allowed and truncating the data otherwise.
+STATIC void exc_add_strn(void *data, const char *str, size_t len) {
+    struct _exc_printer_t *pr = data;
+    if (pr->len + len >= pr->alloc) {
+        // Not enough room for data plus a null byte so try to grow the buffer
+        if (pr->allow_realloc) {
+            size_t new_alloc = pr->alloc + len + 16;
+            byte *new_buf = m_renew_maybe(byte, pr->buf, pr->alloc, new_alloc, true);
+            if (new_buf == NULL) {
+                // Growth failed: stop trying and truncate to the space left
+                // (keeping one byte for the terminating null).
+                pr->allow_realloc = false;
+                len = pr->alloc - pr->len - 1;
+            } else {
+                pr->alloc = new_alloc;
+                pr->buf = new_buf;
+            }
+        } else {
+            len = pr->alloc - pr->len - 1;
+        }
+    }
+    memcpy(pr->buf + pr->len, str, len);
+    pr->len += len;
+}
+
+
+// Create an exception whose single arg is the formatted (compressed) message.
+mp_obj_t mp_obj_new_exception_msg_varg(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    mp_obj_t exception = mp_obj_new_exception_msg_vlist(exc_type, fmt, ap);
+    va_end(ap);
+    return exception;
+}
+
+// Create an exception whose single arg is 'fmt' formatted with 'ap'. Uses
+// the emergency exception buffer for the string object and its data when the
+// heap cannot provide memory; degrades to fewer args rather than failing.
+mp_obj_t mp_obj_new_exception_msg_vlist(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, va_list ap) {
+    assert(fmt != NULL);
+
+    // Check that the given type is an exception type
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+
+    // Try to allocate memory for the message
+    mp_obj_str_t *o_str = m_new_obj_maybe(mp_obj_str_t);
+    size_t o_str_alloc = decompress_length(fmt);
+    byte *o_str_buf = m_new_maybe(byte, o_str_alloc);
+
+    bool used_emg_buf = false;
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    // If memory allocation failed and there is an emergency buffer then try to use
+    // that buffer to store the string object and its data (at least 16 bytes for
+    // the string data), reserving room at the start for the traceback and 1-tuple.
+    if ((o_str == NULL || o_str_buf == NULL)
+        && mp_emergency_exception_buf_size >= (mp_int_t)(EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t) + 16)) {
+        used_emg_buf = true;
+        o_str = (mp_obj_str_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+            + EMG_BUF_STR_OFFSET);
+        o_str_buf = (byte *)&o_str[1];
+        o_str_alloc = (uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+            + mp_emergency_exception_buf_size - o_str_buf;
+    }
+    #endif
+
+    if (o_str == NULL) {
+        // No memory for the string object so create the exception with no args.
+        // The exception will only have a type and no message (compression is irrelevant).
+        return mp_obj_exception_make_new(exc_type, 0, 0, NULL);
+    }
+
+    if (o_str_buf == NULL) {
+        // No memory for the string buffer: the string is compressed so don't add it.
+        o_str->len = 0;
+        o_str->data = NULL;
+    } else {
+        // We have some memory to format the string.
+        // The printer truncates if the buffer cannot grow (emergency buffer case).
+        struct _exc_printer_t exc_pr = {!used_emg_buf, o_str_alloc, 0, o_str_buf};
+        mp_print_t print = {&exc_pr, exc_add_strn};
+        mp_vcprintf(&print, fmt, ap);
+        exc_pr.buf[exc_pr.len] = '\0';
+        o_str->len = exc_pr.len;
+        o_str->data = exc_pr.buf;
+    }
+
+    // Create the string object and call mp_obj_exception_make_new to create the exception
+    o_str->base.type = &mp_type_str;
+    o_str->hash = qstr_compute_hash(o_str->data, o_str->len);
+    mp_obj_t arg = MP_OBJ_FROM_PTR(o_str);
+    return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
+}
+
+// Create an exception whose single arg is the given C string. The string
+// data is referenced, not copied, so msg must be statically allocated.
+mp_obj_t mp_obj_new_exception_msg_str(const mp_obj_type_t *exc_type, const char *msg) {
+    assert(msg != NULL);
+
+    // Check that the given type is an exception type
+    assert(exc_type->make_new == mp_obj_exception_make_new);
+
+    // Try to allocate memory for the message
+    mp_obj_str_t *o_str = m_new_obj_maybe(mp_obj_str_t);
+
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    // If memory allocation failed and there is an emergency buffer then try to use
+    // that buffer to store the string object and its data (at least 16 bytes for
+    // the string data), reserving room at the start for the traceback and 1-tuple.
+    if (o_str == NULL
+        && mp_emergency_exception_buf_size >= EMG_BUF_STR_OFFSET + sizeof(mp_obj_str_t) + 16) {
+        o_str = (mp_obj_str_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+            + EMG_BUF_STR_OFFSET);
+    }
+    #endif
+
+    if (o_str == NULL) {
+        // No memory for the string object so create the exception with no args
+        return mp_obj_exception_make_new(exc_type, 0, 0, NULL);
+    }
+
+    // Assume the message is statically allocated.
+    o_str->len = strlen(msg);
+    o_str->data = (const byte *)msg;
+
+    // Create the string object and call mp_obj_exception_make_new to create the exception
+    o_str->base.type = &mp_type_str;
+    o_str->hash = qstr_compute_hash(o_str->data, o_str->len);
+    mp_obj_t arg = MP_OBJ_FROM_PTR(o_str);
+    return mp_obj_exception_make_new(exc_type, 1, 0, &arg);
+}
+
+// return true if the given object is an exception type
+bool mp_obj_is_exception_type(mp_obj_t self_in) {
+    // Fast path: builtin exception types all share mp_obj_exception_make_new.
+    if (mp_obj_is_type(self_in, &mp_type_type)
+        && ((mp_obj_type_t *)MP_OBJ_TO_PTR(self_in))->make_new == mp_obj_exception_make_new) {
+        return true;
+    }
+    // Slow path: user-defined subclasses of BaseException.
+    return mp_obj_is_subclass_fast(self_in, MP_OBJ_FROM_PTR(&mp_type_BaseException));
+}
+
+// return true if the given object is an instance of an exception type
+bool mp_obj_is_exception_instance(mp_obj_t self_in) {
+    // An object is an exception instance iff its type is an exception type.
+    return mp_obj_is_exception_type(MP_OBJ_FROM_PTR(mp_obj_get_type(self_in)));
+}
+
+// Return true if exception (type or instance) is a subclass of given
+// exception type. Assumes exc_type is a subclass of BaseException, as
+// defined by mp_obj_is_exception_type(exc_type).
+bool mp_obj_exception_match(mp_obj_t exc, mp_const_obj_t exc_type) {
+    mp_obj_t type = exc;
+    if (mp_obj_is_exception_instance(exc)) {
+        // an instance was given; compare using its type
+        type = MP_OBJ_FROM_PTR(mp_obj_get_type(exc));
+    }
+    return mp_obj_is_subclass_fast(type, exc_type);
+}
+
+// traceback handling functions
+
+// Reset the exception's traceback without freeing or allocating anything.
+void mp_obj_exception_clear_traceback(mp_obj_t self_in) {
+    mp_obj_exception_t *self = mp_obj_exception_get_native(self_in);
+    // just set the traceback to the empty traceback object
+    // we don't want to call any memory management functions here
+    self->traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+}
+
+// Append one (file, line, block) entry to the exception's traceback data,
+// allocating or growing storage as needed. Tracebacks are best-effort:
+// if no memory is available (eg gc locked) the entry is silently dropped.
+void mp_obj_exception_add_traceback(mp_obj_t self_in, qstr file, size_t line, qstr block) {
+    mp_obj_exception_t *self = mp_obj_exception_get_native(self_in);
+
+    // Try to allocate memory for the traceback, with fallback to emergency traceback object
+    if (self->traceback == NULL || self->traceback == (mp_obj_traceback_t *)&mp_const_empty_traceback_obj) {
+        self->traceback = m_new_obj_maybe(mp_obj_traceback_t);
+        if (self->traceback == NULL) {
+            self->traceback = &MP_STATE_VM(mp_emergency_traceback_obj);
+        }
+        // populate traceback object
+        *self->traceback = mp_const_empty_traceback_obj;
+    }
+
+    // append the provided traceback info to traceback data
+    // if memory allocation fails (eg because gc is locked), just return
+    if (self->traceback->data == NULL) {
+        // first entry: allocate space for one entry
+        self->traceback->data = m_new_maybe(size_t, TRACEBACK_ENTRY_LEN);
+        if (self->traceback->data == NULL) {
+            #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+            if (mp_emergency_exception_buf_size >= (mp_int_t)(EMG_BUF_TRACEBACK_OFFSET + EMG_BUF_TRACEBACK_SIZE)) {
+                // There is room in the emergency buffer for traceback data
+                size_t *tb = (size_t *)((uint8_t *)MP_STATE_VM(mp_emergency_exception_buf)
+                    + EMG_BUF_TRACEBACK_OFFSET);
+                self->traceback->data = tb;
+                self->traceback->alloc = EMG_BUF_TRACEBACK_SIZE / sizeof(size_t);
+            } else {
+                // Can't allocate and no room in emergency buffer
+                return;
+            }
+            #else
+            // Can't allocate
+            return;
+            #endif
+        } else {
+            // Allocated the traceback data on the heap
+            self->traceback->alloc = TRACEBACK_ENTRY_LEN;
+        }
+        self->traceback->len = 0;
+    } else if (self->traceback->len + TRACEBACK_ENTRY_LEN > self->traceback->alloc) {
+        #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+        if (self->traceback->data == (size_t *)MP_STATE_VM(mp_emergency_exception_buf)) {
+            // Can't resize the emergency buffer
+            return;
+        }
+        #endif
+        // be conservative with growing traceback data
+        size_t *tb_data = m_renew_maybe(size_t, self->traceback->data, self->traceback->alloc,
+            self->traceback->alloc + TRACEBACK_ENTRY_LEN, true);
+        if (tb_data == NULL) {
+            return;
+        }
+        self->traceback->data = tb_data;
+        self->traceback->alloc += TRACEBACK_ENTRY_LEN;
+    }
+
+    // write the entry and advance the length
+    size_t *tb_data = &self->traceback->data[self->traceback->len];
+    self->traceback->len += TRACEBACK_ENTRY_LEN;
+    tb_data[0] = file;
+    tb_data[1] = line;
+    tb_data[2] = block;
+}
+
+// Expose the exception's raw traceback data: entry count in *n and a pointer
+// to the (file, line, block) entries in *values (0/NULL if no traceback).
+void mp_obj_exception_get_traceback(mp_obj_t self_in, size_t *n, size_t **values) {
+    mp_obj_exception_t *self = mp_obj_exception_get_native(self_in);
+    mp_obj_traceback_t *tb = self->traceback;
+    *n = (tb == NULL) ? 0 : tb->len;
+    *values = (tb == NULL) ? NULL : tb->data;
+}
+
+#if MICROPY_PY_SYS_EXC_INFO
+// namedtuple type presenting code-object-like data with CPython field names.
+STATIC const mp_obj_namedtuple_type_t code_type_obj = {
+    .base = {
+        .base = {
+            .type = &mp_type_type
+        },
+        .flags = MP_TYPE_FLAG_EXTENDED,
+        .name = MP_QSTR_code,
+        .print = namedtuple_print,
+        .make_new = namedtuple_make_new,
+        .parent = &mp_type_tuple,
+        .attr = namedtuple_attr,
+        MP_TYPE_EXTENDED_FIELDS(
+            .unary_op = mp_obj_tuple_unary_op,
+            .binary_op = mp_obj_tuple_binary_op,
+            .subscr = mp_obj_tuple_subscr,
+            .getiter = mp_obj_tuple_getiter,
+            ),
+    },
+    .n_fields = 15,
+    .fields = {
+        MP_QSTR_co_argcount,
+        MP_QSTR_co_kwonlyargcount,
+        MP_QSTR_co_nlocals,
+        MP_QSTR_co_stacksize,
+        MP_QSTR_co_flags,
+        MP_QSTR_co_code,
+        MP_QSTR_co_consts,
+        MP_QSTR_co_names,
+        MP_QSTR_co_varnames,
+        MP_QSTR_co_freevars,
+        MP_QSTR_co_cellvars,
+        MP_QSTR_co_filename,
+        MP_QSTR_co_name,
+        MP_QSTR_co_firstlineno,
+        MP_QSTR_co_lnotab,
+    },
+};
+
+// Build a 'code' namedtuple for a traceback entry. Only co_filename and
+// co_name carry real data; the other fields are zero/empty placeholders.
+STATIC mp_obj_t code_make_new(qstr file, qstr block) {
+    mp_obj_t elems[15] = {
+        mp_obj_new_int(0), // co_argcount
+        mp_obj_new_int(0), // co_kwonlyargcount
+        mp_obj_new_int(0), // co_nlocals
+        mp_obj_new_int(0), // co_stacksize
+        mp_obj_new_int(0), // co_flags
+        mp_obj_new_bytearray(0, NULL), // co_code
+        mp_obj_new_tuple(0, NULL), // co_consts
+        mp_obj_new_tuple(0, NULL), // co_names
+        mp_obj_new_tuple(0, NULL), // co_varnames
+        mp_obj_new_tuple(0, NULL), // co_freevars
+        mp_obj_new_tuple(0, NULL), // co_cellvars
+        MP_OBJ_NEW_QSTR(file), // co_filename
+        MP_OBJ_NEW_QSTR(block), // co_name
+        mp_obj_new_int(1), // co_firstlineno
+        mp_obj_new_bytearray(0, NULL), // co_lnotab
+    };
+
+    return namedtuple_make_new((const mp_obj_type_t *)&code_type_obj, 15, 0, elems);
+}
+
+// namedtuple type presenting frame-object-like data with CPython field names.
+STATIC const mp_obj_namedtuple_type_t frame_type_obj = {
+    .base = {
+        .base = {
+            .type = &mp_type_type
+        },
+        // MP_TYPE_FLAG_EXTENDED is required for the MP_TYPE_EXTENDED_FIELDS
+        // below to be honored; code_type_obj and traceback_type_obj set it,
+        // but it was missing here.
+        .flags = MP_TYPE_FLAG_EXTENDED,
+        .name = MP_QSTR_frame,
+        .print = namedtuple_print,
+        .make_new = namedtuple_make_new,
+        .parent = &mp_type_tuple,
+        .attr = namedtuple_attr,
+        MP_TYPE_EXTENDED_FIELDS(
+            .unary_op = mp_obj_tuple_unary_op,
+            .binary_op = mp_obj_tuple_binary_op,
+            .subscr = mp_obj_tuple_subscr,
+            .getiter = mp_obj_tuple_getiter,
+            ),
+    },
+    .n_fields = 8,
+    .fields = {
+        MP_QSTR_f_back,
+        MP_QSTR_f_builtins,
+        MP_QSTR_f_code,
+        MP_QSTR_f_globals,
+        MP_QSTR_f_lasti,
+        MP_QSTR_f_lineno,
+        MP_QSTR_f_locals,
+        MP_QSTR_f_trace,
+    },
+};
+
+// Build a 'frame' namedtuple for a traceback entry. Only f_code and
+// f_lineno carry real data; the other fields are empty placeholders.
+STATIC mp_obj_t frame_make_new(mp_obj_t f_code, int f_lineno) {
+    mp_obj_t elems[8] = {
+        mp_const_none, // f_back
+        mp_obj_new_dict(0), // f_builtins
+        f_code, // f_code
+        mp_obj_new_dict(0), // f_globals
+        mp_obj_new_int(0), // f_lasti
+        mp_obj_new_int(f_lineno), // f_lineno
+        mp_obj_new_dict(0), // f_locals
+        mp_const_none, // f_trace
+    };
+
+    return namedtuple_make_new((const mp_obj_type_t *)&frame_type_obj, 8, 0, elems);
+}
+
+// namedtuple type presenting traceback-object-like data (tb_* field names).
+STATIC const mp_obj_namedtuple_type_t traceback_type_obj = {
+    .base = {
+        .base = {
+            .type = &mp_type_type
+        },
+        .flags = MP_TYPE_FLAG_EXTENDED,
+        .name = MP_QSTR_traceback,
+        .print = namedtuple_print,
+        .make_new = namedtuple_make_new,
+        .parent = &mp_type_tuple,
+        .attr = namedtuple_attr,
+        MP_TYPE_EXTENDED_FIELDS(
+            .unary_op = mp_obj_tuple_unary_op,
+            .binary_op = mp_obj_tuple_binary_op,
+            .subscr = mp_obj_tuple_subscr,
+            .getiter = mp_obj_tuple_getiter,
+            ),
+    },
+    .n_fields = 4,
+    .fields = {
+        MP_QSTR_tb_frame,
+        MP_QSTR_tb_lasti,
+        MP_QSTR_tb_lineno,
+        MP_QSTR_tb_next,
+    },
+};
+
+// Build one 'traceback' namedtuple from a raw (file, line, block) entry,
+// linking it to tb_next.
+STATIC mp_obj_t traceback_from_values(size_t *values, mp_obj_t tb_next) {
+    int lineno = values[1];
+
+    mp_obj_t elems[4] = {
+        frame_make_new(code_make_new(values[0], values[2]), lineno),
+        mp_obj_new_int(0), // tb_lasti: bytecode offset is not tracked
+        mp_obj_new_int(lineno),
+        tb_next,
+    };
+
+    return namedtuple_make_new((const mp_obj_type_t *)&traceback_type_obj, 4, 0, elems);
+}
+
+// Build a CPython-style chained traceback object for the exception, or
+// return None if it has no traceback (used by sys.exc_info()).
+mp_obj_t mp_obj_exception_get_traceback_obj(mp_obj_t self_in) {
+    // Pass the mp_obj_t handle itself; the original code passed the unpacked
+    // struct pointer, which only worked because MP_OBJ_TO_PTR is a plain cast
+    // under pointer object representations.
+    if (!mp_obj_is_exception_instance(self_in)) {
+        return mp_const_none;
+    }
+
+    size_t n, *values;
+    mp_obj_exception_get_traceback(self_in, &n, &values);
+    if (n == 0) {
+        return mp_const_none;
+    }
+
+    // Entries were appended oldest-first; chain them so the most recently
+    // added entry ends up at the head of the tb_next list.
+    mp_obj_t tb_next = mp_const_none;
+
+    for (size_t i = 0; i < n; i += TRACEBACK_ENTRY_LEN) {
+        tb_next = traceback_from_values(&values[i], tb_next);
+    }
+
+    return tb_next;
+}
+#endif
diff --git a/circuitpython/py/objexcept.h b/circuitpython/py/objexcept.h
new file mode 100644
index 0000000..f28f50f
--- /dev/null
+++ b/circuitpython/py/objexcept.h
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJEXCEPT_H
+#define MICROPY_INCLUDED_PY_OBJEXCEPT_H
+
+#include "py/obj.h"
+#include "py/objtuple.h"
+#include "py/objtraceback.h"
+
+// Native exception instance layout; user subclasses embed one in subobj[0].
+typedef struct _mp_obj_exception_t {
+    mp_obj_base_t base;
+    mp_obj_tuple_t *args;          // tuple of constructor arguments
+    mp_obj_traceback_t *traceback; // traceback data, or the shared empty traceback
+} mp_obj_exception_t;
+
+void mp_obj_exception_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+void mp_obj_exception_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+mp_obj_exception_t *mp_obj_exception_get_native(mp_obj_t self_in);
+
+// Define a builtin exception type mp_type_<exc_name> deriving from
+// mp_type_<base_name>; all share the common exception print/new/attr hooks.
+#define MP_DEFINE_EXCEPTION(exc_name, base_name) \
+    const mp_obj_type_t mp_type_##exc_name = { \
+        { &mp_type_type }, \
+        .name = MP_QSTR_##exc_name, \
+        .print = mp_obj_exception_print, \
+        .make_new = mp_obj_exception_make_new, \
+        .attr = mp_obj_exception_attr, \
+        .parent = &mp_type_##base_name, \
+    };
+
+#endif // MICROPY_INCLUDED_PY_OBJEXCEPT_H
diff --git a/circuitpython/py/objfilter.c b/circuitpython/py/objfilter.c
new file mode 100644
index 0000000..c034df2
--- /dev/null
+++ b/circuitpython/py/objfilter.c
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_FILTER
+
+// Instance state for the builtin filter() iterator.
+typedef struct _mp_obj_filter_t {
+    mp_obj_base_t base;
+    mp_obj_t fun;  // predicate, or mp_const_none for truthiness filtering
+    mp_obj_t iter; // the wrapped iterator
+} mp_obj_filter_t;
+
+// filter(function, iterable) constructor: exactly two positional args.
+STATIC mp_obj_t filter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 2, 2, false);
+    mp_obj_filter_t *self = m_new_obj(mp_obj_filter_t);
+    self->base.type = type;
+    self->fun = args[0];
+    self->iter = mp_getiter(args[1], NULL);
+    return MP_OBJ_FROM_PTR(self);
+}
+
+// Yield items from the wrapped iterator for which fun(item) is truthy
+// (or which are themselves truthy when fun is None).
+STATIC mp_obj_t filter_iternext(mp_obj_t self_in) {
+    mp_check_self(mp_obj_is_type(self_in, &mp_type_filter));
+    mp_obj_filter_t *self = MP_OBJ_TO_PTR(self_in);
+    for (;;) {
+        mp_obj_t item = mp_iternext(self->iter);
+        if (item == MP_OBJ_STOP_ITERATION) {
+            return MP_OBJ_STOP_ITERATION;
+        }
+        mp_obj_t verdict = (self->fun == mp_const_none)
+            ? item
+            : mp_call_function_n_kw(self->fun, 1, 0, &item);
+        if (mp_obj_is_true(verdict)) {
+            return item;
+        }
+    }
+}
+
+// Type object for filter; the iteration protocol lives in the extended fields.
+const mp_obj_type_t mp_type_filter = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_filter,
+    .make_new = filter_make_new,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = mp_identity_getiter, // a filter object is its own iterator
+        .iternext = filter_iternext,
+        ),
+};
+
+#endif // MICROPY_PY_BUILTINS_FILTER
diff --git a/circuitpython/py/objfloat.c b/circuitpython/py/objfloat.c
new file mode 100644
index 0000000..6d1d101
--- /dev/null
+++ b/circuitpython/py/objfloat.c
@@ -0,0 +1,364 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/parsenum.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+#include <math.h>
+#include "py/formatfloat.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+
+#if MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_C && MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D
+
+// M_E and M_PI are not part of the math.h standard and may not be defined
+#ifndef M_E
+#define M_E (2.7182818284590452354)
+#endif
+#ifndef M_PI
+#define M_PI (3.14159265358979323846)
+#endif
+
+// Boxed float object, used when the object representation does not encode
+// floats inline (guarded by the surrounding MICROPY_OBJ_REPR check).
+typedef struct _mp_obj_float_t {
+    mp_obj_base_t base;
+    mp_float_t value;
+} mp_obj_float_t;
+
+// Statically-allocated float constants for e and pi.
+const mp_obj_float_t mp_const_float_e_obj = {{&mp_type_float}, (mp_float_t)M_E};
+const mp_obj_float_t mp_const_float_pi_obj = {{&mp_type_float}, (mp_float_t)M_PI};
+
+#endif
+
+#define MICROPY_FLOAT_ZERO MICROPY_FLOAT_CONST(0.0)
+
+#if MICROPY_FLOAT_HIGH_QUALITY_HASH
+// must return actual integer value if it fits in mp_int_t
+mp_int_t mp_float_hash(mp_float_t src) {
+ mp_float_union_t u = {.f = src};
+ mp_int_t val;
+ const int adj_exp = (int)u.p.exp - MP_FLOAT_EXP_BIAS;
+ if (adj_exp < 0) {
+ // value < 1; must be sure to handle 0.0 correctly (ie return 0)
+ val = u.i;
+ } else {
+ // if adj_exp is max then: u.p.frc==0 indicates inf, else NaN
+ // else: 1 <= value
+ mp_float_uint_t frc = u.p.frc | ((mp_float_uint_t)1 << MP_FLOAT_FRAC_BITS);
+
+ if (adj_exp <= MP_FLOAT_FRAC_BITS) {
+ // number may have a fraction; xor the integer part with the fractional part
+ val = (frc >> (MP_FLOAT_FRAC_BITS - adj_exp))
+ ^ (frc & (((mp_float_uint_t)1 << (MP_FLOAT_FRAC_BITS - adj_exp)) - 1));
+ } else if ((unsigned int)adj_exp < MP_BITS_PER_BYTE * sizeof(mp_int_t) - 1) {
+ // the number is a (big) whole integer and will fit in val's signed-width
+ val = (mp_int_t)frc << (adj_exp - MP_FLOAT_FRAC_BITS);
+ } else {
+ // integer part will overflow val's width so just use what bits we can
+ val = frc;
+ }
+ }
+
+ if (u.p.sgn) {
+ val = -(mp_uint_t)val;
+ }
+
+ return val;
+}
+#endif
+
+// Print a float in repr/str form ('g' format), appending ".0" when the
+// output would otherwise look like an integer.
+STATIC void float_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_float_t o_val = mp_obj_float_get(o_in);
+    #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+    char buf[16];
+    #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+    // repr C stores floats with fewer mantissa bits, so print fewer digits
+    const int precision = 6;
+    #else
+    const int precision = 7;
+    #endif
+    #else
+    char buf[32];
+    const int precision = 16;
+    #endif
+    mp_format_float(o_val, buf, sizeof(buf), 'g', precision, '\0');
+    mp_print_str(print, buf);
+    // 'n' catches both "nan" and "inf" ("inf" has no 'n'? it does: i-n-f)
+    if (strchr(buf, '.') == NULL && strchr(buf, 'e') == NULL && strchr(buf, 'n') == NULL) {
+        // Python floats always have decimal point (unless inf or nan)
+        mp_print_str(print, ".0");
+    }
+}
+
+// float() constructor: float() -> 0.0, float(str/bytes) parses the text,
+// float(float) returns the argument unchanged, anything else is cast.
+STATIC mp_obj_t float_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+    switch (n_args) {
+        case 0:
+            return mp_obj_new_float(0);
+
+        case 1:
+        default: {
+            mp_buffer_info_t bufinfo;
+            if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_READ)) {
+                // a textual representation, parse it
+                return mp_parse_num_decimal(bufinfo.buf, bufinfo.len, false, false, NULL);
+            } else if (mp_obj_is_float(args[0])) {
+                // a float, just return it
+                return args[0];
+            } else {
+                // something else, try to cast it to a float
+                return mp_obj_new_float(mp_obj_get_float(args[0]));
+            }
+        }
+    }
+}
+
+// Unary operations on floats: bool, hash, +, -, abs.
+// Returns MP_OBJ_NULL for unsupported ops (per the unary_op protocol).
+STATIC mp_obj_t float_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+    mp_float_t val = mp_obj_float_get(o_in);
+    switch (op) {
+        case MP_UNARY_OP_BOOL:
+            return mp_obj_new_bool(val != 0);
+        case MP_UNARY_OP_HASH:
+            return MP_OBJ_NEW_SMALL_INT(mp_float_hash(val));
+        case MP_UNARY_OP_POSITIVE:
+            return o_in;
+        case MP_UNARY_OP_NEGATIVE:
+            return mp_obj_new_float(-val);
+        case MP_UNARY_OP_ABS: {
+            // signbit (not val < 0) so that abs(-0.0) gives +0.0
+            if (signbit(val)) {
+                return mp_obj_new_float(-val);
+            } else {
+                return o_in;
+            }
+        }
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// Binary operations with a float on the left: promote to complex if the
+// right operand is complex, otherwise defer to mp_obj_float_binary_op.
+STATIC mp_obj_t float_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    mp_float_t lhs_val = mp_obj_float_get(lhs_in);
+    #if MICROPY_PY_BUILTINS_COMPLEX
+    if (mp_obj_is_type(rhs_in, &mp_type_complex)) {
+        // treat lhs as complex lhs_val + 0j
+        return mp_obj_complex_binary_op(op, lhs_val, 0, rhs_in);
+    }
+    #endif
+    return mp_obj_float_binary_op(op, lhs_val, rhs_in);
+}
+
+// Type object for float.  EQ flags: nan != nan (not reflexive) and floats
+// compare equal against other numeric types.
+const mp_obj_type_t mp_type_float = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EQ_NOT_REFLEXIVE | MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_float,
+    .print = float_print,
+    .make_new = float_make_new,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = float_unary_op,
+        .binary_op = float_binary_op,
+        ),
+};
+
+#if MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_C && MICROPY_OBJ_REPR != MICROPY_OBJ_REPR_D
+
+// Allocate a new boxed float object on the GC heap.
+mp_obj_t mp_obj_new_float(mp_float_t value) {
+    mp_obj_float_t *o = m_new(mp_obj_float_t, 1);
+    o->base.type = &mp_type_float;
+    o->value = value;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Extract the mp_float_t value from a float object (asserts the type).
+mp_float_t mp_obj_float_get(mp_obj_t self_in) {
+    assert(mp_obj_is_float(self_in));
+    mp_obj_float_t *self = MP_OBJ_TO_PTR(self_in);
+    return self->value;
+}
+
+#endif
+
+// In-place divmod: on entry *x and *y are the operands; on return *x is
+// floor(x/y) and *y is x mod y (mod takes the sign of the divisor y).
+STATIC void mp_obj_float_divmod(mp_float_t *x, mp_float_t *y) {
+    // logic here follows that of CPython
+    // https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations
+    // x == (x//y)*y + (x%y)
+    // divmod(x, y) == (x//y, x%y)
+    mp_float_t mod = MICROPY_FLOAT_C_FUN(fmod)(*x, *y);
+    mp_float_t div = (*x - mod) / *y;
+
+    // Python specs require that mod has same sign as second operand
+    if (mod == MICROPY_FLOAT_ZERO) {
+        mod = MICROPY_FLOAT_C_FUN(copysign)(MICROPY_FLOAT_ZERO, *y);
+    } else {
+        if ((mod < MICROPY_FLOAT_ZERO) != (*y < MICROPY_FLOAT_ZERO)) {
+            // fmod rounds toward zero; shift the remainder to the divisor's sign
+            mod += *y;
+            div -= MICROPY_FLOAT_CONST(1.0);
+        }
+    }
+
+    mp_float_t floordiv;
+    if (div == MICROPY_FLOAT_ZERO) {
+        // if division is zero, take the correct sign of zero
+        floordiv = MICROPY_FLOAT_C_FUN(copysign)(MICROPY_FLOAT_ZERO, *x / *y);
+    } else {
+        // Python specs require that x == (x//y)*y + (x%y)
+        floordiv = MICROPY_FLOAT_C_FUN(floor)(div);
+        // compensate for rounding error in (x - mod) / y
+        if (div - floordiv > MICROPY_FLOAT_CONST(0.5)) {
+            floordiv += MICROPY_FLOAT_CONST(1.0);
+        }
+    }
+
+    // return results
+    *x = floordiv;
+    *y = mod;
+}
+
+// Binary arithmetic/comparison between a float lhs and an arbitrary rhs.
+// Returns MP_OBJ_NULL if rhs cannot be converted to a float (op not
+// supported); raises ZeroDivisionError for //, /, %, divmod by zero and
+// for 0 ** negative.
+// Fix: use MICROPY_FLOAT_ZERO consistently for zero comparisons and for the
+// copysign argument (was a double literal 0.0 / bare 0), matching
+// mp_obj_float_divmod above and avoiding double promotion on
+// single-precision builds.
+mp_obj_t mp_obj_float_binary_op(mp_binary_op_t op, mp_float_t lhs_val, mp_obj_t rhs_in) {
+    mp_float_t rhs_val;
+    if (!mp_obj_get_float_maybe(rhs_in, &rhs_val)) {
+        return MP_OBJ_NULL; // op not supported
+    }
+
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD:
+            lhs_val += rhs_val;
+            break;
+        case MP_BINARY_OP_SUBTRACT:
+        case MP_BINARY_OP_INPLACE_SUBTRACT:
+            lhs_val -= rhs_val;
+            break;
+        case MP_BINARY_OP_MULTIPLY:
+        case MP_BINARY_OP_INPLACE_MULTIPLY:
+            lhs_val *= rhs_val;
+            break;
+        case MP_BINARY_OP_FLOOR_DIVIDE:
+        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+            if (rhs_val == MICROPY_FLOAT_ZERO) {
+            zero_division_error:
+                mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
+            }
+            // Python specs require that x == (x//y)*y + (x%y) so we must
+            // call divmod to compute the correct floor division, which
+            // returns the floor divide in lhs_val.
+            mp_obj_float_divmod(&lhs_val, &rhs_val);
+            break;
+        case MP_BINARY_OP_TRUE_DIVIDE:
+        case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+            if (rhs_val == MICROPY_FLOAT_ZERO) {
+                goto zero_division_error;
+            }
+            lhs_val /= rhs_val;
+            break;
+        case MP_BINARY_OP_MODULO:
+        case MP_BINARY_OP_INPLACE_MODULO:
+            if (rhs_val == MICROPY_FLOAT_ZERO) {
+                goto zero_division_error;
+            }
+            lhs_val = MICROPY_FLOAT_C_FUN(fmod)(lhs_val, rhs_val);
+            // Python specs require that mod has same sign as second operand
+            if (lhs_val == MICROPY_FLOAT_ZERO) {
+                lhs_val = MICROPY_FLOAT_C_FUN(copysign)(MICROPY_FLOAT_ZERO, rhs_val);
+            } else {
+                if ((lhs_val < MICROPY_FLOAT_ZERO) != (rhs_val < MICROPY_FLOAT_ZERO)) {
+                    lhs_val += rhs_val;
+                }
+            }
+            break;
+        case MP_BINARY_OP_POWER:
+        case MP_BINARY_OP_INPLACE_POWER:
+            // 0 ** negative-finite is a division by zero
+            if (lhs_val == MICROPY_FLOAT_ZERO && rhs_val < MICROPY_FLOAT_ZERO && !isinf(rhs_val)) {
+                goto zero_division_error;
+            }
+            // negative base to a non-integer power yields a complex result
+            if (lhs_val < MICROPY_FLOAT_ZERO && rhs_val != MICROPY_FLOAT_C_FUN(floor)(rhs_val) && !isnan(rhs_val)) {
+                #if MICROPY_PY_BUILTINS_COMPLEX
+                return mp_obj_complex_binary_op(MP_BINARY_OP_POWER, lhs_val, 0, rhs_in);
+                #else
+                mp_raise_ValueError(MP_ERROR_TEXT("complex values not supported"));
+                #endif
+            }
+            #if MICROPY_PY_MATH_POW_FIX_NAN // Also see modmath.c.
+            if (lhs_val == MICROPY_FLOAT_CONST(1.0) || rhs_val == MICROPY_FLOAT_CONST(0.0)) {
+                lhs_val = MICROPY_FLOAT_CONST(1.0);
+                break;
+            }
+            #endif
+            lhs_val = MICROPY_FLOAT_C_FUN(pow)(lhs_val, rhs_val);
+            break;
+        case MP_BINARY_OP_DIVMOD: {
+            if (rhs_val == MICROPY_FLOAT_ZERO) {
+                goto zero_division_error;
+            }
+            mp_obj_float_divmod(&lhs_val, &rhs_val);
+            mp_obj_t tuple[2] = {
+                mp_obj_new_float(lhs_val),
+                mp_obj_new_float(rhs_val),
+            };
+            return mp_obj_new_tuple(2, tuple);
+        }
+        case MP_BINARY_OP_LESS:
+            return mp_obj_new_bool(lhs_val < rhs_val);
+        case MP_BINARY_OP_MORE:
+            return mp_obj_new_bool(lhs_val > rhs_val);
+        case MP_BINARY_OP_EQUAL:
+            return mp_obj_new_bool(lhs_val == rhs_val);
+        case MP_BINARY_OP_LESS_EQUAL:
+            return mp_obj_new_bool(lhs_val <= rhs_val);
+        case MP_BINARY_OP_MORE_EQUAL:
+            return mp_obj_new_bool(lhs_val >= rhs_val);
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+    return mp_obj_new_float(lhs_val);
+}
+
+// Convert a uint64_t to a 32-bit float without invoking the double-precision math routines,
+// which are large.
+mp_float_t uint64_to_float(uint64_t ui64) {
+    // 4294967296 = 2^32
+    // combine the two 32-bit halves using only single-precision arithmetic
+    return (mp_float_t)((uint32_t)(ui64 >> 32) * 4294967296.0f + (uint32_t)(ui64 & 0xffffffff));
+}
+
+// Convert a 32-bit float to a uint64_t without invoking extra math routines,
+// which are large.
+// Assume f >= 0.
+uint64_t float_to_uint64(float f) {
+    // 4294967296 = 2^32
+    const uint32_t upper_half = (uint32_t)(f / 4294967296.0f);
+    // NOTE(review): (uint32_t)f is only well-defined for f < 2^32; callers
+    // presumably guarantee this after the division above — TODO confirm
+    const uint32_t lower_half = (uint32_t)f;
+    return (((uint64_t)upper_half) << 32) + lower_half;
+}
+#pragma GCC diagnostic pop
+
+#endif // MICROPY_PY_BUILTINS_FLOAT
diff --git a/circuitpython/py/objfun.c b/circuitpython/py/objfun.c
new file mode 100644
index 0000000..55c3fbb
--- /dev/null
+++ b/circuitpython/py/objfun.c
@@ -0,0 +1,557 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/objtuple.h"
+#include "py/objfun.h"
+#include "py/runtime.h"
+#include "py/bc.h"
+#include "py/stackctrl.h"
+
+#include "supervisor/linker.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+// Note: the "name" entry in mp_obj_type_t for a function type must be
+// MP_QSTR_function because it is used to determine if an object is of generic
+// function type.
+
+/******************************************************************************/
+/* builtin functions */
+
+// Call handler for a builtin taking exactly 0 positional args and no keywords.
+STATIC mp_obj_t fun_builtin_0_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)args;
+    assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_0));
+    mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_arg_check_num(n_args, n_kw, 0, 0, false);
+    return self->fun._0();
+}
+
+const mp_obj_type_t mp_type_fun_builtin_0 = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_builtin_0_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Call handler for a builtin taking exactly 1 positional arg and no keywords.
+STATIC mp_obj_t fun_builtin_1_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_1));
+    mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_arg_check_num(n_args, n_kw, 1, 1, false);
+    return self->fun._1(args[0]);
+}
+
+const mp_obj_type_t mp_type_fun_builtin_1 = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_builtin_1_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Call handler for a builtin taking exactly 2 positional args and no keywords.
+STATIC mp_obj_t fun_builtin_2_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_2));
+    mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_arg_check_num(n_args, n_kw, 2, 2, false);
+    return self->fun._2(args[0], args[1]);
+}
+
+const mp_obj_type_t mp_type_fun_builtin_2 = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_builtin_2_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Call handler for a builtin taking exactly 3 positional args and no keywords.
+STATIC mp_obj_t fun_builtin_3_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_3));
+    mp_obj_fun_builtin_fixed_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_arg_check_num(n_args, n_kw, 3, 3, false);
+    return self->fun._3(args[0], args[1], args[2]);
+}
+
+const mp_obj_type_t mp_type_fun_builtin_3 = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_builtin_3_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Call handler for a builtin with a variable signature; the low bit of
+// self->sig selects whether keyword arguments are accepted.
+STATIC mp_obj_t fun_builtin_var_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    assert(mp_obj_is_type(self_in, &mp_type_fun_builtin_var));
+    mp_obj_fun_builtin_var_t *self = MP_OBJ_TO_PTR(self_in);
+
+    // check number of arguments
+    mp_arg_check_num_sig(n_args, n_kw, self->sig);
+
+    if (self->sig & 1) {
+        // function allows keywords
+
+        // we create a map directly from the given args array
+        // (kw args are stored after the positional args as key/value pairs)
+        mp_map_t kw_args;
+        mp_map_init_fixed_table(&kw_args, n_kw, args + n_args);
+
+        return self->fun.kw(n_args, args, &kw_args);
+
+    } else {
+        // function takes a variable number of arguments, but no keywords
+
+        return self->fun.var(n_args, args);
+    }
+}
+
+const mp_obj_type_t mp_type_fun_builtin_var = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_BUILTIN_FUN | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_builtin_var_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+/******************************************************************************/
+/* byte code functions */
+
+// Extract the function-name qstr from the code-info section of a bytecode
+// prelude.  With persistent code the qstr index is stored as 2 bytes LE;
+// otherwise it is a variable-length uint.
+qstr mp_obj_code_get_name(const byte *code_info) {
+    MP_BC_PRELUDE_SIZE_DECODE(code_info);
+    #if MICROPY_PERSISTENT_CODE
+    return code_info[0] | (code_info[1] << 8);
+    #else
+    return mp_decode_uint_value(code_info);
+    #endif
+}
+
+// Return the name of a bytecode (or native) function object.
+// Native functions have no stored name, so the empty qstr is returned.
+qstr mp_obj_fun_get_name(mp_const_obj_t fun_in) {
+    const mp_obj_fun_bc_t *fun = MP_OBJ_TO_PTR(fun_in);
+    #if MICROPY_EMIT_NATIVE
+    if (fun->base.type == &mp_type_fun_native) {
+        // TODO native functions don't have name stored
+        return MP_QSTR_;
+    }
+    #endif
+
+    // skip the signature part of the prelude to reach the code-info section
+    const byte *bc = fun->bytecode;
+    MP_BC_PRELUDE_SIG_DECODE(bc);
+    return mp_obj_code_get_name(bc);
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// Print a bytecode function as "<function name at 0xaddr>" (CPython style).
+STATIC void fun_bc_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_fun_bc_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "<function %q at %p>", mp_obj_fun_get_name(o_in), o);
+}
+#endif
+
+#if DEBUG_PRINT
+// Debug helper: print the sz object pointers starting at a.
+STATIC void dump_args(const mp_obj_t *a, size_t sz) {
+    DEBUG_printf("%p: ", a);
+    for (size_t i = 0; i < sz; i++) {
+        DEBUG_printf("%p ", a[i]);
+    }
+    DEBUG_printf("\n");
+}
+#else
+#define dump_args(...) (void)0
+#endif
+
+// With this macro you can tune the maximum number of function state bytes
+// that will be allocated on the stack. Any function that needs more
+// than this will try to use the heap, with fallback to stack allocation.
+#define VM_MAX_STATE_ON_STACK (sizeof(mp_uint_t) * 11)
+
+// Decode the bytecode prelude to obtain the number of state slots
+// (n_state_out_var) and the total state size in bytes (state_size_out_var)
+// needed to execute the function: object slots plus exception-stack entries.
+#define DECODE_CODESTATE_SIZE(bytecode, n_state_out_var, state_size_out_var) \
+    { \
+        const uint8_t *ip = bytecode; \
+        size_t n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args; \
+        MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state_out_var, n_exc_stack, scope_flags, n_pos_args, n_kwonly_args, n_def_args); \
+        (void)scope_flags; (void)n_pos_args; (void)n_kwonly_args; (void)n_def_args; \
+        \
+        /* state size in bytes */ \
+        state_size_out_var = n_state_out_var * sizeof(mp_obj_t) \
+            + n_exc_stack * sizeof(mp_exc_stack_t); \
+    }
+
+// Initialise a freshly-allocated mp_code_state_t and bind the call arguments;
+// also records the current globals so the caller can restore them later.
+#define INIT_CODESTATE(code_state, _fun_bc, _n_state, n_args, n_kw, args) \
+    code_state->fun_bc = _fun_bc; \
+    code_state->ip = 0; \
+    code_state->n_state = _n_state; \
+    mp_setup_code_state(code_state, n_args, n_kw, args); \
+    code_state->old_globals = mp_globals_get();
+
+#if MICROPY_STACKLESS
+// Allocate and initialise a code state for a stackless call of a bytecode
+// function.  Returns NULL on allocation failure (caller handles fallback).
+// On success the globals are switched to the function's context; the caller
+// is responsible for restoring code_state->old_globals when done.
+mp_code_state_t *mp_obj_fun_bc_prepare_codestate(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+    mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
+
+    size_t n_state, state_size;
+    DECODE_CODESTATE_SIZE(self->bytecode, n_state, state_size);
+
+    mp_code_state_t *code_state;
+    #if MICROPY_ENABLE_PYSTACK
+    code_state = mp_pystack_alloc(sizeof(mp_code_state_t) + state_size);
+    #else
+    // If we use m_new_obj_var(), then on no memory, MemoryError will be
+    // raised. But this is not correct exception for a function call,
+    // RuntimeError should be raised instead. So, we use m_new_obj_var_maybe(),
+    // return NULL, then vm.c takes the needed action (either raise
+    // RuntimeError or fallback to stack allocation).
+    code_state = m_new_obj_var_maybe(mp_code_state_t, byte, state_size);
+    if (!code_state) {
+        return NULL;
+    }
+    #endif
+
+    INIT_CODESTATE(code_state, self, n_state, n_args, n_kw, args);
+
+    // execute the byte code with the correct globals context
+    mp_globals_set(self->globals);
+
+    return code_state;
+}
+#endif
+
+// Call a bytecode function: allocate the VM state (pystack, heap, or stack
+// via alloca depending on size/config), run the bytecode, then free the
+// state and either return the result or re-raise the returned exception.
+STATIC mp_obj_t PLACE_IN_ITCM(fun_bc_call)(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+
+    DEBUG_printf("Input n_args: " UINT_FMT ", n_kw: " UINT_FMT "\n", n_args, n_kw);
+    DEBUG_printf("Input pos args: ");
+    dump_args(args, n_args);
+    DEBUG_printf("Input kw args: ");
+    dump_args(args + n_args, n_kw * 2);
+
+    mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
+
+    size_t n_state, state_size;
+    DECODE_CODESTATE_SIZE(self->bytecode, n_state, state_size);
+
+    // allocate state for locals and stack
+    mp_code_state_t *code_state = NULL;
+    #if MICROPY_ENABLE_PYSTACK
+    code_state = mp_pystack_alloc(sizeof(mp_code_state_t) + state_size);
+    #else
+    if (state_size > VM_MAX_STATE_ON_STACK) {
+        // state too big for the C stack; try the heap first
+        code_state = m_new_obj_var_maybe(mp_code_state_t, byte, state_size);
+        #if MICROPY_DEBUG_VM_STACK_OVERFLOW
+        if (code_state != NULL) {
+            memset(code_state->state, 0, state_size);
+        }
+        #endif
+    }
+    if (code_state == NULL) {
+        // small state, or heap allocation failed: fall back to the C stack
+        code_state = alloca(sizeof(mp_code_state_t) + state_size);
+        #if MICROPY_DEBUG_VM_STACK_OVERFLOW
+        memset(code_state->state, 0, state_size);
+        #endif
+        state_size = 0; // indicate that we allocated using alloca
+    }
+    #endif
+
+    INIT_CODESTATE(code_state, self, n_state, n_args, n_kw, args);
+
+    // execute the byte code with the correct globals context
+    mp_globals_set(self->globals);
+    mp_vm_return_kind_t vm_return_kind = mp_execute_bytecode(code_state, MP_OBJ_NULL);
+    mp_globals_set(code_state->old_globals);
+
+    #if MICROPY_DEBUG_VM_STACK_OVERFLOW
+    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
+        if (code_state->sp < code_state->state) {
+            mp_printf(MICROPY_DEBUG_PRINTER, "VM stack underflow: " INT_FMT "\n", code_state->sp - code_state->state);
+            assert(0);
+        }
+    }
+    const byte *bytecode_ptr = self->bytecode;
+    size_t n_state_unused, n_exc_stack_unused, scope_flags_unused;
+    size_t n_pos_args, n_kwonly_args, n_def_args_unused;
+    MP_BC_PRELUDE_SIG_DECODE_INTO(bytecode_ptr, n_state_unused, n_exc_stack_unused,
+        scope_flags_unused, n_pos_args, n_kwonly_args, n_def_args_unused);
+    // We can't check the case when an exception is returned in state[0]
+    // and there are no arguments, because in this case our detection slot may have
+    // been overwritten by the returned exception (which is allowed).
+    if (!(vm_return_kind == MP_VM_RETURN_EXCEPTION && n_pos_args + n_kwonly_args == 0)) {
+        // Just check to see that we have at least 1 null object left in the state.
+        bool overflow = true;
+        for (size_t i = 0; i < n_state - n_pos_args - n_kwonly_args; ++i) {
+            if (code_state->state[i] == MP_OBJ_NULL) {
+                overflow = false;
+                break;
+            }
+        }
+        if (overflow) {
+            mp_printf(MICROPY_DEBUG_PRINTER, "VM stack overflow state=%p n_state+1=" UINT_FMT "\n", code_state->state, n_state);
+            assert(0);
+        }
+    }
+    #endif
+
+    mp_obj_t result;
+    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
+        // return value is in *sp
+        result = *code_state->sp;
+    } else {
+        // must be an exception because normal functions can't yield
+        assert(vm_return_kind == MP_VM_RETURN_EXCEPTION);
+        // returned exception is in state[0]
+        result = code_state->state[0];
+    }
+
+    #if MICROPY_ENABLE_PYSTACK
+    mp_pystack_free(code_state);
+    #else
+    // free the state if it was allocated on the heap
+    if (state_size != 0) {
+        m_del_var(mp_code_state_t, byte, state_size, code_state);
+    }
+    #endif
+
+    if (vm_return_kind == MP_VM_RETURN_NORMAL) {
+        return result;
+    } else { // MP_VM_RETURN_EXCEPTION
+        nlr_raise(result);
+    }
+}
+
+#if MICROPY_PY_FUNCTION_ATTRS
+// Attribute loader for bytecode functions: supports __name__ and __globals__.
+// Only load is handled (dest[0] == MP_OBJ_NULL); store/delete are ignored.
+void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    if (attr == MP_QSTR___name__) {
+        dest[0] = MP_OBJ_NEW_QSTR(mp_obj_fun_get_name(self_in));
+    }
+    if (attr == MP_QSTR___globals__) {
+        mp_obj_fun_bc_t *self = MP_OBJ_TO_PTR(self_in);
+        dest[0] = MP_OBJ_FROM_PTR(self->globals);
+    }
+}
+#endif
+
+// Type object for bytecode functions.
+const mp_obj_type_t mp_type_fun_bc = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    #if MICROPY_CPYTHON_COMPAT
+    .print = fun_bc_print,
+    #endif
+    #if MICROPY_PY_FUNCTION_ATTRS
+    .attr = mp_obj_fun_bc_attr,
+    #endif
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_bc_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Create a bytecode function object.  def_args_in is an optional tuple of
+// positional defaults (or MP_OBJ_NULL); def_kw_args is an optional dict of
+// keyword defaults (or MP_OBJ_NULL).  Both are copied/stored in extra_args.
+mp_obj_t mp_obj_new_fun_bc(mp_obj_t def_args_in, mp_obj_t def_kw_args, const byte *code, const mp_uint_t *const_table) {
+    size_t n_def_args = 0;
+    size_t n_extra_args = 0;
+    mp_obj_tuple_t *def_args = MP_OBJ_TO_PTR(def_args_in);
+    if (def_args_in != MP_OBJ_NULL) {
+        assert(mp_obj_is_type(def_args_in, &mp_type_tuple));
+        n_def_args = def_args->len;
+        n_extra_args = def_args->len;
+    }
+    if (def_kw_args != MP_OBJ_NULL) {
+        n_extra_args += 1;
+    }
+    mp_obj_fun_bc_t *o = m_new_obj_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args);
+    o->base.type = &mp_type_fun_bc;
+    o->globals = mp_globals_get();
+    o->bytecode = code;
+    o->const_table = const_table;
+    // NOTE(review): this checks the converted pointer against NULL whereas the
+    // block above checks def_args_in against MP_OBJ_NULL — presumably
+    // equivalent for the object representations in use; confirm if reprs change
+    if (def_args != NULL) {
+        memcpy(o->extra_args, def_args->items, n_def_args * sizeof(mp_obj_t));
+    }
+    if (def_kw_args != MP_OBJ_NULL) {
+        // kw-defaults dict occupies the slot after the positional defaults
+        o->extra_args[n_def_args] = def_kw_args;
+    }
+    return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* native functions */
+
+#if MICROPY_EMIT_NATIVE
+
+// Call a native (machine-code) function: the "bytecode" pointer is actually
+// the entry point of compiled code with the mp_call_fun_t signature.
+STATIC mp_obj_t fun_native_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    MP_STACK_CHECK();
+    mp_obj_fun_bc_t *self = self_in;
+    mp_call_fun_t fun = MICROPY_MAKE_POINTER_CALLABLE((void *)self->bytecode);
+    return fun(self_in, n_args, n_kw, args);
+}
+
+const mp_obj_type_t mp_type_fun_native = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_native_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Create a native function object; reuses the fun_bc structure with the
+// type swapped to mp_type_fun_native and fun_data stored as "bytecode".
+mp_obj_t mp_obj_new_fun_native(mp_obj_t def_args_in, mp_obj_t def_kw_args, const void *fun_data, const mp_uint_t *const_table) {
+    mp_obj_fun_bc_t *o = mp_obj_new_fun_bc(def_args_in, def_kw_args, (const byte *)fun_data, const_table);
+    o->base.type = &mp_type_fun_native;
+    return o;
+}
+
+#endif // MICROPY_EMIT_NATIVE
+
+/******************************************************************************/
+/* inline assembler functions */
+
+#if MICROPY_EMIT_INLINE_ASM
+
+// Function object wrapping a block of inline-assembler machine code.
+typedef struct _mp_obj_fun_asm_t {
+    mp_obj_base_t base;
+    size_t n_args;         // number of arguments the asm routine takes (max 4)
+    const void *fun_data;  // GC must be able to trace this pointer
+    mp_uint_t type_sig;    // return-type signature for mp_native_to_obj
+} mp_obj_fun_asm_t;
+
+// Calling-convention typedefs for 0..4 machine-word arguments.
+typedef mp_uint_t (*inline_asm_fun_0_t)(void);
+typedef mp_uint_t (*inline_asm_fun_1_t)(mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_2_t)(mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_3_t)(mp_uint_t, mp_uint_t, mp_uint_t);
+typedef mp_uint_t (*inline_asm_fun_4_t)(mp_uint_t, mp_uint_t, mp_uint_t, mp_uint_t);
+
+// convert a MicroPython object to a sensible value for inline asm
+STATIC mp_uint_t convert_obj_for_inline_asm(mp_obj_t obj) {
+ // TODO for byte_array, pass pointer to the array
+ if (mp_obj_is_small_int(obj)) {
+ return MP_OBJ_SMALL_INT_VALUE(obj);
+ } else if (obj == mp_const_none) {
+ return 0;
+ } else if (obj == mp_const_false) {
+ return 0;
+ } else if (obj == mp_const_true) {
+ return 1;
+ } else if (mp_obj_is_type(obj, &mp_type_int)) {
+ return mp_obj_int_get_truncated(obj);
+ } else if (mp_obj_is_str(obj)) {
+ // pointer to the string (it's probably constant though!)
+ size_t l;
+ return (mp_uint_t)mp_obj_str_get_data(obj, &l);
+ } else {
+ const mp_obj_type_t *type = mp_obj_get_type(obj);
+ #if MICROPY_PY_BUILTINS_FLOAT
+ if (type == &mp_type_float) {
+ // convert float to int (could also pass in float registers)
+ return (mp_int_t)mp_obj_float_get(obj);
+ }
+ #endif
+ if (type == &mp_type_tuple || type == &mp_type_list) {
+ // pointer to start of tuple (could pass length, but then could use len(x) for that)
+ size_t len;
+ mp_obj_t *items;
+ mp_obj_get_array(obj, &len, &items);
+ return (mp_uint_t)items;
+ } else {
+ mp_buffer_info_t bufinfo;
+ if (mp_get_buffer(obj, &bufinfo, MP_BUFFER_READ)) {
+ // supports the buffer protocol, return a pointer to the data
+ return (mp_uint_t)bufinfo.buf;
+ } else {
+ // just pass along a pointer to the object
+ return (mp_uint_t)obj;
+ }
+ }
+ }
+}
+
+// Call an inline-assembler function: convert each Python argument to a
+// machine word, dispatch through the matching 0..4-arg calling convention,
+// and convert the returned word back to an object via the type signature.
+STATIC mp_obj_t fun_asm_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_fun_asm_t *self = self_in;
+
+    mp_arg_check_num(n_args, n_kw, self->n_args, self->n_args, false);
+
+    const void *fun = MICROPY_MAKE_POINTER_CALLABLE(self->fun_data);
+
+    mp_uint_t ret;
+    if (n_args == 0) {
+        ret = ((inline_asm_fun_0_t)fun)();
+    } else if (n_args == 1) {
+        ret = ((inline_asm_fun_1_t)fun)(convert_obj_for_inline_asm(args[0]));
+    } else if (n_args == 2) {
+        ret = ((inline_asm_fun_2_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]));
+    } else if (n_args == 3) {
+        ret = ((inline_asm_fun_3_t)fun)(convert_obj_for_inline_asm(args[0]), convert_obj_for_inline_asm(args[1]), convert_obj_for_inline_asm(args[2]));
+    } else {
+        // compiler allows at most 4 arguments
+        assert(n_args == 4);
+        ret = ((inline_asm_fun_4_t)fun)(
+            convert_obj_for_inline_asm(args[0]),
+            convert_obj_for_inline_asm(args[1]),
+            convert_obj_for_inline_asm(args[2]),
+            convert_obj_for_inline_asm(args[3])
+            );
+    }
+
+    return mp_native_to_obj(ret, self->type_sig);
+}
+
+// Type object for inline-assembler functions (file-local).
+STATIC const mp_obj_type_t mp_type_fun_asm = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_function,
+    MP_TYPE_EXTENDED_FIELDS(
+        .call = fun_asm_call,
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+// Create an inline-assembler function object wrapping fun_data.
+mp_obj_t mp_obj_new_fun_asm(size_t n_args, const void *fun_data, mp_uint_t type_sig) {
+    mp_obj_fun_asm_t *o = m_new_obj(mp_obj_fun_asm_t);
+    o->base.type = &mp_type_fun_asm;
+    o->n_args = n_args;
+    o->fun_data = fun_data;
+    o->type_sig = type_sig;
+    return o;
+}
+
+#endif // MICROPY_EMIT_INLINE_ASM
diff --git a/circuitpython/py/objfun.h b/circuitpython/py/objfun.h
new file mode 100644
index 0000000..aae780b
--- /dev/null
+++ b/circuitpython/py/objfun.h
@@ -0,0 +1,47 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJFUN_H
+#define MICROPY_INCLUDED_PY_OBJFUN_H
+
+#include "py/obj.h"
+
+// Object structure for a bytecode function (also reused for native code,
+// where `bytecode` points at machine code instead).
+typedef struct _mp_obj_fun_bc_t {
+    mp_obj_base_t base;
+    mp_obj_dict_t *globals; // the context within which this function was defined
+    const byte *bytecode; // bytecode for the function
+    const mp_uint_t *const_table; // constant table
+    #if MICROPY_PY_SYS_SETTRACE
+    const struct _mp_raw_code_t *rc;
+    #endif
+    // the following extra_args array is allocated space to take (in order):
+    //  - values of positional default args (if any)
+    //  - a single slot for default kw args dict (if it has them)
+    mp_obj_t extra_args[];
+} mp_obj_fun_bc_t;
+
+// Attribute loader (__name__, __globals__); defined in objfun.c.
+void mp_obj_fun_bc_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+
+#endif // MICROPY_INCLUDED_PY_OBJFUN_H
diff --git a/circuitpython/py/objgenerator.c b/circuitpython/py/objgenerator.c
new file mode 100644
index 0000000..c63ea6b
--- /dev/null
+++ b/circuitpython/py/objgenerator.c
@@ -0,0 +1,423 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/bc.h"
+#include "py/objstr.h"
+#include "py/objgenerator.h"
+#include "py/objfun.h"
+#include "py/stackctrl.h"
+
+#include "supervisor/shared/translate.h"
+
+// Instance of GeneratorExit exception - needed by generator.close()
+const mp_obj_exception_t mp_const_GeneratorExit_obj = {{&mp_type_GeneratorExit}, (mp_obj_tuple_t *)&mp_const_empty_tuple_obj, (mp_obj_traceback_t *)&mp_const_empty_traceback_obj};
+
+/******************************************************************************/
+/* generator wrapper */
+
+// Wrapper object around a function containing yield (or an async function);
+// calling the wrapper creates a fresh generator/coroutine instance.
+typedef struct _mp_obj_gen_wrap_t {
+    mp_obj_base_t base;
+    mp_obj_t *fun; // the wrapped function object, stored as an unwrapped pointer
+    bool coroutine_generator; // true when instances should behave as coroutines
+} mp_obj_gen_wrap_t;
+
+// A single suspended/running generator (or coroutine) instance.
+typedef struct _mp_obj_gen_instance_t {
+    mp_obj_base_t base;
+    // mp_const_none: Not-running, no exception.
+    // MP_OBJ_NULL: Running, no exception.
+    // other: Not running, pending exception.
+    mp_obj_t pend_exc;
+    bool coroutine_generator; // copied from the wrapper at creation time
+    // VM state; must be the last field — the variable-size state/exception
+    // stacks are allocated immediately after this struct.
+    mp_code_state_t code_state;
+} mp_obj_gen_instance_t;
+
+/******************************************************************************/
+// native generator wrapper
+
+#if MICROPY_EMIT_NATIVE
+
+// Instantiate a generator from a natively-compiled function. The native
+// "bytecode" blob begins with two machine words: [0] = location of the
+// bytecode prelude, [1] = offset of the machine-code entry point.
+STATIC mp_obj_t native_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
+
+    // Determine start of prelude, and extract n_state from it
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wcast-align"
+    uintptr_t prelude_offset = ((uintptr_t *)self_fun->bytecode)[0];
+    #pragma GCC diagnostic pop
+
+    #if MICROPY_EMIT_NATIVE_PRELUDE_AS_BYTES_OBJ
+    // Prelude is in bytes object in const_table, at index prelude_offset
+    mp_obj_str_t *prelude_bytes = MP_OBJ_TO_PTR(self_fun->const_table[prelude_offset]);
+    prelude_offset = (const byte *)prelude_bytes->data - self_fun->bytecode;
+    #endif
+    const uint8_t *ip = self_fun->bytecode + prelude_offset;
+    size_t n_state, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_args;
+    MP_BC_PRELUDE_SIG_DECODE_INTO(ip, n_state, n_exc_stack_unused, scope_flags, n_pos_args, n_kwonly_args, n_def_args);
+    // The decoded exception-stack size is deliberately unused here
+    // (n_exc_stack_unused); no exception stack is allocated for native code.
+    size_t n_exc_stack = 0;
+
+    // Allocate the generator object, with room for local stack and exception stack
+    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
+        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
+    o->base.type = &mp_type_gen_instance;
+
+    // Parse the input arguments and set up the code state
+    o->coroutine_generator = self->coroutine_generator;
+    o->pend_exc = mp_const_none;
+    o->code_state.fun_bc = self_fun;
+    // NOTE(review): ip temporarily holds the prelude *offset* (not a pointer)
+    // for mp_setup_code_state; it is replaced with the real entry point below.
+    o->code_state.ip = (const byte *)prelude_offset;
+    o->code_state.n_state = n_state;
+    mp_setup_code_state(&o->code_state, n_args, n_kw, args);
+
+    // Indicate we are a native function, which doesn't use this variable
+    o->code_state.exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_SENTINEL;
+
+    // Prepare the generator instance for execution
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wcast-align"
+    uintptr_t start_offset = ((uintptr_t *)self_fun->bytecode)[1];
+    #pragma GCC diagnostic pop
+    o->code_state.ip = MICROPY_MAKE_POINTER_CALLABLE((void *)(self_fun->bytecode + start_offset));
+
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_EMIT_NATIVE
+
+// Instantiate a generator from a bytecode function: decode the prelude to
+// size the state and exception stacks, allocate the instance with that
+// trailing storage, and bind the call arguments into the new code state.
+STATIC mp_obj_t bc_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
+    assert(self_fun->base.type == &mp_type_fun_bc);
+
+    // bytecode prelude: get state size and exception stack size
+    // (the macro declares n_state and n_exc_stack as locals)
+    const uint8_t *ip = self_fun->bytecode;
+    MP_BC_PRELUDE_SIG_DECODE(ip);
+
+    // allocate the generator object, with room for local stack and exception stack
+    mp_obj_gen_instance_t *o = m_new_obj_var(mp_obj_gen_instance_t, byte,
+        n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t));
+    o->base.type = &mp_type_gen_instance;
+
+    o->coroutine_generator = self->coroutine_generator;
+    o->pend_exc = mp_const_none;
+    o->code_state.fun_bc = self_fun;
+    // NOTE(review): presumably mp_setup_code_state sets the real starting ip;
+    // ip == 0 is also the "generator finished" sentinel checked in resume.
+    o->code_state.ip = 0;
+    o->code_state.n_state = n_state;
+    mp_setup_code_state(&o->code_state, n_args, n_kw, args);
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Calling the generator wrapper creates a new generator/coroutine instance;
+// dispatch to the native or bytecode variant based on the wrapped function's
+// type.
+STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    #if MICROPY_EMIT_NATIVE
+    mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
+    if (self_fun->base.type == &mp_type_fun_native) {
+        // Pass the original object handle, not the unwrapped pointer:
+        // native_gen_wrap_call takes an mp_obj_t and unwraps it itself, and
+        // mp_obj_t is only interchangeable with a raw pointer under some
+        // object representations.
+        return native_gen_wrap_call(self_in, n_args, n_kw, args);
+    }
+    #endif
+    return bc_gen_wrap_call(self_in, n_args, n_kw, args);
+}
+
+#if MICROPY_PY_FUNCTION_ATTRS
+// Forward attribute lookups on the generator wrapper to the underlying
+// bytecode function object. Use the file-wide STATIC macro for consistency
+// with every other file-scope function here.
+STATIC void gen_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    mp_obj_gen_wrap_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_obj_fun_bc_t *self_fun = (mp_obj_fun_bc_t *)self->fun;
+    mp_obj_fun_bc_attr(MP_OBJ_FROM_PTR(self_fun), attr, dest);
+}
+#endif
+
+const mp_obj_type_t mp_type_gen_wrap = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_generator,
+ #if MICROPY_PY_FUNCTION_ATTRS
+ .attr = gen_attr,
+ #endif
+ MP_TYPE_EXTENDED_FIELDS(
+ .call = gen_wrap_call,
+ .unary_op = mp_generic_unary_op,
+ ),
+};
+
+
+// Create a generator-wrapper object around a function. When is_coroutine is
+// true, instances produced by calling the wrapper behave as coroutines.
+mp_obj_t mp_obj_new_gen_wrap(mp_obj_t fun, bool is_coroutine) {
+    mp_obj_gen_wrap_t *wrap = m_new_obj(mp_obj_gen_wrap_t);
+    wrap->fun = MP_OBJ_TO_PTR(fun);
+    wrap->coroutine_generator = is_coroutine;
+    wrap->base.type = &mp_type_gen_wrap;
+    return MP_OBJ_FROM_PTR(wrap);
+}
+
+/******************************************************************************/
+/* generator instance */
+
+// repr/str for generator and coroutine instances, e.g.
+// "<generator object 'name' at 0x...>" (or "coroutine" for async instances).
+STATIC void gen_instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind; // str() and repr() produce identical output
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    #if MICROPY_PY_ASYNC_AWAIT
+    if (self->coroutine_generator) {
+        mp_printf(print, "<%q object '%q' at %p>", MP_QSTR_coroutine, mp_obj_fun_get_name(MP_OBJ_FROM_PTR(self->code_state.fun_bc)), self);
+    } else {
+        mp_printf(print, "<%q object '%q' at %p>", MP_QSTR_generator, mp_obj_fun_get_name(MP_OBJ_FROM_PTR(self->code_state.fun_bc)), self);
+    }
+    #else
+    mp_printf(print, "<generator object '%q' at %p>", mp_obj_fun_get_name(MP_OBJ_FROM_PTR(self->code_state.fun_bc)), self);
+    #endif
+}
+
+// Resume execution of a (possibly native) generator instance.
+// send_value: value delivered to the suspended yield (must be None for a
+//   just-started generator); throw_value: exception to inject, or MP_OBJ_NULL.
+// On return *ret_val holds the yielded value, the StopIteration value, or the
+// raised exception, according to the returned VM kind.
+mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
+    MP_STACK_CHECK();
+    mp_check_self(mp_obj_is_type(self_in, &mp_type_gen_instance));
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->code_state.ip == 0) {
+        // Trying to resume an already stopped generator.
+        // This is an optimised "raise StopIteration(None)".
+        *ret_val = mp_const_none;
+        return MP_VM_RETURN_NORMAL;
+    }
+
+    // Ensure the generator cannot be reentered during execution
+    // (pend_exc == MP_OBJ_NULL marks "currently running").
+    if (self->pend_exc == MP_OBJ_NULL) {
+        mp_raise_ValueError(MP_ERROR_TEXT("generator already executing"));
+    }
+
+    #if MICROPY_PY_GENERATOR_PEND_THROW
+    // If exception is pending (set using .pend_throw()), process it now.
+    if (self->pend_exc != mp_const_none) {
+        throw_value = self->pend_exc;
+    }
+    #endif
+
+    // If the generator is started, allow sending a value.
+    // (sp == state - 1 means the value stack is empty, i.e. not yet started.)
+    if (self->code_state.sp == self->code_state.state - 1) {
+        if (send_value != mp_const_none) {
+            mp_raise_TypeError(MP_ERROR_TEXT("can't send non-None value to a just-started generator"));
+        }
+    } else {
+        *self->code_state.sp = send_value;
+    }
+
+    // Mark as running
+    self->pend_exc = MP_OBJ_NULL;
+
+    // Set up the correct globals context for the generator and execute it
+    self->code_state.old_globals = mp_globals_get();
+    mp_globals_set(self->code_state.fun_bc->globals);
+
+    mp_vm_return_kind_t ret_kind;
+
+    #if MICROPY_EMIT_NATIVE
+    if (self->code_state.exc_sp_idx == MP_CODE_STATE_EXC_SP_IDX_SENTINEL) {
+        // A native generator, with entry point 2 words into the "bytecode" pointer
+        typedef uintptr_t (*mp_fun_native_gen_t)(void *, mp_obj_t);
+        mp_fun_native_gen_t fun = MICROPY_MAKE_POINTER_CALLABLE((const void *)(self->code_state.fun_bc->bytecode + 2 * sizeof(uintptr_t)));
+        ret_kind = fun((void *)&self->code_state, throw_value);
+    } else
+    #endif
+    {
+        // A bytecode generator
+        ret_kind = mp_execute_bytecode(&self->code_state, throw_value);
+    }
+
+    // Restore the caller's globals before reporting the result
+    mp_globals_set(self->code_state.old_globals);
+
+    // Mark as not running
+    self->pend_exc = mp_const_none;
+
+    switch (ret_kind) {
+        case MP_VM_RETURN_NORMAL:
+        default:
+            // Explicitly mark generator as completed. If we don't do this,
+            // subsequent next() may re-execute statements after last yield
+            // again and again, leading to side effects.
+            self->code_state.ip = 0;
+            // This is an optimised "raise StopIteration(*ret_val)".
+            *ret_val = *self->code_state.sp;
+            break;
+
+        case MP_VM_RETURN_YIELD:
+            *ret_val = *self->code_state.sp;
+            #if MICROPY_PY_GENERATOR_PEND_THROW
+            // NOTE(review): presumably resets the slot so a consumed value
+            // isn't kept alive / re-seen — confirm against upstream.
+            *self->code_state.sp = mp_const_none;
+            #endif
+            break;
+
+        case MP_VM_RETURN_EXCEPTION: {
+            self->code_state.ip = 0;
+            *ret_val = self->code_state.state[0];
+            // PEP479: if StopIteration is raised inside a generator it is replaced with RuntimeError
+            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(*ret_val)), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+                *ret_val = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator raised StopIteration"));
+            }
+            break;
+        }
+    }
+
+    return ret_kind;
+}
+
+// Resume the generator and convert the VM result to Python-level semantics:
+// yield -> return the value; normal return -> StopIteration (raised, or
+// returned as MP_OBJ_STOP_ITERATION when raise_stop_iteration is false);
+// exception -> re-raised here.
+STATIC mp_obj_t gen_resume_and_raise(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, bool raise_stop_iteration) {
+    mp_obj_t ret;
+    switch (mp_obj_gen_resume(self_in, send_value, throw_value, &ret)) {
+        case MP_VM_RETURN_NORMAL:
+        default:
+            // A normal return is a StopIteration, either raise it or return
+            // MP_OBJ_STOP_ITERATION as an optimisation.
+            if (ret == mp_const_none) {
+                ret = MP_OBJ_NULL;
+            }
+            if (raise_stop_iteration) {
+                mp_raise_StopIteration(ret);
+            } else {
+                return mp_make_stop_iteration(ret);
+            }
+
+        case MP_VM_RETURN_YIELD:
+            return ret;
+
+        case MP_VM_RETURN_EXCEPTION:
+            nlr_raise(ret);
+    }
+}
+
+// Implements next(gen). Coroutines reject iteration with a TypeError.
+STATIC mp_obj_t gen_instance_iternext(mp_obj_t self_in) {
+    #if MICROPY_PY_ASYNC_AWAIT
+    // This translate is literally too much for m0 boards
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->coroutine_generator) {
+        mp_raise_TypeError(MP_ERROR_TEXT("'coroutine' object is not an iterator"));
+    }
+    #endif
+    // false: exhaustion is reported as MP_OBJ_STOP_ITERATION, not raised.
+    return gen_resume_and_raise(self_in, mp_const_none, MP_OBJ_NULL, false);
+}
+
+// generator.send(value): resume with the given value; raises StopIteration
+// when the generator finishes.
+STATIC mp_obj_t gen_instance_send(mp_obj_t self_in, mp_obj_t send_value) {
+    return gen_resume_and_raise(self_in, send_value, MP_OBJ_NULL, true);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_send_obj, gen_instance_send);
+
+#if MICROPY_PY_ASYNC_AWAIT
+// __await__ on a coroutine instance: awaiting a coroutine iterates the
+// coroutine object itself. Plain generators refuse with AttributeError.
+STATIC mp_obj_t gen_instance_await(mp_obj_t self_in) {
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (!self->coroutine_generator) {
+        // Pretend like a generator does not have this coroutine behavior.
+        // Pay no attention to the dir() behind the curtain
+        mp_raise_AttributeError(MP_ERROR_TEXT("type object 'generator' has no attribute '__await__'"));
+    }
+    // You can directly call send on a coroutine generator or you can __await__ then send on the return of that.
+    // Return the object handle itself — not the unwrapped `self` pointer,
+    // which is only equivalent to an mp_obj_t under pointer-style object
+    // representations.
+    return self_in;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(gen_instance_await_obj, gen_instance_await);
+#endif
+
+STATIC mp_obj_t gen_instance_close(mp_obj_t self_in);
+// generator.throw(...): inject an exception at the suspended yield point.
+// (args[0] is self; 2 <= n_args <= 4 is enforced by the function object below.)
+STATIC mp_obj_t gen_instance_throw(size_t n_args, const mp_obj_t *args) {
+    // The signature of this function is: throw(type[, value[, traceback]])
+    // CPython will pass all given arguments through the call chain and process them
+    // at the point they are used (native generators will handle them differently to
+    // user-defined generators with a throw() method). To save passing multiple
+    // values, MicroPython instead does partial processing here to reduce it down to
+    // one argument and passes that through:
+    // - if only args[1] is given, or args[2] is given but is None, args[1] is
+    // passed through (in the standard case it is an exception class or instance)
+    // - if args[2] is given and not None it is passed through (in the standard
+    // case it would be an exception instance and args[1] its corresponding class)
+    // - args[3] is always ignored
+
+    mp_obj_t exc = args[1];
+    if (n_args > 2 && args[2] != mp_const_none) {
+        exc = args[2];
+    }
+
+    return gen_resume_and_raise(args[0], mp_const_none, exc, true);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(gen_instance_throw_obj, 2, 4, gen_instance_throw);
+
+// generator.close(): inject GeneratorExit; swallowing it (or finishing
+// normally) is a successful close, yielding again is a RuntimeError.
+STATIC mp_obj_t gen_instance_close(mp_obj_t self_in) {
+    mp_obj_t ret;
+    switch (mp_obj_gen_resume(self_in, mp_const_none, MP_OBJ_FROM_PTR(&mp_const_GeneratorExit_obj), &ret)) {
+        case MP_VM_RETURN_YIELD:
+            mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator ignored GeneratorExit"));
+
+        // Swallow GeneratorExit (== successful close), and re-raise any other
+        case MP_VM_RETURN_EXCEPTION:
+            // ret should always be an instance of an exception class
+            if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(ret)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
+                return mp_const_none;
+            }
+            nlr_raise(ret);
+
+        default:
+            // The only choice left is MP_VM_RETURN_NORMAL which is successful close
+            return mp_const_none;
+    }
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(gen_instance_close_obj, gen_instance_close);
+
+#if MICROPY_PY_GENERATOR_PEND_THROW
+// generator.pend_throw(exc): arrange for exc to be raised at the next resume,
+// without running the generator now. Returns the previously pending
+// exception (or None). Cannot be called while the generator is executing.
+STATIC mp_obj_t gen_instance_pend_throw(mp_obj_t self_in, mp_obj_t exc_in) {
+    mp_obj_gen_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (self->pend_exc == MP_OBJ_NULL) {
+        // MP_OBJ_NULL in pend_exc means the generator is currently running.
+        mp_raise_ValueError(MP_ERROR_TEXT("generator already executing"));
+    }
+    mp_obj_t prev = self->pend_exc;
+    self->pend_exc = exc_in;
+    return prev;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(gen_instance_pend_throw_obj, gen_instance_pend_throw);
+#endif
+
+STATIC const mp_rom_map_elem_t gen_instance_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&gen_instance_close_obj) },
+ { MP_ROM_QSTR(MP_QSTR_send), MP_ROM_PTR(&gen_instance_send_obj) },
+ { MP_ROM_QSTR(MP_QSTR_throw), MP_ROM_PTR(&gen_instance_throw_obj) },
+ #if MICROPY_PY_GENERATOR_PEND_THROW
+ { MP_ROM_QSTR(MP_QSTR_pend_throw), MP_ROM_PTR(&gen_instance_pend_throw_obj) },
+ #endif
+ #if MICROPY_PY_ASYNC_AWAIT
+ { MP_ROM_QSTR(MP_QSTR___await__), MP_ROM_PTR(&gen_instance_await_obj) },
+ #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(gen_instance_locals_dict, gen_instance_locals_dict_table);
+
+const mp_obj_type_t mp_type_gen_instance = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_generator,
+ .print = gen_instance_print,
+ .locals_dict = (mp_obj_dict_t *)&gen_instance_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = mp_generic_unary_op,
+ .getiter = mp_identity_getiter,
+ .iternext = gen_instance_iternext,
+ ),
+};
diff --git a/circuitpython/py/objgenerator.h b/circuitpython/py/objgenerator.h
new file mode 100644
index 0000000..4b7f8c1
--- /dev/null
+++ b/circuitpython/py/objgenerator.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJGENERATOR_H
+#define MICROPY_INCLUDED_PY_OBJGENERATOR_H
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+// Resume a generator instance, optionally sending a value or throwing an
+// exception into it; *ret_val receives the result and the VM return kind
+// (normal / yield / exception) is returned.
+mp_vm_return_kind_t mp_obj_gen_resume(mp_obj_t self_in, mp_obj_t send_val, mp_obj_t throw_val, mp_obj_t *ret_val);
+
+#endif // MICROPY_INCLUDED_PY_OBJGENERATOR_H
diff --git a/circuitpython/py/objgetitemiter.c b/circuitpython/py/objgetitemiter.c
new file mode 100644
index 0000000..098e7da
--- /dev/null
+++ b/circuitpython/py/objgetitemiter.c
@@ -0,0 +1,78 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/runtime.h"
+
+// this is a wrapper object that turns something that has a __getitem__ method into an iterator
+
+typedef struct _mp_obj_getitem_iter_t {
+ mp_obj_base_t base;
+ mp_obj_t args[3];
+} mp_obj_getitem_iter_t;
+
+// Fetch the next item by calling the stored bound __getitem__ with an
+// increasing index; StopIteration or IndexError from the call ends iteration.
+STATIC mp_obj_t it_iternext(mp_obj_t self_in) {
+    mp_obj_getitem_iter_t *self = MP_OBJ_TO_PTR(self_in);
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        // try to get next item
+        // (args[0..1] are the method/self pair, args[2] is the current index)
+        mp_obj_t value = mp_call_method_n_kw(1, 0, self->args);
+        self->args[2] = MP_OBJ_NEW_SMALL_INT(MP_OBJ_SMALL_INT_VALUE(self->args[2]) + 1);
+        nlr_pop();
+        return value;
+    } else {
+        // an exception was raised
+        mp_obj_type_t *t = (mp_obj_type_t *)((mp_obj_base_t *)nlr.ret_val)->type;
+        if (t == &mp_type_StopIteration || t == &mp_type_IndexError) {
+            // these two exception types mark the end of the sequence
+            return MP_OBJ_STOP_ITERATION;
+        } else {
+            // re-raise exception
+            nlr_jump(nlr.ret_val);
+        }
+    }
+}
+
+STATIC const mp_obj_type_t mp_type_it = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_iterator,
+ MP_TYPE_EXTENDED_FIELDS(
+ .getiter = mp_identity_getiter,
+ .iternext = it_iternext,
+ ),
+};
+
+// args are those returned from mp_load_method_maybe (ie either an attribute or a method)
+// The iterator is constructed in the caller-supplied iter_buf (no heap
+// allocation); iteration starts at index 0.
+mp_obj_t mp_obj_new_getitem_iter(mp_obj_t *args, mp_obj_iter_buf_t *iter_buf) {
+    assert(sizeof(mp_obj_getitem_iter_t) <= sizeof(mp_obj_iter_buf_t));
+    mp_obj_getitem_iter_t *o = (mp_obj_getitem_iter_t *)iter_buf;
+    o->base.type = &mp_type_it;
+    o->args[0] = args[0];
+    o->args[1] = args[1];
+    o->args[2] = MP_OBJ_NEW_SMALL_INT(0);
+    return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objint.c b/circuitpython/py/objint.c
new file mode 100644
index 0000000..50bcbf5
--- /dev/null
+++ b/circuitpython/py/objint.c
@@ -0,0 +1,582 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+#include "py/parsenum.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/runtime.h"
+#include "py/binary.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+// This dispatcher function is expected to be independent of the implementation of long int
+// int() / int(x) / int(str, base): construct an int from nothing, an existing
+// number, a string, or any object supporting MP_UNARY_OP_INT.
+STATIC mp_obj_t mp_obj_int_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+    mp_arg_check_num(n_args, n_kw, 0, 2, false);
+
+    switch (n_args) {
+        case 0:
+            return MP_OBJ_NEW_SMALL_INT(0);
+
+        case 1:
+            if (mp_obj_is_int(args[0])) {
+                // already an int (small or long), just return it
+                return args[0];
+            } else if (mp_obj_is_str_or_bytes(args[0])) {
+                // a string, parse it
+                size_t l;
+                const char *s = mp_obj_str_get_data(args[0], &l);
+                return mp_parse_num_integer(s, l, 0, NULL);
+            #if MICROPY_PY_BUILTINS_FLOAT
+            } else if (mp_obj_is_float(args[0])) {
+                return mp_obj_new_int_from_float(mp_obj_float_get(args[0]));
+            #endif
+            } else {
+                // fall back to the object's own integer conversion
+                return mp_unary_op(MP_UNARY_OP_INT, args[0]);
+            }
+
+        case 2:
+        default: {
+            // should be a string, parse it with the explicit base
+            size_t l;
+            const char *s = mp_obj_str_get_data(args[0], &l);
+            return mp_parse_num_integer(s, l, mp_obj_get_int(args[1]), NULL);
+        }
+    }
+}
+
+#if MICROPY_PY_BUILTINS_FLOAT
+
+typedef enum {
+ MP_FP_CLASS_FIT_SMALLINT,
+ MP_FP_CLASS_FIT_LONGINT,
+ MP_FP_CLASS_OVERFLOW
+} mp_fp_as_int_class_t;
+
+// Classify a float by whether its integer value fits a small int, needs a
+// long int, or overflows. Operates on the raw IEEE-754 bit pattern of val
+// (sign/exponent extracted from the high 32 bits).
+STATIC mp_fp_as_int_class_t mp_classify_fp_as_int(mp_float_t val) {
+    union {
+        mp_float_t f;
+        #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+        uint32_t i;
+        #elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+        uint32_t i[2];
+        #endif
+    } u = {val};
+
+    uint32_t e;
+    #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+    e = u.i;
+    #elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+    e = u.i[MP_ENDIANNESS_LITTLE];
+    #endif
+#define MP_FLOAT_SIGN_SHIFT_I32 ((MP_FLOAT_FRAC_BITS + MP_FLOAT_EXP_BITS) % 32)
+#define MP_FLOAT_EXP_SHIFT_I32 (MP_FLOAT_FRAC_BITS % 32)
+
+    if (e & (1U << MP_FLOAT_SIGN_SHIFT_I32)) {
+        // Negative value. NOTE(review): the adjustments below bias the
+        // exponent so the range comparisons are conservative for negative
+        // values — confirm against upstream MicroPython objint.c if modified.
+        #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+        e |= u.i[MP_ENDIANNESS_BIG] != 0;
+        #endif
+        if ((e & ~(1U << MP_FLOAT_SIGN_SHIFT_I32)) == 0) {
+            // handle case of -0 (when sign is set but rest of bits are zero)
+            e = 0;
+        } else {
+            e += ((1U << MP_FLOAT_EXP_BITS) - 1) << MP_FLOAT_EXP_SHIFT_I32;
+        }
+    } else {
+        // Positive: keep only the exponent bits.
+        e &= ~((1U << MP_FLOAT_EXP_SHIFT_I32) - 1);
+    }
+    // 8 * sizeof(uintptr_t) counts the number of bits for a small int
+    // TODO provide a way to configure this properly
+    if (e <= ((8 * sizeof(uintptr_t) + MP_FLOAT_EXP_BIAS - 3) << MP_FLOAT_EXP_SHIFT_I32)) {
+        return MP_FP_CLASS_FIT_SMALLINT;
+    }
+    #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+    if (e <= (((sizeof(long long) * MP_BITS_PER_BYTE) + MP_FLOAT_EXP_BIAS - 2) << MP_FLOAT_EXP_SHIFT_I32)) {
+        return MP_FP_CLASS_FIT_LONGINT;
+    }
+    #endif
+    #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+    // mpz is arbitrary precision, so anything finite fits.
+    return MP_FP_CLASS_FIT_LONGINT;
+    #else
+    return MP_FP_CLASS_OVERFLOW;
+    #endif
+}
+#undef MP_FLOAT_SIGN_SHIFT_I32
+#undef MP_FLOAT_EXP_SHIFT_I32
+
+// Convert a float to an int object. Raises OverflowError for +/-inf,
+// ValueError for NaN, and (without an adequate long-int implementation) an
+// error when the value is too large.
+mp_obj_t mp_obj_new_int_from_float(mp_float_t val) {
+    mp_float_union_t u = {val};
+    // IEEE-754: if biased exponent is all 1 bits...
+    if (u.p.exp == ((1 << MP_FLOAT_EXP_BITS) - 1)) {
+        // ...then number is Inf (positive or negative) if fraction is 0, else NaN.
+        if (u.p.frc == 0) {
+            mp_raise_OverflowError_varg(MP_ERROR_TEXT("can't convert %q to %q"), MP_QSTR_inf, MP_QSTR_int);
+        } else {
+            mp_raise_ValueError_varg(MP_ERROR_TEXT("can't convert %q to %q"), MP_QSTR_NaN, MP_QSTR_int);
+        }
+    } else {
+        mp_fp_as_int_class_t icl = mp_classify_fp_as_int(val);
+        if (icl == MP_FP_CLASS_FIT_SMALLINT) {
+            return MP_OBJ_NEW_SMALL_INT((mp_int_t)val);
+        #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+        } else {
+            // arbitrary-precision path: always representable
+            mp_obj_int_t *o = mp_obj_int_new_mpz();
+            mpz_set_from_float(&o->mpz, val);
+            return MP_OBJ_FROM_PTR(o);
+        }
+        #else
+        #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+        } else if (icl == MP_FP_CLASS_FIT_LONGINT) {
+            return mp_obj_new_int_from_ll((long long)val);
+        #endif
+        } else {
+            mp_raise_ValueError(MP_ERROR_TEXT("float too big"));
+        }
+        #endif
+    }
+}
+
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+typedef mp_longint_impl_t fmt_int_t;
+typedef unsigned long long fmt_uint_t;
+#else
+typedef mp_int_t fmt_int_t;
+typedef mp_uint_t fmt_uint_t;
+#endif
+
+// Print an integer object in base 10. A small on-stack buffer is tried
+// first; if mp_obj_int_formatted needs more room it allocates a larger
+// buffer, which is freed here.
+void mp_obj_int_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)kind;
+    char small_buf[sizeof(fmt_int_t) * 4];
+    char *fmt_buf = small_buf;
+    size_t fmt_buf_len = sizeof(small_buf);
+    size_t formatted_len;
+
+    char *formatted = mp_obj_int_formatted(&fmt_buf, &fmt_buf_len, &formatted_len, self_in, 10, NULL, '\0', '\0');
+    mp_print_str(print, formatted);
+
+    if (fmt_buf != small_buf) {
+        // A dynamic buffer was allocated; release it.
+        m_del(char, fmt_buf, fmt_buf_len);
+    }
+}
+
+STATIC const uint8_t log_base2_floor[] = {
+ 0, 1, 1, 2,
+ 2, 2, 2, 3,
+ 3, 3, 3, 3,
+ 3, 3, 3, 4,
+ /* if needed, these are the values for higher bases
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 5
+ */
+};
+
+// Compute an upper bound on the buffer size needed to format an integer of
+// num_bits bits in the given base (2..16), including the optional prefix
+// string, grouping commas, a sign character and the terminating NUL.
+size_t mp_int_format_size(size_t num_bits, int base, const char *prefix, char comma) {
+    assert(2 <= base && base <= 16);
+    size_t digits = num_bits / log_base2_floor[base - 1] + 1;
+    size_t commas = comma ? digits / 3 : 0;
+    size_t prefix_chars = prefix ? strlen(prefix) : 0;
+    // +1 for the sign, +1 for the NUL terminator.
+    return digits + commas + prefix_chars + 2;
+}
+
+// This routine expects you to pass in a buffer and size (in *buf and *buf_size).
+// If, for some reason, this buffer is too small, then it will allocate a
+// buffer and return the allocated buffer and size in *buf and *buf_size. It
+// is the callers responsibility to free this allocated buffer.
+//
+// The resulting formatted string will be returned from this function and the
+// formatted size will be in *fmt_size.
+// Format an int object into *buf (reallocating if too small — see the block
+// comment above); returns a pointer to the first character of the result and
+// stores its length in *fmt_size.
+char *mp_obj_int_formatted(char **buf, size_t *buf_size, size_t *fmt_size, mp_const_obj_t self_in,
+    int base, const char *prefix, char base_char, char comma) {
+    fmt_int_t num;
+    #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+    // Only have small ints; get the integer value to format.
+    num = MP_OBJ_SMALL_INT_VALUE(self_in);
+    #else
+    if (mp_obj_is_small_int(self_in)) {
+        // A small int; get the integer value to format.
+        num = MP_OBJ_SMALL_INT_VALUE(self_in);
+    } else {
+        assert(mp_obj_is_type(self_in, &mp_type_int));
+        // Not a small int.
+        #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+        const mp_obj_int_t *self = self_in;
+        // Get the value to format; mp_obj_get_int truncates to mp_int_t.
+        num = self->val;
+        #else
+        // Delegate to the implementation for the long int.
+        return mp_obj_int_formatted_impl(buf, buf_size, fmt_size, self_in, base, prefix, base_char, comma);
+        #endif
+    }
+    #endif
+
+    // Work on the absolute value and remember the sign.
+    // NOTE(review): negating the most-negative fmt_int_t is signed-overflow
+    // UB in C — confirm the callers can never pass that value here.
+    char sign = '\0';
+    if (num < 0) {
+        num = -num;
+        sign = '-';
+    }
+
+    size_t needed_size = mp_int_format_size(sizeof(fmt_int_t) * 8, base, prefix, comma);
+    if (needed_size > *buf_size) {
+        *buf = m_new(char, needed_size);
+        *buf_size = needed_size;
+    }
+    char *str = *buf;
+
+    // Digits are generated least-significant first, filling backwards from
+    // the end of the buffer.
+    char *b = str + needed_size;
+    *(--b) = '\0';
+    char *last_comma = b;
+
+    if (num == 0) {
+        *(--b) = '0';
+    } else {
+        do {
+            // The cast to fmt_uint_t is because num is positive and we want unsigned arithmetic
+            int c = (fmt_uint_t)num % base;
+            num = (fmt_uint_t)num / base;
+            if (c >= 10) {
+                c += base_char - 10;
+            } else {
+                c += '0';
+            }
+            *(--b) = c;
+            if (comma && num != 0 && b > str && (last_comma - b) == 3) {
+                *(--b) = comma;
+                last_comma = b;
+            }
+        }
+        while (b > str && num != 0);
+    }
+    if (prefix) {
+        // Copy the prefix (e.g. "0x") in front of the digits if it fits.
+        size_t prefix_len = strlen(prefix);
+        char *p = b - prefix_len;
+        if (p > str) {
+            b = p;
+            while (*prefix) {
+                *p++ = *prefix++;
+            }
+        }
+    }
+    if (sign && b > str) {
+        *(--b) = sign;
+    }
+    *fmt_size = *buf + needed_size - b - 1;
+
+    return b;
+}
+
+#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+
+// Raise OverflowError unless self (possibly an arbitrary-precision int
+// object) fits in nbytes bytes, signed or unsigned. Implemented with
+// object-level arithmetic so it works for any long-int implementation.
+void mp_obj_int_buffer_overflow_check(mp_obj_t self_in, size_t nbytes, bool is_signed) {
+    if (is_signed) {
+        // self must be < 2**(bits - 1)
+        mp_obj_t edge = mp_binary_op(MP_BINARY_OP_INPLACE_LSHIFT,
+            mp_obj_new_int(1),
+            mp_obj_new_int(nbytes * 8 - 1));
+
+        if (mp_binary_op(MP_BINARY_OP_LESS, self_in, edge) == mp_const_true) {
+            // and >= -2**(bits - 1)
+            edge = mp_unary_op(MP_UNARY_OP_NEGATIVE, edge);
+            if (mp_binary_op(MP_BINARY_OP_MORE_EQUAL, self_in, edge) == mp_const_true) {
+                return;
+            }
+        }
+    } else {
+        // self must be >= 0
+        if (mp_obj_int_sign(self_in) >= 0) {
+            // and < 2**(bits)
+            mp_obj_t edge = mp_binary_op(MP_BINARY_OP_INPLACE_LSHIFT,
+                mp_obj_new_int(1),
+                mp_obj_new_int(nbytes * 8));
+
+            if (mp_binary_op(MP_BINARY_OP_LESS, self_in, edge) == mp_const_true) {
+                return;
+            }
+        }
+    }
+
+    // Any fall-through above means the value is out of range.
+    mp_raise_OverflowError_varg(MP_ERROR_TEXT("value must fit in %d byte(s)"), nbytes);
+}
+
+#endif // MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+
+// Raise OverflowError unless the machine integer val fits in nbytes bytes,
+// interpreted as signed or unsigned according to is_signed.
+void mp_small_int_buffer_overflow_check(mp_int_t val, size_t nbytes, bool is_signed) {
+    // Fast path for zero.
+    if (val == 0) {
+        return;
+    }
+
+    // Trying to store negative values in unsigned bytes falls through to failure.
+    if (is_signed || val >= 0) {
+
+        if (nbytes >= sizeof(val)) {
+            // All non-negative N bit signed integers fit in an unsigned N bit integer.
+            // This case prevents shifting too far below.
+            return;
+        }
+
+        if (is_signed) {
+            // Valid range is [-2**(bits-1), 2**(bits-1)).
+            mp_int_t edge = ((mp_int_t)1 << (nbytes * 8 - 1));
+            if (-edge <= val && val < edge) {
+                return;
+            }
+            // Out of range, fall through to failure.
+        } else {
+            // Unsigned. We already know val >= 0; valid range is [0, 2**bits).
+            mp_int_t edge = ((mp_int_t)1 << (nbytes * 8));
+            if (val < edge) {
+                return;
+            }
+        }
+        // Fall through to failure.
+    }
+
+    mp_raise_OverflowError_varg(MP_ERROR_TEXT("value must fit in %d byte(s)"), nbytes);
+}
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+
+int mp_obj_int_sign(mp_obj_t self_in) {
+ mp_int_t val = mp_obj_get_int(self_in);
+ if (val < 0) {
+ return -1;
+ } else if (val > 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
// This is called for operations on SMALL_INT that are not handled by mp_unary_op.
// With no long-int implementation there is nothing further to try.
mp_obj_t mp_obj_int_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
    return MP_OBJ_NULL; // op not supported
}

// This is called for operations on SMALL_INT that are not handled by mp_binary_op.
// Only the generic extra cases (bool coercion, sequence multiply) can apply.
mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
    return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
}

// This is called only with strings whose value doesn't fit in SMALL_INT.
// Without a long-int implementation such values cannot be represented.
mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, unsigned int base) {
    mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("long int not supported in this build"));
    return mp_const_none;
}

// This is called when an integer larger than a SMALL_INT is needed (although val might still fit in a SMALL_INT).
// Unconditionally raises: this build cannot represent the value.
mp_obj_t mp_obj_new_int_from_ll(long long val) {
    mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("small int overflow"));
    return mp_const_none;
}

// This is called when an integer larger than a SMALL_INT is needed (although val might still fit in a SMALL_INT).
// Unconditionally raises: this build cannot represent the value.
mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
    mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("small int overflow"));
    return mp_const_none;
}
+
+mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
+ // SMALL_INT accepts only signed numbers, so make sure the input
+ // value fits completely in the small-int positive range.
+ if ((value & ~MP_SMALL_INT_POSITIVE_MASK) == 0) {
+ return MP_OBJ_NEW_SMALL_INT(value);
+ }
+ mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("small int overflow"));
+ return mp_const_none;
+}
+
+mp_obj_t mp_obj_new_int(mp_int_t value) {
+ if (MP_SMALL_INT_FITS(value)) {
+ return MP_OBJ_NEW_SMALL_INT(value);
+ }
+ mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("small int overflow"));
+ return mp_const_none;
+}
+
// With no long-int support every int object is a small int, so the
// "truncated" and "checked" conversions are both exact and identical.
mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
    return MP_OBJ_SMALL_INT_VALUE(self_in);
}

mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
    return MP_OBJ_SMALL_INT_VALUE(self_in);
}
+
+#endif // MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_NONE
+
// This dispatcher function is expected to be independent of the implementation of long int
// It handles the extra cases for integer-like arithmetic
mp_obj_t mp_obj_int_binary_op_extra_cases(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
    if (rhs_in == mp_const_false) {
        // false acts as 0
        return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(0));
    } else if (rhs_in == mp_const_true) {
        // true acts as 1
        return mp_binary_op(op, lhs_in, MP_OBJ_NEW_SMALL_INT(1));
    } else if (op == MP_BINARY_OP_MULTIPLY) {
        if (mp_obj_is_str_or_bytes(rhs_in) || mp_obj_is_type(rhs_in, &mp_type_tuple) || mp_obj_is_type(rhs_in, &mp_type_list)) {
            // multiply is commutative for these types, so delegate to them
            return mp_binary_op(op, rhs_in, lhs_in);
        }
    }
    return MP_OBJ_NULL; // op not supported
}
+
#if MICROPY_CPYTHON_COMPAT
// int.bit_length(): number of bits needed to represent self, ignoring sign.
// Long ints are delegated to the implementation-specific routine; small ints
// are computed directly with the count-leading-zeros builtin.
STATIC mp_obj_t int_bit_length(mp_obj_t self_in) {
    #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
    if (!mp_obj_is_small_int(self_in)) {
        return mp_obj_int_bit_length_impl(self_in);
    } else
    #endif
    {
        mp_int_t int_val = MP_OBJ_SMALL_INT_VALUE(self_in);
        // NOTE(review): the MP_SMALL_INT_MIN arm reports the full width of
        // mp_int_t; confirm that matches CPython's bit_length for that value
        // (it guards -int_val against overflow below).
        mp_uint_t value =
            (int_val == 0) ? 0 :
            (int_val == MP_SMALL_INT_MIN) ? 8 * sizeof(mp_int_t) :
            (int_val < 0) ? 8 * sizeof(long) - __builtin_clzl(-int_val) :
            8 * sizeof(long) - __builtin_clzl(int_val);
        return mp_obj_new_int_from_uint(value);
    }

}
MP_DEFINE_CONST_FUN_OBJ_1(int_bit_length_obj, int_bit_length);
#endif
+
// this is a classmethod
// int.from_bytes(bytes, byteorder): args[0] is the class object, args[1] the
// buffer, args[2] the byte-order string ('little' or anything else = big).
STATIC mp_obj_t int_from_bytes(size_t n_args, const mp_obj_t *args) {
    // TODO: Support signed param (assumes signed=False at the moment)
    (void)n_args;

    // get the buffer info
    mp_buffer_info_t bufinfo;
    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);

    const byte *buf = (const byte *)bufinfo.buf;
    int delta = 1;
    if (args[2] == MP_OBJ_NEW_QSTR(MP_QSTR_little)) {
        // little-endian: walk from the last (most significant) byte backwards
        buf += bufinfo.len - 1;
        delta = -1;
    }

    // Accumulate most-significant byte first.
    mp_uint_t value = 0;
    size_t len = bufinfo.len;
    for (; len--; buf += delta) {
        #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
        if (value > (MP_SMALL_INT_MAX >> 8)) {
            // Result will overflow a small-int so construct a big-int
            return mp_obj_int_from_bytes_impl(args[2] != MP_OBJ_NEW_QSTR(MP_QSTR_little), bufinfo.len, bufinfo.buf);
        }
        #endif
        value = (value << 8) | *buf;
    }
    return mp_obj_new_int_from_uint(value);
}

STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(int_from_bytes_fun_obj, 3, 4, int_from_bytes);
STATIC MP_DEFINE_CONST_CLASSMETHOD_OBJ(int_from_bytes_obj, MP_ROM_PTR(&int_from_bytes_fun_obj));
+
// int.to_bytes(length, byteorder, *, signed=False): encode self into a new
// bytes object of the given length; raises OverflowError if it doesn't fit.
STATIC mp_obj_t int_to_bytes(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    enum { ARG_length, ARG_byteorder, ARG_signed };
    static const mp_arg_t allowed_args[] = {
        { MP_QSTR_length, MP_ARG_REQUIRED | MP_ARG_INT, {.u_int = 0} },
        { MP_QSTR_byteorder, MP_ARG_REQUIRED | MP_ARG_OBJ, {.u_obj = MP_OBJ_NULL} },
        { MP_QSTR_signed, MP_ARG_KW_ONLY | MP_ARG_BOOL, {.u_bool = false} },
    };
    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
    // pos_args[0] is self; the remaining positionals map onto allowed_args.
    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);

    mp_int_t len = args[ARG_length].u_int;
    if (len < 0) {
        mp_raise_ValueError(NULL);
    }

    mp_obj_t self = pos_args[0];
    // Anything other than the exact 'little' qstr means big-endian.
    bool big_endian = args[ARG_byteorder].u_obj != MP_OBJ_NEW_QSTR(MP_QSTR_little);
    bool signed_ = args[ARG_signed].u_bool;

    vstr_t vstr;
    vstr_init_len(&vstr, len);
    byte *data = (byte *)vstr.buf;
    memset(data, 0, len);

    #if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
    if (!mp_obj_is_small_int(self)) {
        // Long int: range-check at the object level, then serialize.
        mp_obj_int_buffer_overflow_check(self, len, signed_);
        mp_obj_int_to_bytes_impl(self, big_endian, len, data);
    } else
    #endif
    {
        mp_int_t val = MP_OBJ_SMALL_INT_VALUE(self);
        // Small int checking is separate, to be fast.
        mp_small_int_buffer_overflow_check(val, len, signed_);
        // Only the low min(len, sizeof(val)) bytes carry value bits.
        size_t l = MIN((size_t)len, sizeof(val));
        if (val < 0) {
            // Sign extend negative numbers.
            memset(data, -1, len);
        }
        // For big-endian output the value occupies the tail of the buffer.
        mp_binary_set_int(l, big_endian, data + (big_endian ? (len - l) : 0), val);
    }

    return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(int_to_bytes_obj, 3, int_to_bytes);
+
// Methods exposed on int instances: bit_length (CPython-compat only),
// from_bytes (classmethod) and to_bytes.
STATIC const mp_rom_map_elem_t int_locals_dict_table[] = {
    #if MICROPY_CPYTHON_COMPAT
    { MP_ROM_QSTR(MP_QSTR_bit_length), MP_ROM_PTR(&int_bit_length_obj) },
    #endif
    { MP_ROM_QSTR(MP_QSTR_from_bytes), MP_ROM_PTR(&int_from_bytes_obj) },
    { MP_ROM_QSTR(MP_QSTR_to_bytes), MP_ROM_PTR(&int_to_bytes_obj) },
};

STATIC MP_DEFINE_CONST_DICT(int_locals_dict, int_locals_dict_table);

// The built-in int type object; unary/binary ops dispatch to the
// long-int-implementation-specific handlers defined in this file.
const mp_obj_type_t mp_type_int = {
    { &mp_type_type },
    .name = MP_QSTR_int,
    .flags = MP_TYPE_FLAG_EXTENDED,
    .print = mp_obj_int_print,
    .make_new = mp_obj_int_make_new,
    .locals_dict = (mp_obj_dict_t *)&int_locals_dict,
    MP_TYPE_EXTENDED_FIELDS(
        .unary_op = mp_obj_int_unary_op,
        .binary_op = mp_obj_int_binary_op,
        ),
};
diff --git a/circuitpython/py/objint.h b/circuitpython/py/objint.h
new file mode 100644
index 0000000..fb49191
--- /dev/null
+++ b/circuitpython/py/objint.h
@@ -0,0 +1,72 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJINT_H
+#define MICROPY_INCLUDED_PY_OBJINT_H
+
+#include "py/mpz.h"
+#include "py/obj.h"
+
// Boxed representation of a Python int that does not fit in a small int.
// Which payload field exists depends on the configured long-int backend.
typedef struct _mp_obj_int_t {
    mp_obj_base_t base;
    #if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
    mp_longint_impl_t val; // value held in a C long long
    #elif MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
    mpz_t mpz; // arbitrary-precision value
    #endif
} mp_obj_int_t;
+
+extern const mp_obj_int_t mp_sys_maxsize_obj;
+
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in);
+#endif
+
+size_t mp_int_format_size(size_t num_bits, int base, const char *prefix, char comma);
+
+mp_obj_int_t *mp_obj_int_new_mpz(void);
+
+void mp_obj_int_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind);
+char *mp_obj_int_formatted(char **buf, size_t *buf_size, size_t *fmt_size, mp_const_obj_t self_in,
+ int base, const char *prefix, char base_char, char comma);
+char *mp_obj_int_formatted_impl(char **buf, size_t *buf_size, size_t *fmt_size, mp_const_obj_t self_in,
+ int base, const char *prefix, char base_char, char comma);
+#if MICROPY_LONGINT_IMPL != MICROPY_LONGINT_IMPL_NONE
+void mp_obj_int_buffer_overflow_check(mp_obj_t self_in, size_t nbytes, bool is_signed);
+#endif
+
+void mp_small_int_buffer_overflow_check(mp_int_t val, size_t nbytes, bool is_signed);
+
+mp_int_t mp_obj_int_hash(mp_obj_t self_in);
+mp_obj_t mp_obj_int_bit_length_impl(mp_obj_t self_in);
+mp_obj_t mp_obj_int_from_bytes_impl(bool big_endian, size_t len, const byte *buf);
+void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byte *buf);
+int mp_obj_int_sign(mp_obj_t self_in);
+mp_obj_t mp_obj_int_unary_op(mp_unary_op_t op, mp_obj_t o_in);
+mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+mp_obj_t mp_obj_int_binary_op_extra_cases(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+mp_obj_t mp_obj_int_pow3(mp_obj_t base, mp_obj_t exponent, mp_obj_t modulus);
+
+#endif // MICROPY_INCLUDED_PY_OBJINT_H
diff --git a/circuitpython/py/objint_longlong.c b/circuitpython/py/objint_longlong.c
new file mode 100644
index 0000000..368c74e
--- /dev/null
+++ b/circuitpython/py/objint_longlong.c
@@ -0,0 +1,309 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include "py/smallint.h"
#include "py/objint.h"
#include "py/runtime.h"

#include "supervisor/shared/translate.h"

#if MICROPY_PY_BUILTINS_FLOAT
#include <math.h>
#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_LONGLONG
+
+#if MICROPY_PY_SYS_MAXSIZE
+// Export value for sys.maxsize
+const mp_obj_int_t mp_sys_maxsize_obj = {{&mp_type_int}, MP_SSIZE_MAX};
+#endif
+
+mp_obj_t mp_obj_int_bit_length_impl(mp_obj_t self_in) {
+ assert(mp_obj_is_type(self_in, &mp_type_int));
+ mp_obj_int_t *self = self_in;
+ long long val = self->val;
+ return MP_OBJ_NEW_SMALL_INT(
+ (val == 0) ? 0 :
+ (val == MP_SMALL_INT_MIN) ? 8 * sizeof(long long) :
+ (val < 0) ? 8 * sizeof(long long) - __builtin_clzll(-val) :
+ 8 * sizeof(long long) - __builtin_clzll(val));
+}
+
+mp_obj_t mp_obj_int_from_bytes_impl(bool big_endian, size_t len, const byte *buf) {
+ int delta = 1;
+ if (!big_endian) {
+ buf += len - 1;
+ delta = -1;
+ }
+
+ mp_longint_impl_t value = 0;
+ for (; len--; buf += delta) {
+ value = (value << 8) | *buf;
+ }
+ return mp_obj_new_int_from_ll(value);
+}
+
+void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byte *buf) {
+ assert(mp_obj_is_type(self_in, &mp_type_int));
+ mp_obj_int_t *self = self_in;
+ long long val = self->val;
+ if (big_endian) {
+ byte *b = buf + len;
+ while (b > buf) {
+ *--b = val;
+ val >>= 8;
+ }
+ } else {
+ for (; len > 0; --len) {
+ *buf++ = val;
+ val >>= 8;
+ }
+ }
+}
+
+int mp_obj_int_sign(mp_obj_t self_in) {
+ mp_longint_impl_t val;
+ if (mp_obj_is_small_int(self_in)) {
+ val = MP_OBJ_SMALL_INT_VALUE(self_in);
+ } else {
+ mp_obj_int_t *self = self_in;
+ val = self->val;
+ }
+ if (val < 0) {
+ return -1;
+ } else if (val > 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
// Unary operations on a boxed long-long int.
mp_obj_t mp_obj_int_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
    mp_obj_int_t *o = o_in;
    switch (op) {
        case MP_UNARY_OP_BOOL:
            return mp_obj_new_bool(o->val != 0);

        // truncate value to fit in mp_int_t, which gives the same hash as
        // small int if the value fits without truncation
        case MP_UNARY_OP_HASH:
            return MP_OBJ_NEW_SMALL_INT((mp_int_t)o->val);

        case MP_UNARY_OP_POSITIVE:
            // +x on an int can return the object unchanged.
            return o_in;
        case MP_UNARY_OP_NEGATIVE:
            // NOTE(review): -val overflows (UB) for LLONG_MIN — confirm
            // whether that input is reachable here.
            return mp_obj_new_int_from_ll(-o->val);
        case MP_UNARY_OP_INVERT:
            return mp_obj_new_int_from_ll(~o->val);
        case MP_UNARY_OP_ABS: {
            mp_obj_int_t *self = MP_OBJ_TO_PTR(o_in);
            if (self->val >= 0) {
                // Already non-negative: reuse the object.
                return o_in;
            }
            // Copy first, then negate the copy in place.
            self = mp_obj_new_int_from_ll(self->val);
            // TODO could overflow long long
            self->val = -self->val;
            return MP_OBJ_FROM_PTR(self);
        }
        default:
            return MP_OBJ_NULL; // op not supported
    }
}
+
// Binary operations where at least one operand is a boxed long-long int.
// lhs is a small int or boxed int; rhs may be any object, in which case the
// generic extra-cases handler is consulted. Arithmetic is done directly on
// long long values.
// NOTE(review): add/sub/mul/pow can overflow long long (UB) and shift counts
// are not range-checked — this mirrors the size/speed trade-off of this
// backend; confirm acceptable for the port.
mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
    long long lhs_val;
    long long rhs_val;

    if (mp_obj_is_small_int(lhs_in)) {
        lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs_in);
    } else {
        assert(mp_obj_is_type(lhs_in, &mp_type_int));
        lhs_val = ((mp_obj_int_t *)lhs_in)->val;
    }

    if (mp_obj_is_small_int(rhs_in)) {
        rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs_in);
    } else if (mp_obj_is_type(rhs_in, &mp_type_int)) {
        rhs_val = ((mp_obj_int_t *)rhs_in)->val;
    } else {
        // delegate to generic function to check for extra cases
        return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
    }

    switch (op) {
        case MP_BINARY_OP_ADD:
        case MP_BINARY_OP_INPLACE_ADD:
            return mp_obj_new_int_from_ll(lhs_val + rhs_val);
        case MP_BINARY_OP_SUBTRACT:
        case MP_BINARY_OP_INPLACE_SUBTRACT:
            return mp_obj_new_int_from_ll(lhs_val - rhs_val);
        case MP_BINARY_OP_MULTIPLY:
        case MP_BINARY_OP_INPLACE_MULTIPLY:
            return mp_obj_new_int_from_ll(lhs_val * rhs_val);
        case MP_BINARY_OP_FLOOR_DIVIDE:
        case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
            if (rhs_val == 0) {
                goto zero_division;
            }
            // NOTE(review): C '/' truncates toward zero, Python floor-divides
            // toward -inf — confirm intended behavior for negative operands.
            return mp_obj_new_int_from_ll(lhs_val / rhs_val);
        case MP_BINARY_OP_MODULO:
        case MP_BINARY_OP_INPLACE_MODULO:
            if (rhs_val == 0) {
                goto zero_division;
            }
            return mp_obj_new_int_from_ll(lhs_val % rhs_val);

        case MP_BINARY_OP_AND:
        case MP_BINARY_OP_INPLACE_AND:
            return mp_obj_new_int_from_ll(lhs_val & rhs_val);
        case MP_BINARY_OP_OR:
        case MP_BINARY_OP_INPLACE_OR:
            return mp_obj_new_int_from_ll(lhs_val | rhs_val);
        case MP_BINARY_OP_XOR:
        case MP_BINARY_OP_INPLACE_XOR:
            return mp_obj_new_int_from_ll(lhs_val ^ rhs_val);

        case MP_BINARY_OP_LSHIFT:
        case MP_BINARY_OP_INPLACE_LSHIFT:
            return mp_obj_new_int_from_ll(lhs_val << (int)rhs_val);
        case MP_BINARY_OP_RSHIFT:
        case MP_BINARY_OP_INPLACE_RSHIFT:
            return mp_obj_new_int_from_ll(lhs_val >> (int)rhs_val);

        case MP_BINARY_OP_POWER:
        case MP_BINARY_OP_INPLACE_POWER: {
            if (rhs_val < 0) {
                // Negative exponent yields a float result.
                #if MICROPY_PY_BUILTINS_FLOAT
                return mp_obj_float_binary_op(op, lhs_val, rhs_in);
                #else
                mp_raise_ValueError(MP_ERROR_TEXT("negative power with no float support"));
                #endif
            }
            // Square-and-multiply exponentiation.
            long long ans = 1;
            while (rhs_val > 0) {
                if (rhs_val & 1) {
                    ans *= lhs_val;
                }
                if (rhs_val == 1) {
                    // Skip the final (unneeded) squaring of lhs_val.
                    break;
                }
                rhs_val /= 2;
                lhs_val *= lhs_val;
            }
            return mp_obj_new_int_from_ll(ans);
        }

        case MP_BINARY_OP_LESS:
            return mp_obj_new_bool(lhs_val < rhs_val);
        case MP_BINARY_OP_MORE:
            return mp_obj_new_bool(lhs_val > rhs_val);
        case MP_BINARY_OP_LESS_EQUAL:
            return mp_obj_new_bool(lhs_val <= rhs_val);
        case MP_BINARY_OP_MORE_EQUAL:
            return mp_obj_new_bool(lhs_val >= rhs_val);
        case MP_BINARY_OP_EQUAL:
            return mp_obj_new_bool(lhs_val == rhs_val);

        default:
            return MP_OBJ_NULL; // op not supported
    }

zero_division:
    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("division by zero"));
}
+
// Create an int object, preferring the unboxed small-int representation.
mp_obj_t mp_obj_new_int(mp_int_t value) {
    if (MP_SMALL_INT_FITS(value)) {
        return MP_OBJ_NEW_SMALL_INT(value);
    }
    return mp_obj_new_int_from_ll(value);
}

mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
    // SMALL_INT accepts only signed numbers, so make sure the input
    // value fits completely in the small-int positive range.
    if ((value & ~MP_SMALL_INT_POSITIVE_MASK) == 0) {
        return MP_OBJ_NEW_SMALL_INT(value);
    }
    return mp_obj_new_int_from_ll(value);
}

// Allocate a boxed long-long int (even if val would fit a small int).
mp_obj_t mp_obj_new_int_from_ll(long long val) {
    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
    o->base.type = &mp_type_int;
    o->val = val;
    return o;
}

mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
    // Values with the top bit set cannot be represented in a signed
    // long long, so they are rejected here.
    if (val >> (sizeof(unsigned long long) * 8 - 1) != 0) {
        mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("ulonglong too large"));
    }
    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
    o->base.type = &mp_type_int;
    o->val = val;
    return o;
}
+
// Parse a numeric string into a boxed long-long int, advancing *str past the
// consumed characters.
mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, unsigned int base) {
    // TODO this does not honor the given length of the string, but it all cases it should anyway be null terminated
    // TODO check overflow
    // NOTE(review): the neg parameter is ignored here — confirm callers
    // always embed the sign in the string for this backend.
    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
    o->base.type = &mp_type_int;
    char *endptr;
    o->val = strtoll(*str, &endptr, base);
    *str = endptr;
    return o;
}

// Return the value truncated to mp_int_t (no overflow check).
mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
    if (mp_obj_is_small_int(self_in)) {
        return MP_OBJ_SMALL_INT_VALUE(self_in);
    } else {
        const mp_obj_int_t *self = self_in;
        return self->val;
    }
}

mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
    // TODO: Check overflow
    return mp_obj_int_get_truncated(self_in);
}

#if MICROPY_PY_BUILTINS_FLOAT
// Convert a boxed long-long int to the configured float type.
mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) {
    assert(mp_obj_is_type(self_in, &mp_type_int));
    mp_obj_int_t *self = self_in;
    return self->val;
}
#endif
+
+#endif
diff --git a/circuitpython/py/objint_mpz.c b/circuitpython/py/objint_mpz.c
new file mode 100644
index 0000000..b804ce2
--- /dev/null
+++ b/circuitpython/py/objint_mpz.c
@@ -0,0 +1,469 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/parsenumbase.h"
+#include "py/smallint.h"
+#include "py/objint.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+#if MICROPY_LONGINT_IMPL == MICROPY_LONGINT_IMPL_MPZ
+
+#if MICROPY_PY_SYS_MAXSIZE
+// Export value for sys.maxsize
+// *FORMAT-OFF*
+#define DIG_MASK ((MPZ_LONG_1 << MPZ_DIG_SIZE) - 1)
+STATIC const mpz_dig_t maxsize_dig[] = {
+ #define NUM_DIG 1
+ (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 0) & DIG_MASK,
+ #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 0) > DIG_MASK
+#undef NUM_DIG
+ #define NUM_DIG 2
+ (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 1) & DIG_MASK,
+ #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 1) > DIG_MASK
+#undef NUM_DIG
+ #define NUM_DIG 3
+ (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 2) & DIG_MASK,
+ #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 2) > DIG_MASK
+#undef NUM_DIG
+ #define NUM_DIG 4
+ (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 3) & DIG_MASK,
+ #if (MP_SSIZE_MAX >> MPZ_DIG_SIZE * 3) > DIG_MASK
+ #error cannot encode MP_SSIZE_MAX as mpz
+ #endif
+ #endif
+ #endif
+ #endif
+};
+// *FORMAT-ON*
+const mp_obj_int_t mp_sys_maxsize_obj = {
+ {&mp_type_int},
+ {.fixed_dig = 1, .len = NUM_DIG, .alloc = NUM_DIG, .dig = (mpz_dig_t *)maxsize_dig}
+};
+#undef DIG_MASK
+#undef NUM_DIG
+#endif
+
// Allocate a boxed int whose mpz payload is initialized to zero.
mp_obj_int_t *mp_obj_int_new_mpz(void) {
    mp_obj_int_t *o = m_new_obj(mp_obj_int_t);
    o->base.type = &mp_type_int;
    mpz_init_zero(&o->mpz);
    return o;
}
+
+// This routine expects you to pass in a buffer and size (in *buf and buf_size).
+// If, for some reason, this buffer is too small, then it will allocate a
+// buffer and return the allocated buffer and size in *buf and *buf_size. It
+// is the callers responsibility to free this allocated buffer.
+//
+// The resulting formatted string will be returned from this function and the
+// formatted size will be in *fmt_size.
+//
+// This particular routine should only be called for the mpz representation of the int.
// Format an mpz-backed int into *buf. See the contract described in the
// comment above: *buf/*buf_size may be replaced with a larger heap buffer
// (caller frees), the returned pointer is the start of the digits and
// *fmt_size receives the formatted length.
char *mp_obj_int_formatted_impl(char **buf, size_t *buf_size, size_t *fmt_size, mp_const_obj_t self_in,
    int base, const char *prefix, char base_char, char comma) {
    assert(mp_obj_is_type(self_in, &mp_type_int));
    const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);

    // Worst-case size for this many bits in this base, plus prefix/commas.
    size_t needed_size = mp_int_format_size(mpz_max_num_bits(&self->mpz), base, prefix, comma);
    if (needed_size > *buf_size) {
        *buf = m_new(char, needed_size);
        *buf_size = needed_size;
    }
    char *str = *buf;

    *fmt_size = mpz_as_str_inpl(&self->mpz, base, prefix, base_char, comma, str);

    return str;
}
+
// int.bit_length() for an mpz-backed int: mpz tracks this directly.
mp_obj_t mp_obj_int_bit_length_impl(mp_obj_t self_in) {
    assert(mp_obj_is_type(self_in, &mp_type_int));
    mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
    return MP_OBJ_NEW_SMALL_INT(mpz_num_bits(&self->mpz));
}

// Build an mpz-backed int from len raw bytes in the given byte order.
mp_obj_t mp_obj_int_from_bytes_impl(bool big_endian, size_t len, const byte *buf) {
    mp_obj_int_t *o = mp_obj_int_new_mpz();
    mpz_set_from_bytes(&o->mpz, big_endian, len, buf);
    return MP_OBJ_FROM_PTR(o);
}

// Serialize an mpz-backed int into exactly len bytes at buf.
void mp_obj_int_to_bytes_impl(mp_obj_t self_in, bool big_endian, size_t len, byte *buf) {
    assert(mp_obj_is_type(self_in, &mp_type_int));
    mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
    mpz_as_bytes(&self->mpz, big_endian, len, buf);
}
+
+int mp_obj_int_sign(mp_obj_t self_in) {
+ if (mp_obj_is_small_int(self_in)) {
+ mp_int_t val = MP_OBJ_SMALL_INT_VALUE(self_in);
+ if (val < 0) {
+ return -1;
+ } else if (val > 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
+ if (self->mpz.len == 0) {
+ return 0;
+ } else if (self->mpz.neg == 0) {
+ return 1;
+ } else {
+ return -1;
+ }
+}
+
+mp_obj_t mp_obj_int_unary_op(mp_unary_op_t op, mp_obj_t o_in) {
+ mp_obj_int_t *o = MP_OBJ_TO_PTR(o_in);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(!mpz_is_zero(&o->mpz));
+ case MP_UNARY_OP_HASH:
+ return MP_OBJ_NEW_SMALL_INT(mpz_hash(&o->mpz));
+ case MP_UNARY_OP_POSITIVE:
+ return o_in;
+ case MP_UNARY_OP_NEGATIVE: { mp_obj_int_t *o2 = mp_obj_int_new_mpz();
+ mpz_neg_inpl(&o2->mpz, &o->mpz);
+ return MP_OBJ_FROM_PTR(o2);
+ }
+ case MP_UNARY_OP_INVERT: { mp_obj_int_t *o2 = mp_obj_int_new_mpz();
+ mpz_not_inpl(&o2->mpz, &o->mpz);
+ return MP_OBJ_FROM_PTR(o2);
+ }
+ case MP_UNARY_OP_ABS: {
+ mp_obj_int_t *self = MP_OBJ_TO_PTR(o_in);
+ if (self->mpz.neg == 0) {
+ return o_in;
+ }
+ mp_obj_int_t *self2 = mp_obj_int_new_mpz();
+ mpz_abs_inpl(&self2->mpz, &self->mpz);
+ return MP_OBJ_FROM_PTR(self2);
+ }
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
// Binary operations where at least one operand is an mpz-backed int.
// Small-int operands are wrapped in a stack-allocated fixed-digit mpz so the
// mpz routines can be used uniformly; float/complex rhs delegate to those
// types, and anything else goes through the generic extra-cases handler.
mp_obj_t mp_obj_int_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
    const mpz_t *zlhs;
    const mpz_t *zrhs;
    mpz_t z_int;
    mpz_dig_t z_int_dig[MPZ_NUM_DIG_FOR_INT];

    // lhs could be a small int (eg small-int + mpz)
    if (mp_obj_is_small_int(lhs_in)) {
        mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(lhs_in));
        zlhs = &z_int;
    } else {
        assert(mp_obj_is_type(lhs_in, &mp_type_int));
        zlhs = &((mp_obj_int_t *)MP_OBJ_TO_PTR(lhs_in))->mpz;
    }

    // if rhs is small int, then lhs was not (otherwise mp_binary_op handles it)
    // — so z_int/z_int_dig is free to be reused here.
    if (mp_obj_is_small_int(rhs_in)) {
        mpz_init_fixed_from_int(&z_int, z_int_dig, MPZ_NUM_DIG_FOR_INT, MP_OBJ_SMALL_INT_VALUE(rhs_in));
        zrhs = &z_int;
    } else if (mp_obj_is_type(rhs_in, &mp_type_int)) {
        zrhs = &((mp_obj_int_t *)MP_OBJ_TO_PTR(rhs_in))->mpz;
    #if MICROPY_PY_BUILTINS_FLOAT
    } else if (mp_obj_is_float(rhs_in)) {
        return mp_obj_float_binary_op(op, mpz_as_float(zlhs), rhs_in);
    #endif
    #if MICROPY_PY_BUILTINS_COMPLEX
    } else if (mp_obj_is_type(rhs_in, &mp_type_complex)) {
        return mp_obj_complex_binary_op(op, mpz_as_float(zlhs), 0, rhs_in);
    #endif
    } else {
        // delegate to generic function to check for extra cases
        return mp_obj_int_binary_op_extra_cases(op, lhs_in, rhs_in);
    }

    #if MICROPY_PY_BUILTINS_FLOAT
    // True division of ints always produces a float.
    if (op == MP_BINARY_OP_TRUE_DIVIDE || op == MP_BINARY_OP_INPLACE_TRUE_DIVIDE) {
        if (mpz_is_zero(zrhs)) {
            goto zero_division_error;
        }
        mp_float_t flhs = mpz_as_float(zlhs);
        mp_float_t frhs = mpz_as_float(zrhs);
        return mp_obj_new_float(flhs / frhs);
    }
    #endif

    // This range covers all the value-producing (arithmetic/bitwise) ops;
    // comparison ops are handled in the else branch below.
    if (op >= MP_BINARY_OP_INPLACE_OR && op < MP_BINARY_OP_CONTAINS) {
        mp_obj_int_t *res = mp_obj_int_new_mpz();

        switch (op) {
            case MP_BINARY_OP_ADD:
            case MP_BINARY_OP_INPLACE_ADD:
                mpz_add_inpl(&res->mpz, zlhs, zrhs);
                break;
            case MP_BINARY_OP_SUBTRACT:
            case MP_BINARY_OP_INPLACE_SUBTRACT:
                mpz_sub_inpl(&res->mpz, zlhs, zrhs);
                break;
            case MP_BINARY_OP_MULTIPLY:
            case MP_BINARY_OP_INPLACE_MULTIPLY:
                mpz_mul_inpl(&res->mpz, zlhs, zrhs);
                break;
            case MP_BINARY_OP_FLOOR_DIVIDE:
            case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE: {
                if (mpz_is_zero(zrhs)) {
                zero_division_error:
                    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("divide by zero"));
                }
                // Quotient goes to res; the remainder is discarded.
                mpz_t rem;
                mpz_init_zero(&rem);
                mpz_divmod_inpl(&res->mpz, &rem, zlhs, zrhs);
                mpz_deinit(&rem);
                break;
            }
            case MP_BINARY_OP_MODULO:
            case MP_BINARY_OP_INPLACE_MODULO: {
                if (mpz_is_zero(zrhs)) {
                    goto zero_division_error;
                }
                // Remainder goes to res; the quotient is discarded.
                mpz_t quo;
                mpz_init_zero(&quo);
                mpz_divmod_inpl(&quo, &res->mpz, zlhs, zrhs);
                mpz_deinit(&quo);
                break;
            }

            case MP_BINARY_OP_AND:
            case MP_BINARY_OP_INPLACE_AND:
                mpz_and_inpl(&res->mpz, zlhs, zrhs);
                break;
            case MP_BINARY_OP_OR:
            case MP_BINARY_OP_INPLACE_OR:
                mpz_or_inpl(&res->mpz, zlhs, zrhs);
                break;
            case MP_BINARY_OP_XOR:
            case MP_BINARY_OP_INPLACE_XOR:
                mpz_xor_inpl(&res->mpz, zlhs, zrhs);
                break;

            case MP_BINARY_OP_LSHIFT:
            case MP_BINARY_OP_INPLACE_LSHIFT:
            case MP_BINARY_OP_RSHIFT:
            case MP_BINARY_OP_INPLACE_RSHIFT: {
                // Shift count must fit in a machine word and be non-negative.
                mp_int_t irhs = mp_obj_int_get_checked(rhs_in);
                if (irhs < 0) {
                    mp_raise_ValueError(MP_ERROR_TEXT("negative shift count"));
                }
                if (op == MP_BINARY_OP_LSHIFT || op == MP_BINARY_OP_INPLACE_LSHIFT) {
                    mpz_shl_inpl(&res->mpz, zlhs, irhs);
                } else {
                    mpz_shr_inpl(&res->mpz, zlhs, irhs);
                }
                break;
            }

            case MP_BINARY_OP_POWER:
            case MP_BINARY_OP_INPLACE_POWER:
                if (mpz_is_neg(zrhs)) {
                    // Negative exponent yields a float result.
                    #if MICROPY_PY_BUILTINS_FLOAT
                    return mp_obj_float_binary_op(op, mpz_as_float(zlhs), rhs_in);
                    #else
                    mp_raise_ValueError(MP_ERROR_TEXT("negative power with no float support"));
                    #endif
                }
                mpz_pow_inpl(&res->mpz, zlhs, zrhs);
                break;

            default: {
                assert(op == MP_BINARY_OP_DIVMOD);
                if (mpz_is_zero(zrhs)) {
                    goto zero_division_error;
                }
                // divmod returns a 2-tuple (quotient, remainder).
                mp_obj_int_t *quo = mp_obj_int_new_mpz();
                mpz_divmod_inpl(&quo->mpz, &res->mpz, zlhs, zrhs);
                mp_obj_t tuple[2] = {MP_OBJ_FROM_PTR(quo), MP_OBJ_FROM_PTR(res)};
                return mp_obj_new_tuple(2, tuple);
            }
        }

        return MP_OBJ_FROM_PTR(res);

    } else {
        // Comparison ops: a single three-way compare covers them all.
        int cmp = mpz_cmp(zlhs, zrhs);
        switch (op) {
            case MP_BINARY_OP_LESS:
                return mp_obj_new_bool(cmp < 0);
            case MP_BINARY_OP_MORE:
                return mp_obj_new_bool(cmp > 0);
            case MP_BINARY_OP_LESS_EQUAL:
                return mp_obj_new_bool(cmp <= 0);
            case MP_BINARY_OP_MORE_EQUAL:
                return mp_obj_new_bool(cmp >= 0);
            case MP_BINARY_OP_EQUAL:
                return mp_obj_new_bool(cmp == 0);

            default:
                return MP_OBJ_NULL; // op not supported
        }
    }
}
+
#if MICROPY_PY_BUILTINS_POW3
// Return an mpz view of arg: small ints are materialized into *temp (which
// the caller must deinit), boxed ints return their own mpz.
STATIC mpz_t *mp_mpz_for_int(mp_obj_t arg, mpz_t *temp) {
    if (mp_obj_is_small_int(arg)) {
        mpz_init_from_int(temp, MP_OBJ_SMALL_INT_VALUE(arg));
        return temp;
    } else {
        mp_obj_int_t *arp_p = MP_OBJ_TO_PTR(arg);
        return &(arp_p->mpz);
    }
}

// Three-argument pow(): (base ** exponent) % modulus, all integers.
mp_obj_t mp_obj_int_pow3(mp_obj_t base, mp_obj_t exponent, mp_obj_t modulus) {
    if (!mp_obj_is_int(base) || !mp_obj_is_int(exponent) || !mp_obj_is_int(modulus)) {
        mp_raise_TypeError(MP_ERROR_TEXT("pow() with 3 arguments requires integers"));
    } else {
        mp_obj_t result = mp_obj_new_int_from_ull(0); // Use the _from_ull version as this forces an mpz int
        mp_obj_int_t *res_p = (mp_obj_int_t *)MP_OBJ_TO_PTR(result);

        mpz_t l_temp, r_temp, m_temp;
        mpz_t *lhs = mp_mpz_for_int(base, &l_temp);
        mpz_t *rhs = mp_mpz_for_int(exponent, &r_temp);
        mpz_t *mod = mp_mpz_for_int(modulus, &m_temp);

        if (mpz_is_zero(mod)) {
            // NOTE(review): raising here skips the temp deinits below; with a
            // GC heap that's only transient garbage — confirm acceptable.
            mp_raise_msg(&mp_type_ValueError, MP_ERROR_TEXT("pow() 3rd argument cannot be 0"));
        }

        mpz_pow3_inpl(&(res_p->mpz), lhs, rhs, mod);

        // Deinit only the temporaries we materialized for small ints.
        if (lhs == &l_temp) {
            mpz_deinit(lhs);
        }
        if (rhs == &r_temp) {
            mpz_deinit(rhs);
        }
        if (mod == &m_temp) {
            mpz_deinit(mod);
        }
        return result;
    }
}
#endif
+
// Create an int object, preferring the unboxed small-int representation.
mp_obj_t mp_obj_new_int(mp_int_t value) {
    if (MP_SMALL_INT_FITS(value)) {
        return MP_OBJ_NEW_SMALL_INT(value);
    }
    return mp_obj_new_int_from_ll(value);
}

// Always allocate an mpz-backed int from a signed 64-bit value.
mp_obj_t mp_obj_new_int_from_ll(long long val) {
    mp_obj_int_t *o = mp_obj_int_new_mpz();
    mpz_set_from_ll(&o->mpz, val, true);
    return MP_OBJ_FROM_PTR(o);
}

// Always allocate an mpz-backed int from an unsigned 64-bit value.
mp_obj_t mp_obj_new_int_from_ull(unsigned long long val) {
    mp_obj_int_t *o = mp_obj_int_new_mpz();
    mpz_set_from_ll(&o->mpz, val, false);
    return MP_OBJ_FROM_PTR(o);
}

mp_obj_t mp_obj_new_int_from_uint(mp_uint_t value) {
    // SMALL_INT accepts only signed numbers, so make sure the input
    // value fits completely in the small-int positive range.
    if ((value & ~MP_SMALL_INT_POSITIVE_MASK) == 0) {
        return MP_OBJ_NEW_SMALL_INT(value);
    }
    return mp_obj_new_int_from_ull(value);
}

// Parse a numeric string of length len into an mpz-backed int, advancing
// *str by the number of characters consumed.
mp_obj_t mp_obj_new_int_from_str_len(const char **str, size_t len, bool neg, unsigned int base) {
    mp_obj_int_t *o = mp_obj_int_new_mpz();
    size_t n = mpz_set_from_str(&o->mpz, *str, len, neg, base);
    *str += n;
    return MP_OBJ_FROM_PTR(o);
}
+
// Return the value truncated to mp_int_t; no error on overflow.
mp_int_t mp_obj_int_get_truncated(mp_const_obj_t self_in) {
    if (mp_obj_is_small_int(self_in)) {
        return MP_OBJ_SMALL_INT_VALUE(self_in);
    } else {
        const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
        // hash returns actual int value if it fits in mp_int_t
        return mpz_hash(&self->mpz);
    }
}

// Return the value as mp_int_t, raising OverflowError if it doesn't fit.
mp_int_t mp_obj_int_get_checked(mp_const_obj_t self_in) {
    if (mp_obj_is_small_int(self_in)) {
        return MP_OBJ_SMALL_INT_VALUE(self_in);
    } else {
        const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
        mp_int_t value;
        if (mpz_as_int_checked(&self->mpz, &value)) {
            return value;
        } else {
            // overflow
            mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("overflow converting long int to machine word"));
        }
    }
}

// Return the value as mp_uint_t, raising OverflowError if it is negative
// or doesn't fit.
mp_uint_t mp_obj_int_get_uint_checked(mp_const_obj_t self_in) {
    if (mp_obj_is_small_int(self_in)) {
        if (MP_OBJ_SMALL_INT_VALUE(self_in) >= 0) {
            return MP_OBJ_SMALL_INT_VALUE(self_in);
        }
        // Negative small ints fall through to the overflow error.
    } else {
        const mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
        mp_uint_t value;
        if (mpz_as_uint_checked(&self->mpz, &value)) {
            return value;
        }
    }

    mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("overflow converting long int to machine word"));
}
+
#if MICROPY_PY_BUILTINS_FLOAT
// Convert an mpz-backed int to the configured float type (may lose precision).
mp_float_t mp_obj_int_as_float_impl(mp_obj_t self_in) {
    assert(mp_obj_is_type(self_in, &mp_type_int));
    mp_obj_int_t *self = MP_OBJ_TO_PTR(self_in);
    return mpz_as_float(&self->mpz);
}
#endif
+
+#endif
diff --git a/circuitpython/py/objlist.c b/circuitpython/py/objlist.c
new file mode 100644
index 0000000..9d1949b
--- /dev/null
+++ b/circuitpython/py/objlist.c
@@ -0,0 +1,559 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/objlist.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+#include "supervisor/shared/translate.h"
+
+STATIC mp_obj_t mp_obj_new_list_iterator(mp_obj_t list, size_t cur, mp_obj_iter_buf_t *iter_buf);
+STATIC mp_obj_list_t *list_new(size_t n);
+STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in);
+STATIC mp_obj_t list_pop(size_t n_args, const mp_obj_t *args);
+
+// TODO: Move to mpconfig.h
+#define LIST_MIN_ALLOC 4
+
+/******************************************************************************/
+/* list */
+
+// Print a list either as repr ("[a, b]") or, when ujson is enabled and the
+// caller requested it, as JSON (possibly with a custom item separator).
+STATIC void list_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ mp_obj_list_t *o = MP_OBJ_TO_PTR(o_in);
+ const char *item_separator = ", ";
+ if (!(MICROPY_PY_UJSON && kind == PRINT_JSON)) {
+ // anything that is not JSON output is printed as repr
+ kind = PRINT_REPR;
+ } else {
+ #if MICROPY_PY_UJSON_SEPARATORS
+ item_separator = MP_PRINT_GET_EXT(print)->item_separator;
+ #endif
+ }
+ mp_print_str(print, "[");
+ for (size_t i = 0; i < o->len; i++) {
+ if (i > 0) {
+ mp_print_str(print, item_separator);
+ }
+ mp_obj_print_helper(print, o->items[i], kind);
+ }
+ mp_print_str(print, "]");
+}
+
+// Append every item produced by `iterable` to `list`; returns `list`.
+// Exceptions raised by the iterator propagate to the caller.
+STATIC mp_obj_t list_extend_from_iter(mp_obj_t list, mp_obj_t iterable) {
+ mp_obj_t iter = mp_getiter(iterable, NULL);
+ mp_obj_t item;
+ while ((item = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_obj_list_append(list, item);
+ }
+ return list;
+}
+
+// Build a fresh list containing all items yielded by `iterable`
+// (equivalent to Python's list(iterable)).
+mp_obj_t mp_obj_new_list_from_iter(mp_obj_t iterable) {
+ mp_obj_t list = mp_obj_new_list(0, NULL);
+ return list_extend_from_iter(list, iterable);
+}
+
+// Constructor for list(): with no args returns an empty list, with one arg
+// materializes the given iterable into a new list.
+STATIC mp_obj_t list_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+ mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+ switch (n_args) {
+ case 0:
+ // return a new, empty list
+ return mp_obj_new_list(0, NULL);
+
+ case 1:
+ default: {
+ // make list from iterable
+ // TODO: optimize list/tuple
+ mp_obj_t list = mp_obj_new_list(0, NULL);
+ return list_extend_from_iter(list, args[0]);
+ }
+ }
+}
+
+// Resolve `self_in` (which may be an instance of a list subclass) to the
+// underlying native mp_obj_list_t.
+STATIC mp_obj_list_t *native_list(mp_obj_t self_in) {
+ return MP_OBJ_TO_PTR(mp_obj_cast_to_native_base(self_in, MP_OBJ_FROM_PTR(&mp_type_list)));
+}
+
+// Unary operators on lists: bool(), len(), and (optionally) sys.getsizeof().
+// Returns MP_OBJ_NULL for unsupported ops, per the unary_op protocol.
+STATIC mp_obj_t list_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ mp_obj_list_t *self = native_list(self_in);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(self->len != 0);
+ case MP_UNARY_OP_LEN:
+ return MP_OBJ_NEW_SMALL_INT(self->len);
+ #if MICROPY_PY_SYS_GETSIZEOF
+ case MP_UNARY_OP_SIZEOF: {
+ // header plus the allocated (not just used) item slots
+ size_t sz = sizeof(*self) + sizeof(mp_obj_t) * self->alloc;
+ return MP_OBJ_NEW_SMALL_INT(sz);
+ }
+ #endif
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Binary operators on lists: + (concat), += (extend in place), * (repeat),
+// and the rich comparisons. Returns MP_OBJ_NULL for unsupported combinations
+// so the runtime can try the reflected operation or raise TypeError.
+STATIC mp_obj_t list_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
+ mp_obj_list_t *o = native_list(lhs);
+ switch (op) {
+ case MP_BINARY_OP_ADD: {
+ if (!mp_obj_is_type(rhs, &mp_type_list)) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ // allocate the exact combined size and copy both halves
+ mp_obj_list_t *p = MP_OBJ_TO_PTR(rhs);
+ mp_obj_list_t *s = list_new(o->len + p->len);
+ mp_seq_cat(s->items, o->items, o->len, p->items, p->len, mp_obj_t);
+ return MP_OBJ_FROM_PTR(s);
+ }
+ case MP_BINARY_OP_INPLACE_ADD: {
+ // += accepts any iterable, delegating to extend()
+ list_extend(lhs, rhs);
+ return lhs;
+ }
+ case MP_BINARY_OP_MULTIPLY: {
+ mp_int_t n;
+ if (!mp_obj_get_int_maybe(rhs, &n)) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ if (n < 0) {
+ // negative repeat counts yield an empty list, as in CPython
+ n = 0;
+ }
+ size_t new_len = mp_seq_multiply_len(o->len, n);
+ mp_obj_list_t *s = list_new(new_len);
+ mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
+ return MP_OBJ_FROM_PTR(s);
+ }
+ case MP_BINARY_OP_EQUAL:
+ case MP_BINARY_OP_LESS:
+ case MP_BINARY_OP_LESS_EQUAL:
+ case MP_BINARY_OP_MORE:
+ case MP_BINARY_OP_MORE_EQUAL: {
+ if (!mp_obj_is_type(rhs, &mp_type_list)) {
+ // list == non-list is False; ordered comparison is unsupported
+ if (op == MP_BINARY_OP_EQUAL) {
+ return mp_const_false;
+ }
+ return MP_OBJ_NULL; // op not supported
+ }
+
+ // lexicographic element-wise comparison
+ mp_obj_list_t *another = MP_OBJ_TO_PTR(rhs);
+ bool res = mp_seq_cmp_objs(op, o->items, o->len, another->items, another->len);
+ return mp_obj_new_bool(res);
+ }
+
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Subscript protocol for lists, covering all three modes:
+//   value == MP_OBJ_NULL     -> delete   (del l[i] / del l[a:b])
+//   value == MP_OBJ_SENTINEL -> load     (l[i] / l[a:b])
+//   otherwise                -> store    (l[i] = v / l[a:b] = seq)
+// Slices are only handled when they resolve to "fast" (step == 1) indexes;
+// other steps raise NotImplementedError for delete/store.
+STATIC mp_obj_t list_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ mp_obj_list_t *self = native_list(self_in);
+ if (value == MP_OBJ_NULL) {
+ // delete
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ mp_bound_slice_t slice;
+ if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+ mp_raise_NotImplementedError(NULL);
+ }
+
+ // len_adj is negative (or zero): the number of slots removed
+ mp_int_t len_adj = slice.start - slice.stop;
+ assert(len_adj <= 0);
+ mp_seq_replace_slice_no_grow(self->items, self->len, slice.start, slice.stop, self->items /*NULL*/, 0, sizeof(*self->items));
+ // Clear "freed" elements at the end of list
+ mp_seq_clear(self->items, self->len + len_adj, self->len, sizeof(*self->items));
+ self->len += len_adj;
+ return mp_const_none;
+ }
+ #endif
+ // single-index delete reuses pop(index), discarding the popped value
+ mp_obj_t args[2] = {MP_OBJ_FROM_PTR(self), index};
+ list_pop(2, args);
+ return mp_const_none;
+ } else if (value == MP_OBJ_SENTINEL) {
+ // load
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ mp_bound_slice_t slice;
+ if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+ // non-unit step: take the slow generic extraction path
+ return mp_seq_extract_slice(self->len, self->items, &slice);
+ }
+ mp_obj_list_t *res = list_new(slice.stop - slice.start);
+ mp_seq_copy(res->items, self->items + slice.start, res->len, mp_obj_t);
+ return MP_OBJ_FROM_PTR(res);
+ }
+ #endif
+ // mp_get_index handles negative indices and raises IndexError
+ size_t index_val = mp_get_index(self->base.type, self->len, index, false);
+ return self->items[index_val];
+ } else {
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ size_t value_len;
+ mp_obj_t *value_items;
+ mp_obj_get_array(value, &value_len, &value_items);
+ mp_bound_slice_t slice_out;
+ if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice_out)) {
+ mp_raise_NotImplementedError(NULL);
+ }
+ // positive len_adj means the list grows, negative means it shrinks
+ mp_int_t len_adj = value_len - (slice_out.stop - slice_out.start);
+ if (len_adj > 0) {
+ if (self->len + len_adj > self->alloc) {
+ // TODO: Might optimize memory copies here by checking if block can
+ // be grown inplace or not
+ self->items = m_renew(mp_obj_t, self->items, self->alloc, self->len + len_adj);
+ self->alloc = self->len + len_adj;
+ }
+ mp_seq_replace_slice_grow_inplace(self->items, self->len,
+ slice_out.start, slice_out.stop, value_items, value_len, len_adj, sizeof(*self->items));
+ } else {
+ mp_seq_replace_slice_no_grow(self->items, self->len,
+ slice_out.start, slice_out.stop, value_items, value_len, sizeof(*self->items));
+ // Clear "freed" elements at the end of list
+ mp_seq_clear(self->items, self->len + len_adj, self->len, sizeof(*self->items));
+ // TODO: apply allocation policy re: alloc_size
+ }
+ self->len += len_adj;
+ return mp_const_none;
+ }
+ #endif
+ mp_obj_list_store(self_in, index, value);
+ return mp_const_none;
+ }
+}
+
+// Iterator protocol entry point: build a list iterator in the caller's buffer.
+STATIC mp_obj_t list_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
+ return mp_obj_new_list_iterator(o_in, 0, iter_buf);
+}
+
+// list.append(arg): amortized O(1) append, doubling the allocation when full.
+// Returns None, as per CPython.
+mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+ if (self->len >= self->alloc) {
+ // double capacity (alloc is always >= LIST_MIN_ALLOC, so never 0)
+ self->items = m_renew(mp_obj_t, self->items, self->alloc, self->alloc * 2);
+ self->alloc *= 2;
+ // zero the newly added slots so the GC never sees stale pointers
+ mp_seq_clear(self->items, self->len + 1, self->alloc, sizeof(*self->items));
+ }
+ self->items[self->len++] = arg;
+ return mp_const_none; // return None, as per CPython
+}
+
+// list.extend(arg): fast path (memcpy) when arg is itself a list, otherwise
+// fall back to appending items one at a time from the iterator.
+STATIC mp_obj_t list_extend(mp_obj_t self_in, mp_obj_t arg_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ if (mp_obj_is_type(arg_in, &mp_type_list)) {
+ mp_obj_list_t *self = native_list(self_in);
+ mp_obj_list_t *arg = native_list(arg_in);
+
+ if (self->len + arg->len > self->alloc) {
+ // TODO: use alloc policy for "4"
+ self->items = m_renew(mp_obj_t, self->items, self->alloc, self->len + arg->len + 4);
+ self->alloc = self->len + arg->len + 4;
+ mp_seq_clear(self->items, self->len + arg->len, self->alloc, sizeof(*self->items));
+ }
+
+ memcpy(self->items + self->len, arg->items, sizeof(mp_obj_t) * arg->len);
+ self->len += arg->len;
+ } else {
+ list_extend_from_iter(self_in, arg_in);
+ }
+ return mp_const_none; // return None, as per CPython
+}
+
+// Remove and return the item at `index` (caller must pass a valid index).
+// Raises IndexError on an empty list; shrinks the allocation when usage
+// drops below half to return memory to the heap.
+inline mp_obj_t mp_obj_list_pop(mp_obj_list_t *self, size_t index) {
+ if (self->len == 0) {
+ mp_raise_IndexError_varg(MP_ERROR_TEXT("pop from empty %q"), MP_QSTR_list);
+ }
+ mp_obj_t ret = self->items[index];
+ self->len -= 1;
+ memmove(self->items + index, self->items + index + 1, (self->len - index) * sizeof(mp_obj_t));
+ // Clear stale pointer from slot which just got freed to prevent GC issues
+ self->items[self->len] = MP_OBJ_NULL;
+ if (self->alloc > LIST_MIN_ALLOC && self->alloc > 2 * self->len) {
+ self->items = m_renew(mp_obj_t, self->items, self->alloc, self->alloc / 2);
+ self->alloc /= 2;
+ }
+ return ret;
+}
+
+// list.pop([index]): with no index argument, pops the last item (-1).
+// mp_get_index normalizes negative indices and raises IndexError when
+// out of range.
+STATIC mp_obj_t list_pop(size_t n_args, const mp_obj_t *args) {
+ mp_check_self(mp_obj_is_type(args[0], &mp_type_list));
+ mp_obj_list_t *self = native_list(args[0]);
+ size_t index = mp_get_index(self->base.type, self->len, n_args == 1 ? MP_OBJ_NEW_SMALL_INT(-1) : args[1], false);
+ return mp_obj_list_pop(self, index);
+}
+
+// In-place quicksort over the inclusive range [head, tail].
+// key_fn, when not MP_OBJ_NULL, maps each element to its comparison key.
+// binop_less_result encodes sort direction: the result of `<` that should
+// count as "in order" (mp_const_true ascending, mp_const_false descending).
+// The smaller partition is recursed first and the larger handled by the
+// outer while-loop, bounding recursion depth at O(log N).
+STATIC void mp_quicksort(mp_obj_t *head, mp_obj_t *tail, mp_obj_t key_fn, mp_obj_t binop_less_result) {
+ MP_STACK_CHECK();
+ while (head < tail) {
+ mp_obj_t *h = head - 1;
+ mp_obj_t *t = tail;
+ mp_obj_t v = key_fn == MP_OBJ_NULL ? tail[0] : mp_call_function_1(key_fn, tail[0]); // get pivot using key_fn
+ for (;;) {
+ // advance h while elements are in order relative to pivot
+ do {++h;
+ } while (h < t && mp_binary_op(MP_BINARY_OP_LESS, key_fn == MP_OBJ_NULL ? h[0] : mp_call_function_1(key_fn, h[0]), v) == binop_less_result);
+ // retreat t while elements are in order relative to pivot
+ do {--t;
+ } while (h < t && mp_binary_op(MP_BINARY_OP_LESS, v, key_fn == MP_OBJ_NULL ? t[0] : mp_call_function_1(key_fn, t[0])) == binop_less_result);
+ if (h >= t) {
+ break;
+ }
+ mp_obj_t x = h[0];
+ h[0] = t[0];
+ t[0] = x;
+ }
+ // move the pivot into its final position
+ mp_obj_t x = h[0];
+ h[0] = tail[0];
+ tail[0] = x;
+ // do the smaller recursive call first, to keep stack within O(log(N))
+ if (t - head < tail - h - 1) {
+ mp_quicksort(head, t, key_fn, binop_less_result);
+ head = h + 1;
+ } else {
+ mp_quicksort(h + 1, tail, key_fn, binop_less_result);
+ tail = t;
+ }
+ }
+}
+
+// TODO Python defines sort to be stable but ours is not
+mp_obj_t mp_obj_list_sort(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ static const mp_arg_t allowed_args[] = {
+ { MP_QSTR_key, MP_ARG_KW_ONLY | MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+ { MP_QSTR_reverse, MP_ARG_KW_ONLY | MP_ARG_BOOL, {.u_bool = false} },
+ };
+
+ // parse args
+ struct {
+ mp_arg_val_t key, reverse;
+ } args;
+ mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args,
+ MP_ARRAY_SIZE(allowed_args), allowed_args, (mp_arg_val_t *)&args);
+
+ mp_check_self(mp_obj_is_type(pos_args[0], &mp_type_list));
+ mp_obj_list_t *self = native_list(pos_args[0]);
+
+ if (self->len > 1) {
+ mp_quicksort(self->items, self->items + self->len - 1,
+ args.key.u_obj == mp_const_none ? MP_OBJ_NULL : args.key.u_obj,
+ args.reverse.u_bool ? mp_const_false : mp_const_true);
+ }
+
+ return mp_const_none;
+}
+
+// list.clear(): drop all items and shrink the allocation back to the
+// minimum, zeroing the slots so the GC can reclaim the old contents.
+mp_obj_t mp_obj_list_clear(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+ self->len = 0;
+ self->items = m_renew(mp_obj_t, self->items, self->alloc, LIST_MIN_ALLOC);
+ self->alloc = LIST_MIN_ALLOC;
+ mp_seq_clear(self->items, 0, self->alloc, sizeof(*self->items));
+ return mp_const_none;
+}
+
+// list.copy(): shallow copy — item references are shared with the original.
+STATIC mp_obj_t list_copy(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+ return mp_obj_new_list(self->len, self->items);
+}
+
+// list.count(value): number of items equal to value.
+STATIC mp_obj_t list_count(mp_obj_t self_in, mp_obj_t value) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+ return mp_seq_count_obj(self->items, self->len, value);
+}
+
+// list.index(value[, start[, stop]]): index of first equal item; the generic
+// sequence helper handles the optional bounds and raises ValueError if absent.
+STATIC mp_obj_t list_index(size_t n_args, const mp_obj_t *args) {
+ mp_check_self(mp_obj_is_type(args[0], &mp_type_list));
+ mp_obj_list_t *self = native_list(args[0]);
+ return mp_seq_index_obj(self->items, self->len, n_args, args);
+}
+
+// Insert `obj` at `index` (must already be clamped to [0, len]).
+// Appends a placeholder first so the append path handles any reallocation,
+// then shifts the tail right by one.
+inline void mp_obj_list_insert(mp_obj_list_t *self, size_t index, mp_obj_t obj) {
+ mp_obj_list_append(MP_OBJ_FROM_PTR(self), mp_const_none);
+
+ for (size_t i = self->len - 1; i > index; --i) {
+ self->items[i] = self->items[i - 1];
+ }
+ self->items[index] = obj;
+}
+
+// list.insert(idx, obj): CPython clamps out-of-range indices rather than
+// raising, so negative indices wrap once and everything is then clamped
+// to [0, len].
+// NOTE(review): idx is read with MP_OBJ_SMALL_INT_VALUE without a small-int
+// check — presumably callers always pass a small int; verify for big-int idx.
+STATIC mp_obj_t list_insert(mp_obj_t self_in, mp_obj_t idx, mp_obj_t obj) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+ // insert has its own strange index logic
+ mp_int_t index = MP_OBJ_SMALL_INT_VALUE(idx);
+ if (index < 0) {
+ index += self->len;
+ }
+ if (index < 0) {
+ index = 0;
+ }
+ if ((size_t)index > self->len) {
+ index = self->len;
+ }
+ mp_obj_list_insert(self, index, obj);
+ return mp_const_none;
+}
+
+// list.remove(value): remove first occurrence, implemented as
+// pop(index(value)); index() raises ValueError when value is absent.
+mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_t args[] = {self_in, value};
+ args[1] = list_index(2, args);
+ list_pop(2, args);
+
+ return mp_const_none;
+}
+
+// list.reverse(): in-place reversal by swapping ends toward the middle.
+STATIC mp_obj_t list_reverse(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_list));
+ mp_obj_list_t *self = native_list(self_in);
+
+ mp_int_t len = self->len;
+ for (mp_int_t i = 0; i < len / 2; i++) {
+ mp_obj_t a = self->items[i];
+ self->items[i] = self->items[len - i - 1];
+ self->items[len - i - 1] = a;
+ }
+
+ return mp_const_none;
+}
+
+// Bound-method function objects for the list type's methods.
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_append_obj, mp_obj_list_append);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_extend_obj, list_extend);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_clear_obj, mp_obj_list_clear);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_copy_obj, list_copy);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_count_obj, list_count);
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(list_index_obj, 2, 4, list_index);
+STATIC MP_DEFINE_CONST_FUN_OBJ_3(list_insert_obj, list_insert);
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(list_pop_obj, 1, 2, list_pop);
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(list_remove_obj, mp_obj_list_remove);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(list_reverse_obj, list_reverse);
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(list_sort_obj, 1, mp_obj_list_sort);
+
+// ROM method table for the list type (what attribute lookup consults).
+STATIC const mp_rom_map_elem_t list_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_append), MP_ROM_PTR(&list_append_obj) },
+ { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&list_clear_obj) },
+ { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&list_copy_obj) },
+ { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&list_count_obj) },
+ { MP_ROM_QSTR(MP_QSTR_extend), MP_ROM_PTR(&list_extend_obj) },
+ { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&list_index_obj) },
+ { MP_ROM_QSTR(MP_QSTR_insert), MP_ROM_PTR(&list_insert_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&list_pop_obj) },
+ { MP_ROM_QSTR(MP_QSTR_remove), MP_ROM_PTR(&list_remove_obj) },
+ { MP_ROM_QSTR(MP_QSTR_reverse), MP_ROM_PTR(&list_reverse_obj) },
+ { MP_ROM_QSTR(MP_QSTR_sort), MP_ROM_PTR(&list_sort_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(list_locals_dict, list_locals_dict_table);
+
+// The built-in `list` type object; extended fields hold the operator slots.
+const mp_obj_type_t mp_type_list = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_list,
+ .print = list_print,
+ .make_new = list_make_new,
+ .locals_dict = (mp_obj_dict_t *)&list_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = list_unary_op,
+ .binary_op = list_binary_op,
+ .subscr = list_subscr,
+ .getiter = list_getiter,
+ ),
+};
+
+// Initialize an already-allocated list header for n items, allocating at
+// least LIST_MIN_ALLOC slots and zeroing the unused tail for the GC.
+void mp_obj_list_init(mp_obj_list_t *o, size_t n) {
+ o->base.type = &mp_type_list;
+ o->alloc = n < LIST_MIN_ALLOC ? LIST_MIN_ALLOC : n;
+ o->len = n;
+ o->items = m_new(mp_obj_t, o->alloc);
+ mp_seq_clear(o->items, n, o->alloc, sizeof(*o->items));
+}
+
+// Allocate and initialize a new list of length n; items are zeroed and must
+// be filled in by the caller.
+STATIC mp_obj_list_t *list_new(size_t n) {
+ mp_obj_list_t *o = m_new_obj(mp_obj_list_t);
+ mp_obj_list_init(o, n);
+ return o;
+}
+
+// Public constructor: new list of length n, optionally copying n items from
+// `items` (pass NULL to leave the slots zeroed for later assignment).
+mp_obj_t mp_obj_new_list(size_t n, mp_obj_t *items) {
+ mp_obj_list_t *o = list_new(n);
+ if (items != NULL) {
+ for (size_t i = 0; i < n; i++) {
+ o->items[i] = items[i];
+ }
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Expose a list's length and item array to C callers. The returned pointer
+// aliases the live storage and is invalidated by any mutation that reallocs.
+void mp_obj_list_get(mp_obj_t self_in, size_t *len, mp_obj_t **items) {
+ mp_obj_list_t *self = native_list(self_in);
+ *len = self->len;
+ *items = self->items;
+}
+
+// Force the list's length. No bounds check against alloc is done here.
+void mp_obj_list_set_len(mp_obj_t self_in, size_t len) {
+ // trust that the caller knows what it's doing
+ // TODO realloc if len got much smaller than alloc
+ mp_obj_list_t *self = MP_OBJ_TO_PTR(self_in);
+ self->len = len;
+}
+
+// l[index] = value for a single (non-slice) index; mp_get_index normalizes
+// negative indices and raises IndexError when out of range.
+void mp_obj_list_store(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ mp_obj_list_t *self = native_list(self_in);
+ size_t i = mp_get_index(self->base.type, self->len, index, false);
+ self->items[i] = value;
+}
+
+/******************************************************************************/
+/* list iterator */
+
+// List iterator state; sized to fit inside an mp_obj_iter_buf_t so iteration
+// needs no heap allocation (see the assert in mp_obj_new_list_iterator).
+typedef struct _mp_obj_list_it_t {
+ mp_obj_base_t base;
+ mp_fun_1_t iternext; // polymorphic-iterator next function
+ mp_obj_t list; // the list being iterated
+ size_t cur; // index of the next item to yield
+} mp_obj_list_it_t;
+
+// Yield the next item, or MP_OBJ_STOP_ITERATION when exhausted. Reads len on
+// every call, so appending during iteration yields the new items too.
+STATIC mp_obj_t list_it_iternext(mp_obj_t self_in) {
+ mp_obj_list_it_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_list_t *list = MP_OBJ_TO_PTR(self->list);
+ if (self->cur < list->len) {
+ mp_obj_t o_out = list->items[self->cur];
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+// Build a list iterator in the caller-supplied iteration buffer, starting at
+// index `cur`. The assert guarantees the iterator state fits in the buffer.
+// STATIC added to match the forward declaration at the top of this file; the
+// prior STATIC declaration already gave the symbol internal linkage (C11
+// 6.2.2), so no external caller can exist and behavior is unchanged.
+STATIC mp_obj_t mp_obj_new_list_iterator(mp_obj_t list, size_t cur, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_list_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_list_it_t *o = (mp_obj_list_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = list_it_iternext;
+ o->list = list;
+ o->cur = cur;
+ return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objlist.h b/circuitpython/py/objlist.h
new file mode 100644
index 0000000..eb005e8
--- /dev/null
+++ b/circuitpython/py/objlist.h
@@ -0,0 +1,42 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJLIST_H
+#define MICROPY_INCLUDED_PY_OBJLIST_H
+
+#include "py/obj.h"
+
+// In-memory representation of a Python list: a growable array of object
+// references. alloc >= len always holds; unused slots are kept zeroed.
+typedef struct _mp_obj_list_t {
+ mp_obj_base_t base;
+ size_t alloc; // number of slots allocated in items
+ size_t len; // number of slots currently in use
+ mp_obj_t *items; // heap-allocated item array
+} mp_obj_list_t;
+
+// C-level helpers shared with other core modules (see objlist.c).
+void mp_obj_list_init(mp_obj_list_t *o, size_t n);
+mp_obj_t mp_obj_list_pop(mp_obj_list_t *self, size_t index);
+void mp_obj_list_insert(mp_obj_list_t *self, size_t index, mp_obj_t obj);
+
+#endif // MICROPY_INCLUDED_PY_OBJLIST_H
diff --git a/circuitpython/py/objmap.c b/circuitpython/py/objmap.c
new file mode 100644
index 0000000..dc1dc13
--- /dev/null
+++ b/circuitpython/py/objmap.c
@@ -0,0 +1,76 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+// State for the built-in map() object: the mapping function plus one
+// iterator per input iterable (flexible array member).
+typedef struct _mp_obj_map_t {
+ mp_obj_base_t base;
+ size_t n_iters; // number of iterables supplied to map()
+ mp_obj_t fun; // function applied to each tuple of items
+ mp_obj_t iters[]; // one iterator per iterable
+} mp_obj_map_t;
+
+// Constructor for map(fun, iter1, ...): requires at least one iterable;
+// each argument after the function is converted to an iterator eagerly.
+STATIC mp_obj_t map_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 2, MP_OBJ_FUN_ARGS_MAX, false);
+ mp_obj_map_t *o = m_new_obj_var(mp_obj_map_t, mp_obj_t, n_args - 1);
+ o->base.type = type;
+ o->n_iters = n_args - 1;
+ o->fun = args[0];
+ for (size_t i = 0; i < n_args - 1; i++) {
+ o->iters[i] = mp_getiter(args[i + 1], NULL);
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Advance all underlying iterators by one and apply the function to the
+// collected items; stops as soon as any iterator is exhausted.
+// NOTE(review): nextses is only m_del'd on the StopIteration path — on the
+// success path it is presumably left to the GC; confirm this is intended.
+STATIC mp_obj_t map_iternext(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_map));
+ mp_obj_map_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t *nextses = m_new(mp_obj_t, self->n_iters);
+
+ for (size_t i = 0; i < self->n_iters; i++) {
+ mp_obj_t next = mp_iternext(self->iters[i]);
+ if (next == MP_OBJ_STOP_ITERATION) {
+ m_del(mp_obj_t, nextses, self->n_iters);
+ return MP_OBJ_STOP_ITERATION;
+ }
+ nextses[i] = next;
+ }
+ return mp_call_function_n_kw(self->fun, self->n_iters, 0, nextses);
+}
+
+// The built-in `map` type; map objects are their own iterators
+// (mp_identity_getiter).
+const mp_obj_type_t mp_type_map = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_map,
+ .make_new = map_make_new,
+ MP_TYPE_EXTENDED_FIELDS(
+ .getiter = mp_identity_getiter,
+ .iternext = map_iternext,
+ ),
+};
diff --git a/circuitpython/py/objmodule.c b/circuitpython/py/objmodule.c
new file mode 100644
index 0000000..a9d20c7
--- /dev/null
+++ b/circuitpython/py/objmodule.c
@@ -0,0 +1,329 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/gc.h"
+#include "py/objmodule.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+#include "genhdr/moduledefs.h"
+
+#if MICROPY_MODULE_BUILTIN_INIT
+STATIC void mp_module_call_init(mp_obj_t module_name, mp_obj_t module_obj);
+#endif
+
+// repr for module objects: "<module 'name'>" or, when __file__ is stored on
+// imported modules, "<module 'name' from 'path'>".
+STATIC void module_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+
+ const char *module_name = "";
+ mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ module_name = mp_obj_str_get_str(elem->value);
+ }
+
+ #if MICROPY_PY___FILE__
+ // If we store __file__ to imported modules then try to lookup this
+ // symbol to give more information about the module.
+ elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___file__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ mp_printf(print, "<module '%s' from '%s'>", module_name, mp_obj_str_get_str(elem->value));
+ return;
+ }
+ #endif
+
+ mp_printf(print, "<module '%s'>", module_name);
+}
+
+// Attribute protocol for modules. dest[0] == MP_OBJ_NULL requests a load
+// (result placed in dest[0]); otherwise dest[1] holds the value to store
+// (or MP_OBJ_NULL for delete) and success is signalled by dest[0] = NULL.
+STATIC void module_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+ if (dest[0] == MP_OBJ_NULL) {
+ // load attribute
+ mp_map_elem_t *elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ dest[0] = elem->value;
+ #if MICROPY_MODULE_GETATTR
+ } else if (attr != MP_QSTR___getattr__) {
+ // fall back to a module-level __getattr__ hook, if defined
+ elem = mp_map_lookup(&self->globals->map, MP_OBJ_NEW_QSTR(MP_QSTR___getattr__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ dest[0] = mp_call_function_1(elem->value, MP_OBJ_NEW_QSTR(attr));
+ }
+ #endif
+ }
+ } else {
+ // delete/store attribute
+ mp_obj_dict_t *dict = self->globals;
+ if (dict->map.is_fixed) {
+ // globals live in ROM: either redirect to the builtins override
+ // dict, accept a no-op store of the identical value, or refuse
+ mp_map_elem_t *elem = mp_map_lookup(&dict->map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ #if MICROPY_CAN_OVERRIDE_BUILTINS
+ if (dict == &mp_module_builtins_globals) {
+ if (MP_STATE_VM(mp_module_builtins_override_dict) == NULL) {
+ MP_STATE_VM(mp_module_builtins_override_dict) = gc_make_long_lived(MP_OBJ_TO_PTR(mp_obj_new_dict(1)));
+ }
+ dict = MP_STATE_VM(mp_module_builtins_override_dict);
+ } else
+ #endif
+ // Return success if the given value is already in the dictionary. This is the case for
+ // native packages with native submodules.
+ if (elem != NULL && elem->value == dest[1]) {
+ dest[0] = MP_OBJ_NULL; // indicate success
+ return;
+ } else {
+ // can't delete or store to fixed map
+ return;
+ }
+ }
+ if (dest[1] == MP_OBJ_NULL) {
+ // delete attribute
+ mp_obj_dict_delete(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr));
+ } else {
+ // store attribute; value is made long-lived because module globals
+ // typically survive for the whole session
+ mp_obj_t long_lived = MP_OBJ_FROM_PTR(gc_make_long_lived(MP_OBJ_TO_PTR(dest[1])));
+ // TODO CPython allows STORE_ATTR to a module, but is this the correct implementation?
+ mp_obj_dict_store(MP_OBJ_FROM_PTR(dict), MP_OBJ_NEW_QSTR(attr), long_lived);
+ }
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+}
+
+// The built-in `module` type object.
+const mp_obj_type_t mp_type_module = {
+ { &mp_type_type },
+ .name = MP_QSTR_module,
+ .print = module_print,
+ .attr = module_attr,
+};
+
+// Get-or-create a module object registered under `module_name` in the
+// loaded-modules dict. Returns the existing object when already present so
+// C extensions can add members to existing modules.
+mp_obj_t mp_obj_new_module(qstr module_name) {
+ mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+ mp_map_elem_t *el = mp_map_lookup(mp_loaded_modules_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ // We could error out if module already exists, but let C extensions
+ // add new members to existing modules.
+ if (el->value != MP_OBJ_NULL) {
+ return el->value;
+ }
+
+ // create new module object; allocated long-lived since modules persist
+ mp_obj_module_t *o = m_new_ll_obj(mp_obj_module_t);
+ o->base.type = &mp_type_module;
+ o->globals = gc_make_long_lived(MP_OBJ_TO_PTR(mp_obj_new_dict(MICROPY_MODULE_DICT_SIZE)));
+
+ // store __name__ entry in the module
+ mp_obj_dict_store(MP_OBJ_FROM_PTR(o->globals), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(module_name));
+
+ // store the new module into the slot in the global dict holding all modules
+ el->value = MP_OBJ_FROM_PTR(o);
+
+ // return the new module
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Accessor for a module's globals dict.
+mp_obj_dict_t *mp_obj_module_get_globals(mp_obj_t self_in) {
+ assert(mp_obj_is_type(self_in, &mp_type_module));
+ mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+ return self->globals;
+}
+
+// Replace a module's globals dict (used e.g. when re-executing a module).
+void mp_obj_module_set_globals(mp_obj_t self_in, mp_obj_dict_t *globals) {
+ assert(mp_obj_is_type(self_in, &mp_type_module));
+ mp_obj_module_t *self = MP_OBJ_TO_PTR(self_in);
+ self->globals = globals;
+}
+
+/******************************************************************************/
+// Global module table and related functions
+
+// ROM table of all built-in modules, selected at compile time by the
+// MICROPY_*/CIRCUITPY feature macros; wrapped by mp_builtin_module_map below.
+STATIC const mp_rom_map_elem_t mp_builtin_module_table[] = {
+ { MP_ROM_QSTR(MP_QSTR___main__), MP_ROM_PTR(&mp_module___main__) },
+ { MP_ROM_QSTR(MP_QSTR_builtins), MP_ROM_PTR(&mp_module_builtins) },
+ { MP_ROM_QSTR(MP_QSTR_micropython), MP_ROM_PTR(&mp_module_micropython) },
+
+ #if MICROPY_PY_IO
+ #if CIRCUITPY
+ // CircuitPython drops the "u" prefix for this module
+ { MP_ROM_QSTR(MP_QSTR_io), MP_ROM_PTR(&mp_module_io) },
+ #else
+ { MP_ROM_QSTR(MP_QSTR_uio), MP_ROM_PTR(&mp_module_io) },
+ #endif
+ #endif
+ #if MICROPY_PY_COLLECTIONS
+ { MP_ROM_QSTR(MP_QSTR_collections), MP_ROM_PTR(&mp_module_collections) },
+ #endif
+// CircuitPython: Now in shared-bindings/, so not defined here.
+ #if MICROPY_PY_STRUCT
+ { MP_ROM_QSTR(MP_QSTR_ustruct), MP_ROM_PTR(&mp_module_ustruct) },
+ #endif
+
+ #if MICROPY_PY_BUILTINS_FLOAT
+ #if MICROPY_PY_MATH
+ { MP_ROM_QSTR(MP_QSTR_math), MP_ROM_PTR(&mp_module_math) },
+ #endif
+ #if MICROPY_PY_BUILTINS_COMPLEX && MICROPY_PY_CMATH
+ { MP_ROM_QSTR(MP_QSTR_cmath), MP_ROM_PTR(&mp_module_cmath) },
+ #endif
+ #endif
+ #if MICROPY_PY_SYS
+ { MP_ROM_QSTR(MP_QSTR_sys), MP_ROM_PTR(&mp_module_sys) },
+ #endif
+ #if MICROPY_PY_GC && MICROPY_ENABLE_GC
+ { MP_ROM_QSTR(MP_QSTR_gc), MP_ROM_PTR(&mp_module_gc) },
+ #endif
+ #if MICROPY_PY_THREAD
+ { MP_ROM_QSTR(MP_QSTR__thread), MP_ROM_PTR(&mp_module_thread) },
+ #endif
+
+ // extmod modules
+
+ // Modules included in CircuitPython are registered using MP_REGISTER_MODULE,
+ // and do not have the "u" prefix.
+
+ #if MICROPY_PY_UASYNCIO && !CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR__uasyncio), MP_ROM_PTR(&mp_module_uasyncio) },
+ #endif
+ #if MICROPY_PY_UERRNO && !CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR_uerrno), MP_ROM_PTR(&mp_module_uerrno) },
+ #endif
+ #if MICROPY_PY_UCTYPES
+ { MP_ROM_QSTR(MP_QSTR_uctypes), MP_ROM_PTR(&mp_module_uctypes) },
+ #endif
+ #if MICROPY_PY_UZLIB
+ { MP_ROM_QSTR(MP_QSTR_uzlib), MP_ROM_PTR(&mp_module_uzlib) },
+ #endif
+ #if MICROPY_PY_UJSON && !CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR_ujson), MP_ROM_PTR(&mp_module_ujson) },
+ #endif
+ #if CIRCUITPY_ULAB
+ { MP_ROM_QSTR(MP_QSTR_ulab), MP_ROM_PTR(&ulab_user_cmodule) },
+ #endif
+ #if MICROPY_PY_URE && !CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR_ure), MP_ROM_PTR(&mp_module_ure) },
+ #endif
+ #if MICROPY_PY_UHEAPQ
+ { MP_ROM_QSTR(MP_QSTR_uheapq), MP_ROM_PTR(&mp_module_uheapq) },
+ #endif
+ #if MICROPY_PY_UTIMEQ
+ { MP_ROM_QSTR(MP_QSTR_utimeq), MP_ROM_PTR(&mp_module_utimeq) },
+ #endif
+ #if MICROPY_PY_UHASHLIB
+ { MP_ROM_QSTR(MP_QSTR_hashlib), MP_ROM_PTR(&mp_module_uhashlib) },
+ #endif
+ #if MICROPY_PY_UBINASCII && !CIRCUITPY
+ { MP_ROM_QSTR(MP_QSTR_ubinascii), MP_ROM_PTR(&mp_module_ubinascii) },
+ #endif
+ #if MICROPY_PY_URANDOM
+ { MP_ROM_QSTR(MP_QSTR_urandom), MP_ROM_PTR(&mp_module_urandom) },
+ #endif
+ #if MICROPY_PY_USELECT
+ { MP_ROM_QSTR(MP_QSTR_uselect), MP_ROM_PTR(&mp_module_uselect) },
+ #endif
+ #if MICROPY_PY_FRAMEBUF
+ { MP_ROM_QSTR(MP_QSTR_framebuf), MP_ROM_PTR(&mp_module_framebuf) },
+ #endif
+ #if MICROPY_PY_BTREE
+ { MP_ROM_QSTR(MP_QSTR_btree), MP_ROM_PTR(&mp_module_btree) },
+ #endif
+
+ // extra builtin modules as defined by a port
+ MICROPY_PORT_BUILTIN_MODULES
+
+ #ifdef MICROPY_REGISTERED_MODULES
+ // builtin modules declared with MP_REGISTER_MODULE()
+ MICROPY_REGISTERED_MODULES
+ #endif
+
+ #if defined(MICROPY_DEBUG_MODULES) && defined(MICROPY_PORT_BUILTIN_DEBUG_MODULES)
+ , MICROPY_PORT_BUILTIN_DEBUG_MODULES
+ #endif
+};
+
+MP_DEFINE_CONST_MAP(mp_builtin_module_map, mp_builtin_module_table);
+
+// Tries to find a loaded module, otherwise attempts to load a builtin, otherwise MP_OBJ_NULL.
+mp_obj_t mp_module_get_loaded_or_builtin(qstr module_name) {
+ // First try loaded modules.
+ mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_loaded_modules_dict).map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+
+ if (!elem) {
+ #if MICROPY_MODULE_WEAK_LINKS
+ return mp_module_get_builtin(module_name);
+ #else
+ // Otherwise try builtin.
+ elem = mp_map_lookup((mp_map_t *)&mp_builtin_module_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+ if (!elem) {
+ return MP_OBJ_NULL;
+ }
+
+ #if MICROPY_MODULE_BUILTIN_INIT
+ // If found, it's a newly loaded built-in, so init it.
+ mp_module_call_init(MP_OBJ_NEW_QSTR(module_name), elem->value);
+ #endif
+ #endif
+ }
+
+ return elem->value;
+}
+
+#if MICROPY_MODULE_WEAK_LINKS
+// Look up a builtin module by name (weak-links builds only), running its
+// __init__ on first load; returns MP_OBJ_NULL when not found.
+mp_obj_t mp_module_get_builtin(qstr module_name) {
+ // Try builtin.
+ mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&mp_builtin_module_map, MP_OBJ_NEW_QSTR(module_name), MP_MAP_LOOKUP);
+ if (!elem) {
+ return MP_OBJ_NULL;
+ }
+
+ #if MICROPY_MODULE_BUILTIN_INIT
+ // If found, it's a newly loaded built-in, so init it.
+ mp_module_call_init(MP_OBJ_NEW_QSTR(module_name), elem->value);
+ #endif
+
+ return elem->value;
+}
+#endif
+
+#if MICROPY_MODULE_BUILTIN_INIT
+STATIC void mp_module_register(mp_obj_t module_name, mp_obj_t module) {
+ mp_map_t *mp_loaded_modules_map = &MP_STATE_VM(mp_loaded_modules_dict).map;
+ mp_map_lookup(mp_loaded_modules_map, module_name, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = module;
+}
+
+STATIC void mp_module_call_init(mp_obj_t module_name, mp_obj_t module_obj) {
+ // Look for __init__ and call it if it exists
+ mp_obj_t dest[2];
+ mp_load_method_maybe(module_obj, MP_QSTR___init__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ mp_call_method_n_kw(0, 0, dest);
+ // Register module so __init__ is not called again.
+ // If a module can be referenced by more than one name (eg due to weak links)
+ // then __init__ will still be called for each distinct import, and it's then
+ // up to the particular module to make sure it's __init__ code only runs once.
+ mp_module_register(module_name, module_obj);
+ }
+}
+#endif
diff --git a/circuitpython/py/objmodule.h b/circuitpython/py/objmodule.h
new file mode 100644
index 0000000..5e54dbf
--- /dev/null
+++ b/circuitpython/py/objmodule.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJMODULE_H
+#define MICROPY_INCLUDED_PY_OBJMODULE_H
+
+#include "py/obj.h"
+
+extern const mp_map_t mp_builtin_module_map;
+
+mp_obj_t mp_module_get_loaded_or_builtin(qstr module_name);
+#if MICROPY_MODULE_WEAK_LINKS
+mp_obj_t mp_module_get_builtin(qstr module_name);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_OBJMODULE_H
diff --git a/circuitpython/py/objnamedtuple.c b/circuitpython/py/objnamedtuple.c
new file mode 100644
index 0000000..c970d1a
--- /dev/null
+++ b/circuitpython/py/objnamedtuple.c
@@ -0,0 +1,193 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/objtuple.h"
+#include "py/runtime.h"
+#include "py/objstr.h"
+#include "py/objnamedtuple.h"
+#include "py/objtype.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+// Returns the index of `name` in the namedtuple type's field list, or
+// (size_t)-1 if no field has that name. Linear scan over n_fields entries.
+size_t mp_obj_namedtuple_find_field(const mp_obj_namedtuple_type_t *type, qstr name) {
+    for (size_t i = 0; i < type->n_fields; i++) {
+        if (type->fields[i] == name) {
+            return i;
+        }
+    }
+    return (size_t)-1;
+}
+
+#if MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT
+// Implements namedtuple._asdict(): builds a new dict (an OrderedDict when
+// MICROPY_PY_COLLECTIONS_ORDEREDDICT is enabled) mapping each field name to
+// the corresponding tuple item, in field order.
+STATIC mp_obj_t namedtuple_asdict(mp_obj_t self_in) {
+    mp_obj_namedtuple_t *self = MP_OBJ_TO_PTR(self_in);
+    const qstr *fields = ((mp_obj_namedtuple_type_t *)self->tuple.base.type)->fields;
+    mp_obj_t dict = mp_obj_new_dict(self->tuple.len);
+    mp_obj_dict_t *dictObj = MP_OBJ_TO_PTR(dict);
+    #if MICROPY_PY_COLLECTIONS_ORDEREDDICT
+    // make it an OrderedDict by retyping the freshly created dict in place
+    dictObj->base.type = &mp_type_ordereddict;
+    dictObj->map.is_ordered = 1;
+    #else
+    dictObj->base.type = &mp_type_dict;
+    dictObj->map.is_ordered = 0;
+    #endif
+
+    for (size_t i = 0; i < self->tuple.len; ++i) {
+        mp_obj_dict_store(dict, MP_OBJ_NEW_QSTR(fields[i]), self->tuple.items[i]);
+    }
+    return dict;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(namedtuple_asdict_obj, namedtuple_asdict);
+#endif
+
+// Prints a namedtuple as `TypeName(...)`: the type name is emitted here and
+// the shared attrtuple helper formats the field=value list. `kind` is ignored.
+void namedtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind;
+    mp_obj_namedtuple_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_printf(print, "%q", o->tuple.base.type->name);
+    const qstr *fields = ((mp_obj_namedtuple_type_t *)o->tuple.base.type)->fields;
+    mp_obj_attrtuple_print_helper(print, fields, &o->tuple);
+}
+
+// Attribute handler for namedtuple instances: on load, resolves a field name
+// to the corresponding tuple item (and, when enabled, the _asdict bound
+// method); store/delete always raises, since namedtuples are immutable.
+void namedtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] == MP_OBJ_NULL) {
+        // load attribute
+        mp_obj_namedtuple_t *self = MP_OBJ_TO_PTR(self_in);
+        #if MICROPY_PY_COLLECTIONS_NAMEDTUPLE__ASDICT
+        if (attr == MP_QSTR__asdict) {
+            dest[0] = MP_OBJ_FROM_PTR(&namedtuple_asdict_obj);
+            dest[1] = self_in;
+            return;
+        }
+        #endif
+        size_t id = mp_obj_namedtuple_find_field((mp_obj_namedtuple_type_t *)self->tuple.base.type, attr);
+        if (id == (size_t)-1) {
+            // unknown field: leave dest untouched so the caller reports the failure
+            return;
+        }
+        dest[0] = self->tuple.items[id];
+    } else {
+        // delete/store attribute
+        // provide more detailed error message than we'd get by just returning
+        mp_raise_AttributeError(MP_ERROR_TEXT("can't set attribute"));
+    }
+}
+
+// Constructor for namedtuple instances. Exactly n_fields arguments are
+// required in total (positional + keyword); keywords are placed into the
+// slot matching their field name. Raises TypeError on arity mismatch,
+// unknown keyword, or a keyword duplicating an already-filled slot.
+mp_obj_t namedtuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    const mp_obj_namedtuple_type_t *type = (const mp_obj_namedtuple_type_t *)type_in;
+    size_t num_fields = type->n_fields;
+    if (n_args + n_kw != num_fields) {
+        // error-message detail scales with the configured reporting level
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_arg_error_terse_mismatch();
+        #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("function takes %d positional arguments but %d were given"),
+            num_fields, n_args + n_kw);
+        #else
+        mp_raise_TypeError_varg(
+            MP_ERROR_TEXT("%q() takes %d positional arguments but %d were given"),
+            type->base.name, num_fields, n_args + n_kw);
+        #endif
+    }
+
+    // Create a tuple and set the type to this namedtuple
+    mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_fields, NULL));
+    tuple->base.type = type_in;
+
+    // Copy the positional args into the first slots of the namedtuple
+    memcpy(&tuple->items[0], args, sizeof(mp_obj_t) * n_args);
+
+    // Fill in the remaining slots with the keyword args.
+    // Zero the slots first: a zero (MP_OBJ_NULL) slot means "not yet
+    // assigned", which is how duplicate keywords are detected below.
+    memset(&tuple->items[n_args], 0, sizeof(mp_obj_t) * n_kw);
+    // args[] holds n_args positional values followed by n_kw (name, value) pairs
+    for (size_t i = n_args; i < n_args + 2 * n_kw; i += 2) {
+        qstr kw = mp_obj_str_get_qstr(args[i]);
+        size_t id = mp_obj_namedtuple_find_field(type, kw);
+        if (id == (size_t)-1) {
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_arg_error_terse_mismatch();
+            #else
+            mp_raise_msg_varg(&mp_type_TypeError, MP_ERROR_TEXT("unexpected keyword argument '%q'"), kw);
+            #endif
+        }
+        if (tuple->items[id] != MP_OBJ_NULL) {
+            // slot already filled by a positional arg or an earlier keyword
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_arg_error_terse_mismatch();
+            #else
+            mp_raise_msg_varg(&mp_type_TypeError,
+                MP_ERROR_TEXT("function got multiple values for argument '%q'"), kw);
+            #endif
+        }
+        tuple->items[id] = args[i + 1];
+    }
+
+    return MP_OBJ_FROM_PTR(tuple);
+}
+
+// Allocates a namedtuple type object with room for n_fields trailing qstrs
+// (flexible array member) and interns each field name from `fields`.
+// The embedded base type struct is zeroed; the caller fills in its slots.
+mp_obj_namedtuple_type_t *mp_obj_new_namedtuple_base(size_t n_fields, mp_obj_t *fields) {
+    mp_obj_namedtuple_type_t *o = m_new_obj_var(mp_obj_namedtuple_type_t, qstr, n_fields);
+    memset(&o->base, 0, sizeof(o->base));
+    o->n_fields = n_fields;
+    for (size_t i = 0; i < n_fields; i++) {
+        o->fields[i] = mp_obj_str_get_qstr(fields[i]);
+    }
+    return o;
+}
+
+// Builds the runtime type object for a new namedtuple class: it delegates
+// tuple behaviour (unary/binary ops, subscript, iteration, parent type) to
+// the tuple implementation, but prints with field names and resolves
+// attribute access to fields via namedtuple_attr.
+STATIC mp_obj_t mp_obj_new_namedtuple_type(qstr name, size_t n_fields, mp_obj_t *fields) {
+    mp_obj_namedtuple_type_t *o = mp_obj_new_namedtuple_base(n_fields, fields);
+    o->base.base.type = &mp_type_type;
+    o->base.flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED; // can match tuple
+    o->base.name = name;
+    o->base.print = namedtuple_print;
+    o->base.make_new = namedtuple_make_new;
+    o->base.MP_TYPE_UNARY_OP = mp_obj_tuple_unary_op;
+    o->base.MP_TYPE_BINARY_OP = mp_obj_tuple_binary_op;
+    o->base.attr = namedtuple_attr;
+    o->base.MP_TYPE_SUBSCR = mp_obj_tuple_subscr;
+    o->base.MP_TYPE_GETITER = mp_obj_tuple_getiter;
+    o->base.parent = &mp_type_tuple;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Python-level collections.namedtuple(name, fields). When
+// MICROPY_CPYTHON_COMPAT is enabled, `fields` may also be a single
+// whitespace-separated string (split here before use); otherwise it must
+// be a sequence of field-name strings.
+STATIC mp_obj_t new_namedtuple_type(mp_obj_t name_in, mp_obj_t fields_in) {
+    qstr name = mp_obj_str_get_qstr(name_in);
+    size_t n_fields;
+    mp_obj_t *fields;
+    #if MICROPY_CPYTHON_COMPAT
+    if (mp_obj_is_str(fields_in)) {
+        fields_in = mp_obj_str_split(1, &fields_in);
+    }
+    #endif
+    mp_obj_get_array(fields_in, &n_fields, &fields);
+    return mp_obj_new_namedtuple_type(name, n_fields, fields);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_namedtuple_obj, new_namedtuple_type);
+
+#endif // MICROPY_PY_COLLECTIONS
diff --git a/circuitpython/py/objnamedtuple.h b/circuitpython/py/objnamedtuple.h
new file mode 100644
index 0000000..4ebefb9
--- /dev/null
+++ b/circuitpython/py/objnamedtuple.h
@@ -0,0 +1,58 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MICROPY_INCLUDED_PY_OBJNAMEDTUPLE_H
+#define MICROPY_INCLUDED_PY_OBJNAMEDTUPLE_H
+
+#include <string.h>
+
+#include "py/nlr.h"
+#include "py/objtuple.h"
+#include "py/runtime.h"
+#include "py/objstr.h"
+
+#if MICROPY_PY_COLLECTIONS
+
+// A namedtuple "class": a full type object followed inline by its field
+// names (flexible array member, allocated together with the type).
+typedef struct _mp_obj_namedtuple_type_t {
+    mp_obj_full_type_t base;
+    size_t n_fields; // number of entries in fields[]
+    qstr fields[];   // field names, in positional order
+} mp_obj_namedtuple_type_t;
+
+// A namedtuple instance is just a tuple whose base.type points at a
+// mp_obj_namedtuple_type_t.
+typedef struct _mp_obj_namedtuple_t {
+    mp_obj_tuple_t tuple;
+} mp_obj_namedtuple_t;
+
+void namedtuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+size_t mp_obj_namedtuple_find_field(const mp_obj_namedtuple_type_t *type, qstr name);
+void namedtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest);
+mp_obj_namedtuple_type_t *mp_obj_new_namedtuple_base(size_t n_fields, mp_obj_t *fields);
+mp_obj_t namedtuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+#endif // MICROPY_PY_COLLECTIONS
+
+#endif // MICROPY_INCLUDED_PY_OBJNAMEDTUPLE_H
diff --git a/circuitpython/py/objnone.c b/circuitpython/py/objnone.c
new file mode 100644
index 0000000..2c33d89
--- /dev/null
+++ b/circuitpython/py/objnone.c
@@ -0,0 +1,58 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/obj.h"
+
+#if !MICROPY_OBJ_IMMEDIATE_OBJS
+// When None is not encoded as an immediate object it is a real (const)
+// object consisting of just a base; the single instance is defined below.
+typedef struct _mp_obj_none_t {
+    mp_obj_base_t base;
+} mp_obj_none_t;
+#endif
+
+// Prints None as "null" for JSON output (when ujson support is compiled
+// in), otherwise as Python's "None".
+STATIC void none_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    (void)self_in;
+    if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+        mp_print_str(print, "null");
+    } else {
+        mp_print_str(print, "None");
+    }
+}
+
+// The NoneType type object; only print and a generic unary op are needed.
+const mp_obj_type_t mp_type_NoneType = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_NoneType,
+    .print = none_print,
+    MP_TYPE_EXTENDED_FIELDS(
+        .unary_op = mp_generic_unary_op,
+        ),
+};
+
+#if !MICROPY_OBJ_IMMEDIATE_OBJS
+// The singleton None object (used only when None is not an immediate).
+const mp_obj_none_t mp_const_none_obj = {{&mp_type_NoneType}};
+#endif
diff --git a/circuitpython/py/objobject.c b/circuitpython/py/objobject.c
new file mode 100644
index 0000000..d9c75fa
--- /dev/null
+++ b/circuitpython/py/objobject.c
@@ -0,0 +1,124 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/objtype.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+// A plain `object` instance carries no state beyond its type pointer.
+typedef struct _mp_obj_object_t {
+    mp_obj_base_t base;
+} mp_obj_object_t;
+
+// Constructor for object(): accepts no arguments and returns a fresh,
+// empty instance of the requested type.
+STATIC mp_obj_t object_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)args;
+    mp_arg_check_num(n_args, n_kw, 0, 0, false);
+    mp_obj_object_t *o = m_new_obj(mp_obj_object_t);
+    o->base.type = type;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+#if MICROPY_CPYTHON_COMPAT
+// object.__init__(self): a deliberate no-op, provided for CPython
+// compatibility so subclasses can call super().__init__().
+STATIC mp_obj_t object___init__(mp_obj_t self) {
+    (void)self;
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(object___init___obj, object___init__);
+
+// object.__new__(cls): allocates an instance of a user-defined class
+// without running its __init__. Only user types are accepted.
+STATIC mp_obj_t object___new__(mp_obj_t cls) {
+    if (!mp_obj_is_type(cls, &mp_type_type) || !mp_obj_is_instance_type((mp_obj_type_t *)MP_OBJ_TO_PTR(cls))) {
+        mp_raise_TypeError(MP_ERROR_TEXT("__new__ arg must be a user-type"));
+    }
+    // This executes only "__new__" part of instance creation.
+    // TODO: This won't work well for classes with native bases.
+    // TODO: This is a hack, should be resolved along the lines of
+    // https://github.com/micropython/micropython/issues/606#issuecomment-43685883
+    const mp_obj_type_t *native_base; // out-param of mp_obj_new_instance; value unused here
+    return MP_OBJ_FROM_PTR(mp_obj_new_instance(MP_OBJ_TO_PTR(cls), &native_base));
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(object___new___fun_obj, object___new__);
+STATIC MP_DEFINE_CONST_STATICMETHOD_OBJ(object___new___obj, MP_ROM_PTR(&object___new___fun_obj));
+
+#if MICROPY_PY_DELATTR_SETATTR
+// object.__setattr__(self, name, value): stores directly into the
+// instance's member map. Only works on instances of user-defined types.
+STATIC mp_obj_t object___setattr__(mp_obj_t self_in, mp_obj_t attr, mp_obj_t value) {
+    if (!mp_obj_is_instance_type(mp_obj_get_type(self_in))) {
+        mp_raise_TypeError(MP_ERROR_TEXT("arg must be user-type"));
+    }
+
+    if (!mp_obj_is_str(attr)) {
+        mp_raise_TypeError(NULL);
+    }
+
+    mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_map_lookup(&self->members, attr, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_3(object___setattr___obj, object___setattr__);
+
+// object.__delattr__(self, name): removes the entry from the instance's
+// member map, raising AttributeError if it was not present.
+STATIC mp_obj_t object___delattr__(mp_obj_t self_in, mp_obj_t attr) {
+    if (!mp_obj_is_instance_type(mp_obj_get_type(self_in))) {
+        mp_raise_TypeError(MP_ERROR_TEXT("arg must be user-type"));
+    }
+
+    if (!mp_obj_is_str(attr)) {
+        mp_raise_TypeError(NULL);
+    }
+
+    mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+    if (mp_map_lookup(&self->members, attr, MP_MAP_LOOKUP_REMOVE_IF_FOUND) == NULL) {
+        mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("no such attribute"));
+    }
+    return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(object___delattr___obj, object___delattr__);
+#endif
+
+// Method table for the `object` type.
+// NOTE(review): the two inner MICROPY_CPYTHON_COMPAT guards are redundant —
+// this table is already inside the #if MICROPY_CPYTHON_COMPAT block opened
+// above; they are harmless but could be removed.
+STATIC const mp_rom_map_elem_t object_locals_dict_table[] = {
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR___init__), MP_ROM_PTR(&object___init___obj) },
+    #endif
+    #if MICROPY_CPYTHON_COMPAT
+    { MP_ROM_QSTR(MP_QSTR___new__), MP_ROM_PTR(&object___new___obj) },
+    #endif
+    #if MICROPY_PY_DELATTR_SETATTR
+    { MP_ROM_QSTR(MP_QSTR___setattr__), MP_ROM_PTR(&object___setattr___obj) },
+    { MP_ROM_QSTR(MP_QSTR___delattr__), MP_ROM_PTR(&object___delattr___obj) },
+    #endif
+};
+
+STATIC MP_DEFINE_CONST_DICT(object_locals_dict, object_locals_dict_table);
+#endif
+
+// The `object` base type; its locals dict is only present when CPython
+// compatibility methods are compiled in.
+const mp_obj_type_t mp_type_object = {
+    { &mp_type_type },
+    .name = MP_QSTR_object,
+    .make_new = object_make_new,
+    #if MICROPY_CPYTHON_COMPAT
+    .locals_dict = (mp_obj_dict_t *)&object_locals_dict,
+    #endif
+};
diff --git a/circuitpython/py/objpolyiter.c b/circuitpython/py/objpolyiter.c
new file mode 100644
index 0000000..20f1238
--- /dev/null
+++ b/circuitpython/py/objpolyiter.c
@@ -0,0 +1,56 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/runtime.h"
+
+// This is universal iterator type which calls "iternext" method stored in
+// particular object instance. (So, each instance of this type can have its
+// own iteration behavior.) Having this type saves having to define type
+// objects for various internal iterator objects.
+
+// Any instance should have these 2 fields at the beginning
+typedef struct _mp_obj_polymorph_iter_t {
+    mp_obj_base_t base;
+    mp_fun_1_t iternext; // per-instance iteration function
+} mp_obj_polymorph_iter_t;
+
+// Trampoline: forwards the iternext call to the function pointer stored
+// in the instance itself.
+STATIC mp_obj_t polymorph_it_iternext(mp_obj_t self_in) {
+    mp_obj_polymorph_iter_t *self = MP_OBJ_TO_PTR(self_in);
+    // Redirect call to object instance's iternext method
+    return self->iternext(self_in);
+}
+
+// Shared type object for all polymorphic iterators; iterating it returns
+// itself (identity getiter), and next() goes through the trampoline above.
+const mp_obj_type_t mp_type_polymorph_iter = {
+    { &mp_type_type },
+    .flags = MP_TYPE_FLAG_EXTENDED,
+    .name = MP_QSTR_iterator,
+    MP_TYPE_EXTENDED_FIELDS(
+        .getiter = mp_identity_getiter,
+        .iternext = polymorph_it_iternext,
+        ),
+};
diff --git a/circuitpython/py/objproperty.c b/circuitpython/py/objproperty.c
new file mode 100644
index 0000000..e8ae509
--- /dev/null
+++ b/circuitpython/py/objproperty.c
@@ -0,0 +1,119 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/nlr.h"
+#include "py/objproperty.h"
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_PROPERTY
+
+// Constructor for property(fget, fset, fdel, doc). The first three
+// arguments are positional-only (empty MP_QSTR_ names); each defaults to
+// None. The `doc` argument is accepted for compatibility but discarded.
+STATIC mp_obj_t property_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    enum { ARG_fget, ARG_fset, ARG_fdel, ARG_doc };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+        { MP_QSTR_doc, MP_ARG_OBJ, {.u_rom_obj = MP_ROM_NONE} },
+    };
+    mp_arg_val_t vals[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all_kw_array(n_args, n_kw, args, MP_ARRAY_SIZE(allowed_args), allowed_args, vals);
+
+    mp_obj_property_t *o = m_new_obj(mp_obj_property_t);
+    o->base.type = type;
+    o->proxy[0] = vals[ARG_fget].u_obj; // getter
+    o->proxy[1] = vals[ARG_fset].u_obj; // setter
+    o->proxy[2] = vals[ARG_fdel].u_obj; // deleter
+    // vals[ARG_doc] is silently discarded
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// property.getter(fn): returns a copy of the property with the getter
+// (proxy[0]) replaced. The original property is left unmodified.
+STATIC mp_obj_t property_getter(mp_obj_t self_in, mp_obj_t getter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t *)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[0] = getter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_getter_obj, property_getter);
+
+// property.setter(fn): returns a copy with the setter (proxy[1]) replaced.
+STATIC mp_obj_t property_setter(mp_obj_t self_in, mp_obj_t setter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t *)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[1] = setter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_setter_obj, property_setter);
+
+// property.deleter(fn): returns a copy with the deleter (proxy[2]) replaced.
+STATIC mp_obj_t property_deleter(mp_obj_t self_in, mp_obj_t deleter) {
+    mp_obj_property_t *p2 = m_new_obj(mp_obj_property_t);
+    *p2 = *(mp_obj_property_t *)MP_OBJ_TO_PTR(self_in);
+    p2->proxy[2] = deleter;
+    return MP_OBJ_FROM_PTR(p2);
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(property_deleter_obj, property_deleter);
+
+// Method table for the property type (decorator-style getter/setter/deleter).
+STATIC const mp_rom_map_elem_t property_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_getter), MP_ROM_PTR(&property_getter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_setter), MP_ROM_PTR(&property_setter_obj) },
+    { MP_ROM_QSTR(MP_QSTR_deleter), MP_ROM_PTR(&property_deleter_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(property_locals_dict, property_locals_dict_table);
+
+// The property type object.
+const mp_obj_type_t mp_type_property = {
+    { &mp_type_type },
+    .name = MP_QSTR_property,
+    .make_new = property_make_new,
+    .locals_dict = (mp_obj_dict_t *)&property_locals_dict,
+};
+
+#if MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE
+// Section boundary symbols provided by the linker script: properties placed
+// in the .property_getter / .property_getset sections are truncated objects
+// holding only 1 or 2 proxy slots, so an address-range check below recovers
+// how many slots are valid.
+extern const mp_obj_property_t __property_getter_start, __property_getter_end, __property_getset_start, __property_getset_end;
+#endif
+
+// Returns the proxy array (getter, setter, deleter) of a property object,
+// storing in *n_proxy how many leading entries are actually present
+// (always 3 when the flash-size optimization is disabled).
+const mp_obj_t *mp_obj_property_get(mp_obj_t self_in, size_t *n_proxy) {
+    mp_check_self(mp_obj_is_type(self_in, &mp_type_property));
+    mp_obj_property_t *self = MP_OBJ_TO_PTR(self_in);
+    #if MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE
+    if (self >= &__property_getter_start && self < &__property_getter_end) {
+        *n_proxy = 1; // getter-only object in .property_getter
+    } else if (self >= &__property_getset_start && self < &__property_getset_end) {
+        *n_proxy = 2; // getter+setter object in .property_getset
+    } else {
+        *n_proxy = 3; // full RAM/ROM property with all three slots
+    }
+    #else
+    *n_proxy = 3;
+    #endif
+    return self->proxy;
+}
+
+#endif // MICROPY_PY_BUILTINS_PROPERTY
diff --git a/circuitpython/py/objproperty.h b/circuitpython/py/objproperty.h
new file mode 100644
index 0000000..c1b5dba
--- /dev/null
+++ b/circuitpython/py/objproperty.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of the Micro Python project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJPROPERTY_H
+#define MICROPY_INCLUDED_PY_OBJPROPERTY_H
+
+#include "py/obj.h"
+
+#if MICROPY_PY_BUILTINS_PROPERTY
+
+// Full property object: all three proxy slots are present.
+typedef struct _mp_obj_property_t {
+    mp_obj_base_t base;
+    mp_obj_t proxy[3]; // getter, setter, deleter
+} mp_obj_property_t;
+
+#if MICROPY_PY_OPTIMIZE_PROPERTY_FLASH_SIZE
+// Truncated property layouts used when the flash-size optimization is on:
+// objects with fewer proxy slots are placed in dedicated linker sections so
+// mp_obj_property_get() can tell how many slots are valid by address range.
+typedef struct _mp_obj_property_getter_t {
+    mp_obj_base_t base;
+    mp_obj_t proxy[1]; // getter
+} mp_obj_property_getter_t;
+
+typedef struct _mp_obj_property_getset_t {
+    mp_obj_base_t base;
+    mp_obj_t proxy[2]; // getter, setter
+} mp_obj_property_getset_t;
+
+// Define a const getter-only / getter+setter property in its section.
+#define MP_PROPERTY_GETTER(P, G) const mp_obj_property_getter_t P __attribute((section(".property_getter"))) = {.base.type = &mp_type_property, .proxy = {G}}
+#define MP_PROPERTY_GETSET(P, G, S) const mp_obj_property_getset_t P __attribute((section(".property_getset"))) = {.base.type = &mp_type_property, .proxy = {G, S}}
+
+#else
+// Without the optimization the truncated layouts alias the full struct and
+// missing slots are filled with None.
+typedef struct _mp_obj_property_t mp_obj_property_getter_t;
+typedef struct _mp_obj_property_t mp_obj_property_getset_t;
+
+#define MP_PROPERTY_GETTER(P, G) const mp_obj_property_t P = {.base.type = &mp_type_property, .proxy = {G, MP_ROM_NONE, MP_ROM_NONE}}
+#define MP_PROPERTY_GETSET(P, G, S) const mp_obj_property_t P = {.base.type = &mp_type_property, .proxy = {G, S, MP_ROM_NONE}}
+#endif
+
+#endif // MICROPY_PY_BUILTINS_PROPERTY
+
+#endif // MICROPY_INCLUDED_PY_OBJPROPERTY_H
diff --git a/circuitpython/py/objrange.c b/circuitpython/py/objrange.c
new file mode 100644
index 0000000..b23ab23
--- /dev/null
+++ b/circuitpython/py/objrange.c
@@ -0,0 +1,235 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+/******************************************************************************/
+/* range iterator */
+
+// Range iterator state. Sized to fit inside an mp_obj_iter_buf_t (asserted
+// in mp_obj_new_range_iterator below) so iteration needs no heap allocation.
+typedef struct _mp_obj_range_it_t {
+ mp_obj_base_t base;
+ // TODO make these values generic objects or something
+ mp_int_t cur; // next value to be yielded
+ mp_int_t stop; // exclusive bound
+ mp_int_t step; // non-zero; sign gives direction
+} mp_obj_range_it_t;
+
+// Yield the next value as a small int, or MP_OBJ_STOP_ITERATION once the
+// exclusive stop bound is passed; handles both positive and negative steps.
+STATIC mp_obj_t range_it_iternext(mp_obj_t o_in) {
+ mp_obj_range_it_t *o = MP_OBJ_TO_PTR(o_in);
+ if ((o->step > 0 && o->cur < o->stop) || (o->step < 0 && o->cur > o->stop)) {
+ mp_obj_t o_out = MP_OBJ_NEW_SMALL_INT(o->cur);
+ o->cur += o->step;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+STATIC const mp_obj_type_t mp_type_range_it = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_iterator,
+ MP_TYPE_EXTENDED_FIELDS(
+ .getiter = mp_identity_getiter,
+ .iternext = range_it_iternext,
+ ),
+};
+
+// Build a range iterator in the caller-supplied iter_buf (no heap use).
+STATIC mp_obj_t mp_obj_new_range_iterator(mp_int_t cur, mp_int_t stop, mp_int_t step, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_range_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_range_it_t *o = (mp_obj_range_it_t *)iter_buf;
+ o->base.type = &mp_type_range_it;
+ o->cur = cur;
+ o->stop = stop;
+ o->step = step;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* range */
+
+// The range object itself: start/stop/step stored as machine ints.
+typedef struct _mp_obj_range_t {
+ mp_obj_base_t base;
+ // TODO make these values generic objects or something
+ mp_int_t start;
+ mp_int_t stop;
+ mp_int_t step;
+} mp_obj_range_t;
+
+// Print as "range(start, stop)", appending ", step" only when step != 1,
+// matching CPython's repr of range objects.
+STATIC void range_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "range(" INT_FMT ", " INT_FMT "", self->start, self->stop);
+ if (self->step == 1) {
+ mp_print_str(print, ")");
+ } else {
+ mp_printf(print, ", " INT_FMT ")", self->step);
+ }
+}
+
+// Constructor: range(stop), range(start, stop) or range(start, stop, step).
+// start defaults to 0 and step to 1; a zero step raises ValueError.
+STATIC mp_obj_t range_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 1, 3, false);
+
+ mp_obj_range_t *o = m_new_obj(mp_obj_range_t);
+ o->base.type = type;
+ o->start = 0;
+ o->step = 1;
+
+ if (n_args == 1) {
+ o->stop = mp_obj_get_int(args[0]);
+ } else {
+ o->start = mp_obj_get_int(args[0]);
+ o->stop = mp_obj_get_int(args[1]);
+ if (n_args == 3) {
+ o->step = mp_obj_get_int(args[2]);
+ if (o->step == 0) {
+ mp_raise_ValueError(MP_ERROR_TEXT("zero step"));
+ }
+ }
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Number of elements in the range: effectively ceil((stop-start)/step) using
+// integer arithmetic that works for both step signs, clamped to >= 0.
+STATIC mp_int_t range_len(mp_obj_range_t *self) {
+ // When computing length, need to take into account step!=1 and step<0.
+ mp_int_t len = self->stop - self->start + self->step;
+ if (self->step > 0) {
+ len -= 1;
+ } else {
+ len += 1;
+ }
+ len = len / self->step;
+ if (len < 0) {
+ len = 0;
+ }
+ return len;
+}
+
+// Unary ops: bool(range) is true iff non-empty, len(range) via range_len.
+STATIC mp_obj_t range_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_int_t len = range_len(self);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(len > 0);
+ case MP_UNARY_OP_LEN:
+ return MP_OBJ_NEW_SMALL_INT(len);
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+#if MICROPY_PY_BUILTINS_RANGE_BINOP
+// Equality: two ranges are equal iff they denote the same sequence, so step
+// (and for singletons even start+step mismatches) is ignored where the
+// resulting elements would still be identical.
+STATIC mp_obj_t range_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ if (!mp_obj_is_type(rhs_in, &mp_type_range) || op != MP_BINARY_OP_EQUAL) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ mp_obj_range_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+ mp_obj_range_t *rhs = MP_OBJ_TO_PTR(rhs_in);
+ mp_int_t lhs_len = range_len(lhs);
+ mp_int_t rhs_len = range_len(rhs);
+ return mp_obj_new_bool(
+ lhs_len == rhs_len
+ && (lhs_len == 0
+ || (lhs->start == rhs->start
+ && (lhs_len == 1 || lhs->step == rhs->step)))
+ );
+}
+#endif
+
+// Subscript (load only): r[i] returns start + i*step with bounds checking;
+// r[a:b:c] returns a new range object computed without materializing values.
+STATIC mp_obj_t range_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ if (value == MP_OBJ_SENTINEL) {
+ // load
+ mp_obj_range_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_int_t len = range_len(self);
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ mp_bound_slice_t slice;
+ mp_seq_get_fast_slice_indexes(len, index, &slice);
+ mp_obj_range_t *o = m_new_obj(mp_obj_range_t);
+ o->base.type = &mp_type_range;
+ o->start = self->start + slice.start * self->step;
+ o->stop = self->start + slice.stop * self->step;
+ o->step = slice.step * self->step;
+ if (slice.step < 0) {
+ // Negative slice steps have inclusive stop, so adjust for exclusive
+ // NOTE(review): adjustment uses self->step (the source range's step),
+ // not the combined o->step — verify against slice semantics upstream.
+ o->stop -= self->step;
+ }
+ return MP_OBJ_FROM_PTR(o);
+ }
+ #endif
+ size_t index_val = mp_get_index(self->base.type, len, index, false);
+ return MP_OBJ_NEW_SMALL_INT(self->start + index_val * self->step);
+ } else {
+ return MP_OBJ_NULL; // op not supported (ranges are immutable)
+ }
+}
+
+// Create an iterator over this range, placed in the supplied iter_buf.
+STATIC mp_obj_t range_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
+ mp_obj_range_t *o = MP_OBJ_TO_PTR(o_in);
+ return mp_obj_new_range_iterator(o->start, o->stop, o->step, iter_buf);
+}
+
+
+#if MICROPY_PY_BUILTINS_RANGE_ATTRS
+// Read-only attributes .start, .stop and .step; stores/deletes fall through.
+STATIC void range_attr(mp_obj_t o_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] != MP_OBJ_NULL) {
+ // not load attribute
+ return;
+ }
+ mp_obj_range_t *o = MP_OBJ_TO_PTR(o_in);
+ if (attr == MP_QSTR_start) {
+ dest[0] = mp_obj_new_int(o->start);
+ } else if (attr == MP_QSTR_stop) {
+ dest[0] = mp_obj_new_int(o->stop);
+ } else if (attr == MP_QSTR_step) {
+ dest[0] = mp_obj_new_int(o->step);
+ }
+}
+#endif
+
+// The builtin `range` type object, wiring the handlers defined above.
+const mp_obj_type_t mp_type_range = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_range,
+ .print = range_print,
+ .make_new = range_make_new,
+ #if MICROPY_PY_BUILTINS_RANGE_ATTRS
+ .attr = range_attr,
+ #endif
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = range_unary_op,
+ #if MICROPY_PY_BUILTINS_RANGE_BINOP
+ .binary_op = range_binary_op,
+ #endif
+ .subscr = range_subscr,
+ .getiter = range_getiter,
+ ),
+};
diff --git a/circuitpython/py/objreversed.c b/circuitpython/py/objreversed.c
new file mode 100644
index 0000000..fe517cc
--- /dev/null
+++ b/circuitpython/py/objreversed.c
@@ -0,0 +1,83 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_PY_BUILTINS_REVERSED
+
+typedef struct _mp_obj_reversed_t {
+ mp_obj_base_t base;
+ mp_obj_t seq; // sequence object that we are reversing
+ mp_uint_t cur_index; // current index, plus 1; 0=no more, 1=last one (index 0)
+} mp_obj_reversed_t;
+
+// Constructor: reversed(seq). Delegates to seq.__reversed__ when present,
+// otherwise builds an index-based iterator starting at len(seq).
+STATIC mp_obj_t reversed_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+ // check if __reversed__ exists, and if so delegate to it
+ mp_obj_t dest[2];
+ mp_load_method_maybe(args[0], MP_QSTR___reversed__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ return mp_call_method_n_kw(0, 0, dest);
+ }
+
+ mp_obj_reversed_t *o = m_new_obj(mp_obj_reversed_t);
+ o->base.type = type;
+ o->seq = args[0];
+ o->cur_index = mp_obj_get_int(mp_obj_len(args[0])); // start at the end of the sequence
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Walk the sequence backwards by decrementing cur_index and subscripting;
+// stops once index 0 has been yielded.
+STATIC mp_obj_t reversed_iternext(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_reversed));
+ mp_obj_reversed_t *self = MP_OBJ_TO_PTR(self_in);
+
+ // "raise" stop iteration if we are at the end (the start) of the sequence
+ if (self->cur_index == 0) {
+ return MP_OBJ_STOP_ITERATION;
+ }
+
+ // pre-decrement and index sequence
+ self->cur_index -= 1;
+ return mp_obj_subscr(self->seq, MP_OBJ_NEW_SMALL_INT(self->cur_index), MP_OBJ_SENTINEL);
+}
+
+// The builtin `reversed` type object.
+const mp_obj_type_t mp_type_reversed = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_reversed,
+ .make_new = reversed_make_new,
+ MP_TYPE_EXTENDED_FIELDS(
+ .getiter = mp_identity_getiter,
+ .iternext = reversed_iternext,
+ ),
+};
+
+#endif // MICROPY_PY_BUILTINS_REVERSED
diff --git a/circuitpython/py/objset.c b/circuitpython/py/objset.c
new file mode 100644
index 0000000..d7dda6b
--- /dev/null
+++ b/circuitpython/py/objset.c
@@ -0,0 +1,605 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/builtin.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_SET
+
+// A set (or frozenset) is a thin wrapper around the core mp_set_t hash table.
+typedef struct _mp_obj_set_t {
+ mp_obj_base_t base;
+ mp_set_t set;
+} mp_obj_set_t;
+
+// Iterator over a set's hash-table slots; sized to fit in an
+// mp_obj_iter_buf_t (asserted in set_getiter) to avoid heap allocation.
+typedef struct _mp_obj_set_it_t {
+ mp_obj_base_t base;
+ mp_fun_1_t iternext;
+ mp_obj_set_t *set;
+ size_t cur; // next slot index to examine
+} mp_obj_set_it_t;
+
+// True if o is a set, or a frozenset when frozensets are enabled.
+STATIC bool is_set_or_frozenset(mp_obj_t o) {
+ return mp_obj_is_type(o, &mp_type_set)
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ || mp_obj_is_type(o, &mp_type_frozenset)
+ #endif
+ ;
+}
+
+// This macro is shorthand for mp_check_self to verify the argument is a set.
+#define check_set(o) mp_check_self(mp_obj_is_type(o, &mp_type_set))
+
+// This macro is shorthand for mp_check_self to verify the argument is a
+// set or frozenset for methods that operate on both of these types.
+#define check_set_or_frozenset(o) mp_check_self(is_set_or_frozenset(o))
+
+// Print a set as "{a, b}" / "set()" and a frozenset as "frozenset({a, b})" /
+// "frozenset()", iterating the filled hash-table slots in storage order.
+STATIC void set_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ bool is_frozen = mp_obj_is_type(self_in, &mp_type_frozenset);
+ #endif
+ if (self->set.used == 0) {
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ if (is_frozen) {
+ mp_print_str(print, "frozen");
+ }
+ #endif
+ mp_print_str(print, "set()");
+ return;
+ }
+ bool first = true;
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ if (is_frozen) {
+ mp_print_str(print, "frozenset(");
+ }
+ #endif
+ mp_print_str(print, "{");
+ for (size_t i = 0; i < self->set.alloc; i++) {
+ if (mp_set_slot_is_filled(&self->set, i)) {
+ if (!first) {
+ mp_print_str(print, ", ");
+ }
+ first = false;
+ mp_obj_print_helper(print, self->set.table[i], PRINT_REPR);
+ }
+ }
+ mp_print_str(print, "}");
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ if (is_frozen) {
+ mp_print_str(print, ")");
+ }
+ #endif
+}
+
+// Constructor shared by set and frozenset: set() makes an empty set,
+// set(iterable) fills one from the iterable; the concrete type is patched
+// onto the result so frozenset() gets the right type object.
+STATIC mp_obj_t set_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+ switch (n_args) {
+ case 0: {
+ // create a new, empty set
+ mp_obj_set_t *set = MP_OBJ_TO_PTR(mp_obj_new_set(0, NULL));
+ // set actual set/frozenset type
+ set->base.type = type;
+ return MP_OBJ_FROM_PTR(set);
+ }
+
+ case 1:
+ default: { // can only be 0 or 1 arg
+ // 1 argument, an iterable from which we make a new set
+ mp_obj_t set = mp_obj_new_set(0, NULL);
+ mp_obj_t iterable = mp_getiter(args[0], NULL);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ mp_obj_set_store(set, item);
+ }
+ // Set actual set/frozenset type
+ ((mp_obj_set_t *)MP_OBJ_TO_PTR(set))->base.type = type;
+ return set;
+ }
+ }
+}
+
+// Scan forward from the saved slot index for the next filled slot; returns
+// its element, or MP_OBJ_STOP_ITERATION when the table is exhausted.
+STATIC mp_obj_t set_it_iternext(mp_obj_t self_in) {
+ mp_obj_set_it_t *self = MP_OBJ_TO_PTR(self_in);
+ size_t max = self->set->set.alloc;
+ mp_set_t *set = &self->set->set;
+
+ for (size_t i = self->cur; i < max; i++) {
+ if (mp_set_slot_is_filled(set, i)) {
+ self->cur = i + 1;
+ return set->table[i];
+ }
+ }
+
+ return MP_OBJ_STOP_ITERATION;
+}
+
+// Build a set iterator in the caller-supplied iter_buf; uses the generic
+// polymorphic-iterator type with a per-instance iternext function pointer.
+STATIC mp_obj_t set_getiter(mp_obj_t set_in, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_set_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_set_it_t *o = (mp_obj_set_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = set_it_iternext;
+ o->set = (mp_obj_set_t *)MP_OBJ_TO_PTR(set_in);
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+/* set methods */
+
+STATIC mp_obj_t set_add(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_add_obj, set_add);
+
+STATIC mp_obj_t set_clear(mp_obj_t self_in) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_clear(&self->set);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_clear_obj, set_clear);
+
+STATIC mp_obj_t set_copy(mp_obj_t self_in) {
+ check_set_or_frozenset(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_set_t *other = m_new_obj(mp_obj_set_t);
+ other->base.type = self->base.type;
+ mp_set_init(&other->set, self->set.alloc);
+ other->set.used = self->set.used;
+ memcpy(other->set.table, self->set.table, self->set.alloc * sizeof(mp_obj_t));
+ return MP_OBJ_FROM_PTR(other);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_copy_obj, set_copy);
+
+STATIC mp_obj_t set_discard(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_discard_obj, set_discard);
+
+// Core of difference()/difference_update(): remove each element of every
+// args[1..] iterable from args[0] (update=true) or from a copy of it
+// (update=false). s.difference_update(s) short-circuits to clear().
+STATIC mp_obj_t set_diff_int(size_t n_args, const mp_obj_t *args, bool update) {
+ mp_obj_t self;
+ if (update) {
+ check_set(args[0]);
+ self = args[0];
+ } else {
+ self = set_copy(args[0]);
+ }
+
+ for (size_t i = 1; i < n_args; i++) {
+ mp_obj_t other = args[i];
+ if (self == other) {
+ set_clear(self);
+ } else {
+ mp_set_t *self_set = &((mp_obj_set_t *)MP_OBJ_TO_PTR(self))->set;
+ mp_obj_t iter = mp_getiter(other, NULL);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_set_lookup(self_set, next, MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ }
+ }
+ }
+
+ return self;
+}
+
+// set.difference(*others): new set, self minus all others.
+STATIC mp_obj_t set_diff(size_t n_args, const mp_obj_t *args) {
+ return set_diff_int(n_args, args, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_diff_obj, 1, set_diff);
+
+// set.difference_update(*others): in-place variant, returns None.
+STATIC mp_obj_t set_diff_update(size_t n_args, const mp_obj_t *args) {
+ set_diff_int(n_args, args, true);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_diff_update_obj, 1, set_diff_update);
+
+// Core of intersection()/intersection_update(): collect elements of `other`
+// that are already in self into a fresh set; for update=true the fresh
+// table is then swapped into self and the old table freed.
+STATIC mp_obj_t set_intersect_int(mp_obj_t self_in, mp_obj_t other, bool update) {
+ if (update) {
+ check_set(self_in);
+ } else {
+ check_set_or_frozenset(self_in);
+ }
+
+ if (self_in == other) {
+ return update ? mp_const_none : set_copy(self_in);
+ }
+
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_set_t *out = MP_OBJ_TO_PTR(mp_obj_new_set(0, NULL));
+
+ mp_obj_t iter = mp_getiter(other, NULL);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ if (mp_set_lookup(&self->set, next, MP_MAP_LOOKUP)) {
+ set_add(MP_OBJ_FROM_PTR(out), next);
+ }
+ }
+
+ if (update) {
+ // steal out's table into self, releasing self's old storage
+ m_del(mp_obj_t, self->set.table, self->set.alloc);
+ self->set.alloc = out->set.alloc;
+ self->set.used = out->set.used;
+ self->set.table = out->set.table;
+ }
+
+ return update ? mp_const_none : MP_OBJ_FROM_PTR(out);
+}
+
+// set.intersection(other): new set of common elements.
+STATIC mp_obj_t set_intersect(mp_obj_t self_in, mp_obj_t other) {
+ return set_intersect_int(self_in, other, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_intersect_obj, set_intersect);
+
+// set.intersection_update(other): in-place variant, returns None.
+STATIC mp_obj_t set_intersect_update(mp_obj_t self_in, mp_obj_t other) {
+ return set_intersect_int(self_in, other, true);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_intersect_update_obj, set_intersect_update);
+
+// set.isdisjoint(other): True iff no element of the iterable `other` is in
+// self; returns False at the first common element found.
+STATIC mp_obj_t set_isdisjoint(mp_obj_t self_in, mp_obj_t other) {
+ check_set_or_frozenset(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iter = mp_getiter(other, &iter_buf);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ if (mp_set_lookup(&self->set, next, MP_MAP_LOOKUP)) {
+ return mp_const_false;
+ }
+ }
+ return mp_const_true;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_isdisjoint_obj, set_isdisjoint);
+
+// Shared subset test: coerces non-set arguments into temporary sets, then
+// checks every element of self is in other; `proper` additionally requires
+// the sets to differ in size (strict subset). Temporaries are cleared
+// afterwards to release their tables.
+STATIC mp_obj_t set_issubset_internal(mp_obj_t self_in, mp_obj_t other_in, bool proper) {
+ mp_obj_set_t *self;
+ bool cleanup_self = false;
+ if (is_set_or_frozenset(self_in)) {
+ self = MP_OBJ_TO_PTR(self_in);
+ } else {
+ self = MP_OBJ_TO_PTR(set_make_new(&mp_type_set, 1, 0, &self_in));
+ cleanup_self = true;
+ }
+
+ mp_obj_set_t *other;
+ bool cleanup_other = false;
+ if (is_set_or_frozenset(other_in)) {
+ other = MP_OBJ_TO_PTR(other_in);
+ } else {
+ other = MP_OBJ_TO_PTR(set_make_new(&mp_type_set, 1, 0, &other_in));
+ cleanup_other = true;
+ }
+ mp_obj_t out = mp_const_true;
+ if (proper && self->set.used == other->set.used) {
+ // equal sizes can never be a proper subset
+ out = mp_const_false;
+ } else {
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iter = set_getiter(MP_OBJ_FROM_PTR(self), &iter_buf);
+ mp_obj_t next;
+ while ((next = set_it_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ if (!mp_set_lookup(&other->set, next, MP_MAP_LOOKUP)) {
+ out = mp_const_false;
+ break;
+ }
+ }
+ }
+ // TODO: Should free objects altogether
+ if (cleanup_self) {
+ set_clear(MP_OBJ_FROM_PTR(self));
+ }
+ if (cleanup_other) {
+ set_clear(MP_OBJ_FROM_PTR(other));
+ }
+ return out;
+}
+
+// set.issubset(other): self <= other.
+STATIC mp_obj_t set_issubset(mp_obj_t self_in, mp_obj_t other_in) {
+ return set_issubset_internal(self_in, other_in, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_issubset_obj, set_issubset);
+
+// Strict subset (self < other), used by the `<` binary operator.
+STATIC mp_obj_t set_issubset_proper(mp_obj_t self_in, mp_obj_t other_in) {
+ return set_issubset_internal(self_in, other_in, true);
+}
+
+// set.issuperset(other): implemented as other <= self.
+STATIC mp_obj_t set_issuperset(mp_obj_t self_in, mp_obj_t other_in) {
+ return set_issubset_internal(other_in, self_in, false);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_issuperset_obj, set_issuperset);
+
+// Strict superset (self > other), used by the `>` binary operator.
+STATIC mp_obj_t set_issuperset_proper(mp_obj_t self_in, mp_obj_t other_in) {
+ return set_issubset_internal(other_in, self_in, true);
+}
+
+// Equality: equal sizes plus subset relation implies equal sets. The caller
+// (set_binary_op) guarantees other_in is a set/frozenset.
+STATIC mp_obj_t set_equal(mp_obj_t self_in, mp_obj_t other_in) {
+ assert(is_set_or_frozenset(other_in));
+ check_set_or_frozenset(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_set_t *other = MP_OBJ_TO_PTR(other_in);
+ if (self->set.used != other->set.used) {
+ return mp_const_false;
+ }
+ return set_issubset(self_in, other_in);
+}
+
+// set.pop(): remove and return an arbitrary element; KeyError when empty.
+STATIC mp_obj_t set_pop(mp_obj_t self_in) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t obj = mp_set_remove_first(&self->set);
+ if (obj == MP_OBJ_NULL) {
+ mp_raise_msg_varg(&mp_type_KeyError, MP_ERROR_TEXT("pop from empty %q"), MP_QSTR_set);
+ }
+ return obj;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(set_pop_obj, set_pop);
+
+// set.remove(item): like discard() but raises KeyError if item is absent.
+STATIC mp_obj_t set_remove(mp_obj_t self_in, mp_obj_t item) {
+ check_set(self_in);
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ if (mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_REMOVE_IF_FOUND) == MP_OBJ_NULL) {
+ mp_raise_type_arg(&mp_type_KeyError, item);
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_remove_obj, set_remove);
+
+// set.symmetric_difference_update(other): toggle membership of each element
+// of `other` — add it if absent, remove it if present.
+STATIC mp_obj_t set_symmetric_difference_update(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set_or_frozenset(self_in); // can be frozenset due to call from set_symmetric_difference
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t iter = mp_getiter(other_in, NULL);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_set_lookup(&self->set, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND_OR_REMOVE_IF_FOUND);
+ }
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_symmetric_difference_update_obj, set_symmetric_difference_update);
+
+// set.symmetric_difference(other): non-mutating variant built on a copy.
+STATIC mp_obj_t set_symmetric_difference(mp_obj_t self_in, mp_obj_t other_in) {
+ mp_obj_t self_out = set_copy(self_in);
+ set_symmetric_difference_update(self_out, other_in);
+ return self_out;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_symmetric_difference_obj, set_symmetric_difference);
+
+// Insert every element of the iterable `other_in` into `self`.
+STATIC void set_update_int(mp_obj_set_t *self, mp_obj_t other_in) {
+ mp_obj_t iter = mp_getiter(other_in, NULL);
+ mp_obj_t next;
+ while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+ mp_set_lookup(&self->set, next, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ }
+}
+
+// set.update(*others): in-place union with each iterable argument.
+STATIC mp_obj_t set_update(size_t n_args, const mp_obj_t *args) {
+ check_set(args[0]);
+ for (size_t i = 1; i < n_args; i++) {
+ set_update_int(MP_OBJ_TO_PTR(args[0]), args[i]);
+ }
+
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR(set_update_obj, 1, set_update);
+
+// set.union(other): new set (same concrete type as self) with all elements
+// of self and other.
+STATIC mp_obj_t set_union(mp_obj_t self_in, mp_obj_t other_in) {
+ check_set_or_frozenset(self_in);
+ mp_obj_t self = set_copy(self_in);
+ set_update_int(MP_OBJ_TO_PTR(self), other_in);
+ return self;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(set_union_obj, set_union);
+
+// Unary ops: bool/len from the element count; hash only for frozenset
+// (an order-independent sum of element hashes, seeded with the type's
+// address so it differs from other container hashes). Plain sets fall
+// through to unsupported, making them unhashable.
+STATIC mp_obj_t set_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(self->set.used != 0);
+ case MP_UNARY_OP_LEN:
+ return MP_OBJ_NEW_SMALL_INT(self->set.used);
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ case MP_UNARY_OP_HASH:
+ if (mp_obj_is_type(self_in, &mp_type_frozenset)) {
+ // start hash with unique value
+ mp_int_t hash = (mp_int_t)(uintptr_t)&mp_type_frozenset;
+ size_t max = self->set.alloc;
+ mp_set_t *set = &self->set;
+
+ for (size_t i = 0; i < max; i++) {
+ if (mp_set_slot_is_filled(set, i)) {
+ hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, set->table[i]));
+ }
+ }
+ return MP_OBJ_NEW_SMALL_INT(hash);
+ }
+ MP_FALLTHROUGH
+ #endif
+ /* FALLTHROUGH */
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Binary operators for set/frozenset: | & ^ - map to union/intersection/
+// symmetric-difference/difference; their in-place forms mutate only when the
+// LHS is a mutable set (`update`), otherwise they build a new frozenset.
+// < <= == >= > map to the (proper) subset/superset tests, and `in` to a
+// direct hash lookup. Except for `in`, the RHS must be a set/frozenset.
+STATIC mp_obj_t set_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
+ mp_obj_t args[] = {lhs, rhs};
+ #if MICROPY_PY_BUILTINS_FROZENSET
+ bool update = mp_obj_is_type(lhs, &mp_type_set);
+ #else
+ bool update = true;
+ #endif
+ if (op != MP_BINARY_OP_CONTAINS && !is_set_or_frozenset(rhs)) {
+ // For all ops except containment the RHS must be a set/frozenset
+ return MP_OBJ_NULL;
+ }
+ switch (op) {
+ case MP_BINARY_OP_OR:
+ return set_union(lhs, rhs);
+ case MP_BINARY_OP_XOR:
+ return set_symmetric_difference(lhs, rhs);
+ case MP_BINARY_OP_AND:
+ return set_intersect(lhs, rhs);
+ case MP_BINARY_OP_SUBTRACT:
+ return set_diff(2, args);
+ case MP_BINARY_OP_INPLACE_OR:
+ if (update) {
+ set_update(2, args);
+ return lhs;
+ } else {
+ return set_union(lhs, rhs);
+ }
+ case MP_BINARY_OP_INPLACE_XOR:
+ if (update) {
+ set_symmetric_difference_update(lhs, rhs);
+ return lhs;
+ } else {
+ return set_symmetric_difference(lhs, rhs);
+ }
+ case MP_BINARY_OP_INPLACE_AND:
+ rhs = set_intersect_int(lhs, rhs, update);
+ if (update) {
+ return lhs;
+ } else {
+ return rhs;
+ }
+ case MP_BINARY_OP_INPLACE_SUBTRACT:
+ return set_diff_int(2, args, update);
+ case MP_BINARY_OP_LESS:
+ return set_issubset_proper(lhs, rhs);
+ case MP_BINARY_OP_MORE:
+ return set_issuperset_proper(lhs, rhs);
+ case MP_BINARY_OP_EQUAL:
+ return set_equal(lhs, rhs);
+ case MP_BINARY_OP_LESS_EQUAL:
+ return set_issubset(lhs, rhs);
+ case MP_BINARY_OP_MORE_EQUAL:
+ return set_issuperset(lhs, rhs);
+ case MP_BINARY_OP_CONTAINS: {
+ mp_obj_set_t *o = MP_OBJ_TO_PTR(lhs);
+ mp_obj_t elem = mp_set_lookup(&o->set, rhs, MP_MAP_LOOKUP);
+ return mp_obj_new_bool(elem != MP_OBJ_NULL);
+ }
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+/******************************************************************************/
+/* set constructors & public C API */
+
+// Method table for mutable sets (superset of the frozenset table below).
+STATIC const mp_rom_map_elem_t set_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_add), MP_ROM_PTR(&set_add_obj) },
+ { MP_ROM_QSTR(MP_QSTR_clear), MP_ROM_PTR(&set_clear_obj) },
+ { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&set_copy_obj) },
+ { MP_ROM_QSTR(MP_QSTR_discard), MP_ROM_PTR(&set_discard_obj) },
+ { MP_ROM_QSTR(MP_QSTR_difference), MP_ROM_PTR(&set_diff_obj) },
+ { MP_ROM_QSTR(MP_QSTR_difference_update), MP_ROM_PTR(&set_diff_update_obj) },
+ { MP_ROM_QSTR(MP_QSTR_intersection), MP_ROM_PTR(&set_intersect_obj) },
+ { MP_ROM_QSTR(MP_QSTR_intersection_update), MP_ROM_PTR(&set_intersect_update_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isdisjoint), MP_ROM_PTR(&set_isdisjoint_obj) },
+ { MP_ROM_QSTR(MP_QSTR_issubset), MP_ROM_PTR(&set_issubset_obj) },
+ { MP_ROM_QSTR(MP_QSTR_issuperset), MP_ROM_PTR(&set_issuperset_obj) },
+ { MP_ROM_QSTR(MP_QSTR_pop), MP_ROM_PTR(&set_pop_obj) },
+ { MP_ROM_QSTR(MP_QSTR_remove), MP_ROM_PTR(&set_remove_obj) },
+ { MP_ROM_QSTR(MP_QSTR_symmetric_difference), MP_ROM_PTR(&set_symmetric_difference_obj) },
+ { MP_ROM_QSTR(MP_QSTR_symmetric_difference_update), MP_ROM_PTR(&set_symmetric_difference_update_obj) },
+ { MP_ROM_QSTR(MP_QSTR_union), MP_ROM_PTR(&set_union_obj) },
+ { MP_ROM_QSTR(MP_QSTR_update), MP_ROM_PTR(&set_update_obj) },
+ { MP_ROM_QSTR(MP_QSTR___contains__), MP_ROM_PTR(&mp_op_contains_obj) },
+};
+STATIC MP_DEFINE_CONST_DICT(set_locals_dict, set_locals_dict_table);
+
+// The builtin `set` type object.
+const mp_obj_type_t mp_type_set = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_set,
+ .print = set_print,
+ .make_new = set_make_new,
+ .locals_dict = (mp_obj_dict_t *)&set_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = set_unary_op,
+ .binary_op = set_binary_op,
+ .getiter = set_getiter,
+ ),
+};
+
+#if MICROPY_PY_BUILTINS_FROZENSET
+// Method table for frozenset: only the non-mutating subset of set's methods.
+STATIC const mp_rom_map_elem_t frozenset_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_copy), MP_ROM_PTR(&set_copy_obj) },
+ { MP_ROM_QSTR(MP_QSTR_difference), MP_ROM_PTR(&set_diff_obj) },
+ { MP_ROM_QSTR(MP_QSTR_intersection), MP_ROM_PTR(&set_intersect_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isdisjoint), MP_ROM_PTR(&set_isdisjoint_obj) },
+ { MP_ROM_QSTR(MP_QSTR_issubset), MP_ROM_PTR(&set_issubset_obj) },
+ { MP_ROM_QSTR(MP_QSTR_issuperset), MP_ROM_PTR(&set_issuperset_obj) },
+ { MP_ROM_QSTR(MP_QSTR_symmetric_difference), MP_ROM_PTR(&set_symmetric_difference_obj) },
+ { MP_ROM_QSTR(MP_QSTR_union), MP_ROM_PTR(&set_union_obj) },
+ { MP_ROM_QSTR(MP_QSTR___contains__), MP_ROM_PTR(&mp_op_contains_obj) },
+};
+STATIC MP_DEFINE_CONST_DICT(frozenset_locals_dict, frozenset_locals_dict_table);
+
+// The builtin `frozenset` type object; shares handlers with `set`.
+const mp_obj_type_t mp_type_frozenset = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_frozenset,
+ .print = set_print,
+ .make_new = set_make_new,
+ .locals_dict = (mp_obj_dict_t *)&frozenset_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = set_unary_op,
+ .binary_op = set_binary_op,
+ .getiter = set_getiter,
+ ),
+};
+#endif
+
+// Public C API: create a new mutable set containing the given items
+// (duplicates are collapsed by the hash-table insert).
+mp_obj_t mp_obj_new_set(size_t n_args, mp_obj_t *items) {
+ mp_obj_set_t *o = m_new_obj(mp_obj_set_t);
+ o->base.type = &mp_type_set;
+ mp_set_init(&o->set, n_args);
+ for (size_t i = 0; i < n_args; i++) {
+ mp_set_lookup(&o->set, items[i], MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Public C API: insert item into an existing set (must be a mutable set).
+void mp_obj_set_store(mp_obj_t self_in, mp_obj_t item) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_set));
+ mp_obj_set_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_set_lookup(&self->set, item, MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+}
+
+#endif // MICROPY_PY_BUILTINS_SET
diff --git a/circuitpython/py/objsingleton.c b/circuitpython/py/objsingleton.c
new file mode 100644
index 0000000..dfa6876
--- /dev/null
+++ b/circuitpython/py/objsingleton.c
@@ -0,0 +1,59 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/obj.h"
+
+/******************************************************************************/
+/* singleton objects defined by Python */
+
+// A named singleton: prints as its qstr name (e.g. Ellipsis).
+typedef struct _mp_obj_singleton_t {
+ mp_obj_base_t base;
+ qstr name;
+} mp_obj_singleton_t;
+
+// Print the singleton's stored name.
+STATIC void singleton_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_singleton_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "%q", self->name);
+}
+
+// Shared type object for all named singleton instances.
+const mp_obj_type_t mp_type_singleton = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_,
+ .print = singleton_print,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = mp_generic_unary_op,
+ ),
+};
+
+// The singleton instances themselves, stored in ROM.
+const mp_obj_singleton_t mp_const_ellipsis_obj = {{&mp_type_singleton}, MP_QSTR_Ellipsis};
+#if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+const mp_obj_singleton_t mp_const_notimplemented_obj = {{&mp_type_singleton}, MP_QSTR_NotImplemented};
+#endif
diff --git a/circuitpython/py/objslice.c b/circuitpython/py/objslice.c
new file mode 100644
index 0000000..142f62f
--- /dev/null
+++ b/circuitpython/py/objslice.c
@@ -0,0 +1,213 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/obj.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+/******************************************************************************/
+/* slice object */
+
+#if MICROPY_PY_BUILTINS_SLICE
+
+// Print a slice as "slice(start, stop, step)", each component in repr form.
+STATIC void slice_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+    (void)kind; // str() and repr() are identical for slices
+    mp_obj_slice_t *o = MP_OBJ_TO_PTR(o_in);
+    mp_print_str(print, "slice(");
+    mp_obj_print_helper(print, o->start, PRINT_REPR);
+    mp_print_str(print, ", ");
+    mp_obj_print_helper(print, o->stop, PRINT_REPR);
+    mp_print_str(print, ", ");
+    mp_obj_print_helper(print, o->step, PRINT_REPR);
+    mp_print_str(print, ")");
+}
+
+#if MICROPY_PY_BUILTINS_SLICE_INDICES
+// Implements slice.indices(length): resolve the slice against a sequence of
+// the given length and return the concrete (start, stop, step) as a 3-tuple.
+STATIC mp_obj_t slice_indices(mp_obj_t self_in, mp_obj_t length_obj) {
+    mp_int_t length = mp_obj_int_get_checked(length_obj);
+    mp_bound_slice_t bound_indices;
+    mp_obj_slice_indices(self_in, length, &bound_indices); // does the clamping
+
+    mp_obj_t results[3] = {
+        MP_OBJ_NEW_SMALL_INT(bound_indices.start),
+        MP_OBJ_NEW_SMALL_INT(bound_indices.stop),
+        MP_OBJ_NEW_SMALL_INT(bound_indices.step),
+    };
+    return mp_obj_new_tuple(3, results);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(slice_indices_obj, slice_indices);
+#endif
+
+#if MICROPY_PY_BUILTINS_SLICE_ATTRS
+// Attribute handler exposing .start/.stop/.step (and .indices() as a bound
+// method when enabled). Only load is supported; stores fall through untouched.
+STATIC void slice_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+    if (dest[0] != MP_OBJ_NULL) {
+        // not load attribute
+        return;
+    }
+    mp_obj_slice_t *self = MP_OBJ_TO_PTR(self_in);
+
+    if (attr == MP_QSTR_start) {
+        dest[0] = self->start;
+    } else if (attr == MP_QSTR_stop) {
+        dest[0] = self->stop;
+    } else if (attr == MP_QSTR_step) {
+        dest[0] = self->step;
+        #if MICROPY_PY_BUILTINS_SLICE_INDICES
+    } else if (attr == MP_QSTR_indices) {
+        // dest[0]=function, dest[1]=self produces a bound method
+        dest[0] = MP_OBJ_FROM_PTR(&slice_indices_obj);
+        dest[1] = self_in;
+        #endif
+    }
+}
+#endif
+
+#if MICROPY_PY_BUILTINS_SLICE_ATTRS
+// Constructor for slice(stop) / slice(start, stop[, step]); subclassing is
+// rejected. One argument means only stop is given (start/step default None).
+STATIC mp_obj_t slice_make_new(const mp_obj_type_t *type,
+    size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    if (type != &mp_type_slice) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("Cannot subclass slice"));
+    }
+    // check number of arguments
+    mp_arg_check_num(n_args, n_kw, 1, 3, false);
+
+    // 1st argument is the pin
+    mp_obj_t start = mp_const_none;
+    mp_obj_t stop = mp_const_none;
+    mp_obj_t step = mp_const_none;
+    if (n_args == 1) {
+        stop = args[0];
+    } else {
+        start = args[0];
+        stop = args[1];
+        if (n_args == 3) {
+            step = args[2];
+        }
+    }
+
+    return mp_obj_new_slice(start, stop, step);
+}
+#endif
+
+// When .indices() is enabled but attribute access is not, expose indices()
+// via the locals dict instead of the attr handler.
+#if MICROPY_PY_BUILTINS_SLICE_INDICES && !MICROPY_PY_BUILTINS_SLICE_ATTRS
+STATIC const mp_rom_map_elem_t slice_locals_dict_table[] = {
+    { MP_ROM_QSTR(MP_QSTR_indices), MP_ROM_PTR(&slice_indices_obj) },
+};
+STATIC MP_DEFINE_CONST_DICT(slice_locals_dict, slice_locals_dict_table);
+#endif
+
+// Type object for slice; which hooks are present depends on the
+// SLICE_ATTRS / SLICE_INDICES configuration options.
+const mp_obj_type_t mp_type_slice = {
+    { &mp_type_type },
+    .name = MP_QSTR_slice,
+    .print = slice_print,
+    #if MICROPY_PY_BUILTINS_SLICE_INDICES || MICROPY_PY_BUILTINS_SLICE_ATTRS
+    .make_new = slice_make_new,
+    #endif
+    #if MICROPY_PY_BUILTINS_SLICE_ATTRS
+    .attr = slice_attr,
+    #elif MICROPY_PY_BUILTINS_SLICE_INDICES
+    .locals_dict = (mp_obj_dict_t *)&slice_locals_dict,
+    #endif
+};
+
+// Allocate a new slice object holding the given start/stop/step objects
+// (stored as-is; resolution against a length happens in mp_obj_slice_indices).
+mp_obj_t mp_obj_new_slice(mp_obj_t ostart, mp_obj_t ostop, mp_obj_t ostep) {
+    mp_obj_slice_t *o = m_new_obj(mp_obj_slice_t);
+    o->base.type = &mp_type_slice;
+    o->start = ostart;
+    o->stop = ostop;
+    o->step = ostep;
+    return MP_OBJ_FROM_PTR(o);
+}
+
+// Return the real index and step values for a slice when applied to a sequence of
+// the given length, resolving missing components, negative values and values off
+// the end of the sequence.
+// Raises ValueError for step == 0. Clamping matches CPython slice.indices():
+// for positive step indices are clamped to [0, length]; for negative step
+// to [-1, length-1] so that iteration can stop just before index 0.
+void mp_obj_slice_indices(mp_obj_t self_in, mp_int_t length, mp_bound_slice_t *result) {
+    mp_obj_slice_t *self = MP_OBJ_TO_PTR(self_in);
+    mp_int_t start, stop, step;
+
+    if (self->step == mp_const_none) {
+        step = 1;
+    } else {
+        step = mp_obj_get_int(self->step);
+        if (step == 0) {
+            mp_raise_ValueError(MP_ERROR_TEXT("slice step cannot be zero"));
+        }
+    }
+
+    if (step > 0) {
+        // Positive step
+        if (self->start == mp_const_none) {
+            start = 0;
+        } else {
+            start = mp_obj_get_int(self->start);
+            if (start < 0) {
+                // negative index counts from the end
+                start += length;
+            }
+            start = MIN(length, MAX(start, 0));
+        }
+
+        if (self->stop == mp_const_none) {
+            stop = length;
+        } else {
+            stop = mp_obj_get_int(self->stop);
+            if (stop < 0) {
+                stop += length;
+            }
+            stop = MIN(length, MAX(stop, 0));
+        }
+    } else {
+        // Negative step
+        if (self->start == mp_const_none) {
+            start = length - 1;
+        } else {
+            start = mp_obj_get_int(self->start);
+            if (start < 0) {
+                start += length;
+            }
+            start = MIN(length - 1, MAX(start, -1));
+        }
+
+        if (self->stop == mp_const_none) {
+            stop = -1;
+        } else {
+            stop = mp_obj_get_int(self->stop);
+            if (stop < 0) {
+                stop += length;
+            }
+            stop = MIN(length - 1, MAX(stop, -1));
+        }
+    }
+
+    result->start = start;
+    result->stop = stop;
+    result->step = step;
+}
+
+#endif
diff --git a/circuitpython/py/objstr.c b/circuitpython/py/objstr.c
new file mode 100644
index 0000000..3d45383
--- /dev/null
+++ b/circuitpython/py/objstr.c
@@ -0,0 +1,2274 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2018 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/unicode.h"
+#include "py/objstr.h"
+#include "py/objlist.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_STR_OP_MODULO
+STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_t *args, mp_obj_t dict);
+#endif
+
+STATIC mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf);
+STATIC NORETURN void bad_implicit_conversion(mp_obj_t self_in);
+
+const char nibble_to_hex_upper[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F'};
+
+const char nibble_to_hex_lower[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'a', 'b', 'c', 'd', 'e', 'f'};
+
+/******************************************************************************/
+/* str */
+
+// Print str/bytes data in repr form: choose a quote character (preferring
+// single quotes, like CPython), escape quotes/backslash/control chars, and
+// emit non-printables as \xNN. For bytes, chars >= 0x80 are also escaped.
+void mp_str_print_quoted(const mp_print_t *print, const byte *str_data, size_t str_len, bool is_bytes) {
+    // this escapes characters, but it will be very slow to print (calling print many times)
+    bool has_single_quote = false;
+    bool has_double_quote = false;
+    for (const byte *s = str_data, *top = str_data + str_len; !has_double_quote && s < top; s++) {
+        if (*s == '\'') {
+            has_single_quote = true;
+        } else if (*s == '"') {
+            has_double_quote = true;
+        }
+    }
+    // use double quotes only if the data has single quotes but no double quotes
+    int quote_char = '\'';
+    if (has_single_quote && !has_double_quote) {
+        quote_char = '"';
+    }
+    mp_printf(print, "%c", quote_char);
+    for (const byte *s = str_data, *top = str_data + str_len; s < top; s++) {
+        if (*s == quote_char) {
+            mp_printf(print, "\\%c", quote_char);
+        } else if (*s == '\\') {
+            mp_print_str(print, "\\\\");
+        } else if (*s >= 0x20 && *s != 0x7f && (!is_bytes || *s < 0x80)) {
+            // In strings, anything which is not ascii control character
+            // is printed as is, this includes characters in range 0x80-0xff
+            // (which can be non-Latin letters, etc.)
+            mp_printf(print, "%c", *s);
+        } else if (*s == '\n') {
+            mp_print_str(print, "\\n");
+        } else if (*s == '\r') {
+            mp_print_str(print, "\\r");
+        } else if (*s == '\t') {
+            mp_print_str(print, "\\t");
+        } else {
+            mp_printf(print, "\\x%02x", *s);
+        }
+    }
+    mp_printf(print, "%c", quote_char);
+}
+
+#if MICROPY_PY_UJSON
+// Print string data as a JSON string literal: double-quoted, with " and \
+// backslash-escaped, \n/\r/\t named escapes, and other control chars as \uNNNN.
+void mp_str_print_json(const mp_print_t *print, const byte *str_data, size_t str_len) {
+    // for JSON spec, see http://www.ietf.org/rfc/rfc4627.txt
+    // if we are given a valid utf8-encoded string, we will print it in a JSON-conforming way
+    mp_print_str(print, "\"");
+    for (const byte *s = str_data, *top = str_data + str_len; s < top; s++) {
+        if (*s == '"' || *s == '\\') {
+            mp_printf(print, "\\%c", *s);
+        } else if (*s >= 32) {
+            // this will handle normal and utf-8 encoded chars
+            mp_printf(print, "%c", *s);
+        } else if (*s == '\n') {
+            mp_print_str(print, "\\n");
+        } else if (*s == '\r') {
+            mp_print_str(print, "\\r");
+        } else if (*s == '\t') {
+            mp_print_str(print, "\\t");
+        } else {
+            // this will handle control chars
+            mp_printf(print, "\\u%04x", *s);
+        }
+    }
+    mp_print_str(print, "\"");
+}
+#endif
+
+// Top-level print hook for str/bytes: dispatch to JSON, raw, or quoted output
+// depending on the print kind and whether unicode strings are enabled.
+STATIC void str_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+    GET_STR_DATA_LEN(self_in, str_data, str_len);
+    #if MICROPY_PY_UJSON
+    if (kind == PRINT_JSON) {
+        mp_str_print_json(print, str_data, str_len);
+        return;
+    }
+    #endif
+    // with unicode enabled this handler only ever sees bytes objects
+    #if !MICROPY_PY_BUILTINS_STR_UNICODE
+    bool is_bytes = mp_obj_is_type(self_in, &mp_type_bytes);
+    #else
+    bool is_bytes = true;
+    #endif
+    if (kind == PRINT_RAW || (!MICROPY_PY_BUILTINS_STR_UNICODE && kind == PRINT_STR && !is_bytes)) {
+        print->print_strn(print->data, (const char *)str_data, str_len);
+    } else {
+        if (is_bytes) {
+            print->print_strn(print->data, "b", 1);
+        }
+        mp_str_print_quoted(print, str_data, str_len, is_bytes);
+    }
+}
+
+// Constructor for str(): str() -> '', str(obj) -> obj's str-representation,
+// str(bytes, encoding[, errors]) -> decode (encoding/errors not yet validated).
+mp_obj_t mp_obj_str_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    mp_arg_check_num(n_args, n_kw, 0, 3, false);
+
+    switch (n_args) {
+        case 0:
+            return MP_OBJ_NEW_QSTR(MP_QSTR_);
+
+        case 1: {
+            // str(obj): print the object into a growable buffer
+            vstr_t vstr;
+            mp_print_t print;
+            vstr_init_print(&vstr, 16, &print);
+            mp_obj_print_helper(&print, args[0], PRINT_STR);
+            return mp_obj_new_str_from_vstr(type, &vstr);
+        }
+
+        default: // 2 or 3 args
+            // TODO: validate 2nd/3rd args
+            if (mp_obj_is_type(args[0], &mp_type_bytes)) {
+                GET_STR_DATA_LEN(args[0], str_data, str_len);
+                GET_STR_HASH(args[0], str_hash);
+                if (str_hash == 0) {
+                    str_hash = qstr_compute_hash(str_data, str_len);
+                }
+                #if MICROPY_PY_BUILTINS_STR_UNICODE_CHECK
+                if (!utf8_check(str_data, str_len)) {
+                    mp_raise_msg(&mp_type_UnicodeError, NULL);
+                }
+                #endif
+
+                // Check if a qstr with this data already exists
+                qstr q = qstr_find_strn((const char *)str_data, str_len);
+                if (q != MP_QSTRnull) {
+                    return MP_OBJ_NEW_QSTR(q);
+                }
+
+                // share the bytes object's data rather than copying it
+                mp_obj_str_t *o = MP_OBJ_TO_PTR(mp_obj_new_str_copy(type, NULL, str_len));
+                o->data = str_data;
+                o->hash = str_hash;
+                return MP_OBJ_FROM_PTR(o);
+            } else {
+                mp_buffer_info_t bufinfo;
+                mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);
+                #if MICROPY_PY_BUILTINS_STR_UNICODE_CHECK
+                if (!utf8_check(bufinfo.buf, bufinfo.len)) {
+                    mp_raise_msg(&mp_type_UnicodeError, NULL);
+                }
+                #endif
+                return mp_obj_new_str(bufinfo.buf, bufinfo.len);
+            }
+    }
+}
+
+// Constructor for bytes(): supports bytes(), bytes(bytes), bytes(str, enc),
+// bytes(int) (zero-filled), __bytes__ delegation, buffer-protocol objects,
+// and finally any iterable of ints in range 0..255.
+STATIC mp_obj_t bytes_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    (void)type_in;
+
+    #if MICROPY_CPYTHON_COMPAT
+    if (n_kw) {
+        mp_arg_error_unimpl_kw();
+    }
+    #else
+    (void)n_kw;
+    #endif
+
+    if (n_args == 0) {
+        return mp_const_empty_bytes;
+    }
+
+    // bytes is immutable, so bytes(bytes) can return the argument itself
+    if (mp_obj_is_type(args[0], &mp_type_bytes)) {
+        return args[0];
+    }
+
+    if (mp_obj_is_str(args[0])) {
+        // bytes(str) requires an encoding argument
+        if (n_args < 2 || n_args > 3) {
+            goto wrong_args;
+        }
+        GET_STR_DATA_LEN(args[0], str_data, str_len);
+        GET_STR_HASH(args[0], str_hash);
+        if (str_hash == 0) {
+            str_hash = qstr_compute_hash(str_data, str_len);
+        }
+        // share the str's data rather than copying it
+        mp_obj_str_t *o = MP_OBJ_TO_PTR(mp_obj_new_str_copy(&mp_type_bytes, NULL, str_len));
+        o->data = str_data;
+        o->hash = str_hash;
+        return MP_OBJ_FROM_PTR(o);
+    }
+
+    if (n_args > 1) {
+        goto wrong_args;
+    }
+
+    if (mp_obj_is_small_int(args[0])) {
+        // bytes(n): n zero bytes
+        mp_int_t len = MP_OBJ_SMALL_INT_VALUE(args[0]);
+        if (len < 0) {
+            mp_raise_ValueError(NULL);
+        }
+        vstr_t vstr;
+        vstr_init_len(&vstr, len);
+        memset(vstr.buf, 0, len);
+        return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+    }
+
+    // check if __bytes__ exists, and if so delegate to it
+    mp_obj_t dest[2];
+    mp_load_method_maybe(args[0], MP_QSTR___bytes__, dest);
+    if (dest[0] != MP_OBJ_NULL) {
+        return mp_call_method_n_kw(0, 0, dest);
+    }
+
+    // check if argument has the buffer protocol
+    mp_buffer_info_t bufinfo;
+    if (mp_get_buffer(args[0], &bufinfo, MP_BUFFER_READ)) {
+        return mp_obj_new_bytes(bufinfo.buf, bufinfo.len);
+    }
+
+    vstr_t vstr;
+    // Try to create array of exact len if initializer len is known
+    mp_obj_t len_in = mp_obj_len_maybe(args[0]);
+    if (len_in == MP_OBJ_NULL) {
+        vstr_init(&vstr, 16);
+    } else {
+        // NOTE(review): assumes len_maybe returned a small int — confirm
+        mp_int_t len = MP_OBJ_SMALL_INT_VALUE(len_in);
+        vstr_init(&vstr, len);
+    }
+
+    mp_obj_iter_buf_t iter_buf;
+    mp_obj_t iterable = mp_getiter(args[0], &iter_buf);
+    mp_obj_t item;
+    while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+        mp_int_t val = mp_obj_get_int(item);
+        #if MICROPY_FULL_CHECKS
+        if (val < 0 || val > 255) {
+            mp_raise_ValueError(MP_ERROR_TEXT("bytes value out of range"));
+        }
+        #endif
+        vstr_add_byte(&vstr, val);
+    }
+
+    return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+
+wrong_args:
+    mp_raise_TypeError(MP_ERROR_TEXT("wrong number of arguments"));
+}
+
+// like strstr but with specified length and allows \0 bytes
+// TODO replace with something more efficient/standard
+// direction > 0 scans forward from the start, otherwise backward from the
+// end; returns a pointer to the first match, or NULL if not found.
+// Note: an empty needle (nlen == 0) matches at the first probed position.
+const byte *find_subbytes(const byte *haystack, size_t hlen, const byte *needle, size_t nlen, int direction) {
+    if (hlen >= nlen) {
+        size_t str_index, str_index_end;
+        if (direction > 0) {
+            str_index = 0;
+            str_index_end = hlen - nlen;
+        } else {
+            str_index = hlen - nlen;
+            str_index_end = 0;
+        }
+        for (;;) {
+            if (memcmp(&haystack[str_index], needle, nlen) == 0) {
+                // found
+                return haystack + str_index;
+            }
+            if (str_index == str_index_end) {
+                // not found
+                break;
+            }
+            str_index += direction;
+        }
+    }
+    return NULL;
+}
+
+// Note: this function is used to check if an object is a str or bytes, which
+// works because both those types use it as their binary_op method. Revisit
+// mp_obj_is_str_or_bytes if this fact changes.
+// Handles %, *, +/+=, 'in', and the comparison operators for str and bytes;
+// returns MP_OBJ_NULL for unsupported operator/operand combinations.
+mp_obj_t mp_obj_str_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+    // check for modulo
+    if (op == MP_BINARY_OP_MODULO) {
+        #if MICROPY_PY_BUILTINS_STR_OP_MODULO
+        // a single non-tuple rhs is treated as a 1-element argument list
+        mp_obj_t *args = &rhs_in;
+        size_t n_args = 1;
+        mp_obj_t dict = MP_OBJ_NULL;
+        if (mp_obj_is_type(rhs_in, &mp_type_tuple)) {
+            // TODO: Support tuple subclasses?
+            mp_obj_tuple_get(rhs_in, &n_args, &args);
+        } else if (mp_obj_is_type(rhs_in, &mp_type_dict)) {
+            dict = rhs_in;
+        }
+        return str_modulo_format(lhs_in, n_args, args, dict);
+        #else
+        return MP_OBJ_NULL;
+        #endif
+    }
+
+    // from now on we need lhs type and data, so extract them
+    const mp_obj_type_t *lhs_type = mp_obj_get_type(lhs_in);
+    GET_STR_DATA_LEN(lhs_in, lhs_data, lhs_len);
+
+    // check for multiply
+    if (op == MP_BINARY_OP_MULTIPLY) {
+        mp_int_t n;
+        if (!mp_obj_get_int_maybe(rhs_in, &n)) {
+            return MP_OBJ_NULL; // op not supported
+        }
+        if (n <= 0) {
+            if (lhs_type == &mp_type_str) {
+                return MP_OBJ_NEW_QSTR(MP_QSTR_); // empty str
+            } else {
+                return mp_const_empty_bytes;
+            }
+        }
+        size_t new_len = mp_seq_multiply_len(lhs_len, n);
+        vstr_t vstr;
+        vstr_init_len(&vstr, new_len);
+        mp_seq_multiply(lhs_data, sizeof(*lhs_data), lhs_len, n, vstr.buf);
+        return mp_obj_new_str_from_vstr(lhs_type, &vstr);
+    }
+
+    // From now on all operations allow:
+    //    - str with str
+    //    - bytes with bytes
+    //    - bytes with bytearray
+    //    - bytes with array.array
+    // To do this efficiently we use the buffer protocol to extract the raw
+    // data for the rhs, but only if the lhs is a bytes object.
+    //
+    // NOTE: CPython does not allow comparison between bytes ard array.array
+    // (even if the array is of type 'b'), even though it allows addition of
+    // such types. We are not compatible with this (we do allow comparison
+    // of bytes with anything that has the buffer protocol). It would be
+    // easy to "fix" this with a bit of extra logic below, but it costs code
+    // size and execution time so we don't.
+
+    const byte *rhs_data;
+    size_t rhs_len;
+    if (lhs_type == mp_obj_get_type(rhs_in)) {
+        // same type: read the rhs data directly
+        GET_STR_DATA_LEN(rhs_in, rhs_data_, rhs_len_);
+        rhs_data = rhs_data_;
+        rhs_len = rhs_len_;
+    } else if (lhs_type == &mp_type_bytes) {
+        mp_buffer_info_t bufinfo;
+        if (!mp_get_buffer(rhs_in, &bufinfo, MP_BUFFER_READ)) {
+            return MP_OBJ_NULL; // op not supported
+        }
+        rhs_data = bufinfo.buf;
+        rhs_len = bufinfo.len;
+    } else {
+        // LHS is str and RHS has an incompatible type
+        // (except if operation is EQUAL, but that's handled by mp_obj_equal)
+        bad_implicit_conversion(rhs_in);
+    }
+
+    switch (op) {
+        case MP_BINARY_OP_ADD:
+        case MP_BINARY_OP_INPLACE_ADD: {
+            // avoid allocation when one side is empty
+            if (lhs_len == 0 && mp_obj_get_type(rhs_in) == lhs_type) {
+                return rhs_in;
+            }
+            if (rhs_len == 0) {
+                return lhs_in;
+            }
+
+            vstr_t vstr;
+            vstr_init_len(&vstr, lhs_len + rhs_len);
+            memcpy(vstr.buf, lhs_data, lhs_len);
+            memcpy(vstr.buf + lhs_len, rhs_data, rhs_len);
+            return mp_obj_new_str_from_vstr(lhs_type, &vstr);
+        }
+
+        case MP_BINARY_OP_CONTAINS:
+            return mp_obj_new_bool(find_subbytes(lhs_data, lhs_len, rhs_data, rhs_len, 1) != NULL);
+
+        // case MP_BINARY_OP_NOT_EQUAL: // This is never passed here
+        case MP_BINARY_OP_EQUAL: // This will be passed only for bytes, str is dealt with in mp_obj_equal()
+        case MP_BINARY_OP_LESS:
+        case MP_BINARY_OP_LESS_EQUAL:
+        case MP_BINARY_OP_MORE:
+        case MP_BINARY_OP_MORE_EQUAL:
+            return mp_obj_new_bool(mp_seq_cmp_bytes(op, lhs_data, lhs_len, rhs_data, rhs_len));
+
+        default:
+            return MP_OBJ_NULL; // op not supported
+    }
+}
+
+#if !MICROPY_PY_BUILTINS_STR_UNICODE
+// objstrunicode defines own version
+// Non-unicode build: byte offset and character index are the same thing,
+// so this only bounds-checks the offset.
+size_t str_offset_to_index(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+    size_t offset) {
+    if (offset > self_len) {
+        mp_raise_ValueError(MP_ERROR_TEXT("offset out of bounds"));
+    }
+
+    return offset;
+}
+
+// Convert a Python index object into a pointer into the byte data;
+// mp_get_index handles negative indices and (for slices) clamping.
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+    mp_obj_t index, bool is_slice) {
+    size_t index_val = mp_get_index(type, self_len, index, is_slice);
+    return self_data + index_val;
+}
+#endif
+
+// This is used for both bytes and 8-bit strings. This is not used for unicode strings.
+// Subscript load: b[i] -> small int (or 1-char str for non-unicode str),
+// b[a:b] -> new object of the same type (step must be 1). Stores unsupported.
+STATIC mp_obj_t bytes_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+    const mp_obj_type_t *type = mp_obj_get_type(self_in);
+    GET_STR_DATA_LEN(self_in, self_data, self_len);
+    if (value == MP_OBJ_SENTINEL) {
+        // load
+        #if MICROPY_PY_BUILTINS_SLICE
+        if (mp_obj_is_type(index, &mp_type_slice)) {
+            mp_bound_slice_t slice;
+            if (!mp_seq_get_fast_slice_indexes(self_len, index, &slice)) {
+                mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
+            }
+            return mp_obj_new_str_of_type(type, self_data + slice.start, slice.stop - slice.start);
+        }
+        #endif
+        size_t index_val = mp_get_index(type, self_len, index, false);
+        // If we have unicode enabled the type will always be bytes, so take the short cut.
+        if (MICROPY_PY_BUILTINS_STR_UNICODE || type == &mp_type_bytes) {
+            return MP_OBJ_NEW_SMALL_INT(self_data[index_val]);
+        } else {
+            return mp_obj_new_str_via_qstr((char *)&self_data[index_val], 1);
+        }
+    } else {
+        return MP_OBJ_NULL; // op not supported
+    }
+}
+
+// sep.join(iterable): every element must have the same type as sep.
+// Two passes: first compute the total length, then copy into one buffer.
+STATIC mp_obj_t str_join(mp_obj_t self_in, mp_obj_t arg) {
+    mp_check_self(mp_obj_is_str_or_bytes(self_in));
+    const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+
+    // get separation string
+    GET_STR_DATA_LEN(self_in, sep_str, sep_len);
+
+    // process args
+    size_t seq_len;
+    mp_obj_t *seq_items;
+
+    if (!mp_obj_is_type(arg, &mp_type_list) && !mp_obj_is_type(arg, &mp_type_tuple)) {
+        // arg is not a list nor a tuple, try to convert it to a list
+        // TODO: Try to optimize?
+        arg = mp_type_list.make_new(&mp_type_list, 1, 0, &arg);
+    }
+    mp_obj_get_array(arg, &seq_len, &seq_items);
+
+    // count required length
+    size_t required_len = 0;
+    for (size_t i = 0; i < seq_len; i++) {
+        if (mp_obj_get_type(seq_items[i]) != self_type) {
+            mp_raise_TypeError(
+                MP_ERROR_TEXT("join expects a list of str/bytes objects consistent with self object"));
+        }
+        if (i > 0) {
+            required_len += sep_len;
+        }
+        GET_STR_LEN(seq_items[i], l);
+        required_len += l;
+    }
+
+    // make joined string
+    vstr_t vstr;
+    vstr_init_len(&vstr, required_len);
+    byte *data = (byte *)vstr.buf;
+    for (size_t i = 0; i < seq_len; i++) {
+        if (i > 0) {
+            memcpy(data, sep_str, sep_len);
+            data += sep_len;
+        }
+        GET_STR_DATA_LEN(seq_items[i], s, l);
+        memcpy(data, s, l);
+        data += l;
+    }
+
+    // return joined string
+    return mp_obj_new_str_from_vstr(self_type, &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(str_join_obj, str_join);
+
+// str.split([sep[, maxsplit]]): sep == None splits on runs of whitespace
+// (leading/trailing whitespace produces no empty fields); with an explicit
+// sep, adjacent separators produce empty fields. splits == -1 means no limit.
+mp_obj_t mp_obj_str_split(size_t n_args, const mp_obj_t *args) {
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+    mp_int_t splits = -1;
+    mp_obj_t sep = mp_const_none;
+    if (n_args > 1) {
+        sep = args[1];
+        if (n_args > 2) {
+            splits = mp_obj_get_int(args[2]);
+        }
+    }
+
+    mp_obj_t res = mp_obj_new_list(0, NULL);
+    GET_STR_DATA_LEN(args[0], s, len);
+    const byte *top = s + len;
+
+    if (sep == mp_const_none) {
+        // sep not given, so separate on whitespace
+
+        // Initial whitespace is not counted as split, so we pre-do it
+        while (s < top && unichar_isspace(*s)) {
+            s++;
+        }
+        while (s < top && splits != 0) {
+            const byte *start = s;
+            while (s < top && !unichar_isspace(*s)) {
+                s++;
+            }
+            mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, start, s - start));
+            if (s >= top) {
+                break;
+            }
+            while (s < top && unichar_isspace(*s)) {
+                s++;
+            }
+            if (splits > 0) {
+                splits--;
+            }
+        }
+
+        // remainder after the split limit was reached
+        if (s < top) {
+            mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, s, top - s));
+        }
+
+    } else {
+        // sep given
+        if (mp_obj_get_type(sep) != self_type) {
+            bad_implicit_conversion(sep);
+        }
+
+        size_t sep_len;
+        const char *sep_str = mp_obj_str_get_data(sep, &sep_len);
+
+        if (sep_len == 0) {
+            mp_raise_ValueError(MP_ERROR_TEXT("empty separator"));
+        }
+
+        for (;;) {
+            // scan for the next separator (or the end / split limit)
+            const byte *start = s;
+            for (;;) {
+                if (splits == 0 || s + sep_len > top) {
+                    s = top;
+                    break;
+                } else if (memcmp(s, sep_str, sep_len) == 0) {
+                    break;
+                }
+                s++;
+            }
+            mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, start, s - start));
+            if (s >= top) {
+                break;
+            }
+            s += sep_len;
+            if (splits > 0) {
+                splits--;
+            }
+        }
+    }
+
+    return res;
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_split_obj, 1, 3, mp_obj_str_split);
+
+#if MICROPY_PY_BUILTINS_STR_SPLITLINES
+// str.splitlines(keepends=False): split on \n, \r, and \r\n; line endings
+// are included in the results only when keepends is true.
+STATIC mp_obj_t str_splitlines(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+    enum { ARG_keepends };
+    static const mp_arg_t allowed_args[] = {
+        { MP_QSTR_keepends, MP_ARG_BOOL, {.u_bool = false} },
+    };
+
+    // parse args
+    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
+    mp_arg_parse_all(n_args - 1, pos_args + 1, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);
+
+    const mp_obj_type_t *self_type = mp_obj_get_type(pos_args[0]);
+    mp_obj_t res = mp_obj_new_list(0, NULL);
+
+    GET_STR_DATA_LEN(pos_args[0], s, len);
+    const byte *top = s + len;
+
+    while (s < top) {
+        const byte *start = s;
+        size_t match = 0; // length of the line ending found (0, 1 or 2)
+        while (s < top) {
+            if (*s == '\n') {
+                match = 1;
+                break;
+            } else if (*s == '\r') {
+                // NOTE(review): s[1] reads one byte past the data when '\r'
+                // is the last byte — presumably safe because string data is
+                // NUL-terminated here; confirm that invariant holds.
+                if (s[1] == '\n') {
+                    match = 2;
+                } else {
+                    match = 1;
+                }
+                break;
+            }
+            s++;
+        }
+        size_t sub_len = s - start;
+        if (args[ARG_keepends].u_bool) {
+            sub_len += match;
+        }
+        mp_obj_list_append(res, mp_obj_new_str_of_type(self_type, start, sub_len));
+        s += match;
+    }
+
+    return res;
+}
+MP_DEFINE_CONST_FUN_OBJ_KW(str_splitlines_obj, 1, str_splitlines);
+#endif
+
+// str.rsplit(sep, maxsplit): like split but the limit is applied from the
+// right. Without a limit (or a negative one) it delegates to split();
+// rsplit(None, n) is not implemented.
+STATIC mp_obj_t str_rsplit(size_t n_args, const mp_obj_t *args) {
+    if (n_args < 3) {
+        // If we don't have split limit, it doesn't matter from which side
+        // we split.
+        return mp_obj_str_split(n_args, args);
+    }
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+    mp_obj_t sep = args[1];
+    GET_STR_DATA_LEN(args[0], s, len);
+
+    mp_int_t splits = mp_obj_get_int(args[2]);
+    if (splits < 0) {
+        // Negative limit means no limit, so delegate to split().
+        return mp_obj_str_split(n_args, args);
+    }
+
+    mp_int_t org_splits = splits;
+    // Preallocate list to the max expected # of elements, as we
+    // will fill it from the end.
+    mp_obj_list_t *res = MP_OBJ_TO_PTR(mp_obj_new_list(splits + 1, NULL));
+    mp_int_t idx = splits;
+
+    if (sep == mp_const_none) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("rsplit(None,n)"));
+    } else {
+        size_t sep_len;
+        const char *sep_str = mp_obj_str_get_data(sep, &sep_len);
+
+        if (sep_len == 0) {
+            mp_raise_ValueError(MP_ERROR_TEXT("empty separator"));
+        }
+
+        const byte *beg = s;
+        const byte *last = s + len;
+        for (;;) {
+            // scan backwards from 'last' for the rightmost separator
+            s = last - sep_len;
+            for (;;) {
+                if (splits == 0 || s < beg) {
+                    break;
+                } else if (memcmp(s, sep_str, sep_len) == 0) {
+                    break;
+                }
+                s--;
+            }
+            if (s < beg || splits == 0) {
+                // no more separators (or limit hit): the head is the final piece
+                res->items[idx] = mp_obj_new_str_of_type(self_type, beg, last - beg);
+                break;
+            }
+            res->items[idx--] = mp_obj_new_str_of_type(self_type, s + sep_len, last - s - sep_len);
+            last = s;
+            splits--;
+        }
+        if (idx != 0) {
+            // We split less parts than split limit, now go cleanup surplus
+            size_t used = org_splits + 1 - idx;
+            memmove(res->items, &res->items[idx], used * sizeof(mp_obj_t));
+            mp_seq_clear(res->items, used, res->alloc, sizeof(*res->items));
+            res->len = used;
+        }
+    }
+
+    return MP_OBJ_FROM_PTR(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rsplit_obj, 1, 3, str_rsplit);
+
+// Common implementation of find/rfind/index/rindex.
+// args: (haystack, needle[, start[, end]]); direction selects scan direction;
+// is_index selects ValueError vs -1 on a miss.
+STATIC mp_obj_t str_finder(size_t n_args, const mp_obj_t *args, int direction, bool is_index) {
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+    mp_check_self(mp_obj_is_str_or_bytes(args[0]));
+
+    // check argument type
+    if (mp_obj_get_type(args[1]) != self_type) {
+        bad_implicit_conversion(args[1]);
+    }
+
+    GET_STR_DATA_LEN(args[0], haystack, haystack_len);
+    GET_STR_DATA_LEN(args[1], needle, needle_len);
+
+    // resolve optional start/end into pointers into the haystack
+    const byte *start = haystack;
+    const byte *end = haystack + haystack_len;
+    if (n_args >= 3 && args[2] != mp_const_none) {
+        start = str_index_to_ptr(self_type, haystack, haystack_len, args[2], true);
+    }
+    if (n_args >= 4 && args[3] != mp_const_none) {
+        end = str_index_to_ptr(self_type, haystack, haystack_len, args[3], true);
+    }
+
+    if (end < start) {
+        goto out_error;
+    }
+
+    const byte *p = find_subbytes(start, end - start, needle, needle_len, direction);
+    if (p == NULL) {
+    out_error:
+        // not found
+        if (is_index) {
+            mp_raise_ValueError(MP_ERROR_TEXT("substring not found"));
+        } else {
+            return MP_OBJ_NEW_SMALL_INT(-1);
+        }
+    } else {
+        // found; for unicode str, convert the byte offset to a char index
+        #if MICROPY_PY_BUILTINS_STR_UNICODE
+        if (self_type == &mp_type_str) {
+            return MP_OBJ_NEW_SMALL_INT(utf8_ptr_to_index(haystack, p));
+        }
+        #endif
+        return MP_OBJ_NEW_SMALL_INT(p - haystack);
+    }
+}
+
+// Thin wrappers selecting direction (+1 forward, -1 backward) and the
+// miss behavior (find/rfind return -1; index/rindex raise ValueError).
+STATIC mp_obj_t str_find(size_t n_args, const mp_obj_t *args) {
+    return str_finder(n_args, args, 1, false);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_find_obj, 2, 4, str_find);
+
+STATIC mp_obj_t str_rfind(size_t n_args, const mp_obj_t *args) {
+    return str_finder(n_args, args, -1, false);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rfind_obj, 2, 4, str_rfind);
+
+STATIC mp_obj_t str_index(size_t n_args, const mp_obj_t *args) {
+    return str_finder(n_args, args, 1, true);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_index_obj, 2, 4, str_index);
+
+STATIC mp_obj_t str_rindex(size_t n_args, const mp_obj_t *args) {
+    return str_finder(n_args, args, -1, true);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rindex_obj, 2, 4, str_rindex);
+
+// TODO: (Much) more variety in args
+// str.startswith(prefix[, start]): compare prefix bytes at the (optional)
+// start position; no 'end' argument and no tuple-of-prefixes support.
+STATIC mp_obj_t str_startswith(size_t n_args, const mp_obj_t *args) {
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+    GET_STR_DATA_LEN(args[0], str, str_len);
+    size_t prefix_len;
+    const char *prefix = mp_obj_str_get_data(args[1], &prefix_len);
+    const byte *start = str;
+    if (n_args > 2) {
+        start = str_index_to_ptr(self_type, str, str_len, args[2], true);
+    }
+    // prefix must fit within the remaining data
+    if (prefix_len + (start - str) > str_len) {
+        return mp_const_false;
+    }
+    return mp_obj_new_bool(memcmp(start, prefix, prefix_len) == 0);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_startswith_obj, 2, 3, str_startswith);
+
+// str.endswith(suffix): compare the tail bytes; the optional start/end
+// indices accepted by CPython are not implemented.
+STATIC mp_obj_t str_endswith(size_t n_args, const mp_obj_t *args) {
+    GET_STR_DATA_LEN(args[0], str, str_len);
+    size_t suffix_len;
+    const char *suffix = mp_obj_str_get_data(args[1], &suffix_len);
+    if (n_args > 2) {
+        mp_raise_NotImplementedError(MP_ERROR_TEXT("start/end indices"));
+    }
+
+    if (suffix_len > str_len) {
+        return mp_const_false;
+    }
+    return mp_obj_new_bool(memcmp(str + (str_len - suffix_len), suffix, suffix_len) == 0);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_endswith_obj, 2, 3, str_endswith);
+
+enum { LSTRIP, RSTRIP, STRIP };
+
+// Common implementation of strip/lstrip/rstrip. Scans from the left (or the
+// right for RSTRIP) to find the first and last characters not in the strip
+// set (default: ASCII whitespace), then returns that substring. Returns the
+// original object unchanged if nothing would be stripped.
+STATIC mp_obj_t str_uni_strip(int type, size_t n_args, const mp_obj_t *args) {
+    mp_check_self(mp_obj_is_str_or_bytes(args[0]));
+    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
+
+    const byte *chars_to_del;
+    uint chars_to_del_len;
+    static const byte whitespace[] = " \t\n\r\v\f";
+
+    if (n_args == 1) {
+        chars_to_del = whitespace;
+        chars_to_del_len = sizeof(whitespace) - 1;
+    } else {
+        if (mp_obj_get_type(args[1]) != self_type) {
+            bad_implicit_conversion(args[1]);
+        }
+        GET_STR_DATA_LEN(args[1], s, l);
+        chars_to_del = s;
+        chars_to_del_len = l;
+    }
+
+    GET_STR_DATA_LEN(args[0], orig_str, orig_str_len);
+
+    size_t first_good_char_pos = 0;
+    bool first_good_char_pos_set = false;
+    size_t last_good_char_pos = 0;
+    size_t i = 0;
+    int delta = 1;
+    if (type == RSTRIP) {
+        // scan right-to-left instead
+        i = orig_str_len - 1;
+        delta = -1;
+    }
+    for (size_t len = orig_str_len; len > 0; len--) {
+        // find_subbytes with a 1-byte needle tests set membership
+        if (find_subbytes(chars_to_del, chars_to_del_len, &orig_str[i], 1, 1) == NULL) {
+            if (!first_good_char_pos_set) {
+                first_good_char_pos_set = true;
+                first_good_char_pos = i;
+                if (type == LSTRIP) {
+                    last_good_char_pos = orig_str_len - 1;
+                    break;
+                } else if (type == RSTRIP) {
+                    first_good_char_pos = 0;
+                    last_good_char_pos = i;
+                    break;
+                }
+            }
+            last_good_char_pos = i;
+        }
+        i += delta;
+    }
+
+    if (!first_good_char_pos_set) {
+        // string is all whitespace, return ''
+        if (self_type == &mp_type_str) {
+            return MP_OBJ_NEW_QSTR(MP_QSTR_);
+        } else {
+            return mp_const_empty_bytes;
+        }
+    }
+
+    assert(last_good_char_pos >= first_good_char_pos);
+    // +1 to accommodate the last character
+    size_t stripped_len = last_good_char_pos - first_good_char_pos + 1;
+    if (stripped_len == orig_str_len) {
+        // If nothing was stripped, don't bother to dup original string
+        // TODO: watch out for this case when we'll get to bytearray.strip()
+        assert(first_good_char_pos == 0);
+        return args[0];
+    }
+    return mp_obj_new_str_of_type(self_type, orig_str + first_good_char_pos, stripped_len);
+}
+
// str.strip([chars]): trim both ends.
STATIC mp_obj_t str_strip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(STRIP, n_args, args);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_strip_obj, 1, 2, str_strip);
+
// str.lstrip([chars]): trim the left end only.
STATIC mp_obj_t str_lstrip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(LSTRIP, n_args, args);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_lstrip_obj, 1, 2, str_lstrip);
+
// str.rstrip([chars]): trim the right end only.
STATIC mp_obj_t str_rstrip(size_t n_args, const mp_obj_t *args) {
    return str_uni_strip(RSTRIP, n_args, args);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_rstrip_obj, 1, 2, str_rstrip);
+
#if MICROPY_PY_BUILTINS_STR_CENTER
// str.center(width): pad with spaces on both sides, left margin getting the
// smaller share when the padding is odd.  Returns self when already wide enough.
STATIC mp_obj_t str_center(mp_obj_t str_in, mp_obj_t width_in) {
    GET_STR_DATA_LEN(str_in, str, str_len);
    mp_uint_t width = mp_obj_get_int(width_in);
    if (str_len >= width) {
        return str_in;
    }

    vstr_t vstr;
    vstr_init_len(&vstr, width);
    size_t margin = (width - str_len) / 2;
    // left padding, the string itself, then right padding
    memset(vstr.buf, ' ', margin);
    memcpy(vstr.buf + margin, str, str_len);
    memset(vstr.buf + margin + str_len, ' ', width - margin - str_len);
    return mp_obj_new_str_from_vstr(mp_obj_get_type(str_in), &vstr);
}
MP_DEFINE_CONST_FUN_OBJ_2(str_center_obj, str_center);
#endif
+
// Parse an unsigned decimal number from [str, top).  *num is modified only
// when at least one digit was consumed; returns a pointer to the first
// unparsed character.  No overflow detection (matches historical behaviour).
STATIC const char *str_to_int(const char *str, const char *top, int *num) {
    const char *p = str;
    if (p < top && '0' <= *p && *p <= '9') {
        int value = 0;
        while (p < top && '0' <= *p && *p <= '9') {
            value = value * 10 + (*p - '0');
            p++;
        }
        *num = value;
    }
    return p;
}
+
// True if ch is one of the format-spec alignment characters: < > = ^
STATIC bool isalignment(char ch) {
    switch (ch) {
        case '<':
        case '>':
        case '=':
        case '^':
            return true;
        default:
            return false;
    }
}
+
// True if ch is a recognized format-spec presentation type character.
STATIC bool istype(char ch) {
    if (ch == '\0') {
        // '\0' must be rejected explicitly: strchr would find the terminator.
        return false;
    }
    return strchr("bcdeEfFgGnosxX%", ch) != NULL;
}
+
// True if arg can be formatted with the integer presentation types
// (bool is a subtype of int in Python, so it counts).
STATIC bool arg_looks_integer(mp_obj_t arg) {
    return mp_obj_is_bool(arg) || mp_obj_is_int(arg);
}
+
// True if arg is any numeric type (int/bool, plus float when enabled);
// used to pick default alignment and to route to the float formatters.
STATIC bool arg_looks_numeric(mp_obj_t arg) {
    return arg_looks_integer(arg)
           #if MICROPY_PY_BUILTINS_FLOAT
           || mp_obj_is_float(arg)
           #endif
    ;
}
+
#if MICROPY_PY_BUILTINS_STR_OP_MODULO
// Coerce a float argument to int (truncating) for %d/%i/%u formatting;
// any other object is passed through unchanged.
STATIC mp_obj_t arg_as_int(mp_obj_t arg) {
    #if MICROPY_PY_BUILTINS_FLOAT
    if (mp_obj_is_float(arg)) {
        return mp_obj_new_int_from_float(mp_obj_float_get(arg));
    }
    #endif
    return arg;
}
#endif
+
#if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
// Single shared error for all format-string problems in terse builds,
// saving ROM by collapsing the many detailed messages into one.
STATIC NORETURN void terse_str_format_value_error(void) {
    mp_raise_ValueError(MP_ERROR_TEXT("bad format string"));
}
#else
// define to nothing to improve coverage
#define terse_str_format_value_error()
#endif
+
// Core of str.format(): renders the region [str, top) using positional and
// keyword arguments, returning the built result as a vstr.
// - args[0] is self (the format string), so positional field N maps to
//   args[N + 1]; n_args includes self.
// - *arg_i carries automatic field numbering state across recursive calls;
//   it is set to -1 once manual numbering ("{0}") has been used, so mixing
//   the two styles can be detected.
// Recurses (stack-check guarded) to expand nested replacement fields inside
// format specs, e.g. "{:{width}}".
STATIC vstr_t mp_obj_str_format_helper(const char *str, const char *top, int *arg_i, size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
    vstr_t vstr;
    mp_print_t print;
    vstr_init_print(&vstr, 16, &print);

    for (; str < top; str++) {
        if (*str == '}') {
            str++;
            if (str < top && *str == '}') {
                // "}}" is an escaped literal '}'
                vstr_add_byte(&vstr, '}');
                continue;
            }
            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
            terse_str_format_value_error();
            #else
            mp_raise_ValueError(MP_ERROR_TEXT("single '}' encountered in format string"));
            #endif
        }
        if (*str != '{') {
            // ordinary character: copy through verbatim
            vstr_add_byte(&vstr, *str);
            continue;
        }

        str++;
        if (str < top && *str == '{') {
            // "{{" is an escaped literal '{'
            vstr_add_byte(&vstr, '{');
            continue;
        }

        // replacement_field ::= "{" [field_name] ["!" conversion] [":" format_spec] "}"

        const char *field_name = NULL;
        const char *field_name_top = NULL;
        char conversion = '\0';
        const char *format_spec = NULL;

        // collect the (optional) field name, up to '}', '!' or ':'
        if (str < top && *str != '}' && *str != '!' && *str != ':') {
            field_name = (const char *)str;
            while (str < top && *str != '}' && *str != '!' && *str != ':') {
                ++str;
            }
            field_name_top = (const char *)str;
        }

        // conversion ::= "r" | "s"

        if (str < top && *str == '!') {
            str++;
            if (str < top && (*str == 'r' || *str == 's')) {
                conversion = *str++;
            } else {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
                mp_raise_ValueError(MP_ERROR_TEXT("bad conversion specifier"));
                #else
                if (str >= top) {
                    mp_raise_ValueError(
                        MP_ERROR_TEXT("end of format while looking for conversion specifier"));
                } else {
                    mp_raise_msg_varg(&mp_type_ValueError,
                        MP_ERROR_TEXT("unknown conversion specifier %c"), *str);
                }
                #endif
            }
        }

        if (str < top && *str == ':') {
            str++;
            // {:} is the same as {}, which is the same as {!s}
            // This makes a difference when passing in a True or False
            // '{}'.format(True) returns 'True'
            // '{:d}'.format(True) returns '1'
            // So we treat {:} as {} and this later gets treated to be {!s}
            if (*str != '}') {
                format_spec = str;
                // skip over the spec, honouring nested '{'/'}' pairs
                for (int nest = 1; str < top;) {
                    if (*str == '{') {
                        ++nest;
                    } else if (*str == '}') {
                        if (--nest == 0) {
                            break;
                        }
                    }
                    ++str;
                }
            }
        }
        if (str >= top) {
            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
            terse_str_format_value_error();
            #else
            mp_raise_ValueError(MP_ERROR_TEXT("unmatched '{' in format"));
            #endif
        }
        if (*str != '}') {
            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
            terse_str_format_value_error();
            #else
            mp_raise_ValueError(MP_ERROR_TEXT("expected ':' after format specifier"));
            #endif
        }

        // resolve the argument for this field
        mp_obj_t arg = mp_const_none;

        if (field_name) {
            int index = 0;
            if (MP_LIKELY(unichar_isdigit(*field_name))) {
                // explicit positional field, e.g. "{0}"
                if (*arg_i > 0) {
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    terse_str_format_value_error();
                    #else
                    mp_raise_ValueError(
                        MP_ERROR_TEXT("can't switch from automatic field numbering to manual field specification"));
                    #endif
                }
                field_name = str_to_int(field_name, field_name_top, &index);
                if ((uint)index >= n_args - 1) {
                    mp_raise_IndexError_varg(MP_ERROR_TEXT("%q index out of range"), MP_QSTR_tuple);
                }
                arg = args[index + 1];
                // disable automatic numbering from now on
                *arg_i = -1;
            } else {
                // keyword field: name runs up to '.' or '[' (attribute/index access)
                const char *lookup;
                for (lookup = field_name; lookup < field_name_top && *lookup != '.' && *lookup != '['; lookup++) {;
                }
                mp_obj_t field_q = mp_obj_new_str_via_qstr(field_name, lookup - field_name); // should it be via qstr?
                field_name = lookup;
                mp_map_elem_t *key_elem = mp_map_lookup(kwargs, field_q, MP_MAP_LOOKUP);
                if (key_elem == NULL) {
                    mp_raise_type_arg(&mp_type_KeyError, field_q);
                }
                arg = key_elem->value;
            }
            if (field_name < field_name_top) {
                // a '.' or '[' remained after the name
                mp_raise_NotImplementedError(MP_ERROR_TEXT("attributes not supported yet"));
            }
        } else {
            // automatic field numbering: "{}"
            if (*arg_i < 0) {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError(
                    MP_ERROR_TEXT("can't switch from manual field specification to automatic field numbering"));
                #endif
            }
            if ((uint)*arg_i >= n_args - 1) {
                mp_raise_IndexError_varg(MP_ERROR_TEXT("%q index out of range"), MP_QSTR_tuple);
            }
            arg = args[(*arg_i) + 1];
            (*arg_i)++;
        }
        if (!format_spec && !conversion) {
            conversion = 's';
        }
        if (conversion) {
            // apply !s / !r by rendering the argument to a new str object
            mp_print_kind_t print_kind;
            if (conversion == 's') {
                print_kind = PRINT_STR;
            } else {
                assert(conversion == 'r');
                print_kind = PRINT_REPR;
            }
            vstr_t arg_vstr;
            mp_print_t arg_print;
            vstr_init_print(&arg_vstr, 16, &arg_print);
            mp_obj_print_helper(&arg_print, arg, print_kind);
            arg = mp_obj_new_str_from_vstr(&mp_type_str, &arg_vstr);
        }

        char fill = '\0';
        char align = '\0';
        int width = -1;
        int precision = -1;
        char type = '\0';
        int flags = 0;

        if (format_spec) {
            // The format specifier (from http://docs.python.org/2/library/string.html#formatspec)
            //
            // [[fill]align][sign][#][0][width][,][.precision][type]
            // fill ::= <any character>
            // align ::= "<" | ">" | "=" | "^"
            // sign ::= "+" | "-" | " "
            // width ::= integer
            // precision ::= integer
            // type ::= "b" | "c" | "d" | "e" | "E" | "f" | "F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"

            // recursively call the formatter to format any nested specifiers
            MP_STACK_CHECK();
            vstr_t format_spec_vstr = mp_obj_str_format_helper(format_spec, str, arg_i, n_args, args, kwargs);
            const char *s = vstr_null_terminated_str(&format_spec_vstr);
            const char *stop = s + format_spec_vstr.len;
            if (isalignment(*s)) {
                align = *s++;
            } else if (*s && isalignment(s[1])) {
                // two-char form: any fill character followed by an alignment
                fill = *s++;
                align = *s++;
            }
            if (*s == '+' || *s == '-' || *s == ' ') {
                if (*s == '+') {
                    flags |= PF_FLAG_SHOW_SIGN;
                } else if (*s == ' ') {
                    flags |= PF_FLAG_SPACE_SIGN;
                }
                s++;
            }
            if (*s == '#') {
                flags |= PF_FLAG_SHOW_PREFIX;
                s++;
            }
            if (*s == '0') {
                // leading '0' implies '=' alignment with '0' fill (CPython rule)
                if (!align) {
                    align = '=';
                }
                if (!fill) {
                    fill = '0';
                }
            }
            s = str_to_int(s, stop, &width);
            if (*s == ',') {
                flags |= PF_FLAG_SHOW_COMMA;
                s++;
            }
            if (*s == '.') {
                s++;
                s = str_to_int(s, stop, &precision);
            }
            if (istype(*s)) {
                type = *s++;
            }
            if (*s) {
                // trailing junk after the presentation type
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError(MP_ERROR_TEXT("invalid format specifier"));
                #endif
            }
            vstr_clear(&format_spec_vstr);
        }
        if (!align) {
            // default alignment: numbers right, everything else left
            if (arg_looks_numeric(arg)) {
                align = '>';
            } else {
                align = '<';
            }
        }
        if (!fill) {
            fill = ' ';
        }

        if (flags & (PF_FLAG_SHOW_SIGN | PF_FLAG_SPACE_SIGN)) {
            if (type == 's') {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError(MP_ERROR_TEXT("sign not allowed in string format specifier"));
                #endif
            }
            if (type == 'c') {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError(
                    MP_ERROR_TEXT("sign not allowed with integer format specifier 'c'"));
                #endif
            }
        }

        // translate alignment into print flags ('>' is the printers' default)
        switch (align) {
            case '<':
                flags |= PF_FLAG_LEFT_ADJUST;
                break;
            case '=':
                flags |= PF_FLAG_PAD_AFTER_SIGN;
                break;
            case '^':
                flags |= PF_FLAG_CENTER_ADJUST;
                break;
        }

        if (arg_looks_integer(arg)) {
            switch (type) {
                case 'b':
                    mp_print_mp_int(&print, arg, 2, 'a', flags, fill, width, 0);
                    continue;

                case 'c': {
                    char ch = mp_obj_get_int(arg);
                    mp_print_strn(&print, &ch, 1, flags, fill, width);
                    continue;
                }

                case '\0': // No explicit format type implies 'd'
                case 'n': // I don't think we support locales in uPy so use 'd'
                case 'd':
                    mp_print_mp_int(&print, arg, 10, 'a', flags, fill, width, 0);
                    continue;

                case 'o':
                    if (flags & PF_FLAG_SHOW_PREFIX) {
                        flags |= PF_FLAG_SHOW_OCTAL_LETTER;
                    }

                    mp_print_mp_int(&print, arg, 8, 'a', flags, fill, width, 0);
                    continue;

                case 'X':
                case 'x':
                    // type - ('X' - 'A') yields 'A' or 'a' as the hex-digit base
                    mp_print_mp_int(&print, arg, 16, type - ('X' - 'A'), flags, fill, width, 0);
                    continue;

                case 'e':
                case 'E':
                case 'f':
                case 'F':
                case 'g':
                case 'G':
                case '%':
                    // The floating point formatters all work with anything that
                    // looks like an integer
                    break;

                default:
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    terse_str_format_value_error();
                    #else
                    mp_raise_ValueError_varg(
                        MP_ERROR_TEXT("unknown format code '%c' for object of type '%q'"),
                        type, mp_obj_get_type_qstr(arg));
                    #endif
            }
        }

        // NOTE: no else here. We need the e, f, g etc formats for integer
        // arguments (from above if) to take this if.
        if (arg_looks_numeric(arg)) {
            if (!type) {

                // Even though the docs say that an unspecified type is the same
                // as 'g', there is one subtle difference, when the exponent
                // is one less than the precision.
                //
                // '{:10.1}'.format(0.0) ==> '0e+00'
                // '{:10.1g}'.format(0.0) ==> '0'
                //
                // TODO: Figure out how to deal with this.
                //
                // A proper solution would involve adding a special flag
                // or something to format_float, and create a format_double
                // to deal with doubles. In order to fix this when using
                // sprintf, we'd need to use the e format and tweak the
                // returned result to strip trailing zeros like the g format
                // does.
                //
                // {:10.3} and {:10.2e} with 1.23e2 both produce 1.23e+02
                // but with 1.e2 you get 1e+02 and 1.00e+02
                //
                // Stripping the trailing 0's (like g) does would make the
                // e format give us the right format.
                //
                // CPython sources say:
                //   Omitted type specifier. Behaves in the same way as repr(x)
                //   and str(x) if no precision is given, else like 'g', but with
                //   at least one digit after the decimal point. */

                type = 'g';
            }
            if (type == 'n') {
                type = 'g';
            }

            switch (type) {
                #if MICROPY_PY_BUILTINS_FLOAT
                case 'e':
                case 'E':
                case 'f':
                case 'F':
                case 'g':
                case 'G':
                    mp_print_float(&print, mp_obj_get_float(arg), type, flags, fill, width, precision);
                    break;

                case '%':
                    // percent format: multiply by 100 and print as 'f' with '%'
                    flags |= PF_FLAG_ADD_PERCENT;
                    #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
                    #define F100 100.0F
                    #else
                    #define F100 100.0
                    #endif
                    mp_print_float(&print, mp_obj_get_float(arg) * F100, 'f', flags, fill, width, precision);
                    #undef F100
                    break;
                #endif

                default:
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    terse_str_format_value_error();
                    #else
                    mp_raise_ValueError_varg(
                        MP_ERROR_TEXT("unknown format code '%c' for object of type '%q'"),
                        type, mp_obj_get_type_qstr(arg));
                    #endif
            }
        } else {
            // arg doesn't look like a number

            if (align == '=') {
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError(
                    MP_ERROR_TEXT("'=' alignment not allowed in string format specifier"));
                #endif
            }

            switch (type) {
                case '\0': // no explicit format type implies 's'
                case 's': {
                    size_t slen;
                    const char *s = mp_obj_str_get_data(arg, &slen);
                    // precision truncates the string
                    if (precision < 0) {
                        precision = slen;
                    }
                    if (slen > (size_t)precision) {
                        slen = precision;
                    }
                    mp_print_strn(&print, s, slen, flags, fill, width);
                    break;
                }

                default:
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    terse_str_format_value_error();
                    #else
                    mp_raise_ValueError_varg(
                        MP_ERROR_TEXT("unknown format code '%c' for object of type '%q'"),
                        type, mp_obj_get_type_qstr(arg));
                    #endif
            }
        }
    }

    return vstr;
}
+
// Entry point for str.format(): args[0] is self, remaining args are the
// positional format arguments, kwargs the keyword ones.  Result has the same
// type as self (str or bytes).
mp_obj_t mp_obj_str_format(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs) {
    mp_check_self(mp_obj_is_str_or_bytes(args[0]));

    GET_STR_DATA_LEN(args[0], str, len);
    int arg_i = 0; // automatic field-numbering counter, shared with recursion
    vstr_t vstr = mp_obj_str_format_helper((const char *)str, (const char *)str + len, &arg_i, n_args, args, kwargs);
    return mp_obj_new_str_from_vstr(mp_obj_get_type(args[0]), &vstr);
}
MP_DEFINE_CONST_FUN_OBJ_KW(str_format_obj, 1, mp_obj_str_format);
+
#if MICROPY_PY_BUILTINS_STR_OP_MODULO
// printf-style "%"-operator formatting for str and bytes.
// pattern is the format string; values come either from the positional args
// array or, for "%(key)s" fields, from dict.  Raises TypeError when argument
// count does not match the number of fields.
STATIC mp_obj_t str_modulo_format(mp_obj_t pattern, size_t n_args, const mp_obj_t *args, mp_obj_t dict) {
    mp_check_self(mp_obj_is_str_or_bytes(pattern));

    GET_STR_DATA_LEN(pattern, str, len);
    #if MICROPY_ERROR_REPORTING > MICROPY_ERROR_REPORTING_TERSE
    const byte *start_str = str; // kept only to report the offset in errors
    #endif
    bool is_bytes = mp_obj_is_type(pattern, &mp_type_bytes);
    size_t arg_i = 0;
    vstr_t vstr;
    mp_print_t print;
    vstr_init_print(&vstr, 16, &print);

    for (const byte *top = str + len; str < top; str++) {
        mp_obj_t arg = MP_OBJ_NULL;
        if (*str != '%') {
            vstr_add_byte(&vstr, *str);
            continue;
        }
        if (++str >= top) {
            goto incomplete_format;
        }
        if (*str == '%') {
            // "%%" is a literal percent sign
            vstr_add_byte(&vstr, '%');
            continue;
        }

        // Dictionary value lookup
        if (*str == '(') {
            if (dict == MP_OBJ_NULL) {
                mp_raise_TypeError(MP_ERROR_TEXT("format requires a dict"));
            }
            arg_i = 1; // we used up the single dict argument
            const byte *key = ++str;
            // NOTE(review): *str is dereferenced before the bounds check below;
            // appears to rely on the buffer's terminating byte — confirm.
            while (*str != ')') {
                if (str >= top) {
                    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                    terse_str_format_value_error();
                    #else
                    mp_raise_ValueError(MP_ERROR_TEXT("incomplete format key"));
                    #endif
                }
                ++str;
            }
            mp_obj_t k_obj = mp_obj_new_str_via_qstr((const char *)key, str - key);
            arg = mp_obj_dict_get(dict, k_obj);
            str++;
        }

        // parse the conversion flags
        int flags = 0;
        char fill = ' ';
        int alt = 0;
        while (str < top) {
            if (*str == '-') {
                flags |= PF_FLAG_LEFT_ADJUST;
            } else if (*str == '+') {
                flags |= PF_FLAG_SHOW_SIGN;
            } else if (*str == ' ') {
                flags |= PF_FLAG_SPACE_SIGN;
            } else if (*str == '#') {
                alt = PF_FLAG_SHOW_PREFIX;
            } else if (*str == '0') {
                flags |= PF_FLAG_PAD_AFTER_SIGN;
                fill = '0';
            } else {
                break;
            }
            str++;
        }
        // parse width, if it exists ('*' takes it from the next argument)
        int width = 0;
        if (str < top) {
            if (*str == '*') {
                if (arg_i >= n_args) {
                    goto not_enough_args;
                }
                width = mp_obj_get_int(args[arg_i++]);
                str++;
            } else {
                str = (const byte *)str_to_int((const char *)str, (const char *)top, &width);
            }
        }
        // parse precision, if it exists ('*' takes it from the next argument)
        int prec = -1;
        if (str < top && *str == '.') {
            if (++str < top) {
                if (*str == '*') {
                    if (arg_i >= n_args) {
                        goto not_enough_args;
                    }
                    prec = mp_obj_get_int(args[arg_i++]);
                    str++;
                } else {
                    prec = 0;
                    str = (const byte *)str_to_int((const char *)str, (const char *)top, &prec);
                }
            }
        }

        if (str >= top) {
        incomplete_format:
            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
            terse_str_format_value_error();
            #else
            mp_raise_ValueError(MP_ERROR_TEXT("incomplete format"));
            #endif
        }

        // Tuple value lookup (unless a dict key already supplied the value)
        if (arg == MP_OBJ_NULL) {
            if (arg_i >= n_args) {
            not_enough_args:
                mp_raise_TypeError(MP_ERROR_TEXT("not enough arguments for format string"));
            }
            arg = args[arg_i++];
        }
        // dispatch on the conversion character
        switch (*str) {
            case 'c':
                if (mp_obj_is_str(arg)) {
                    size_t slen;
                    const char *s = mp_obj_str_get_data(arg, &slen);
                    if (slen != 1) {
                        mp_raise_TypeError(MP_ERROR_TEXT("%%c requires int or char"));
                    }
                    mp_print_strn(&print, s, 1, flags, ' ', width);
                } else if (arg_looks_integer(arg)) {
                    char ch = mp_obj_get_int(arg);
                    mp_print_strn(&print, &ch, 1, flags, ' ', width);
                } else {
                    mp_raise_TypeError(MP_ERROR_TEXT("%%c requires int or char"));
                }
                break;

            case 'd':
            case 'i':
            case 'u':
                mp_print_mp_int(&print, arg_as_int(arg), 10, 'a', flags, fill, width, prec);
                break;

            #if MICROPY_PY_BUILTINS_FLOAT
            case 'e':
            case 'E':
            case 'f':
            case 'F':
            case 'g':
            case 'G':
                mp_print_float(&print, mp_obj_get_float(arg), *str, flags, fill, width, prec);
                break;
            #endif

            case 'o':
                if (alt) {
                    flags |= (PF_FLAG_SHOW_PREFIX | PF_FLAG_SHOW_OCTAL_LETTER);
                }
                mp_print_mp_int(&print, arg, 8, 'a', flags, fill, width, prec);
                break;

            case 'r':
            case 's': {
                vstr_t arg_vstr;
                mp_print_t arg_print;
                vstr_init_print(&arg_vstr, 16, &arg_print);
                mp_print_kind_t print_kind = (*str == 'r' ? PRINT_REPR : PRINT_STR);
                if (print_kind == PRINT_STR && is_bytes && mp_obj_is_type(arg, &mp_type_bytes)) {
                    // If we have something like b"%s" % b"1", bytes arg should be
                    // printed undecorated.
                    print_kind = PRINT_RAW;
                }
                mp_obj_print_helper(&arg_print, arg, print_kind);
                uint vlen = arg_vstr.len;
                // precision truncates the rendered string
                if (prec < 0) {
                    prec = vlen;
                }
                if (vlen > (uint)prec) {
                    vlen = prec;
                }
                mp_print_strn(&print, arg_vstr.buf, vlen, flags, ' ', width);
                vstr_clear(&arg_vstr);
                break;
            }

            case 'X':
            case 'x':
                // *str - ('X' - 'A') yields 'A' or 'a' as the hex-digit base
                mp_print_mp_int(&print, arg, 16, *str - ('X' - 'A'), flags | alt, fill, width, prec);
                break;

            default:
                #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
                terse_str_format_value_error();
                #else
                mp_raise_ValueError_varg(
                    MP_ERROR_TEXT("unsupported format character '%c' (0x%x) at index %d"),
                    *str, *str, str - start_str);
                #endif
        }
    }

    if (arg_i != n_args) {
        mp_raise_TypeError(MP_ERROR_TEXT("not all arguments converted during string formatting"));
    }

    return mp_obj_new_str_from_vstr(is_bytes ? &mp_type_bytes : &mp_type_str, &vstr);
}
#endif
+
// str.replace(old, new[, max]): replace occurrences of old with new.
// The implementation is optimized, returning the original string if there's
// nothing to replace.  Works in two passes: the first measures the result,
// the second fills the freshly allocated buffer.
STATIC mp_obj_t str_replace(size_t n_args, const mp_obj_t *args) {
    mp_check_self(mp_obj_is_str_or_bytes(args[0]));

    // normalize the optional max-replacements count: 0 means "no work",
    // any negative value means "replace all" (encoded as -1)
    mp_int_t max_rep = -1;
    if (n_args == 4) {
        max_rep = mp_obj_get_int(args[3]);
        if (max_rep == 0) {
            return args[0];
        } else if (max_rep < 0) {
            max_rep = -1;
        }
    }

    // if max_rep is still -1 by this point we will need to do all possible replacements

    // check argument types

    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);

    if (mp_obj_get_type(args[1]) != self_type) {
        bad_implicit_conversion(args[1]);
    }

    if (mp_obj_get_type(args[2]) != self_type) {
        bad_implicit_conversion(args[2]);
    }

    // extract string data

    GET_STR_DATA_LEN(args[0], str, str_len);
    GET_STR_DATA_LEN(args[1], old, old_len);
    GET_STR_DATA_LEN(args[2], new, new_len);

    // old won't exist in str if it's longer, so nothing to replace
    if (old_len > str_len) {
        return args[0];
    }

    // data for the replaced string; NULL during the measuring pass
    byte *data = NULL;
    vstr_t vstr;

    // do 2 passes over the string:
    //   first pass computes the required length of the replaced string
    //   second pass does the replacements
    for (;;) {
        size_t replaced_str_index = 0;
        size_t num_replacements_done = 0;
        const byte *old_occurrence;
        const byte *offset_ptr = str;
        size_t str_len_remain = str_len;
        if (old_len == 0) {
            // if old_str is empty, copy new_str to start of replaced string
            // copy the replacement string
            if (data != NULL) {
                memcpy(data, new, new_len);
            }
            replaced_str_index += new_len;
            num_replacements_done++;
        }
        while (num_replacements_done != (size_t)max_rep && str_len_remain > 0 && (old_occurrence = find_subbytes(offset_ptr, str_len_remain, old, old_len, 1)) != NULL) {
            if (old_len == 0) {
                // empty pattern matches between every character; advance by one
                old_occurrence += 1;
            }
            // copy from just after end of last occurrence of to-be-replaced string to right before start of next occurrence
            if (data != NULL) {
                memcpy(data + replaced_str_index, offset_ptr, old_occurrence - offset_ptr);
            }
            replaced_str_index += old_occurrence - offset_ptr;
            // copy the replacement string
            if (data != NULL) {
                memcpy(data + replaced_str_index, new, new_len);
            }
            replaced_str_index += new_len;
            offset_ptr = old_occurrence + old_len;
            str_len_remain = str + str_len - offset_ptr;
            num_replacements_done++;
        }

        // copy from just after end of last occurrence of to-be-replaced string to end of old string
        if (data != NULL) {
            memcpy(data + replaced_str_index, offset_ptr, str_len_remain);
        }
        replaced_str_index += str_len_remain;

        if (data == NULL) {
            // first pass
            if (num_replacements_done == 0) {
                // no substr found, return original string
                return args[0];
            } else {
                // substr found, allocate new string
                vstr_init_len(&vstr, replaced_str_index);
                data = (byte *)vstr.buf;
                assert(data != NULL);
            }
        } else {
            // second pass, we are done
            break;
        }
    }

    return mp_obj_new_str_from_vstr(self_type, &vstr);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_replace_obj, 3, 4, str_replace);
+
#if MICROPY_PY_BUILTINS_STR_COUNT
// str.count(sub[, start[, end]]): count non-overlapping occurrences of sub
// within the (optional) start/end slice.
STATIC mp_obj_t str_count(size_t n_args, const mp_obj_t *args) {
    const mp_obj_type_t *self_type = mp_obj_get_type(args[0]);
    mp_check_self(mp_obj_is_str_or_bytes(args[0]));

    // check argument type
    if (mp_obj_get_type(args[1]) != self_type) {
        bad_implicit_conversion(args[1]);
    }

    GET_STR_DATA_LEN(args[0], haystack, haystack_len);
    GET_STR_DATA_LEN(args[1], needle, needle_len);

    // clamp the search window to the optional start/end indices
    const byte *start = haystack;
    const byte *end = haystack + haystack_len;
    if (n_args >= 3 && args[2] != mp_const_none) {
        start = str_index_to_ptr(self_type, haystack, haystack_len, args[2], true);
    }
    if (n_args >= 4 && args[3] != mp_const_none) {
        end = str_index_to_ptr(self_type, haystack, haystack_len, args[3], true);
    }

    // if needle_len is zero then we count each gap between characters as an occurrence
    if (needle_len == 0) {
        return MP_OBJ_NEW_SMALL_INT(utf8_charlen(start, end - start) + 1);
    }

    // count the occurrences; advance one UTF-8 char on mismatch, a whole
    // needle on match (occurrences do not overlap)
    mp_int_t num_occurrences = 0;
    for (const byte *haystack_ptr = start; haystack_ptr + needle_len <= end;) {
        if (memcmp(haystack_ptr, needle, needle_len) == 0) {
            num_occurrences++;
            haystack_ptr += needle_len;
        } else {
            haystack_ptr = utf8_next_char(haystack_ptr);
        }
    }

    return MP_OBJ_NEW_SMALL_INT(num_occurrences);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_count_obj, 2, 4, str_count);
#endif
+
#if MICROPY_PY_BUILTINS_STR_PARTITION
// Shared implementation of partition() (direction=1, first occurrence) and
// rpartition() (direction=-1, last occurrence).  Always returns a 3-tuple
// (before, separator, after); when the separator is not found the whole
// string goes into slot 0 (partition) or slot 2 (rpartition) and the other
// slots are empty.
STATIC mp_obj_t str_partitioner(mp_obj_t self_in, mp_obj_t arg, int direction) {
    mp_check_self(mp_obj_is_str_or_bytes(self_in));
    const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
    if (self_type != mp_obj_get_type(arg)) {
        bad_implicit_conversion(arg);
    }

    GET_STR_DATA_LEN(self_in, str, str_len);
    GET_STR_DATA_LEN(arg, sep, sep_len);

    if (sep_len == 0) {
        mp_raise_ValueError(MP_ERROR_TEXT("empty separator"));
    }

    // pre-fill the result with empty strings of the matching type
    mp_obj_t result[3];
    if (self_type == &mp_type_str) {
        result[0] = MP_OBJ_NEW_QSTR(MP_QSTR_);
        result[1] = MP_OBJ_NEW_QSTR(MP_QSTR_);
        result[2] = MP_OBJ_NEW_QSTR(MP_QSTR_);
    } else {
        result[0] = mp_const_empty_bytes;
        result[1] = mp_const_empty_bytes;
        result[2] = mp_const_empty_bytes;
    }

    // not-found default: whole string before (partition) or after (rpartition)
    if (direction > 0) {
        result[0] = self_in;
    } else {
        result[2] = self_in;
    }

    const byte *position_ptr = find_subbytes(str, str_len, sep, sep_len, direction);
    if (position_ptr != NULL) {
        size_t position = position_ptr - str;
        result[0] = mp_obj_new_str_of_type(self_type, str, position);
        result[1] = arg;
        result[2] = mp_obj_new_str_of_type(self_type, str + position + sep_len, str_len - position - sep_len);
    }

    return mp_obj_new_tuple(3, result);
}
+
// str.partition(sep): split around the first occurrence of sep.
STATIC mp_obj_t str_partition(mp_obj_t self_in, mp_obj_t arg) {
    return str_partitioner(self_in, arg, 1);
}
MP_DEFINE_CONST_FUN_OBJ_2(str_partition_obj, str_partition);
+
// str.rpartition(sep): split around the last occurrence of sep.
STATIC mp_obj_t str_rpartition(mp_obj_t self_in, mp_obj_t arg) {
    return str_partitioner(self_in, arg, -1);
}
MP_DEFINE_CONST_FUN_OBJ_2(str_rpartition_obj, str_rpartition);
+#endif
+
+// Supposedly not too critical operations, so optimize for code size
+STATIC mp_obj_t str_caseconv(unichar (*op)(unichar), mp_obj_t self_in) {
+ GET_STR_DATA_LEN(self_in, self_data, self_len);
+ vstr_t vstr;
+ vstr_init_len(&vstr, self_len);
+ byte *data = (byte *)vstr.buf;
+ for (size_t i = 0; i < self_len; i++) {
+ *data++ = op(*self_data++);
+ }
+ return mp_obj_new_str_from_vstr(mp_obj_get_type(self_in), &vstr);
+}
+
// str.lower(): per-byte lowercase conversion.
STATIC mp_obj_t str_lower(mp_obj_t self_in) {
    return str_caseconv(unichar_tolower, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_lower_obj, str_lower);
+
// str.upper(): per-byte uppercase conversion.
STATIC mp_obj_t str_upper(mp_obj_t self_in) {
    return str_caseconv(unichar_toupper, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_upper_obj, str_upper);
+
// Shared helper for the is*() predicates.  Generally every character must
// satisfy f; isupper()/islower() are special-cased to test only alphabetic
// characters and require at least one of them (matching CPython semantics).
STATIC mp_obj_t str_uni_istype(bool (*f)(unichar), mp_obj_t self_in) {
    GET_STR_DATA_LEN(self_in, self_data, self_len);

    if (self_len == 0) {
        return mp_const_false; // default to False for empty str
    }

    if (f != unichar_isupper && f != unichar_islower) {
        // simple case: every character must satisfy the predicate
        for (size_t i = 0; i < self_len; i++) {
            if (!f(*self_data++)) {
                return mp_const_false;
            }
        }
    } else {
        bool contains_alpha = false;

        for (size_t i = 0; i < self_len; i++) { // only check alphanumeric characters
            if (unichar_isalpha(*self_data++)) {
                contains_alpha = true;
                if (!f(*(self_data - 1))) { // -1 because we already incremented above
                    return mp_const_false;
                }
            }
        }

        // a string with no cased characters is neither upper nor lower
        if (!contains_alpha) {
            return mp_const_false;
        }
    }

    return mp_const_true;
}
+
// str.isspace(): True if non-empty and all characters are whitespace.
STATIC mp_obj_t str_isspace(mp_obj_t self_in) {
    return str_uni_istype(unichar_isspace, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_isspace_obj, str_isspace);
+
// str.isalpha(): True if non-empty and all characters are alphabetic.
STATIC mp_obj_t str_isalpha(mp_obj_t self_in) {
    return str_uni_istype(unichar_isalpha, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_isalpha_obj, str_isalpha);
+
// str.isdigit(): True if non-empty and all characters are digits.
STATIC mp_obj_t str_isdigit(mp_obj_t self_in) {
    return str_uni_istype(unichar_isdigit, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_isdigit_obj, str_isdigit);
+
// str.isupper(): True if all cased characters are uppercase and at least one exists.
STATIC mp_obj_t str_isupper(mp_obj_t self_in) {
    return str_uni_istype(unichar_isupper, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_isupper_obj, str_isupper);
+
// str.islower(): True if all cased characters are lowercase and at least one exists.
STATIC mp_obj_t str_islower(mp_obj_t self_in) {
    return str_uni_istype(unichar_islower, self_in);
}
MP_DEFINE_CONST_FUN_OBJ_1(str_islower_obj, str_islower);
+
#if MICROPY_CPYTHON_COMPAT
// These methods are superfluous in the presence of str() and bytes()
// constructors.
// TODO: should accept kwargs too
// bytes.decode([encoding]): delegates to the str constructor, defaulting the
// encoding argument to "utf-8" when omitted.
STATIC mp_obj_t bytes_decode(size_t n_args, const mp_obj_t *args) {
    mp_obj_t new_args[2];
    if (n_args == 1) {
        // synthesize the default encoding argument
        new_args[0] = args[0];
        new_args[1] = MP_OBJ_NEW_QSTR(MP_QSTR_utf_hyphen_8);
        args = new_args;
        n_args++;
    }
    return mp_obj_str_make_new(&mp_type_str, n_args, 0, args);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(bytes_decode_obj, 1, 3, bytes_decode);
+
// TODO: should accept kwargs too
// str.encode([encoding]): delegates to the bytes constructor, defaulting the
// encoding argument to "utf-8" when omitted.
STATIC mp_obj_t str_encode(size_t n_args, const mp_obj_t *args) {
    mp_obj_t new_args[2];
    if (n_args == 1) {
        // synthesize the default encoding argument
        new_args[0] = args[0];
        new_args[1] = MP_OBJ_NEW_QSTR(MP_QSTR_utf_hyphen_8);
        args = new_args;
        n_args++;
    }
    return bytes_make_new(NULL, n_args, 0, args);
}
MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(str_encode_obj, 1, 3, str_encode);
+#endif
+
+mp_int_t mp_obj_str_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+ if (flags == MP_BUFFER_READ) {
+ GET_STR_DATA_LEN(self_in, str_data, str_len);
+ bufinfo->buf = (void *)str_data;
+ bufinfo->len = str_len;
+ bufinfo->typecode = 'B'; // bytes should be unsigned, so should unicode byte-access
+ return 0;
+ } else {
+ // can't write to a string
+ return 1;
+ }
+}
+
+STATIC const mp_rom_map_elem_t str8_locals_dict_table[] = {
+ #if MICROPY_CPYTHON_COMPAT
+ { MP_ROM_QSTR(MP_QSTR_decode), MP_ROM_PTR(&bytes_decode_obj) },
+ #if !MICROPY_PY_BUILTINS_STR_UNICODE
+ // If we have separate unicode type, then here we have methods only
+ // for bytes type, and it should not have encode() methods. Otherwise,
+ // we have non-compliant-but-practical bytestring type, which shares
+ // method table with bytes, so they both have encode() and decode()
+ // methods (which should do type checking at runtime).
+ { MP_ROM_QSTR(MP_QSTR_encode), MP_ROM_PTR(&str_encode_obj) },
+ #endif
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_find), MP_ROM_PTR(&str_find_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rfind), MP_ROM_PTR(&str_rfind_obj) },
+ { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&str_index_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rindex), MP_ROM_PTR(&str_rindex_obj) },
+ { MP_ROM_QSTR(MP_QSTR_join), MP_ROM_PTR(&str_join_obj) },
+ { MP_ROM_QSTR(MP_QSTR_split), MP_ROM_PTR(&str_split_obj) },
+ #if MICROPY_PY_BUILTINS_STR_SPLITLINES
+ { MP_ROM_QSTR(MP_QSTR_splitlines), MP_ROM_PTR(&str_splitlines_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_rsplit), MP_ROM_PTR(&str_rsplit_obj) },
+ { MP_ROM_QSTR(MP_QSTR_startswith), MP_ROM_PTR(&str_startswith_obj) },
+ { MP_ROM_QSTR(MP_QSTR_endswith), MP_ROM_PTR(&str_endswith_obj) },
+ { MP_ROM_QSTR(MP_QSTR_strip), MP_ROM_PTR(&str_strip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_lstrip), MP_ROM_PTR(&str_lstrip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) },
+ { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) },
+ #if MICROPY_PY_BUILTINS_STR_COUNT
+ { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) },
+ #endif
+ #if MICROPY_PY_BUILTINS_STR_PARTITION
+ { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) },
+ #endif
+ #if MICROPY_PY_BUILTINS_STR_CENTER
+ { MP_ROM_QSTR(MP_QSTR_center), MP_ROM_PTR(&str_center_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_lower), MP_ROM_PTR(&str_lower_obj) },
+ { MP_ROM_QSTR(MP_QSTR_upper), MP_ROM_PTR(&str_upper_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isspace), MP_ROM_PTR(&str_isspace_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isalpha), MP_ROM_PTR(&str_isalpha_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isdigit), MP_ROM_PTR(&str_isdigit_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isupper), MP_ROM_PTR(&str_isupper_obj) },
+ { MP_ROM_QSTR(MP_QSTR_islower), MP_ROM_PTR(&str_islower_obj) },
+};
+
// Bind the method table above into a constant dict, shared by str and bytes.
STATIC MP_DEFINE_CONST_DICT(str8_locals_dict, str8_locals_dict_table);
+
#if !MICROPY_PY_BUILTINS_STR_UNICODE
// Forward declaration; defined in the iterator section at the end of this file.
STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf);

// Type object for the 8-bit str type, used only when the separate unicode str
// implementation is disabled; indexing is byte-wise (shares bytes_subscr).
const mp_obj_type_t mp_type_str = {
    { &mp_type_type },
    .name = MP_QSTR_str,
    .print = str_print,
    .make_new = mp_obj_str_make_new,
    .binary_op = mp_obj_str_binary_op,
    .subscr = bytes_subscr,
    .getiter = mp_obj_new_str_iterator,
    .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
    .locals_dict = (mp_obj_dict_t *)&str8_locals_dict,
};
#endif
+
// Reuses most of methods from str
const mp_obj_type_t mp_type_bytes = {
    { &mp_type_type },
    .flags = MP_TYPE_FLAG_EXTENDED,
    .name = MP_QSTR_bytes,
    .print = str_print,
    .make_new = bytes_make_new,
    // str and bytes share one method table; see the comment above its entries.
    .locals_dict = (mp_obj_dict_t *)&str8_locals_dict,
    MP_TYPE_EXTENDED_FIELDS(
        .binary_op = mp_obj_str_binary_op,
        .subscr = bytes_subscr,
        .getiter = mp_obj_new_bytes_iterator,
        .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
        ),
};
+
+// The zero-length bytes object, with data that includes a null-terminating byte
+const mp_obj_str_t mp_const_empty_bytes_obj = {{&mp_type_bytes}, 0, 0, (const byte *)""};
+
+// Create a str/bytes object using the given data. New memory is allocated and
+// the data is copied across. This function should only be used if the type is bytes,
+// or if the type is str and the string data is known to be not interned.
+mp_obj_t mp_obj_new_str_copy(const mp_obj_type_t *type, const byte *data, size_t len) {
+ mp_obj_str_t *o = m_new_obj(mp_obj_str_t);
+ o->base.type = type;
+ o->len = len;
+ if (data) {
+ o->hash = qstr_compute_hash(data, len);
+ byte *p = m_new(byte, len + 1);
+ o->data = p;
+ memcpy(p, data, len * sizeof(byte));
+ p[len] = '\0'; // for now we add null for compatibility with C ASCIIZ strings
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Create a str/bytes object using the given data. If the type is str and the string
+// data is already interned, then a qstr object is returned. Otherwise new memory is
+// allocated for the object and the data is copied across.
+mp_obj_t mp_obj_new_str_of_type(const mp_obj_type_t *type, const byte *data, size_t len) {
+ if (type == &mp_type_str) {
+ return mp_obj_new_str((const char *)data, len);
+ } else {
+ return mp_obj_new_bytes(data, len);
+ }
+}
+
// Create a str using a qstr to store the data; may use existing or new qstr.
// Unlike mp_obj_new_str, this always interns the data.
mp_obj_t mp_obj_new_str_via_qstr(const char *data, size_t len) {
    return MP_OBJ_NEW_QSTR(qstr_from_strn(data, len));
}
+
// Create a str/bytes object from the given vstr. The vstr buffer is resized to
// the exact length required and then reused for the str/bytes object. The vstr
// is cleared and can safely be passed to vstr_free if it was heap allocated.
mp_obj_t mp_obj_new_str_from_vstr(const mp_obj_type_t *type, vstr_t *vstr) {
    // if not a bytes object, look if a qstr with this data already exists
    if (type == &mp_type_str) {
        qstr q = qstr_find_strn(vstr->buf, vstr->len);
        if (q != MP_QSTRnull) {
            // Reuse the interned copy; the vstr's buffer is not needed, free it.
            vstr_clear(vstr);
            vstr->alloc = 0;
            return MP_OBJ_NEW_QSTR(q);
        }
    }

    // make a new str/bytes object
    mp_obj_str_t *o = m_new_obj(mp_obj_str_t);
    o->base.type = type;
    o->len = vstr->len;
    o->hash = qstr_compute_hash((byte *)vstr->buf, vstr->len);
    if (vstr->len + 1 == vstr->alloc) {
        // Allocation is already exactly len+1 bytes: steal the buffer as-is.
        o->data = (byte *)vstr->buf;
    } else {
        // Resize the allocation to exactly len+1 bytes, then steal it.
        o->data = (byte *)m_renew(char, vstr->buf, vstr->alloc, vstr->len + 1);
    }
    ((byte *)o->data)[o->len] = '\0'; // add null byte
    // The new object now owns the buffer; detach it from the vstr.
    vstr->buf = NULL;
    vstr->alloc = 0;
    return MP_OBJ_FROM_PTR(o);
}
+
+mp_obj_t mp_obj_new_str(const char *data, size_t len) {
+ qstr q = qstr_find_strn(data, len);
+ if (q != MP_QSTRnull) {
+ // qstr with this data already exists
+ return MP_OBJ_NEW_QSTR(q);
+ } else {
+ // no existing qstr, don't make one
+ return mp_obj_new_str_copy(&mp_type_str, (const byte *)data, len);
+ }
+}
+
// Return an interned (qstr-backed) version of the given object's string data.
// No type check is performed: the caller must pass a str/bytes (or qstr) object.
mp_obj_t mp_obj_str_intern(mp_obj_t str) {
    GET_STR_DATA_LEN(str, data, len);
    return mp_obj_new_str_via_qstr((const char *)data, len);
}
+
+mp_obj_t mp_obj_str_intern_checked(mp_obj_t obj) {
+ size_t len;
+ const char *data = mp_obj_str_get_data(obj, &len);
+ return mp_obj_new_str_via_qstr((const char *)data, len);
+}
+
// Create a bytes object by copying `len` bytes from `data`.
mp_obj_t mp_obj_new_bytes(const byte *data, size_t len) {
    return mp_obj_new_str_copy(&mp_type_bytes, data, len);
}
+
+mp_obj_t mp_obj_new_bytes_of_zeros(size_t len) {
+ vstr_t vstr;
+ vstr_init_len(&vstr, len);
+ memset(vstr.buf, 0, len);
+ return mp_obj_new_str_from_vstr(&mp_type_bytes, &vstr);
+}
+
+
+bool mp_obj_str_equal(mp_obj_t s1, mp_obj_t s2) {
+ if (mp_obj_is_qstr(s1) && mp_obj_is_qstr(s2)) {
+ return s1 == s2;
+ } else {
+ GET_STR_HASH(s1, h1);
+ GET_STR_HASH(s2, h2);
+ // If any of hashes is 0, it means it's not valid
+ if (h1 != 0 && h2 != 0 && h1 != h2) {
+ return false;
+ }
+ GET_STR_DATA_LEN(s1, d1, l1);
+ GET_STR_DATA_LEN(s2, d2, l2);
+ if (l1 != l2) {
+ return false;
+ }
+ return memcmp(d1, d2, l1) == 0;
+ }
+}
+
// Raise TypeError for an object that cannot be implicitly converted to
// str/bytes. Non-terse error reporting names both source and target types.
STATIC NORETURN void bad_implicit_conversion(mp_obj_t self_in) {
    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
    mp_raise_TypeError(MP_ERROR_TEXT("can't convert to str implicitly"));
    #else
    const qstr src_name = mp_obj_get_type_qstr(self_in);
    // A str fails to convert to bytes and vice versa.
    mp_raise_TypeError_varg(MP_ERROR_TEXT("can't convert '%q' object to %q implicitly"),
        src_name, src_name == MP_QSTR_str ? MP_QSTR_bytes : MP_QSTR_str);
    #endif
}
+
// use this if you will anyway convert the string to a qstr
// will be more efficient for the case where it's already a qstr
qstr mp_obj_str_get_qstr(mp_obj_t self_in) {
    if (mp_obj_is_qstr(self_in)) {
        return MP_OBJ_QSTR_VALUE(self_in);
    } else if (mp_obj_is_type(self_in, &mp_type_str)) {
        // Heap-allocated str: intern its data (creates the qstr if needed).
        mp_obj_str_t *self = MP_OBJ_TO_PTR(self_in);
        return qstr_from_strn((char *)self->data, self->len);
    } else {
        // Raises TypeError; marked NORETURN, so no return value is needed here.
        bad_implicit_conversion(self_in);
    }
}
+
// only use this function if you need the str data to be zero terminated
// at the moment all strings are zero terminated to help with C ASCIIZ compatibility
// Raises TypeError if the object is not a str/bytes.
const char *mp_obj_str_get_str(mp_obj_t self_in) {
    if (mp_obj_is_str_or_bytes(self_in)) {
        GET_STR_DATA_LEN(self_in, s, l);
        (void)l; // len unused
        return (const char *)s;
    } else {
        // Raises TypeError; marked NORETURN, so no return value is needed here.
        bad_implicit_conversion(self_in);
    }
}
+
// Return the string data and store its length in *len.
// Raises TypeError if the object is not a str/bytes.
const char *mp_obj_str_get_data(mp_obj_t self_in, size_t *len) {
    if (mp_obj_is_str_or_bytes(self_in)) {
        GET_STR_DATA_LEN(self_in, s, l);
        *len = l;
        return (const char *)s;
    } else {
        // Raises TypeError; marked NORETURN, so no return value is needed here.
        bad_implicit_conversion(self_in);
    }
}
+
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+const byte *mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len) {
+ if (mp_obj_is_qstr(self_in)) {
+ return qstr_data(MP_OBJ_QSTR_VALUE(self_in), len);
+ } else {
+ *len = ((mp_obj_str_t *)MP_OBJ_TO_PTR(self_in))->len;
+ return ((mp_obj_str_t *)MP_OBJ_TO_PTR(self_in))->data;
+ }
+}
+#endif
+
+/******************************************************************************/
+/* str iterator */
+
// State for the str/bytes iterators below; small enough to fit inside an
// mp_obj_iter_buf_t (asserted in the constructors).
typedef struct _mp_obj_str8_it_t {
    mp_obj_base_t base;
    mp_fun_1_t iternext; // called by the polymorphic-iterator machinery
    mp_obj_t str;        // the str/bytes object being iterated
    size_t cur;          // current byte offset into the data
} mp_obj_str8_it_t;
+
+#if !MICROPY_PY_BUILTINS_STR_UNICODE
+STATIC mp_obj_t str_it_iternext(mp_obj_t self_in) {
+ mp_obj_str8_it_t *self = MP_OBJ_TO_PTR(self_in);
+ GET_STR_DATA_LEN(self->str, str, len);
+ if (self->cur < len) {
+ mp_obj_t o_out = mp_obj_new_str_via_qstr((const char *)str + self->cur, 1);
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_str8_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_str8_it_t *o = (mp_obj_str8_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = str_it_iternext;
+ o->str = str;
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
+#endif
+
+STATIC mp_obj_t bytes_it_iternext(mp_obj_t self_in) {
+ mp_obj_str8_it_t *self = MP_OBJ_TO_PTR(self_in);
+ GET_STR_DATA_LEN(self->str, str, len);
+ if (self->cur < len) {
+ mp_obj_t o_out = MP_OBJ_NEW_SMALL_INT(str[self->cur]);
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+mp_obj_t mp_obj_new_bytes_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_str8_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_str8_it_t *o = (mp_obj_str8_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = bytes_it_iternext;
+ o->str = str;
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objstr.h b/circuitpython/py/objstr.h
new file mode 100644
index 0000000..8031839
--- /dev/null
+++ b/circuitpython/py/objstr.h
@@ -0,0 +1,112 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJSTR_H
+#define MICROPY_INCLUDED_PY_OBJSTR_H
+
+#include "py/obj.h"
+
typedef struct _mp_obj_str_t {
    mp_obj_base_t base;
    mp_uint_t hash; // 0 means the hash has not been computed (see GET_STR_HASH)
    // len == number of bytes used in data, alloc = len + 1 because (at the moment) we also append a null byte
    size_t len;
    const byte *data;
} mp_obj_str_t;
+
+#define MP_DEFINE_STR_OBJ(obj_name, str) mp_obj_str_t obj_name = {{&mp_type_str}, 0, sizeof(str) - 1, (const byte *)str}
+
+// use this macro to extract the string hash
+// warning: the hash can be 0, meaning invalid, and must then be explicitly computed from the data
+#define GET_STR_HASH(str_obj_in, str_hash) \
+ mp_uint_t str_hash; if (mp_obj_is_qstr(str_obj_in)) \
+ { str_hash = qstr_hash(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_hash = ((mp_obj_str_t *)MP_OBJ_TO_PTR(str_obj_in))->hash; }
+
+// use this macro to extract the string length
+#define GET_STR_LEN(str_obj_in, str_len) \
+ size_t str_len; if (mp_obj_is_qstr(str_obj_in)) \
+ { str_len = qstr_len(MP_OBJ_QSTR_VALUE(str_obj_in)); } else { str_len = ((mp_obj_str_t *)MP_OBJ_TO_PTR(str_obj_in))->len; }
+
+// use this macro to extract the string data and length
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+const byte *mp_obj_str_get_data_no_check(mp_obj_t self_in, size_t *len);
+#define GET_STR_DATA_LEN(str_obj_in, str_data, str_len) \
+ size_t str_len; const byte *str_data = mp_obj_str_get_data_no_check(str_obj_in, &str_len);
+#else
+#define GET_STR_DATA_LEN(str_obj_in, str_data, str_len) \
+ const byte *str_data; size_t str_len; if (mp_obj_is_qstr(str_obj_in)) \
+ { str_data = qstr_data(MP_OBJ_QSTR_VALUE(str_obj_in), &str_len); } \
+ else { str_len = ((mp_obj_str_t *)MP_OBJ_TO_PTR(str_obj_in))->len; str_data = ((mp_obj_str_t *)MP_OBJ_TO_PTR(str_obj_in))->data; }
+#endif
+
+mp_obj_t mp_obj_str_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+void mp_str_print_json(const mp_print_t *print, const byte *str_data, size_t str_len);
+mp_obj_t mp_obj_str_format(size_t n_args, const mp_obj_t *args, mp_map_t *kwargs);
+mp_obj_t mp_obj_str_split(size_t n_args, const mp_obj_t *args);
+mp_obj_t mp_obj_new_str_copy(const mp_obj_type_t *type, const byte *data, size_t len);
+mp_obj_t mp_obj_new_str_of_type(const mp_obj_type_t *type, const byte *data, size_t len);
+
+mp_obj_t mp_obj_str_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in);
+mp_int_t mp_obj_str_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags);
+
+size_t str_offset_to_index(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ size_t offset);
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ mp_obj_t index, bool is_slice);
+const byte *find_subbytes(const byte *haystack, size_t hlen, const byte *needle, size_t nlen, int direction);
+
+extern const char nibble_to_hex_upper[16];
+extern const char nibble_to_hex_lower[16];
+
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_encode_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_find_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_rfind_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_index_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_rindex_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(str_join_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_split_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(str_splitlines_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_rsplit_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_startswith_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_endswith_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_strip_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_lstrip_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_rstrip_obj);
+MP_DECLARE_CONST_FUN_OBJ_KW(str_format_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_replace_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(str_count_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(str_partition_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(str_rpartition_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(str_center_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_lower_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_upper_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_isspace_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_isalpha_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_isdigit_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_isupper_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(str_islower_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(bytes_decode_obj);
+
+#endif // MICROPY_INCLUDED_PY_OBJSTR_H
diff --git a/circuitpython/py/objstringio.c b/circuitpython/py/objstringio.c
new file mode 100644
index 0000000..336a041
--- /dev/null
+++ b/circuitpython/py/objstringio.c
@@ -0,0 +1,284 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "py/objstr.h"
+#include "py/objstringio.h"
+#include "py/runtime.h"
+#include "py/stream.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_IO
+
#if MICROPY_CPYTHON_COMPAT
// Raise ValueError if the stream was closed (close sets vstr to NULL).
STATIC void check_stringio_is_open(const mp_obj_stringio_t *o) {
    if (o->vstr == NULL) {
        mp_raise_ValueError(MP_ERROR_TEXT("I/O operation on closed file"));
    }
}
#else
// Without CPython compat, close() keeps the vstr usable, so no check is needed.
#define check_stringio_is_open(o)
#endif
+
+STATIC void stringio_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_stringio_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, self->base.type == &mp_type_stringio ? "<io.StringIO 0x%x>" : "<io.BytesIO 0x%x>", self);
+}
+
+STATIC mp_uint_t stringio_read(mp_obj_t o_in, void *buf, mp_uint_t size, int *errcode) {
+ (void)errcode;
+ mp_obj_stringio_t *o = MP_OBJ_TO_PTR(o_in);
+ check_stringio_is_open(o);
+ if (o->vstr->len <= o->pos) { // read to EOF, or seeked to EOF or beyond
+ return 0;
+ }
+ mp_uint_t remaining = o->vstr->len - o->pos;
+ if (size > remaining) {
+ size = remaining;
+ }
+ memcpy(buf, o->vstr->buf + o->pos, size);
+ o->pos += size;
+ return size;
+}
+
// Replace the borrowed (fixed) buffer with a private heap copy. Called before
// the first mutation when the stream was constructed sharing a str/bytes
// object's buffer (see stringio_make_new).
STATIC void stringio_copy_on_write(mp_obj_stringio_t *o) {
    const void *buf = o->vstr->buf;
    o->vstr->buf = m_new(char, o->vstr->len);
    o->vstr->fixed_buf = false;
    o->ref_obj = MP_OBJ_NULL; // no longer referencing the source object
    memcpy(o->vstr->buf, buf, o->vstr->len);
}
+
// Write `size` bytes at the current position, growing the buffer as needed
// and zero-filling any hole left by a seek past EOF. Returns bytes written,
// or MP_STREAM_ERROR with *errcode set if the position would overflow.
STATIC mp_uint_t stringio_write(mp_obj_t o_in, const void *buf, mp_uint_t size, int *errcode) {
    (void)errcode; // NOTE(review): errcode IS written on the overflow path below
    mp_obj_stringio_t *o = MP_OBJ_TO_PTR(o_in);
    check_stringio_is_open(o);

    if (o->vstr->fixed_buf) {
        // Buffer is borrowed from a str/bytes object: copy it before mutating.
        stringio_copy_on_write(o);
    }

    mp_uint_t new_pos = o->pos + size;
    if (new_pos < size) {
        // Writing <size> bytes will overflow o->pos beyond limit of mp_uint_t.
        *errcode = MP_EFBIG;
        return MP_STREAM_ERROR;
    }
    mp_uint_t org_len = o->vstr->len;
    if (new_pos > o->vstr->alloc) {
        // Take all what's already allocated...
        o->vstr->len = o->vstr->alloc;
        // ... and add more
        vstr_add_len(o->vstr, new_pos - o->vstr->alloc);
    }
    // If there was a seek past EOF, clear the hole
    if (o->pos > org_len) {
        memset(o->vstr->buf + org_len, 0, o->pos - org_len);
    }
    memcpy(o->vstr->buf + o->pos, buf, size);
    o->pos = new_pos;
    if (new_pos > o->vstr->len) {
        o->vstr->len = new_pos;
    }
    return size;
}
+
// Stream ioctl: implements seek, flush, and close for StringIO/BytesIO.
STATIC mp_uint_t stringio_ioctl(mp_obj_t o_in, mp_uint_t request, uintptr_t arg, int *errcode) {
    (void)errcode; // NOTE(review): errcode IS written on some error paths below
    mp_obj_stringio_t *o = MP_OBJ_TO_PTR(o_in);
    switch (request) {
        case MP_STREAM_SEEK: {
            struct mp_stream_seek_t *s = (struct mp_stream_seek_t *)arg;
            // Reference point the offset is applied to (stays 0 for MP_SEEK_SET).
            mp_uint_t ref = 0;
            switch (s->whence) {
                case MP_SEEK_CUR:
                    ref = o->pos;
                    break;
                case MP_SEEK_END:
                    ref = o->vstr->len;
                    break;
            }
            // Unsigned arithmetic: wrap-around is detected by the comparisons below.
            mp_uint_t new_pos = ref + s->offset;

            // For MP_SEEK_SET, offset is unsigned
            if (s->whence != MP_SEEK_SET && s->offset < 0) {
                if (new_pos > ref) {
                    // Negative offset from SEEK_CUR or SEEK_END went past 0.
                    // CPython sets position to 0, POSIX returns an EINVAL error
                    new_pos = 0;
                }
            } else if (new_pos < ref) {
                // positive offset went beyond the limit of mp_uint_t
                *errcode = MP_EINVAL; // replace with MP_EOVERFLOW when defined
                return MP_STREAM_ERROR;
            }
            s->offset = o->pos = new_pos;
            return 0;
        }
        case MP_STREAM_FLUSH:
            // Nothing is buffered outside the vstr, so flush is a no-op.
            return 0;
        case MP_STREAM_CLOSE:
            #if MICROPY_CPYTHON_COMPAT
            // Free the buffer and mark closed; later I/O raises ValueError.
            vstr_free(o->vstr);
            o->vstr = NULL;
            #else
            // Without CPython compat, just empty the buffer and keep it usable.
            vstr_clear(o->vstr);
            o->vstr->alloc = 0;
            o->vstr->len = 0;
            o->pos = 0;
            #endif
            return 0;
        default:
            *errcode = MP_EINVAL;
            return MP_STREAM_ERROR;
    }
}
+
// Map a stream object to the type it yields: StringIO -> str, BytesIO -> bytes.
#define STREAM_TO_CONTENT_TYPE(o) (((o)->base.type == &mp_type_stringio) ? &mp_type_str : &mp_type_bytes)

// getvalue(): return the entire buffer contents as a new str/bytes object.
STATIC mp_obj_t stringio_getvalue(mp_obj_t self_in) {
    mp_obj_stringio_t *self = MP_OBJ_TO_PTR(self_in);
    check_stringio_is_open(self);
    // TODO: Try to avoid copying string
    return mp_obj_new_str_of_type(STREAM_TO_CONTENT_TYPE(self), (byte *)self->vstr->buf, self->vstr->len);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_1(stringio_getvalue_obj, stringio_getvalue);
+
// __exit__(self, exc_type, exc_val, exc_tb): close the stream on context-manager exit.
STATIC mp_obj_t stringio___exit__(size_t n_args, const mp_obj_t *args) {
    (void)n_args; // always 4: self plus the three exception values
    return mp_stream_close(args[0]);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(stringio___exit___obj, 4, 4, stringio___exit__);
+
// Allocate a StringIO/BytesIO object. NOTE: o->vstr is left unset here;
// the caller must assign it before the object is used.
STATIC mp_obj_stringio_t *stringio_new(const mp_obj_type_t *type) {
    mp_obj_stringio_t *o = m_new_obj(mp_obj_stringio_t);
    o->base.type = type;
    o->pos = 0;
    o->ref_obj = MP_OBJ_NULL;
    return o;
}
+
// Constructor: StringIO([initvalue]) / BytesIO([initvalue]).
// With a str/bytes argument the buffer is shared copy-on-write with that
// object; with any other buffer-protocol argument the data is copied in.
STATIC mp_obj_t stringio_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
    mp_arg_check_num(n_args, n_kw, 0, 1, false);

    mp_uint_t sz = 16; // default initial allocation when constructed empty
    bool initdata = false;
    mp_buffer_info_t bufinfo;

    mp_obj_stringio_t *o = stringio_new(type_in);

    if (n_args > 0) {
        mp_get_buffer_raise(args[0], &bufinfo, MP_BUFFER_READ);

        if (mp_obj_is_str_or_bytes(args[0])) {
            // Reference the argument's buffer directly (copy-on-write):
            // stringio_write copies it before the first mutation.
            o->vstr = m_new_obj(vstr_t);
            vstr_init_fixed_buf(o->vstr, bufinfo.len, bufinfo.buf);
            o->vstr->len = bufinfo.len;
            o->ref_obj = args[0]; // keep the source object alive for the GC
            return MP_OBJ_FROM_PTR(o);
        }

        sz = bufinfo.len;
        initdata = true;
    }

    o->vstr = vstr_new(sz);

    if (initdata) {
        // errcode may be NULL here: with pos == 0 the only branch of
        // stringio_write that writes *errcode (position overflow) cannot trigger.
        stringio_write(MP_OBJ_FROM_PTR(o), bufinfo.buf, bufinfo.len, NULL);
        // Cur ptr is always at the beginning of buffer at the construction
        o->pos = 0;
    }
    return MP_OBJ_FROM_PTR(o);
}
+
// Method table shared by StringIO and BytesIO; most methods come from the
// generic stream layer, which dispatches to the read/write/ioctl handlers above.
STATIC const mp_rom_map_elem_t stringio_locals_dict_table[] = {
    { MP_ROM_QSTR(MP_QSTR_read), MP_ROM_PTR(&mp_stream_read_obj) },
    { MP_ROM_QSTR(MP_QSTR_readinto), MP_ROM_PTR(&mp_stream_readinto_obj) },
    { MP_ROM_QSTR(MP_QSTR_readline), MP_ROM_PTR(&mp_stream_unbuffered_readline_obj) },
    { MP_ROM_QSTR(MP_QSTR_write), MP_ROM_PTR(&mp_stream_write_obj) },
    { MP_ROM_QSTR(MP_QSTR_seek), MP_ROM_PTR(&mp_stream_seek_obj) },
    { MP_ROM_QSTR(MP_QSTR_tell), MP_ROM_PTR(&mp_stream_tell_obj) },
    { MP_ROM_QSTR(MP_QSTR_flush), MP_ROM_PTR(&mp_stream_flush_obj) },
    { MP_ROM_QSTR(MP_QSTR_close), MP_ROM_PTR(&mp_stream_close_obj) },
    { MP_ROM_QSTR(MP_QSTR_getvalue), MP_ROM_PTR(&stringio_getvalue_obj) },
    { MP_ROM_QSTR(MP_QSTR___enter__), MP_ROM_PTR(&mp_identity_obj) },
    { MP_ROM_QSTR(MP_QSTR___exit__), MP_ROM_PTR(&stringio___exit___obj) },
};

STATIC MP_DEFINE_CONST_DICT(stringio_locals_dict, stringio_locals_dict_table);
+
// Stream protocol for StringIO: same handlers as BytesIO, but text mode.
STATIC const mp_stream_p_t stringio_stream_p = {
    MP_PROTO_IMPLEMENT(MP_QSTR_protocol_stream)
    .read = stringio_read,
    .write = stringio_write,
    .ioctl = stringio_ioctl,
    .is_text = true,
};

// Type object for io.StringIO; iteration yields lines via the stream layer.
const mp_obj_type_t mp_type_stringio = {
    { &mp_type_type },
    .flags = MP_TYPE_FLAG_EXTENDED,
    .name = MP_QSTR_StringIO,
    .print = stringio_print,
    .make_new = stringio_make_new,
    .locals_dict = (mp_obj_dict_t *)&stringio_locals_dict,
    MP_TYPE_EXTENDED_FIELDS(
        .getiter = mp_identity_getiter,
        .iternext = mp_stream_unbuffered_iter,
        .protocol = &stringio_stream_p,
        ),
};
+
#if MICROPY_PY_IO_BYTESIO
// Stream protocol for BytesIO: same handlers as StringIO, but binary mode.
STATIC const mp_stream_p_t bytesio_stream_p = {
    MP_PROTO_IMPLEMENT(MP_QSTR_protocol_stream)
    .read = stringio_read,
    .write = stringio_write,
    .ioctl = stringio_ioctl,
};

// Type object for io.BytesIO; shares the method table and handlers with StringIO.
const mp_obj_type_t mp_type_bytesio = {
    { &mp_type_type },
    .flags = MP_TYPE_FLAG_EXTENDED,
    .name = MP_QSTR_BytesIO,
    .print = stringio_print,
    .make_new = stringio_make_new,
    .locals_dict = (mp_obj_dict_t *)&stringio_locals_dict,
    MP_TYPE_EXTENDED_FIELDS(
        .getiter = mp_identity_getiter,
        .iternext = mp_stream_unbuffered_iter,
        .protocol = &bytesio_stream_p,
        ),
};
#endif
+
+#endif
diff --git a/circuitpython/py/objstringio.h b/circuitpython/py/objstringio.h
new file mode 100644
index 0000000..38778f0
--- /dev/null
+++ b/circuitpython/py/objstringio.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJSTRINGIO_H
+#define MICROPY_INCLUDED_PY_OBJSTRINGIO_H
+
+#include "py/obj.h"
+
typedef struct _mp_obj_stringio_t {
    mp_obj_base_t base;
    vstr_t *vstr; // growable data buffer; NULL once closed (CPython compat mode)
    // StringIO has single pointer used for both reading and writing
    mp_uint_t pos;
    // Underlying object buffered by this StringIO
    // (MP_OBJ_NULL unless sharing a str/bytes buffer copy-on-write)
    mp_obj_t ref_obj;
} mp_obj_stringio_t;
+
+#endif // MICROPY_INCLUDED_PY_OBJSTRINGIO_H
diff --git a/circuitpython/py/objstrunicode.c b/circuitpython/py/objstrunicode.c
new file mode 100644
index 0000000..eb79d54
--- /dev/null
+++ b/circuitpython/py/objstrunicode.c
@@ -0,0 +1,350 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/objstr.h"
+#include "py/objlist.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf);
+
+/******************************************************************************/
+/* str */
+
+
+// These settings approximate CPython's printability. It is not
+// exhaustive and may print "unprintable" characters. All ASCII control codes
+// are escaped along with variable space widths and paragraph designators.
+// Unlike CPython, we do not escape private use codes or reserved characters.
+// We assume that the unicode is well formed.
+// CPython policy is documented here: https://github.com/python/cpython/blob/bb3e0c240bc60fe08d332ff5955d54197f79751c/Objects/unicodectype.c#L147-L159
+STATIC void uni_print_quoted(const mp_print_t *print, const byte *str_data, uint str_len) {
+ // this escapes characters, but it will be very slow to print (calling print many times)
+ bool has_single_quote = false;
+ bool has_double_quote = false;
+ for (const byte *s = str_data, *top = str_data + str_len; !has_double_quote && s < top; s++) {
+ if (*s == '\'') {
+ has_single_quote = true;
+ } else if (*s == '"') {
+ has_double_quote = true;
+ }
+ }
+ unichar quote_char = '\'';
+ if (has_single_quote && !has_double_quote) {
+ quote_char = '"';
+ }
+ mp_printf(print, "%c", quote_char);
+ const byte *s = str_data, *top = str_data + str_len;
+ while (s < top) {
+ unichar ch;
+ ch = utf8_get_char(s);
+ const byte *start = s;
+ s = utf8_next_char(s);
+ if (ch == quote_char) {
+ mp_printf(print, "\\%c", quote_char);
+ } else if (ch == '\\') {
+ mp_print_str(print, "\\\\");
+ } else if (ch == '\n') {
+ mp_print_str(print, "\\n");
+ } else if (ch == '\r') {
+ mp_print_str(print, "\\r");
+ } else if (ch == '\t') {
+ mp_print_str(print, "\\t");
+ } else if (ch <= 0x1f || (0x7f <= ch && ch <= 0xa0) || ch == 0xad) {
+ mp_printf(print, "\\x%02x", ch);
+ } else if ((0x2000 <= ch && ch <= 0x200f) || ch == 0x2028 || ch == 0x2029) {
+ mp_printf(print, "\\u%04x", ch);
+ } else {
+ // Print the full character out.
+ int width = s - start;
+ mp_print_strn(print, (const char *)start, width, 0, ' ', width);
+ }
+ }
+ mp_printf(print, "%c", quote_char);
+}
+
+STATIC void uni_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ GET_STR_DATA_LEN(self_in, str_data, str_len);
+ #if MICROPY_PY_UJSON
+ if (kind == PRINT_JSON) {
+ mp_str_print_json(print, str_data, str_len);
+ return;
+ }
+ #endif
+ if (kind == PRINT_STR) {
+ print->print_strn(print->data, (const char *)str_data, str_len);
+ } else {
+ uni_print_quoted(print, str_data, str_len);
+ }
+}
+
+STATIC mp_obj_t uni_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ GET_STR_DATA_LEN(self_in, str_data, str_len);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(str_len != 0);
+ case MP_UNARY_OP_LEN:
+ return MP_OBJ_NEW_SMALL_INT(utf8_charlen(str_data, str_len));
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+size_t str_offset_to_index(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ size_t offset) {
+ if (offset > self_len) {
+ mp_raise_ValueError(MP_ERROR_TEXT("offset out of bounds"));
+ }
+
+ if (type == &mp_type_bytes) {
+ return offset;
+ }
+
+ size_t index_val = 0;
+ const byte *s = self_data;
+ for (size_t i = 0; i < offset; i++, s++) {
+ if (!UTF8_IS_CONT(*s)) {
+ ++index_val;
+ }
+ }
+ return index_val;
+}
+
+// Convert an index into a pointer to its lead byte. Out of bounds indexing will raise IndexError or
+// be capped to the first/last character of the string, depending on is_slice.
+const byte *str_index_to_ptr(const mp_obj_type_t *type, const byte *self_data, size_t self_len,
+ mp_obj_t index, bool is_slice) {
+ // All str functions also handle bytes objects, and they call str_index_to_ptr(),
+ // so it must handle bytes.
+ if (type == &mp_type_bytes) {
+ // Taken from objstr.c:str_index_to_ptr()
+ size_t index_val = mp_get_index(type, self_len, index, is_slice);
+ return self_data + index_val;
+ }
+
+ mp_int_t i;
+ // Copied from mp_get_index; I don't want bounds checking, just give me
+ // the integer as-is. (I can't bounds-check without scanning the whole
+ // string; an out-of-bounds index will be caught in the loops below.)
+ if (mp_obj_is_small_int(index)) {
+ i = MP_OBJ_SMALL_INT_VALUE(index);
+ } else if (!mp_obj_get_int_maybe(index, &i)) {
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("string indices must be integers, not %q"), mp_obj_get_type_qstr(index));
+ }
+ const byte *s, *top = self_data + self_len;
+ if (i < 0) {
+ // Negative indexing is performed by counting from the end of the string.
+ for (s = top - 1; i; --s) {
+ if (s < self_data) {
+ if (is_slice) {
+ return self_data;
+ }
+ mp_raise_IndexError_varg(MP_ERROR_TEXT("%q index out of range"), MP_QSTR_str);
+ }
+ if (!UTF8_IS_CONT(*s)) {
+ ++i;
+ }
+ }
+ ++s;
+ } else {
+ // Positive indexing, correspondingly, counts from the start of the string.
+ // It's assumed that negative indexing will generally be used with small
+ // absolute values (eg str[-1], not str[-1000000]), which means it'll be
+ // more efficient this way.
+ s = self_data;
+ while (1) {
+ // First check out-of-bounds
+ if (s >= top) {
+ if (is_slice) {
+ return top;
+ }
+ mp_raise_IndexError_varg(MP_ERROR_TEXT("%q index out of range"), MP_QSTR_str);
+ }
+ // Then check completion
+ if (i-- == 0) {
+ break;
+ }
+ // Then skip UTF-8 char
+ ++s;
+ while (UTF8_IS_CONT(*s)) {
+ ++s;
+ }
+ }
+ }
+ return s;
+}
+
+STATIC mp_obj_t str_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ const mp_obj_type_t *type = mp_obj_get_type(self_in);
+ assert(type == &mp_type_str);
+ GET_STR_DATA_LEN(self_in, self_data, self_len);
+ if (value == MP_OBJ_SENTINEL) {
+ // load
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ mp_obj_t ostart, ostop, ostep;
+ mp_obj_slice_t *slice = MP_OBJ_TO_PTR(index);
+ ostart = slice->start;
+ ostop = slice->stop;
+ ostep = slice->step;
+
+ if (ostep != mp_const_none && ostep != MP_OBJ_NEW_SMALL_INT(1)) {
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
+ }
+
+ const byte *pstart, *pstop;
+ if (ostart != mp_const_none) {
+ pstart = str_index_to_ptr(type, self_data, self_len, ostart, true);
+ } else {
+ pstart = self_data;
+ }
+ if (ostop != mp_const_none) {
+ // pstop will point just after the stop character. This depends on
+ // the \0 at the end of the string.
+ pstop = str_index_to_ptr(type, self_data, self_len, ostop, true);
+ } else {
+ pstop = self_data + self_len;
+ }
+ if (pstop < pstart) {
+ return MP_OBJ_NEW_QSTR(MP_QSTR_);
+ }
+ return mp_obj_new_str_of_type(type, (const byte *)pstart, pstop - pstart);
+ }
+ #endif
+ const byte *s = str_index_to_ptr(type, self_data, self_len, index, false);
+ int len = 1;
+ if (UTF8_IS_NONASCII(*s)) {
+ // Count the number of 1 bits (after the first)
+ for (char mask = 0x40; *s & mask; mask >>= 1) {
+ ++len;
+ }
+ }
+ return mp_obj_new_str_via_qstr((const char *)s, len); // This will create a one-character string
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+STATIC const mp_rom_map_elem_t struni_locals_dict_table[] = {
+ #if MICROPY_CPYTHON_COMPAT
+ { MP_ROM_QSTR(MP_QSTR_encode), MP_ROM_PTR(&str_encode_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_find), MP_ROM_PTR(&str_find_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rfind), MP_ROM_PTR(&str_rfind_obj) },
+ { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&str_index_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rindex), MP_ROM_PTR(&str_rindex_obj) },
+ { MP_ROM_QSTR(MP_QSTR_join), MP_ROM_PTR(&str_join_obj) },
+ { MP_ROM_QSTR(MP_QSTR_split), MP_ROM_PTR(&str_split_obj) },
+ #if MICROPY_PY_BUILTINS_STR_SPLITLINES
+ { MP_ROM_QSTR(MP_QSTR_splitlines), MP_ROM_PTR(&str_splitlines_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_rsplit), MP_ROM_PTR(&str_rsplit_obj) },
+ { MP_ROM_QSTR(MP_QSTR_startswith), MP_ROM_PTR(&str_startswith_obj) },
+ { MP_ROM_QSTR(MP_QSTR_endswith), MP_ROM_PTR(&str_endswith_obj) },
+ { MP_ROM_QSTR(MP_QSTR_strip), MP_ROM_PTR(&str_strip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_lstrip), MP_ROM_PTR(&str_lstrip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rstrip), MP_ROM_PTR(&str_rstrip_obj) },
+ { MP_ROM_QSTR(MP_QSTR_format), MP_ROM_PTR(&str_format_obj) },
+ { MP_ROM_QSTR(MP_QSTR_replace), MP_ROM_PTR(&str_replace_obj) },
+ #if MICROPY_PY_BUILTINS_STR_COUNT
+ { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&str_count_obj) },
+ #endif
+ #if MICROPY_PY_BUILTINS_STR_PARTITION
+ { MP_ROM_QSTR(MP_QSTR_partition), MP_ROM_PTR(&str_partition_obj) },
+ { MP_ROM_QSTR(MP_QSTR_rpartition), MP_ROM_PTR(&str_rpartition_obj) },
+ #endif
+ #if MICROPY_PY_BUILTINS_STR_CENTER
+ { MP_ROM_QSTR(MP_QSTR_center), MP_ROM_PTR(&str_center_obj) },
+ #endif
+ { MP_ROM_QSTR(MP_QSTR_lower), MP_ROM_PTR(&str_lower_obj) },
+ { MP_ROM_QSTR(MP_QSTR_upper), MP_ROM_PTR(&str_upper_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isspace), MP_ROM_PTR(&str_isspace_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isalpha), MP_ROM_PTR(&str_isalpha_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isdigit), MP_ROM_PTR(&str_isdigit_obj) },
+ { MP_ROM_QSTR(MP_QSTR_isupper), MP_ROM_PTR(&str_isupper_obj) },
+ { MP_ROM_QSTR(MP_QSTR_islower), MP_ROM_PTR(&str_islower_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(struni_locals_dict, struni_locals_dict_table);
+
+const mp_obj_type_t mp_type_str = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_str,
+ .print = uni_print,
+ .make_new = mp_obj_str_make_new,
+ .locals_dict = (mp_obj_dict_t *)&struni_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = uni_unary_op,
+ .binary_op = mp_obj_str_binary_op,
+ .subscr = str_subscr,
+ .getiter = mp_obj_new_str_iterator,
+ .buffer_p = { .get_buffer = mp_obj_str_get_buffer },
+ ),
+};
+
+/******************************************************************************/
+/* str iterator */
+
+typedef struct _mp_obj_str_it_t {
+ mp_obj_base_t base;
+ mp_fun_1_t iternext;
+ mp_obj_t str;
+ size_t cur;
+} mp_obj_str_it_t;
+
+STATIC mp_obj_t str_it_iternext(mp_obj_t self_in) {
+ mp_obj_str_it_t *self = MP_OBJ_TO_PTR(self_in);
+ GET_STR_DATA_LEN(self->str, str, len);
+ if (self->cur < len) {
+ const byte *cur = str + self->cur;
+ const byte *end = utf8_next_char(str + self->cur);
+ mp_obj_t o_out = mp_obj_new_str_via_qstr((const char *)cur, end - cur);
+ self->cur += end - cur;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+STATIC mp_obj_t mp_obj_new_str_iterator(mp_obj_t str, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_str_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_str_it_t *o = (mp_obj_str_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = str_it_iternext;
+ o->str = str;
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_PY_BUILTINS_STR_UNICODE
diff --git a/circuitpython/py/objtraceback.c b/circuitpython/py/objtraceback.c
new file mode 100644
index 0000000..989521a
--- /dev/null
+++ b/circuitpython/py/objtraceback.c
@@ -0,0 +1,42 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2021 microDev
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/runtime.h"
+#include "py/objtraceback.h"
+
+const mp_obj_traceback_t mp_const_empty_traceback_obj = {{&mp_type_traceback}, 0, 0, NULL};
+
+STATIC void mp_obj_traceback_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_traceback_t *o = MP_OBJ_TO_PTR(o_in);
+ mp_printf(print, "<%q object at %p>", MP_QSTR_traceback, o);
+}
+
+const mp_obj_type_t mp_type_traceback = {
+ { &mp_type_type },
+ .name = MP_QSTR_traceback,
+ .print = mp_obj_traceback_print,
+};
diff --git a/circuitpython/py/objtraceback.h b/circuitpython/py/objtraceback.h
new file mode 100644
index 0000000..992fe89
--- /dev/null
+++ b/circuitpython/py/objtraceback.h
@@ -0,0 +1,39 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2021 microDev
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MICROPY_INCLUDED_PY_OBJTRACEBACK_H
+#define MICROPY_INCLUDED_PY_OBJTRACEBACK_H
+
+#include "py/obj.h"
+
+typedef struct _mp_obj_traceback_t {
+ mp_obj_base_t base;
+ size_t alloc : (8 * sizeof(size_t) / 2);
+ size_t len : (8 * sizeof(size_t) / 2);
+ size_t *data;
+} mp_obj_traceback_t;
+
+#endif // MICROPY_INCLUDED_PY_OBJTRACEBACK_H
diff --git a/circuitpython/py/objtuple.c b/circuitpython/py/objtuple.c
new file mode 100644
index 0000000..90b0773
--- /dev/null
+++ b/circuitpython/py/objtuple.c
@@ -0,0 +1,309 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2017 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <assert.h>
+
+#include "py/objtuple.h"
+#include "py/runtime.h"
+#include "py/objtype.h"
+
+#include "supervisor/shared/translate.h"
+
+
+/******************************************************************************/
+/* tuple */
+
+void mp_obj_tuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ mp_obj_tuple_t *o = MP_OBJ_TO_PTR(o_in);
+ const char *item_separator = ", ";
+ if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+ mp_print_str(print, "[");
+ #if MICROPY_PY_UJSON_SEPARATORS
+ item_separator = MP_PRINT_GET_EXT(print)->item_separator;
+ #endif
+ } else {
+ mp_print_str(print, "(");
+ kind = PRINT_REPR;
+ }
+ for (size_t i = 0; i < o->len; i++) {
+ if (i > 0) {
+ mp_print_str(print, item_separator);
+ }
+ mp_obj_print_helper(print, o->items[i], kind);
+ }
+ if (MICROPY_PY_UJSON && kind == PRINT_JSON) {
+ mp_print_str(print, "]");
+ } else {
+ if (o->len == 1) {
+ mp_print_str(print, ",");
+ }
+ mp_print_str(print, ")");
+ }
+}
+
+STATIC mp_obj_t mp_obj_tuple_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+
+ mp_arg_check_num(n_args, n_kw, 0, 1, false);
+
+ switch (n_args) {
+ case 0:
+ // return a empty tuple
+ return mp_const_empty_tuple;
+
+ case 1:
+ default: {
+ // 1 argument, an iterable from which we make a new tuple
+ if (mp_obj_is_type(args[0], &mp_type_tuple)) {
+ return args[0];
+ }
+
+ // TODO optimise for cases where we know the length of the iterator
+
+ size_t alloc = 4;
+ size_t len = 0;
+ mp_obj_t *items = m_new(mp_obj_t, alloc);
+
+ mp_obj_t iterable = mp_getiter(args[0], NULL);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ if (len >= alloc) {
+ items = m_renew(mp_obj_t, items, alloc, alloc * 2);
+ alloc *= 2;
+ }
+ items[len++] = item;
+ }
+
+ mp_obj_t tuple = mp_obj_new_tuple(len, items);
+ m_del(mp_obj_t, items, alloc);
+
+ return tuple;
+ }
+ }
+}
+
+// Don't pass MP_BINARY_OP_NOT_EQUAL here
+STATIC mp_obj_t tuple_cmp_helper(mp_uint_t op, mp_obj_t self_in, mp_obj_t another_in) {
+ mp_check_self(mp_obj_is_tuple_compatible(self_in));
+ const mp_obj_type_t *another_type = mp_obj_get_type(another_in);
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ if (mp_type_get_getiter_slot(another_type) != mp_obj_tuple_getiter) {
+ // Slow path for user subclasses
+ another_in = mp_obj_cast_to_native_base(another_in, MP_OBJ_FROM_PTR(&mp_type_tuple));
+ if (another_in == MP_OBJ_NULL) {
+ return MP_OBJ_NULL;
+ }
+ }
+ mp_obj_tuple_t *another = MP_OBJ_TO_PTR(another_in);
+
+ return mp_obj_new_bool(mp_seq_cmp_objs(op, self->items, self->len, another->items, another->len));
+}
+
+mp_obj_t mp_obj_tuple_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ switch (op) {
+ case MP_UNARY_OP_BOOL:
+ return mp_obj_new_bool(self->len != 0);
+ case MP_UNARY_OP_HASH: {
+ // start hash with pointer to empty tuple, to make it fairly unique
+ mp_int_t hash = (mp_int_t)mp_const_empty_tuple;
+ for (size_t i = 0; i < self->len; i++) {
+ hash += MP_OBJ_SMALL_INT_VALUE(mp_unary_op(MP_UNARY_OP_HASH, self->items[i]));
+ }
+ return MP_OBJ_NEW_SMALL_INT(hash);
+ }
+ case MP_UNARY_OP_LEN:
+ return MP_OBJ_NEW_SMALL_INT(self->len);
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+mp_obj_t mp_obj_tuple_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
+ mp_obj_tuple_t *o = MP_OBJ_TO_PTR(lhs);
+ switch (op) {
+ case MP_BINARY_OP_ADD:
+ case MP_BINARY_OP_INPLACE_ADD: {
+ if (!mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(rhs)), MP_OBJ_FROM_PTR(&mp_type_tuple))) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ mp_obj_tuple_t *p = MP_OBJ_TO_PTR(rhs);
+ mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(o->len + p->len, NULL));
+ mp_seq_cat(s->items, o->items, o->len, p->items, p->len, mp_obj_t);
+ return MP_OBJ_FROM_PTR(s);
+ }
+ case MP_BINARY_OP_MULTIPLY:
+ case MP_BINARY_OP_INPLACE_MULTIPLY: {
+ mp_int_t n;
+ if (!mp_obj_get_int_maybe(rhs, &n)) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ if (n <= 0) {
+ return mp_const_empty_tuple;
+ }
+ size_t new_len = mp_seq_multiply_len(o->len, n);
+ mp_obj_tuple_t *s = MP_OBJ_TO_PTR(mp_obj_new_tuple(new_len, NULL));
+ mp_seq_multiply(o->items, sizeof(*o->items), o->len, n, s->items);
+ return MP_OBJ_FROM_PTR(s);
+ }
+ case MP_BINARY_OP_EQUAL:
+ case MP_BINARY_OP_LESS:
+ case MP_BINARY_OP_LESS_EQUAL:
+ case MP_BINARY_OP_MORE:
+ case MP_BINARY_OP_MORE_EQUAL:
+ return tuple_cmp_helper(op, lhs, rhs);
+
+ default:
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+mp_obj_t mp_obj_tuple_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ if (value == MP_OBJ_SENTINEL) {
+ // load
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ // when called with a native type (eg namedtuple) using mp_obj_tuple_subscr, get the native self
+ if (mp_type_get_subscr_slot(self->base.type) != &mp_obj_tuple_subscr) {
+ self = MP_OBJ_TO_PTR(mp_obj_cast_to_native_base(self_in, MP_OBJ_FROM_PTR(&mp_type_tuple)));
+ }
+
+ #if MICROPY_PY_BUILTINS_SLICE
+ if (mp_obj_is_type(index, &mp_type_slice)) {
+ mp_bound_slice_t slice;
+ if (!mp_seq_get_fast_slice_indexes(self->len, index, &slice)) {
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("only slices with step=1 (aka None) are supported"));
+ }
+ mp_obj_tuple_t *res = MP_OBJ_TO_PTR(mp_obj_new_tuple(slice.stop - slice.start, NULL));
+ mp_seq_copy(res->items, self->items + slice.start, res->len, mp_obj_t);
+ return MP_OBJ_FROM_PTR(res);
+ }
+ #endif
+ size_t index_value = mp_get_index(self->base.type, self->len, index, false);
+ return self->items[index_value];
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+STATIC mp_obj_t tuple_count(mp_obj_t self_in, mp_obj_t value) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_tuple));
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ return mp_seq_count_obj(self->items, self->len, value);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_2(tuple_count_obj, tuple_count);
+
+STATIC mp_obj_t tuple_index(size_t n_args, const mp_obj_t *args) {
+ mp_check_self(mp_obj_is_type(args[0], &mp_type_tuple));
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(args[0]);
+ return mp_seq_index_obj(self->items, self->len, n_args, args);
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(tuple_index_obj, 2, 4, tuple_index);
+
+STATIC const mp_rom_map_elem_t tuple_locals_dict_table[] = {
+ { MP_ROM_QSTR(MP_QSTR_count), MP_ROM_PTR(&tuple_count_obj) },
+ { MP_ROM_QSTR(MP_QSTR_index), MP_ROM_PTR(&tuple_index_obj) },
+};
+
+STATIC MP_DEFINE_CONST_DICT(tuple_locals_dict, tuple_locals_dict_table);
+
+const mp_obj_type_t mp_type_tuple = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_tuple,
+ .print = mp_obj_tuple_print,
+ .make_new = mp_obj_tuple_make_new,
+ .locals_dict = (mp_obj_dict_t *)&tuple_locals_dict,
+ MP_TYPE_EXTENDED_FIELDS(
+ .unary_op = mp_obj_tuple_unary_op,
+ .binary_op = mp_obj_tuple_binary_op,
+ .subscr = mp_obj_tuple_subscr,
+ .getiter = mp_obj_tuple_getiter,
+ ),
+};
+
+// the zero-length tuple
+const mp_obj_tuple_t mp_const_empty_tuple_obj = {{&mp_type_tuple}, 0};
+
+mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items) {
+ if (n == 0) {
+ return mp_const_empty_tuple;
+ }
+ mp_obj_tuple_t *o = m_new_obj_var(mp_obj_tuple_t, mp_obj_t, n);
+ o->base.type = &mp_type_tuple;
+ o->len = n;
+ if (items) {
+ for (size_t i = 0; i < n; i++) {
+ o->items[i] = items[i];
+ }
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+void mp_obj_tuple_get(mp_obj_t self_in, size_t *len, mp_obj_t **items) {
+ assert(mp_obj_is_tuple_compatible(self_in));
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ *len = self->len;
+ *items = &self->items[0];
+}
+
+void mp_obj_tuple_del(mp_obj_t self_in) {
+ assert(mp_obj_is_type(self_in, &mp_type_tuple));
+ mp_obj_tuple_t *self = MP_OBJ_TO_PTR(self_in);
+ m_del_var(mp_obj_tuple_t, mp_obj_t, self->len, self);
+}
+
+/******************************************************************************/
+/* tuple iterator */
+
+typedef struct _mp_obj_tuple_it_t {
+ mp_obj_base_t base;
+ mp_fun_1_t iternext;
+ mp_obj_tuple_t *tuple;
+ size_t cur;
+} mp_obj_tuple_it_t;
+
+STATIC mp_obj_t tuple_it_iternext(mp_obj_t self_in) {
+ mp_obj_tuple_it_t *self = MP_OBJ_TO_PTR(self_in);
+ if (self->cur < self->tuple->len) {
+ mp_obj_t o_out = self->tuple->items[self->cur];
+ self->cur += 1;
+ return o_out;
+ } else {
+ return MP_OBJ_STOP_ITERATION;
+ }
+}
+
+mp_obj_t mp_obj_tuple_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
+ assert(sizeof(mp_obj_tuple_it_t) <= sizeof(mp_obj_iter_buf_t));
+ mp_obj_tuple_it_t *o = (mp_obj_tuple_it_t *)iter_buf;
+ o->base.type = &mp_type_polymorph_iter;
+ o->iternext = tuple_it_iternext;
+ o->tuple = MP_OBJ_TO_PTR(o_in);
+ o->cur = 0;
+ return MP_OBJ_FROM_PTR(o);
+}
diff --git a/circuitpython/py/objtuple.h b/circuitpython/py/objtuple.h
new file mode 100644
index 0000000..7bfb447
--- /dev/null
+++ b/circuitpython/py/objtuple.h
@@ -0,0 +1,66 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJTUPLE_H
+#define MICROPY_INCLUDED_PY_OBJTUPLE_H
+
+#include "py/obj.h"
+
+typedef struct _mp_obj_tuple_t {
+ mp_obj_base_t base;
+ size_t len;
+ mp_obj_t items[];
+} mp_obj_tuple_t;
+
+typedef struct _mp_rom_obj_tuple_t {
+ mp_obj_base_t base;
+ size_t len;
+ mp_rom_obj_t items[];
+} mp_rom_obj_tuple_t;
+
+extern const mp_obj_type_t mp_type_tuple;
+
+void mp_obj_tuple_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind);
+mp_obj_t mp_obj_tuple_unary_op(mp_unary_op_t op, mp_obj_t self_in);
+mp_obj_t mp_obj_tuple_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs);
+mp_obj_t mp_obj_tuple_subscr(mp_obj_t base, mp_obj_t index, mp_obj_t value);
+mp_obj_t mp_obj_tuple_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf);
+
+extern const mp_obj_type_t mp_type_attrtuple;
+
+#define MP_DEFINE_ATTRTUPLE(tuple_obj_name, fields, nitems, ...) \
+ const mp_rom_obj_tuple_t tuple_obj_name = { \
+ .base = {&mp_type_attrtuple}, \
+ .len = nitems, \
+ .items = { __VA_ARGS__, MP_ROM_PTR((void *)fields) } \
+ }
+
+#if MICROPY_PY_COLLECTIONS
+void mp_obj_attrtuple_print_helper(const mp_print_t *print, const qstr *fields, mp_obj_tuple_t *o);
+#endif
+
+mp_obj_t mp_obj_new_attrtuple(const qstr *fields, size_t n, const mp_obj_t *items);
+
+#endif // MICROPY_INCLUDED_PY_OBJTUPLE_H
diff --git a/circuitpython/py/objtype.c b/circuitpython/py/objtype.c
new file mode 100644
index 0000000..62ea1ed
--- /dev/null
+++ b/circuitpython/py/objtype.c
@@ -0,0 +1,1541 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2018 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2018 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/gc_long_lived.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+
+#include "supervisor/shared/stack.h"
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_PRINT (0)
+#define DEBUG_printf(...) (void)0
+#endif
+
+#define ENABLE_SPECIAL_ACCESSORS \
+ (MICROPY_PY_DESCRIPTORS || MICROPY_PY_DELATTR_SETATTR || MICROPY_PY_BUILTINS_PROPERTY)
+
+STATIC mp_obj_t static_class_method_make_new(const mp_obj_type_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+/******************************************************************************/
+// instance object
+
+// Count the native (non-Python) base types reachable from `type`,
+// recording the last native base seen through *last_native_base.
+// Searches depth-first through tuple parents when multiple inheritance
+// is enabled. Callers expect the result to be 0 or 1 for valid classes
+// (asserted at the call sites).
+STATIC int instance_count_native_bases(const mp_obj_type_t *type, const mp_obj_type_t **last_native_base) {
+ int count = 0;
+ for (;;) {
+ if (type == &mp_type_object) {
+ // Not a "real" type, end search here.
+ return count;
+ }
+ if (mp_obj_is_native_type(type)) {
+ // Native types don't have parents (at least not from our perspective) so end.
+ *last_native_base = type;
+ return count + 1;
+ }
+ const void *parent = mp_type_get_parent_slot(type);
+ if (parent == NULL) {
+ // No parents so end search here.
+ return count;
+ #if MICROPY_MULTIPLE_INHERITANCE
+ } else if (((mp_obj_base_t *)parent)->type == &mp_type_tuple) {
+ // Multiple parents, search through them all recursively.
+ const mp_obj_tuple_t *parent_tuple = parent;
+ const mp_obj_t *item = parent_tuple->items;
+ const mp_obj_t *top = item + parent_tuple->len;
+ for (; item < top; ++item) {
+ assert(mp_obj_is_type(*item, &mp_type_type));
+ const mp_obj_type_t *bt = (const mp_obj_type_t *)MP_OBJ_TO_PTR(*item);
+ count += instance_count_native_bases(bt, last_native_base);
+ }
+ return count;
+ #endif
+ } else {
+ // A single parent, use iteration to continue the search.
+ type = parent;
+ }
+ }
+}
+
+// This wrapper function is allows a subclass of a native type to call the
+// __init__() method (corresponding to type->make_new) of the native type.
+// Bridge __init__() of a Python subclass to the native base type's
+// make_new constructor. pos_args[0] is the instance (self); remaining
+// positional and keyword args are flattened into one array and passed
+// to native_base->make_new, whose result is stored in self->subobj[0].
+STATIC mp_obj_t native_base_init_wrapper(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(pos_args[0]);
+ const mp_obj_type_t *native_base = NULL;
+ instance_count_native_bases(self->base.type, &native_base);
+
+ size_t n_kw = kw_args ? kw_args->used : 0;
+
+ // consume the type object
+ pos_args++;
+ n_args--;
+
+ mp_obj_t *args2 = m_new(mp_obj_t, n_args + 2 * n_kw);
+ // copy in args
+ memcpy(args2, pos_args, n_args * sizeof(mp_obj_t));
+ // copy in kwargs
+ // NOTE(review): kw_args->table is evaluated even when kw_args may be
+ // NULL (n_kw is then 0, but the member access itself is still UB) —
+ // presumably the KW calling convention always passes a map; confirm.
+ memcpy(args2 + n_args, kw_args->table, 2 * n_kw * sizeof(mp_obj_t));
+ self->subobj[0] = native_base->make_new(native_base, n_args, n_kw, args2);
+ m_del(mp_obj_t, args2, n_args + 2 * n_kw);
+
+ return mp_const_none;
+}
+
+STATIC MP_DEFINE_CONST_FUN_OBJ_KW(native_base_init_wrapper_obj, 1, native_base_init_wrapper);
+
+#if !MICROPY_CPYTHON_COMPAT
+STATIC
+#endif
+// Allocate a blank instance of `class`, with extra trailing slots for any
+// native bases (count reported through *native_base). The members map
+// starts empty; subobj[0] is seeded with the init-wrapper sentinel so an
+// un-__init__'d native base can be detected later.
+mp_obj_instance_t *mp_obj_new_instance(const mp_obj_type_t *class, const mp_obj_type_t **native_base) {
+ size_t num_native_bases = instance_count_native_bases(class, native_base);
+ assert(num_native_bases < 2);
+ mp_obj_instance_t *o = m_new_obj_var(mp_obj_instance_t, mp_obj_t, num_native_bases);
+ o->base.type = class;
+ mp_map_init(&o->members, 0);
+ // Initialise the native base-class slot (should be 1 at most) with a valid
+ // object. It doesn't matter which object, so long as it can be uniquely
+ // distinguished from a native class that is initialised.
+ if (num_native_bases != 0) {
+ o->subobj[0] = MP_OBJ_FROM_PTR(&native_base_init_wrapper_obj);
+ }
+ return o;
+}
+
+// When instances are first created they have the base_init wrapper as their native parent's
+// instance because make_new combines __new__ and __init__. This object is invalid for the native
+// code so it must call this method to ensure that the given object has been __init__'d and is
+// valid.
+// Raises NotImplementedError if `native_object` is still the sentinel
+// (i.e. super().__init__() has not run yet); otherwise does nothing.
+void mp_obj_assert_native_inited(mp_obj_t native_object) {
+ if (native_object == MP_OBJ_FROM_PTR(&native_base_init_wrapper_obj)) {
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("Call super().__init__() before accessing native object."));
+ }
+}
+
+// TODO
+// This implements depth-first left-to-right MRO, which is not compliant with Python3 MRO
+// http://python-history.blogspot.com/2010/06/method-resolution-order.html
+// https://www.python.org/download/releases/2.3/mro/
+//
+// will keep lookup->dest[0]'s value (should be MP_OBJ_NULL on invocation) if attribute
+// is not found
+// will set lookup->dest[0] to MP_OBJ_SENTINEL if special method was found in a native
+// type base via slot id (as specified by lookup->meth_offset). As there can be only one
+// native base, it's known that it applies to instance->subobj[0]. In most cases, we also
+// don't need to know which type it was - because instance->subobj[0] is of that type.
+// The only exception is when object is not yet constructed, then we need to know base
+// native type to construct its instance->subobj[0] from. But this case is handled via
+// instance_count_native_bases(), which returns a native base which it saw.
+struct class_lookup_data {
+ mp_obj_instance_t *obj; // instance being accessed (or the type itself when is_type)
+ qstr attr; // attribute/method name to look up
+ size_t meth_offset; // byte offset of matching slot in mp_obj_type_t; 0 = no slot check
+ mp_obj_t *dest; // out: dest[0] value/method or MP_OBJ_SENTINEL; dest[1] self for bound methods
+ bool is_type; // true when the lookup target is a class rather than an instance
+};
+
+// Core attribute/method resolution for class instances: search `type` and
+// its bases (depth-first, left-to-right — see the MRO caveat above) for
+// lookup->attr, filling lookup->dest as described in the comment block
+// preceding struct class_lookup_data.
+STATIC void mp_obj_class_lookup(struct class_lookup_data *lookup, const mp_obj_type_t *type) {
+ assert(lookup->dest[0] == MP_OBJ_NULL);
+ assert(lookup->dest[1] == MP_OBJ_NULL);
+ for (;;) {
+ DEBUG_printf("mp_obj_class_lookup: Looking up %s in %s\n", qstr_str(lookup->attr), qstr_str(type->name));
+ // Optimize special method lookup for native types
+ // This avoids extra method_name => slot lookup. On the other hand,
+ // this should not be applied to class types, as will result in extra
+ // lookup either.
+ if (lookup->meth_offset != 0 && mp_obj_is_native_type(type)) {
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wcast-align"
+ size_t sz = mp_type_size(type);
+ // Check the slot at meth_offset is inside this type struct and non-NULL.
+ if (lookup->meth_offset < sz && *(void **)((char *)type + lookup->meth_offset) != NULL) {
+ #pragma GCC diagnostic pop
+ DEBUG_printf("mp_obj_class_lookup: Matched special meth slot (off=%d) for %s\n",
+ lookup->meth_offset, qstr_str(lookup->attr));
+ lookup->dest[0] = MP_OBJ_SENTINEL;
+ return;
+ }
+ }
+
+ if (type->locals_dict != NULL) {
+ // search locals_dict (the set of methods/attributes)
+ assert(mp_obj_is_dict_or_ordereddict(MP_OBJ_FROM_PTR(type->locals_dict))); // MicroPython restriction, for now
+ mp_map_t *locals_map = &type->locals_dict->map;
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(lookup->attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ if (lookup->is_type) {
+ // If we look up a class method, we need to return original type for which we
+ // do a lookup, not a (base) type in which we found the class method.
+ const mp_obj_type_t *org_type = (const mp_obj_type_t *)lookup->obj;
+ mp_convert_member_lookup(MP_OBJ_NULL, org_type, elem->value, lookup->dest);
+ } else if (mp_obj_is_type(elem->value, &mp_type_property)) {
+ // Return the property object itself; the caller handles get/set delegation.
+ lookup->dest[0] = elem->value;
+ return;
+ } else {
+ mp_obj_instance_t *obj = lookup->obj;
+ mp_convert_member_lookup(MP_OBJ_FROM_PTR(obj), type, elem->value, lookup->dest);
+ }
+ #if DEBUG_PRINT
+ DEBUG_printf("mp_obj_class_lookup: Returning: ");
+ mp_obj_print_helper(MICROPY_DEBUG_PRINTER, lookup->dest[0], PRINT_REPR);
+ if (lookup->dest[1] != MP_OBJ_NULL) {
+ // Don't try to repr() lookup->dest[1], as we can be called recursively
+ DEBUG_printf(" <%s @%p>", mp_obj_get_type_str(lookup->dest[1]), MP_OBJ_TO_PTR(lookup->dest[1]));
+ }
+ DEBUG_printf("\n");
+ #endif
+ return;
+ }
+ }
+
+ // Previous code block takes care about attributes defined in .locals_dict,
+ // but some attributes of native types may be handled using .load_attr method,
+ // so make sure we try to lookup those too.
+ if (lookup->obj != NULL && !lookup->is_type && mp_obj_is_native_type(type) && type != &mp_type_object /* object is not a real type */) {
+ mp_load_method_maybe(lookup->obj->subobj[0], lookup->attr, lookup->dest);
+ if (lookup->dest[0] != MP_OBJ_NULL) {
+ return;
+ }
+ }
+
+ // attribute not found, keep searching base classes
+
+ const void *parent = mp_type_get_parent_slot(type);
+ if (parent == NULL) {
+ DEBUG_printf("mp_obj_class_lookup: No more parents\n");
+ return;
+ }
+ #if MICROPY_MULTIPLE_INHERITANCE
+ if (((mp_obj_base_t *)parent)->type == &mp_type_tuple) {
+ const mp_obj_tuple_t *parent_tuple = parent;
+ const mp_obj_t *item = parent_tuple->items;
+ // Stop one short: the last base is handled by the loop continuation below.
+ const mp_obj_t *top = item + parent_tuple->len - 1;
+ for (; item < top; ++item) {
+ assert(mp_obj_is_type(*item, &mp_type_type));
+ mp_obj_type_t *bt = (mp_obj_type_t *)MP_OBJ_TO_PTR(*item);
+ if (bt == &mp_type_object) {
+ // Not a "real" type
+ continue;
+ }
+ mp_obj_class_lookup(lookup, bt);
+ if (lookup->dest[0] != MP_OBJ_NULL) {
+ return;
+ }
+ }
+
+ // search last base (simple tail recursion elimination)
+ assert(mp_obj_is_type(*item, &mp_type_type));
+ type = (mp_obj_type_t *)MP_OBJ_TO_PTR(*item);
+ continue;
+ }
+ #endif
+ type = parent;
+ if (type == &mp_type_object) {
+ // Not a "real" type
+ return;
+ }
+ }
+}
+
+// Print an instance: prefer __str__ (falling back to __repr__), then a
+// native base's print slot, else the default "<Type object at addr>" form.
+STATIC void instance_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ qstr meth = (kind == PRINT_STR) ? MP_QSTR___str__ : MP_QSTR___repr__;
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = meth,
+ .meth_offset = offsetof(mp_obj_type_t, print),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL && kind == PRINT_STR) {
+ // If there's no __str__, fall back to __repr__
+ lookup.attr = MP_QSTR___repr__;
+ lookup.meth_offset = 0;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ }
+
+ if (member[0] == MP_OBJ_SENTINEL) {
+ // Native base provides the printing; handle Exception subclasses specially
+ if (mp_obj_is_native_exception_instance(self->subobj[0])) {
+ if (kind != PRINT_STR) {
+ mp_print_str(print, qstr_str(self->base.type->name));
+ }
+ mp_obj_print_helper(print, self->subobj[0], kind | PRINT_EXC_SUBCLASS);
+ } else {
+ mp_obj_print_helper(print, self->subobj[0], kind);
+ }
+ return;
+ }
+
+ if (member[0] != MP_OBJ_NULL) {
+ // Call the Python __str__/__repr__ and print its result as a string.
+ mp_obj_t r = mp_call_function_1(member[0], self_in);
+ mp_obj_print_helper(print, r, PRINT_STR);
+ return;
+ }
+
+ // TODO: CPython prints fully-qualified type name
+ mp_printf(print, "<%q object at %p>", mp_obj_get_type_qstr(self_in), self);
+}
+
+// Construct an instance of a Python-defined class: run __new__ (or create
+// a blank instance when only a native constructor exists), then __init__,
+// and finally construct any native base that __init__ did not construct
+// itself via super().__init__().
+mp_obj_t mp_obj_instance_make_new(const mp_obj_type_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ assert(mp_obj_is_instance_type(self));
+
+ // look for __new__ function
+ mp_obj_t init_fn[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = NULL,
+ .attr = MP_QSTR___new__,
+ .meth_offset = offsetof(mp_obj_type_t, make_new),
+ .dest = init_fn,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self);
+
+ const mp_obj_type_t *native_base = NULL;
+ mp_obj_instance_t *o;
+ if (init_fn[0] == MP_OBJ_NULL || init_fn[0] == MP_OBJ_SENTINEL) {
+ // Either there is no __new__() method defined or there is a native
+ // constructor. In both cases create a blank instance.
+ o = mp_obj_new_instance(self, &native_base);
+
+ // Since type->make_new() implements both __new__() and __init__() in
+ // one go, of which the latter may be overridden by the Python subclass,
+ // we defer (see the end of this function) the call of the native
+ // constructor to give a chance for the Python __init__() method to call
+ // said native constructor.
+
+ } else {
+ // Call Python class __new__ function with all args to create an instance
+ mp_obj_t new_ret;
+ if (n_args == 0 && n_kw == 0) {
+ mp_obj_t args2[1] = {MP_OBJ_FROM_PTR(self)};
+ new_ret = mp_call_function_n_kw(init_fn[0], 1, 0, args2);
+ } else {
+ // TODO(tannewt): Could this be on the stack? It's deleted below.
+ mp_obj_t *args2 = m_new(mp_obj_t, 1 + n_args + 2 * n_kw);
+ args2[0] = MP_OBJ_FROM_PTR(self);
+ memcpy(args2 + 1, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+ new_ret = mp_call_function_n_kw(init_fn[0], n_args + 1, n_kw, args2);
+ m_del(mp_obj_t, args2, 1 + n_args + 2 * n_kw);
+ }
+
+ // https://docs.python.org/3.4/reference/datamodel.html#object.__new__
+ // "If __new__() does not return an instance of cls, then the new
+ // instance's __init__() method will not be invoked."
+ if (mp_obj_get_type(new_ret) != self) {
+ return new_ret;
+ }
+
+ // The instance returned by __new__() becomes the new object
+ o = MP_OBJ_TO_PTR(new_ret);
+ }
+
+ // now call Python class __init__ function with all args
+ // This method has a chance to call super().__init__() to construct a
+ // possible native base class.
+ init_fn[0] = init_fn[1] = MP_OBJ_NULL;
+ lookup.obj = o;
+ lookup.attr = MP_QSTR___init__;
+ lookup.meth_offset = 0;
+ mp_obj_class_lookup(&lookup, self);
+ if (init_fn[0] != MP_OBJ_NULL) {
+ mp_obj_t init_ret;
+ if (n_args == 0 && n_kw == 0) {
+ init_ret = mp_call_method_n_kw(0, 0, init_fn);
+ } else {
+ // TODO(tannewt): Could this be on the stack? It's deleted below.
+ mp_obj_t *args2 = m_new(mp_obj_t, 2 + n_args + 2 * n_kw);
+ args2[0] = init_fn[0];
+ args2[1] = init_fn[1];
+ // copy in kwargs
+ memcpy(args2 + 2, args, (n_args + 2 * n_kw) * sizeof(mp_obj_t));
+ init_ret = mp_call_method_n_kw(n_args, n_kw, args2);
+ m_del(mp_obj_t, args2, 2 + n_args + 2 * n_kw);
+ }
+ // __init__ must return None, as in CPython.
+ if (init_ret != mp_const_none) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("__init__() should return None"));
+ #else
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("__init__() should return None, not '%q'"),
+ mp_obj_get_type_qstr(init_ret));
+ #endif
+ }
+ }
+
+ // If the type had a native base that was not explicitly initialised
+ // (constructed) by the Python __init__() method then construct it now.
+ if (native_base != NULL && o->subobj[0] == MP_OBJ_FROM_PTR(&native_base_init_wrapper_obj)) {
+ o->subobj[0] = native_base->make_new(native_base, n_args, n_kw, args);
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Qstrs for special methods are guaranteed to have a small value, so we use byte
+// type to represent them.
+// Maps mp_unary_op_t values to the qstr of the corresponding dunder method.
+const byte mp_unary_op_method_name[MP_UNARY_OP_NUM_RUNTIME] = {
+ [MP_UNARY_OP_BOOL] = MP_QSTR___bool__,
+ [MP_UNARY_OP_LEN] = MP_QSTR___len__,
+ [MP_UNARY_OP_HASH] = MP_QSTR___hash__,
+ [MP_UNARY_OP_INT] = MP_QSTR___int__,
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_UNARY_OP_POSITIVE] = MP_QSTR___pos__,
+ [MP_UNARY_OP_NEGATIVE] = MP_QSTR___neg__,
+ [MP_UNARY_OP_INVERT] = MP_QSTR___invert__,
+ [MP_UNARY_OP_ABS] = MP_QSTR___abs__,
+ #endif
+ #if MICROPY_PY_SYS_GETSIZEOF
+ [MP_UNARY_OP_SIZEOF] = MP_QSTR___sizeof__,
+ #endif
+};
+
+// Apply a unary operator to an instance by dispatching to its dunder
+// method or, via MP_OBJ_SENTINEL, to the native base's unary_op slot.
+// Returns MP_OBJ_NULL when the operation is not supported.
+STATIC mp_obj_t instance_unary_op(mp_unary_op_t op, mp_obj_t self_in) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+
+ #if MICROPY_PY_SYS_GETSIZEOF
+ if (MP_UNLIKELY(op == MP_UNARY_OP_SIZEOF)) {
+ // TODO: This doesn't count inherited objects (self->subobj)
+ const mp_obj_type_t *native_base;
+ size_t num_native_bases = instance_count_native_bases(mp_obj_get_type(self_in), &native_base);
+
+ size_t sz = sizeof(*self) + sizeof(*self->subobj) * num_native_bases
+ + sizeof(*self->members.table) * self->members.alloc;
+ return MP_OBJ_NEW_SMALL_INT(sz);
+ }
+ #endif
+
+ qstr op_name = mp_unary_op_method_name[op];
+ /* Still try to lookup native slot
+ if (op_name == 0) {
+ return MP_OBJ_NULL;
+ }
+ */
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = op_name,
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].unary_op),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_SENTINEL) {
+ // Native base handles the op directly on the native sub-object.
+ return mp_unary_op(op, self->subobj[0]);
+ } else if (member[0] != MP_OBJ_NULL) {
+ mp_obj_t val = mp_call_function_1(member[0], self_in);
+
+ switch (op) {
+ case MP_UNARY_OP_HASH:
+ // __hash__ must return a small int
+ val = MP_OBJ_NEW_SMALL_INT(mp_obj_get_int_truncated(val));
+ break;
+ case MP_UNARY_OP_INT:
+ // Must return int
+ if (!mp_obj_is_int(val)) {
+ mp_raise_TypeError(NULL);
+ }
+ break;
+ default:
+ // No need to do anything
+ ;
+ }
+ return val;
+ } else {
+ if (op == MP_UNARY_OP_HASH) {
+ lookup.attr = MP_QSTR___eq__;
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL) {
+ // https://docs.python.org/3/reference/datamodel.html#object.__hash__
+ // "User-defined classes have __eq__() and __hash__() methods by default;
+ // with them, all objects compare unequal (except with themselves) and
+ // x.__hash__() returns an appropriate value such that x == y implies
+ // both that x is y and hash(x) == hash(y)."
+ return MP_OBJ_NEW_SMALL_INT((mp_uint_t)self_in);
+ }
+ // "A class that overrides __eq__() and does not define __hash__() will have its __hash__() implicitly set to None.
+ // When the __hash__() method of a class is None, instances of the class will raise an appropriate TypeError"
+ }
+
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+// Binary-op enum values not listed here will have the default value of 0 in the
+// table, corresponding to MP_QSTRnull, and are therefore unsupported (a lookup will
+// fail). They can be added at the expense of code size for the qstr.
+// Qstrs for special methods are guaranteed to have a small value, so we use byte
+// type to represent them.
+const byte mp_binary_op_method_name[MP_BINARY_OP_NUM_RUNTIME] = {
+ [MP_BINARY_OP_LESS] = MP_QSTR___lt__,
+ [MP_BINARY_OP_MORE] = MP_QSTR___gt__,
+ [MP_BINARY_OP_EQUAL] = MP_QSTR___eq__,
+ [MP_BINARY_OP_LESS_EQUAL] = MP_QSTR___le__,
+ [MP_BINARY_OP_MORE_EQUAL] = MP_QSTR___ge__,
+ [MP_BINARY_OP_NOT_EQUAL] = MP_QSTR___ne__,
+ [MP_BINARY_OP_CONTAINS] = MP_QSTR___contains__,
+
+ // If an inplace method is not found a normal method will be used as a fallback
+ [MP_BINARY_OP_INPLACE_ADD] = MP_QSTR___iadd__,
+ [MP_BINARY_OP_INPLACE_SUBTRACT] = MP_QSTR___isub__,
+ #if MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
+ [MP_BINARY_OP_INPLACE_MULTIPLY] = MP_QSTR___imul__,
+ [MP_BINARY_OP_INPLACE_MAT_MULTIPLY] = MP_QSTR___imatmul__,
+ [MP_BINARY_OP_INPLACE_FLOOR_DIVIDE] = MP_QSTR___ifloordiv__,
+ [MP_BINARY_OP_INPLACE_TRUE_DIVIDE] = MP_QSTR___itruediv__,
+ [MP_BINARY_OP_INPLACE_MODULO] = MP_QSTR___imod__,
+ [MP_BINARY_OP_INPLACE_POWER] = MP_QSTR___ipow__,
+ [MP_BINARY_OP_INPLACE_OR] = MP_QSTR___ior__,
+ [MP_BINARY_OP_INPLACE_XOR] = MP_QSTR___ixor__,
+ [MP_BINARY_OP_INPLACE_AND] = MP_QSTR___iand__,
+ [MP_BINARY_OP_INPLACE_LSHIFT] = MP_QSTR___ilshift__,
+ [MP_BINARY_OP_INPLACE_RSHIFT] = MP_QSTR___irshift__,
+ #endif
+
+ [MP_BINARY_OP_ADD] = MP_QSTR___add__,
+ [MP_BINARY_OP_SUBTRACT] = MP_QSTR___sub__,
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_BINARY_OP_MULTIPLY] = MP_QSTR___mul__,
+ [MP_BINARY_OP_MAT_MULTIPLY] = MP_QSTR___matmul__,
+ [MP_BINARY_OP_FLOOR_DIVIDE] = MP_QSTR___floordiv__,
+ [MP_BINARY_OP_TRUE_DIVIDE] = MP_QSTR___truediv__,
+ [MP_BINARY_OP_MODULO] = MP_QSTR___mod__,
+ [MP_BINARY_OP_DIVMOD] = MP_QSTR___divmod__,
+ [MP_BINARY_OP_POWER] = MP_QSTR___pow__,
+ [MP_BINARY_OP_OR] = MP_QSTR___or__,
+ [MP_BINARY_OP_XOR] = MP_QSTR___xor__,
+ [MP_BINARY_OP_AND] = MP_QSTR___and__,
+ [MP_BINARY_OP_LSHIFT] = MP_QSTR___lshift__,
+ [MP_BINARY_OP_RSHIFT] = MP_QSTR___rshift__,
+ #endif
+
+ #if MICROPY_PY_REVERSE_SPECIAL_METHODS
+ [MP_BINARY_OP_REVERSE_ADD] = MP_QSTR___radd__,
+ [MP_BINARY_OP_REVERSE_SUBTRACT] = MP_QSTR___rsub__,
+ #if MICROPY_PY_ALL_SPECIAL_METHODS
+ [MP_BINARY_OP_REVERSE_MULTIPLY] = MP_QSTR___rmul__,
+ [MP_BINARY_OP_REVERSE_MAT_MULTIPLY] = MP_QSTR___rmatmul__,
+ [MP_BINARY_OP_REVERSE_FLOOR_DIVIDE] = MP_QSTR___rfloordiv__,
+ [MP_BINARY_OP_REVERSE_TRUE_DIVIDE] = MP_QSTR___rtruediv__,
+ [MP_BINARY_OP_REVERSE_MODULO] = MP_QSTR___rmod__,
+ [MP_BINARY_OP_REVERSE_POWER] = MP_QSTR___rpow__,
+ [MP_BINARY_OP_REVERSE_OR] = MP_QSTR___ror__,
+ [MP_BINARY_OP_REVERSE_XOR] = MP_QSTR___rxor__,
+ [MP_BINARY_OP_REVERSE_AND] = MP_QSTR___rand__,
+ [MP_BINARY_OP_REVERSE_LSHIFT] = MP_QSTR___rlshift__,
+ [MP_BINARY_OP_REVERSE_RSHIFT] = MP_QSTR___rrshift__,
+ #endif
+ #endif
+};
+
+// Apply a binary operator with an instance on the left-hand side: dispatch
+// to the matching dunder method, fall back from in-place to normal ops,
+// or delegate to the native base's binary_op slot via MP_OBJ_SENTINEL.
+// Returns MP_OBJ_NULL when the operation is not supported.
+STATIC mp_obj_t instance_binary_op(mp_binary_op_t op, mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ // Note: For ducktyping, CPython does not look in the instance members or use
+ // __getattr__ or __getattribute__. It only looks in the class dictionary.
+ mp_obj_instance_t *lhs = MP_OBJ_TO_PTR(lhs_in);
+retry:;
+ qstr op_name = mp_binary_op_method_name[op];
+ /* Still try to lookup native slot
+ if (op_name == 0) {
+ return MP_OBJ_NULL;
+ }
+ */
+ mp_obj_t dest[3] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = lhs,
+ .attr = op_name,
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].binary_op),
+ .dest = dest,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, lhs->base.type);
+
+ mp_obj_t res;
+ if (dest[0] == MP_OBJ_SENTINEL) {
+ res = mp_binary_op(op, lhs->subobj[0], rhs_in);
+ } else if (dest[0] != MP_OBJ_NULL) {
+ // dest[0]/dest[1] are the bound method; dest[2] carries the rhs argument.
+ dest[2] = rhs_in;
+ res = mp_call_method_n_kw(1, 0, dest);
+ } else {
+ // If this was an inplace method, fallback to normal method
+ // https://docs.python.org/3/reference/datamodel.html#object.__iadd__ :
+ // "If a specific method is not defined, the augmented assignment
+ // falls back to the normal methods."
+ if (op >= MP_BINARY_OP_INPLACE_OR && op <= MP_BINARY_OP_INPLACE_POWER) {
+ op -= MP_BINARY_OP_INPLACE_OR - MP_BINARY_OP_OR;
+ goto retry;
+ }
+ return MP_OBJ_NULL; // op not supported
+ }
+
+ #if MICROPY_PY_BUILTINS_NOTIMPLEMENTED
+ // NotImplemented means "try other fallbacks (like calling __rop__
+ // instead of __op__) and if nothing works, raise TypeError". As
+ // MicroPython doesn't implement any fallbacks, signal to raise
+ // TypeError right away.
+ if (res == mp_const_notimplemented) {
+ return MP_OBJ_NULL; // op not supported
+ }
+ #endif
+
+ return res;
+}
+
+// Load attribute `attr` from an instance into dest[0] (and dest[1] for a
+// bound method's self). Search order: instance members map, __dict__
+// (CPython-compat), class hierarchy (with property/descriptor handling),
+// and finally a user-defined __getattr__. dest[0] is left MP_OBJ_NULL if
+// the attribute is not found.
+STATIC void mp_obj_instance_load_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ // logic: look in instance members then class locals
+ assert(mp_obj_is_instance_type(mp_obj_get_type(self_in)));
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+
+ // Note: This is fast-path'ed in the VM for the MP_BC_LOAD_ATTR operation.
+ mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ // object member, always treated as a value
+ dest[0] = elem->value;
+ return;
+ }
+ #if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___dict__) {
+ // Create a new dict with a copy of the instance's map items.
+ // This creates, unlike CPython, a read-only __dict__ that can't be modified.
+ mp_obj_dict_t dict;
+ dict.base.type = &mp_type_dict;
+ dict.map = self->members;
+ dest[0] = mp_obj_dict_copy(MP_OBJ_FROM_PTR(&dict));
+ mp_obj_dict_t *dest_dict = MP_OBJ_TO_PTR(dest[0]);
+ dest_dict->map.is_fixed = 1;
+ return;
+ }
+ #endif
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ mp_obj_t member = dest[0];
+ if (member != MP_OBJ_NULL) {
+ if (!(self->base.type->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) {
+ // Class doesn't have any special accessors to check so return straightaway
+ return;
+ }
+
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (mp_obj_is_type(member, &mp_type_property)) {
+ // object member is a property; delegate the load to the property
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below
+ // in a __get__ method of the property object, and then it would
+ // be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ size_t n_proxy;
+ const mp_obj_t *proxy = mp_obj_property_get(member, &n_proxy);
+ if (proxy[0] == mp_const_none) {
+ mp_raise_AttributeError(MP_ERROR_TEXT("unreadable attribute"))?;
+ } else {
+ dest[0] = mp_call_function_n_kw(proxy[0], 1, 0, &self_in);
+ }
+ return;
+ }
+ #endif
+
+ #if MICROPY_PY_DESCRIPTORS
+ // found a class attribute; if it has a __get__ method then call it with the
+ // class instance and class as arguments and return the result
+ // Note that this is functionally correct but very slow: each load_attr
+ // requires an extra mp_load_method_maybe to check for the __get__.
+ mp_obj_t attr_get_method[4];
+ mp_load_method_maybe(member, MP_QSTR___get__, attr_get_method);
+ if (attr_get_method[0] != MP_OBJ_NULL) {
+ attr_get_method[2] = self_in;
+ attr_get_method[3] = MP_OBJ_FROM_PTR(mp_obj_get_type(self_in));
+ dest[0] = mp_call_method_n_kw(2, 0, attr_get_method);
+ }
+ #endif
+ return;
+ }
+
+ // try __getattr__
+ if (attr != MP_QSTR___getattr__) {
+ #if MICROPY_PY_DELATTR_SETATTR
+ // If the requested attr is __setattr__/__delattr__ then don't delegate the lookup
+ // to __getattr__. If we followed CPython's behaviour then __setattr__/__delattr__
+ // would have already been found in the "object" base class.
+ if (attr == MP_QSTR___setattr__ || attr == MP_QSTR___delattr__) {
+ return;
+ }
+ #endif
+
+ mp_obj_t dest2[3];
+ mp_load_method_maybe(self_in, MP_QSTR___getattr__, dest2);
+ if (dest2[0] != MP_OBJ_NULL) {
+ // __getattr__ exists, call it and return its result
+ dest2[2] = MP_OBJ_NEW_QSTR(attr);
+ dest[0] = mp_call_method_n_kw(1, 0, dest2);
+ return;
+ }
+ }
+}
+
+// Store (or, when value == MP_OBJ_NULL, delete) attribute `attr` on an
+// instance. Delegates to property setters/deleters, descriptor
+// __set__/__delete__, or user __setattr__/__delattr__ when the class has
+// special accessors; otherwise updates the instance members map directly.
+// Returns true if the store/delete was handled, false on failure.
+STATIC bool mp_obj_instance_store_attr(mp_obj_t self_in, qstr attr, mp_obj_t value) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (!(self->base.type->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) {
+ // Class doesn't have any special accessors so skip their checks
+ goto skip_special_accessors;
+ }
+
+ #if MICROPY_PY_BUILTINS_PROPERTY || MICROPY_PY_DESCRIPTORS
+ // With property and/or descriptors enabled we need to do a lookup
+ // first in the class dict for the attribute to see if the store should
+ // be delegated.
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+
+ if (member[0] != MP_OBJ_NULL) {
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (mp_obj_is_type(member[0], &mp_type_property)) {
+ // attribute exists and is a property; delegate the store/delete
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below in
+ // a __set__/__delete__ method of the property object, and then it
+ // would be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ size_t n_proxy;
+ const mp_obj_t *proxy = mp_obj_property_get(member[0], &n_proxy);
+ mp_obj_t dest[2] = {self_in, value};
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ if (n_proxy < 3 || proxy[2] == mp_const_none) {
+ // TODO better error message?
+ return false;
+ } else {
+ mp_call_function_n_kw(proxy[2], 1, 0, dest);
+ return true;
+ }
+ } else {
+ // store attribute
+ if (n_proxy < 2 || proxy[1] == mp_const_none) {
+ // TODO better error message?
+ return false;
+ } else {
+ mp_call_function_n_kw(proxy[1], 2, 0, dest);
+ return true;
+ }
+ }
+ }
+ #endif
+
+ #if MICROPY_PY_DESCRIPTORS
+ // found a class attribute; if it has a __set__/__delete__ method then
+ // call it with the class instance (and value) as arguments
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ mp_obj_t attr_delete_method[3];
+ mp_load_method_maybe(member[0], MP_QSTR___delete__, attr_delete_method);
+ if (attr_delete_method[0] != MP_OBJ_NULL) {
+ attr_delete_method[2] = self_in;
+ mp_call_method_n_kw(1, 0, attr_delete_method);
+ return true;
+ }
+ } else {
+ // store attribute
+ mp_obj_t attr_set_method[4];
+ mp_load_method_maybe(member[0], MP_QSTR___set__, attr_set_method);
+ if (attr_set_method[0] != MP_OBJ_NULL) {
+ attr_set_method[2] = self_in;
+ attr_set_method[3] = value;
+ mp_call_method_n_kw(2, 0, attr_set_method);
+ return true;
+ }
+ }
+ #endif
+ }
+ #endif
+
+ #if MICROPY_PY_DELATTR_SETATTR
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ // try __delattr__ first
+ mp_obj_t attr_delattr_method[3];
+ mp_load_method_maybe(self_in, MP_QSTR___delattr__, attr_delattr_method);
+ if (attr_delattr_method[0] != MP_OBJ_NULL) {
+ // __delattr__ exists, so call it
+ attr_delattr_method[2] = MP_OBJ_NEW_QSTR(attr);
+ mp_call_method_n_kw(1, 0, attr_delattr_method);
+ return true;
+ }
+ } else {
+ // store attribute
+ // try __setattr__ first
+ mp_obj_t attr_setattr_method[4];
+ mp_load_method_maybe(self_in, MP_QSTR___setattr__, attr_setattr_method);
+ if (attr_setattr_method[0] != MP_OBJ_NULL) {
+ // __setattr__ exists, so call it
+ attr_setattr_method[2] = MP_OBJ_NEW_QSTR(attr);
+ attr_setattr_method[3] = value;
+ mp_call_method_n_kw(2, 0, attr_setattr_method);
+ return true;
+ }
+ }
+ #endif
+
+skip_special_accessors:
+
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ mp_map_elem_t *elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ return elem != NULL;
+ } else {
+ // store attribute
+ mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND)->value = value;
+ return true;
+ }
+}
+
+// attr protocol entry point for instances: dest[0] == MP_OBJ_NULL means a
+// load; otherwise dest[1] holds the value to store (MP_OBJ_NULL = delete),
+// and dest[0] is cleared to signal success.
+STATIC void mp_obj_instance_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] == MP_OBJ_NULL) {
+ mp_obj_instance_load_attr(self_in, attr, dest);
+ } else {
+ if (mp_obj_instance_store_attr(self_in, attr, dest[1])) {
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ }
+}
+
+// Subscript an instance (obj[index]): value selects the operation —
+// MP_OBJ_SENTINEL = load (__getitem__), MP_OBJ_NULL = delete
+// (__delitem__), anything else = store (__setitem__). Falls through to a
+// native base's subscr slot when the lookup returns MP_OBJ_SENTINEL.
+STATIC mp_obj_t instance_subscr(mp_obj_t self_in, mp_obj_t index, mp_obj_t value) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ // member[2]/member[3] pre-load the call arguments (index, value).
+ mp_obj_t member[4] = {MP_OBJ_NULL, MP_OBJ_NULL, index, value};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].subscr),
+ .dest = member,
+ .is_type = false,
+ };
+ if (value == MP_OBJ_NULL) {
+ // delete item
+ lookup.attr = MP_QSTR___delitem__;
+ } else if (value == MP_OBJ_SENTINEL) {
+ // load item
+ lookup.attr = MP_QSTR___getitem__;
+ } else {
+ // store item
+ lookup.attr = MP_QSTR___setitem__;
+ }
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_SENTINEL) {
+ const mp_obj_type_t *subobj_type = mp_obj_get_type(self->subobj[0]);
+ mp_obj_t ret = subobj_type->ext[0].subscr(self_in, index, value);
+ // May have called port specific C code. Make sure it didn't mess up the heap.
+ assert_heap_ok();
+ return ret;
+ } else if (member[0] != MP_OBJ_NULL) {
+ size_t n_args = value == MP_OBJ_NULL || value == MP_OBJ_SENTINEL ? 1 : 2;
+ mp_obj_t ret = mp_call_method_n_kw(n_args, 0, member);
+ if (value == MP_OBJ_SENTINEL) {
+ return ret;
+ } else {
+ return mp_const_none;
+ }
+ } else {
+ return MP_OBJ_NULL; // op not supported
+ }
+}
+
+STATIC mp_obj_t mp_obj_instance_get_call(mp_obj_t self_in, mp_obj_t *member) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR___call__,
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].call),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ return member[0];
+}
+
+bool mp_obj_instance_is_callable(mp_obj_t self_in) {
+ mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL};
+ return mp_obj_instance_get_call(self_in, member) != MP_OBJ_NULL;
+}
+
+mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_obj_t member[2] = {MP_OBJ_NULL, MP_OBJ_NULL};
+ mp_obj_t call = mp_obj_instance_get_call(self_in, member);
+ if (call == MP_OBJ_NULL) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("object not callable"));
+ #else
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' object is not callable"),
+ mp_obj_get_type_qstr(self_in));
+ #endif
+ }
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ if (call == MP_OBJ_SENTINEL) {
+ return mp_call_function_n_kw(self->subobj[0], n_args, n_kw, args);
+ }
+
+ return mp_call_method_self_n_kw(member[0], member[1], n_args, n_kw, args);
+}
+
+// Note that iter_buf may be NULL, and needs to be allocated if needed
+mp_obj_t mp_obj_instance_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR___iter__,
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].getiter),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_NULL) {
+ return MP_OBJ_NULL;
+ } else if (member[0] == MP_OBJ_SENTINEL) {
+ const mp_obj_type_t *type = mp_obj_get_type(self->subobj[0]);
+ if (iter_buf == NULL) {
+ iter_buf = m_new_obj(mp_obj_iter_buf_t);
+ }
+ return type->ext[0].getiter(self->subobj[0], iter_buf);
+ } else {
+ return mp_call_method_n_kw(0, 0, member);
+ }
+}
+
+STATIC mp_int_t instance_get_buffer(mp_obj_t self_in, mp_buffer_info_t *bufinfo, mp_uint_t flags) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_obj_t member[2] = {MP_OBJ_NULL};
+ struct class_lookup_data lookup = {
+ .obj = self,
+ .attr = MP_QSTR_, // don't actually look for a method
+ .meth_offset = offsetof(mp_obj_type_t, ext[0].buffer_p.get_buffer),
+ .dest = member,
+ .is_type = false,
+ };
+ mp_obj_class_lookup(&lookup, self->base.type);
+ if (member[0] == MP_OBJ_SENTINEL) {
+ const mp_obj_type_t *type = mp_obj_get_type(self->subobj[0]);
+ return type->ext[0].buffer_p.get_buffer(self->subobj[0], bufinfo, flags);
+ } else {
+ return 1; // object does not support buffer protocol
+ }
+}
+
+/******************************************************************************/
+// type object
+// - the struct is mp_obj_type_t and is defined in obj.h so const types can be made
+// - there is a constant mp_obj_type_t (called mp_type_type) for the 'type' object
+// - creating a new class (a new type) creates a new mp_obj_type_t
+
+#if ENABLE_SPECIAL_ACCESSORS
+STATIC bool check_for_special_accessors(mp_obj_t key, mp_obj_t value) {
+ #if MICROPY_PY_DELATTR_SETATTR
+ if (key == MP_OBJ_NEW_QSTR(MP_QSTR___setattr__) || key == MP_OBJ_NEW_QSTR(MP_QSTR___delattr__)) {
+ return true;
+ }
+ #endif
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (mp_obj_is_type(value, &mp_type_property)) {
+ return true;
+ }
+ #endif
+ #if MICROPY_PY_DESCRIPTORS
+ static const uint8_t to_check[] = {
+ MP_QSTR___get__, MP_QSTR___set__, MP_QSTR___delete__,
+ };
+ for (size_t i = 0; i < MP_ARRAY_SIZE(to_check); ++i) {
+ mp_obj_t dest_temp[2];
+ mp_load_method_protected(value, to_check[i], dest_temp, true);
+ if (dest_temp[0] != MP_OBJ_NULL) {
+ return true;
+ }
+ }
+ #endif
+ return false;
+}
+
+STATIC bool map_has_special_accessors(const mp_map_t *map) {
+ if (map == NULL) {
+ return false;
+ }
+ for (size_t i = 0; i < map->alloc; i++) {
+ if (mp_map_slot_is_filled(map, i)) {
+ const mp_map_elem_t *elem = &map->table[i];
+ if (check_for_special_accessors(elem->key, elem->value)) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+#endif
+
+STATIC void type_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_printf(print, "<class '%q'>", self->name);
+}
+
+STATIC mp_obj_t type_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+
+ mp_arg_check_num(n_args, n_kw, 1, 3, false);
+
+ switch (n_args) {
+ case 1:
+ return MP_OBJ_FROM_PTR(mp_obj_get_type(args[0]));
+
+ case 3:
+ // args[0] = name
+ // args[1] = bases tuple
+ // args[2] = locals dict
+ return mp_obj_new_type(mp_obj_str_get_qstr(args[0]), args[1], args[2]);
+
+ default:
+ mp_raise_TypeError(MP_ERROR_TEXT("type takes 1 or 3 arguments"));
+ }
+}
+
+STATIC mp_obj_t type_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ // instantiate an instance of a class
+
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (self->make_new == NULL) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("cannot create instance"));
+ #else
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("cannot create '%q' instances"), self->name);
+ #endif
+ }
+
+ mp_obj_t o = self->make_new(self, n_args, n_kw, args);
+
+ // return new instance
+ return o;
+}
+
+STATIC void type_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ assert(mp_obj_is_type(self_in, &mp_type_type));
+ mp_obj_type_t *self = MP_OBJ_TO_PTR(self_in);
+
+ if (dest[0] == MP_OBJ_NULL) {
+ // load attribute
+ #if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___name__) {
+ dest[0] = MP_OBJ_NEW_QSTR(self->name);
+ return;
+ }
+ #if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___dict__) {
+ // Returns a read-only dict of the class attributes.
+ // If the internal locals is not fixed, a copy will be created.
+ const mp_obj_dict_t *dict = self->locals_dict;
+ if (!dict) {
+ dict = &mp_const_empty_dict_obj;
+ }
+ if (dict->map.is_fixed) {
+ dest[0] = MP_OBJ_FROM_PTR(dict);
+ } else {
+ dest[0] = mp_obj_dict_copy(MP_OBJ_FROM_PTR(dict));
+ mp_obj_dict_t *dict_copy = MP_OBJ_TO_PTR(dest[0]);
+ dict_copy->map.is_fixed = 1;
+ }
+ return;
+ }
+ #endif
+ if (attr == MP_QSTR___bases__) {
+ if (self == &mp_type_object) {
+ dest[0] = mp_const_empty_tuple;
+ return;
+ }
+ const void *parent = mp_type_get_parent_slot(self);
+ mp_obj_t parent_obj = parent ? MP_OBJ_FROM_PTR(parent) : MP_OBJ_FROM_PTR(&mp_type_object);
+ #if MICROPY_MULTIPLE_INHERITANCE
+ if (mp_obj_is_type(parent_obj, &mp_type_tuple)) {
+ dest[0] = parent_obj;
+ return;
+ }
+ #endif
+ dest[0] = mp_obj_new_tuple(1, &parent_obj);
+ return;
+ }
+ #endif
+ struct class_lookup_data lookup = {
+ .obj = (mp_obj_instance_t *)self,
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = true,
+ };
+ mp_obj_class_lookup(&lookup, self);
+ } else {
+ // delete/store attribute
+
+ if (self->locals_dict != NULL) {
+ assert(mp_obj_is_dict_or_ordereddict(MP_OBJ_FROM_PTR(self->locals_dict))); // MicroPython restriction, for now
+ mp_map_t *locals_map = &self->locals_dict->map;
+ if (locals_map->is_fixed) {
+ // can't apply delete/store to a fixed map
+ return;
+ }
+ if (dest[1] == MP_OBJ_NULL) {
+ // delete attribute
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_REMOVE_IF_FOUND);
+ if (elem != NULL) {
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ } else {
+ #if ENABLE_SPECIAL_ACCESSORS
+ // Check if we add any special accessor methods with this store
+ if (!(self->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) {
+ if (check_for_special_accessors(MP_OBJ_NEW_QSTR(attr), dest[1])) {
+ if (self->flags & MP_TYPE_FLAG_IS_SUBCLASSED) {
+ // This class is already subclassed so can't have special accessors added
+ mp_raise_msg(&mp_type_AttributeError, MP_ERROR_TEXT("can't add special method to already-subclassed class"));
+ }
+ self->flags |= MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS;
+ }
+ }
+ #endif
+
+ // store attribute
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+ elem->value = dest[1];
+ dest[0] = MP_OBJ_NULL; // indicate success
+ }
+ }
+ }
+}
+
+const mp_obj_type_t mp_type_type = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_type,
+ .print = type_print,
+ .make_new = type_make_new,
+ .attr = type_attr,
+ MP_TYPE_EXTENDED_FIELDS(
+ .call = type_call,
+ .unary_op = mp_generic_unary_op,
+ ),
+};
+
+mp_obj_t mp_obj_new_type(qstr name, mp_obj_t bases_tuple, mp_obj_t locals_dict) {
+ // Verify input objects have expected type
+ if (!mp_obj_is_type(bases_tuple, &mp_type_tuple)) {
+ mp_raise_TypeError(NULL);
+ }
+ if (!mp_obj_is_dict_or_ordereddict(locals_dict)) {
+ mp_raise_TypeError(NULL);
+ }
+
+ // TODO might need to make a copy of locals_dict; at least that's how CPython does it
+
+ // Basic validation of base classes
+ uint16_t base_flags = MP_TYPE_FLAG_EQ_NOT_REFLEXIVE
+ | MP_TYPE_FLAG_EQ_CHECKS_OTHER_TYPE | MP_TYPE_FLAG_EQ_HAS_NEQ_TEST | MP_TYPE_FLAG_EXTENDED;
+ size_t bases_len;
+ mp_obj_t *bases_items;
+ mp_obj_tuple_get(bases_tuple, &bases_len, &bases_items);
+ for (size_t i = 0; i < bases_len; i++) {
+ if (!mp_obj_is_type(bases_items[i], &mp_type_type)) {
+ mp_raise_TypeError(MP_ERROR_TEXT("type is not an acceptable base type"));
+ }
+ mp_obj_type_t *t = MP_OBJ_TO_PTR(bases_items[i]);
+ // TODO: Verify with CPy, tested on function type
+ if (t->make_new == NULL) {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("type is not an acceptable base type"));
+ #else
+ mp_raise_TypeError_varg(
+ MP_ERROR_TEXT("type '%q' is not an acceptable base type"), t->name);
+ #endif
+ }
+ #if ENABLE_SPECIAL_ACCESSORS
+ if (mp_obj_is_instance_type(t)) {
+ t->flags |= MP_TYPE_FLAG_IS_SUBCLASSED;
+ base_flags |= t->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS;
+ }
+ #endif
+ }
+
+ mp_obj_full_type_t *o = m_new0_ll(mp_obj_full_type_t, 1);
+ o->base.type = &mp_type_type;
+ o->flags = base_flags;
+ o->name = name;
+ o->print = instance_print;
+ o->make_new = mp_obj_instance_make_new;
+ o->attr = mp_obj_instance_attr;
+ o->MP_TYPE_CALL = mp_obj_instance_call;
+ o->MP_TYPE_UNARY_OP = instance_unary_op;
+ o->MP_TYPE_BINARY_OP = instance_binary_op;
+ o->MP_TYPE_SUBSCR = instance_subscr;
+ o->MP_TYPE_GETITER = mp_obj_instance_getiter;
+ // o->iternext = ; not implemented
+ o->MP_TYPE_GET_BUFFER = instance_get_buffer;
+
+ if (bases_len > 0) {
+ // Inherit protocol from a base class. This allows to define an
+ // abstract base class which would translate C-level protocol to
+ // Python method calls, and any subclass inheriting from it will
+ // support this feature.
+ o->MP_TYPE_PROTOCOL = mp_type_get_protocol_slot((mp_obj_type_t *)MP_OBJ_TO_PTR(bases_items[0]));
+
+ if (bases_len >= 2) {
+ #if MICROPY_MULTIPLE_INHERITANCE
+ o->parent = MP_OBJ_TO_PTR(bases_tuple);
+ #else
+ mp_raise_NotImplementedError(MP_ERROR_TEXT("multiple inheritance not supported"));
+ #endif
+ } else {
+ o->parent = MP_OBJ_TO_PTR(bases_items[0]);
+ }
+ }
+
+ o->locals_dict = make_dict_long_lived(MP_OBJ_TO_PTR(locals_dict), 10);
+
+ #if ENABLE_SPECIAL_ACCESSORS
+ // Check if the class has any special accessor methods
+ if (!(o->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS)) {
+ for (size_t i = 0; i < o->locals_dict->map.alloc; i++) {
+ if (mp_map_slot_is_filled(&o->locals_dict->map, i)) {
+ const mp_map_elem_t *elem = &o->locals_dict->map.table[i];
+ if (check_for_special_accessors(elem->key, elem->value)) {
+ o->flags |= MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS;
+ break;
+ }
+ }
+ }
+ }
+ #endif
+
+ const mp_obj_type_t *native_base;
+ size_t num_native_bases = instance_count_native_bases((mp_obj_type_t *)o, &native_base);
+ if (num_native_bases > 1) {
+ mp_raise_TypeError(MP_ERROR_TEXT("multiple bases have instance lay-out conflict"));
+ }
+
+ mp_map_t *locals_map = &o->locals_dict->map;
+ #if ENABLE_SPECIAL_ACCESSORS
+ // Check if the class has any special accessor methods
+ if (!(o->flags & MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS) &&
+ (map_has_special_accessors(locals_map) ||
+ (num_native_bases == 1 &&
+ native_base->locals_dict != NULL &&
+ map_has_special_accessors(&native_base->locals_dict->map)))) {
+ o->flags |= MP_TYPE_FLAG_HAS_SPECIAL_ACCESSORS;
+ }
+ #endif
+
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(MP_QSTR___new__), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ // __new__ slot exists; check if it is a function
+ if (mp_obj_is_fun(elem->value)) {
+ // __new__ is a function, wrap it in a staticmethod decorator
+ elem->value = static_class_method_make_new(&mp_type_staticmethod, 1, 0, &elem->value);
+ }
+ }
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+// super object
+
+typedef struct _mp_obj_super_t {
+ mp_obj_base_t base;
+ mp_obj_t type;
+ mp_obj_t obj;
+} mp_obj_super_t;
+
+STATIC void super_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_super_t *self = MP_OBJ_TO_PTR(self_in);
+ mp_print_str(print, "<super: ");
+ mp_obj_print_helper(print, self->type, PRINT_STR);
+ mp_print_str(print, ", ");
+ mp_obj_print_helper(print, self->obj, PRINT_STR);
+ mp_print_str(print, ">");
+}
+
+STATIC mp_obj_t super_make_new(const mp_obj_type_t *type_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ (void)type_in;
+ // 0 arguments are turned into 2 in the compiler
+ // 1 argument is not yet implemented
+ mp_arg_check_num(n_args, n_kw, 2, 2, false);
+ if (!mp_obj_is_type(args[0], &mp_type_type)) {
+ mp_raise_TypeError(MP_ERROR_TEXT("first argument to super() must be type"));
+ }
+ mp_obj_super_t *o = m_new_obj(mp_obj_super_t);
+ *o = (mp_obj_super_t) {{type_in}, args[0], args[1]};
+ return MP_OBJ_FROM_PTR(o);
+}
+
+STATIC void super_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] != MP_OBJ_NULL) {
+ // not load attribute
+ return;
+ }
+
+ assert(mp_obj_is_type(self_in, &mp_type_super));
+ mp_obj_super_t *self = MP_OBJ_TO_PTR(self_in);
+
+ assert(mp_obj_is_type(self->type, &mp_type_type));
+
+ mp_obj_type_t *type = MP_OBJ_TO_PTR(self->type);
+
+ struct class_lookup_data lookup = {
+ .obj = MP_OBJ_TO_PTR(self->obj),
+ .attr = attr,
+ .meth_offset = 0,
+ .dest = dest,
+ .is_type = false,
+ };
+
+ // Allow a call super().__init__() to reach any native base classes
+ if (attr == MP_QSTR___init__) {
+ lookup.meth_offset = offsetof(mp_obj_type_t, make_new);
+ }
+
+ const void *parent = mp_type_get_parent_slot(type);
+ if (parent == NULL) {
+ // no parents, do nothing
+ #if MICROPY_MULTIPLE_INHERITANCE
+ } else if (((mp_obj_base_t *)parent)->type == &mp_type_tuple) {
+ const mp_obj_tuple_t *parent_tuple = parent;
+ size_t len = parent_tuple->len;
+ const mp_obj_t *items = parent_tuple->items;
+ for (size_t i = 0; i < len; i++) {
+ assert(mp_obj_is_type(items[i], &mp_type_type));
+ if (MP_OBJ_TO_PTR(items[i]) == &mp_type_object) {
+ // The "object" type will be searched at the end of this function,
+ // and we don't want to lookup native methods in object.
+ continue;
+ }
+ mp_obj_class_lookup(&lookup, (mp_obj_type_t *)MP_OBJ_TO_PTR(items[i]));
+ if (dest[0] != MP_OBJ_NULL) {
+ break;
+ }
+ }
+ #endif
+ } else if (parent != &mp_type_object) {
+ mp_obj_class_lookup(&lookup, parent);
+ }
+
+ if (dest[0] != MP_OBJ_NULL) {
+ if (dest[0] == MP_OBJ_SENTINEL) {
+ // Looked up native __init__ so defer to it
+ dest[0] = MP_OBJ_FROM_PTR(&native_base_init_wrapper_obj);
+ dest[1] = self->obj;
+ } else {
+ mp_obj_t member = dest[0];
+ // changes to mp_obj_instance_load_attr may require changes
+ // here...
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ if (mp_obj_is_type(member, &mp_type_property)) {
+ size_t n_proxy;
+ const mp_obj_t *proxy = mp_obj_property_get(member, &n_proxy);
+ if (proxy[0] == mp_const_none) {
+ mp_raise_AttributeError(MP_ERROR_TEXT("unreadable attribute"));
+ } else {
+ dest[0] = mp_call_function_n_kw(proxy[0], 1, 0, &self_in);
+ }
+ }
+ #endif
+ #if MICROPY_PY_DESCRIPTORS
+ mp_obj_t attr_get_method[4];
+ mp_load_method_maybe(member, MP_QSTR___get__, attr_get_method);
+ if (attr_get_method[0] != MP_OBJ_NULL) {
+ attr_get_method[2] = self_in;
+ attr_get_method[3] = MP_OBJ_FROM_PTR(mp_obj_get_type(self_in));
+ dest[0] = mp_call_method_n_kw(2, 0, attr_get_method);
+ }
+ #endif
+ }
+ return;
+ }
+
+ // Reset meth_offset so we don't look up any native methods in object,
+ // because object never takes up the native base-class slot.
+ lookup.meth_offset = 0;
+
+ mp_obj_class_lookup(&lookup, &mp_type_object);
+}
+
+const mp_obj_type_t mp_type_super = {
+ { &mp_type_type },
+ .name = MP_QSTR_super,
+ .print = super_print,
+ .make_new = super_make_new,
+ .attr = super_attr,
+};
+
+void mp_load_super_method(qstr attr, mp_obj_t *dest) {
+ mp_obj_super_t super = {{&mp_type_super}, dest[1], dest[2]};
+ mp_load_method(MP_OBJ_FROM_PTR(&super), attr, dest);
+}
+
+/******************************************************************************/
+// subclassing and built-ins specific to types
+
+// object and classinfo should be type objects
+// (but the function will fail gracefully if they are not)
+bool mp_obj_is_subclass_fast(mp_const_obj_t object, mp_const_obj_t classinfo) {
+ for (;;) {
+ if (object == classinfo) {
+ return true;
+ }
+
+ // not equivalent classes, keep searching base classes
+
+ // object should always be a type object, but just return false if it's not
+ if (!mp_obj_is_type(object, &mp_type_type)) {
+ return false;
+ }
+
+ const mp_obj_type_t *self = MP_OBJ_TO_PTR(object);
+ const void *parent = mp_type_get_parent_slot(self);
+
+ if (parent == NULL) {
+ // type has no parents
+ return false;
+ #if MICROPY_MULTIPLE_INHERITANCE
+ } else if (((mp_obj_base_t *)parent)->type == &mp_type_tuple) {
+ // get the base objects (they should be type objects)
+ const mp_obj_tuple_t *parent_tuple = parent;
+ const mp_obj_t *item = parent_tuple->items;
+ const mp_obj_t *top = item + parent_tuple->len - 1;
+
+ // iterate through the base objects
+ for (; item < top; ++item) {
+ if (mp_obj_is_subclass_fast(*item, classinfo)) {
+ return true;
+ }
+ }
+
+ // search last base (simple tail recursion elimination)
+ object = *item;
+ #endif
+ } else {
+ // type has 1 parent
+ object = MP_OBJ_FROM_PTR(parent);
+ }
+ }
+}
+
+STATIC mp_obj_t mp_obj_is_subclass(mp_obj_t object, mp_obj_t classinfo) {
+ size_t len;
+ mp_obj_t *items;
+ if (mp_obj_is_type(classinfo, &mp_type_type)) {
+ len = 1;
+ items = &classinfo;
+ } else if (mp_obj_is_type(classinfo, &mp_type_tuple)) {
+ mp_obj_tuple_get(classinfo, &len, &items);
+ } else {
+ mp_raise_TypeError(MP_ERROR_TEXT("issubclass() arg 2 must be a class or a tuple of classes"));
+ }
+
+ for (size_t i = 0; i < len; i++) {
+ // We explicitly check for 'object' here since no-one explicitly derives from it
+ if (items[i] == MP_OBJ_FROM_PTR(&mp_type_object) || mp_obj_is_subclass_fast(object, items[i])) {
+ return mp_const_true;
+ }
+ }
+ return mp_const_false;
+}
+
+STATIC mp_obj_t mp_builtin_issubclass(mp_obj_t object, mp_obj_t classinfo) {
+ if (!mp_obj_is_type(object, &mp_type_type)) {
+ mp_raise_TypeError(MP_ERROR_TEXT("issubclass() arg 1 must be a class"));
+ }
+ return mp_obj_is_subclass(object, classinfo);
+}
+
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_issubclass_obj, mp_builtin_issubclass);
+
+STATIC mp_obj_t mp_builtin_isinstance(mp_obj_t object, mp_obj_t classinfo) {
+ return mp_obj_is_subclass(MP_OBJ_FROM_PTR(mp_obj_get_type(object)), classinfo);
+}
+
+MP_DEFINE_CONST_FUN_OBJ_2(mp_builtin_isinstance_obj, mp_builtin_isinstance);
+
+mp_obj_t mp_obj_cast_to_native_base(mp_obj_t self_in, mp_const_obj_t native_type) {
+ const mp_obj_type_t *self_type = mp_obj_get_type(self_in);
+
+ if (MP_OBJ_FROM_PTR(self_type) == native_type) {
+ return self_in;
+ } else if (!mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(self_type), native_type)) {
+ return MP_OBJ_NULL;
+ } else {
+ mp_obj_instance_t *self = (mp_obj_instance_t *)MP_OBJ_TO_PTR(self_in);
+ return self->subobj[0];
+ }
+}
+
+/******************************************************************************/
+// staticmethod and classmethod types (probably should go in a different file)
+
+STATIC mp_obj_t static_class_method_make_new(const mp_obj_type_t *self, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ assert(self == &mp_type_staticmethod || self == &mp_type_classmethod);
+
+ mp_arg_check_num(n_args, n_kw, 1, 1, false);
+
+ mp_obj_static_class_method_t *o = m_new_obj(mp_obj_static_class_method_t);
+ *o = (mp_obj_static_class_method_t) {{self}, args[0]};
+ return MP_OBJ_FROM_PTR(o);
+}
+
+const mp_obj_type_t mp_type_staticmethod = {
+ { &mp_type_type },
+ .name = MP_QSTR_staticmethod,
+ .make_new = static_class_method_make_new,
+};
+
+const mp_obj_type_t mp_type_classmethod = {
+ { &mp_type_type },
+ .name = MP_QSTR_classmethod,
+ .make_new = static_class_method_make_new,
+};
diff --git a/circuitpython/py/objtype.h b/circuitpython/py/objtype.h
new file mode 100644
index 0000000..17b8048
--- /dev/null
+++ b/circuitpython/py/objtype.h
@@ -0,0 +1,59 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_OBJTYPE_H
+#define MICROPY_INCLUDED_PY_OBJTYPE_H
+
+#include "py/obj.h"
+
+// instance object
+// creating an instance of a class makes one of these objects
+typedef struct _mp_obj_instance_t {
+ mp_obj_base_t base;
+ mp_map_t members;
+ mp_obj_t subobj[];
+ // TODO maybe cache __getattr__ and __setattr__ for efficient lookup of them
+} mp_obj_instance_t;
+
+void mp_obj_assert_native_inited(mp_obj_t native_object);
+
+#if MICROPY_CPYTHON_COMPAT
+// this is needed for object.__new__
+mp_obj_instance_t *mp_obj_new_instance(const mp_obj_type_t *cls, const mp_obj_type_t **native_base);
+#endif
+
+// these need to be exposed so mp_obj_is_callable can work correctly
+bool mp_obj_instance_is_callable(mp_obj_t self_in);
+mp_obj_t mp_obj_instance_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+#define mp_obj_is_instance_type(type) ((type)->make_new == mp_obj_instance_make_new)
+#define mp_obj_is_native_type(type) ((type)->make_new != mp_obj_instance_make_new)
+// this needs to be exposed for the above macros to work correctly
+mp_obj_t mp_obj_instance_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args);
+
+// this needs to be exposed for mp_getiter
+mp_obj_t mp_obj_instance_getiter(mp_obj_t self_in, mp_obj_iter_buf_t *iter_buf);
+
+#endif // MICROPY_INCLUDED_PY_OBJTYPE_H
diff --git a/circuitpython/py/objzip.c b/circuitpython/py/objzip.c
new file mode 100644
index 0000000..9171649
--- /dev/null
+++ b/circuitpython/py/objzip.c
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "py/objtuple.h"
+#include "py/runtime.h"
+
+typedef struct _mp_obj_zip_t {
+ mp_obj_base_t base;
+ size_t n_iters;
+ mp_obj_t iters[];
+} mp_obj_zip_t;
+
+STATIC mp_obj_t zip_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);
+
+ mp_obj_zip_t *o = m_new_obj_var(mp_obj_zip_t, mp_obj_t, n_args);
+ o->base.type = type;
+ o->n_iters = n_args;
+ for (size_t i = 0; i < n_args; i++) {
+ o->iters[i] = mp_getiter(args[i], NULL);
+ }
+ return MP_OBJ_FROM_PTR(o);
+}
+
+STATIC mp_obj_t zip_iternext(mp_obj_t self_in) {
+ mp_check_self(mp_obj_is_type(self_in, &mp_type_zip));
+ mp_obj_zip_t *self = MP_OBJ_TO_PTR(self_in);
+ if (self->n_iters == 0) {
+ return MP_OBJ_STOP_ITERATION;
+ }
+ mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(self->n_iters, NULL));
+
+ for (size_t i = 0; i < self->n_iters; i++) {
+ mp_obj_t next = mp_iternext(self->iters[i]);
+ if (next == MP_OBJ_STOP_ITERATION) {
+ mp_obj_tuple_del(MP_OBJ_FROM_PTR(tuple));
+ return MP_OBJ_STOP_ITERATION;
+ }
+ tuple->items[i] = next;
+ }
+ return MP_OBJ_FROM_PTR(tuple);
+}
+
+const mp_obj_type_t mp_type_zip = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_zip,
+ .make_new = zip_make_new,
+ MP_TYPE_EXTENDED_FIELDS(
+ .getiter = mp_identity_getiter,
+ .iternext = zip_iternext,
+ ),
+};
diff --git a/circuitpython/py/opmethods.c b/circuitpython/py/opmethods.c
new file mode 100644
index 0000000..6ebd469
--- /dev/null
+++ b/circuitpython/py/opmethods.c
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/obj.h"
+#include "py/builtin.h"
+
+STATIC mp_obj_t op_getitem(mp_obj_t self_in, mp_obj_t key_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->ext[0].subscr(self_in, key_in, MP_OBJ_SENTINEL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_getitem_obj, op_getitem);
+
+STATIC mp_obj_t op_setitem(mp_obj_t self_in, mp_obj_t key_in, mp_obj_t value_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->ext[0].subscr(self_in, key_in, value_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_3(mp_op_setitem_obj, op_setitem);
+
+STATIC mp_obj_t op_delitem(mp_obj_t self_in, mp_obj_t key_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(self_in);
+ return type->ext[0].subscr(self_in, key_in, MP_OBJ_NULL);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_delitem_obj, op_delitem);
+
+STATIC mp_obj_t op_contains(mp_obj_t lhs_in, mp_obj_t rhs_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(lhs_in);
+ mp_binary_op_fun_t binary_op = mp_type_get_binary_op_slot(type);
+ return binary_op(MP_BINARY_OP_CONTAINS, lhs_in, rhs_in);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_op_contains_obj, op_contains);
diff --git a/circuitpython/py/pairheap.c b/circuitpython/py/pairheap.c
new file mode 100644
index 0000000..d3a011c
--- /dev/null
+++ b/circuitpython/py/pairheap.c
@@ -0,0 +1,147 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/pairheap.h"
+
+// The mp_pairheap_t.next pointer can take one of the following values:
+// - NULL: the node is the top of the heap
+// - LSB set: the node is the last of the children and points to its parent node
+// - other: the node is a child and not the last child
+// The macros below help manage this pointer.
+#define NEXT_MAKE_RIGHTMOST_PARENT(parent) ((void *)((uintptr_t)(parent) | 1))
+#define NEXT_IS_RIGHTMOST_PARENT(next) ((uintptr_t)(next) & 1)
+#define NEXT_GET_RIGHTMOST_PARENT(next) ((void *)((uintptr_t)(next) & ~1))
+
+// O(1), stable
+// Meld (merge) two heaps and return the new top.  The smaller of the two
+// tops (per lt) becomes the parent; the other becomes one of its children,
+// with child/child_last/next links maintained per the tagged-pointer scheme
+// described above.
+mp_pairheap_t *mp_pairheap_meld(mp_pairheap_lt_t lt, mp_pairheap_t *heap1, mp_pairheap_t *heap2) {
+    if (heap1 == NULL) {
+        return heap2;
+    }
+    if (heap2 == NULL) {
+        return heap1;
+    }
+    if (lt(heap1, heap2)) {
+        // heap1 is smaller: append heap2 as heap1's last (rightmost) child
+        if (heap1->child == NULL) {
+            heap1->child = heap2;
+        } else {
+            heap1->child_last->next = heap2;
+        }
+        heap1->child_last = heap2;
+        heap2->next = NEXT_MAKE_RIGHTMOST_PARENT(heap1);
+        return heap1;
+    } else {
+        // heap2 is smaller (or equal): prepend heap1 to heap2's child list
+        heap1->next = heap2->child;
+        heap2->child = heap1;
+        if (heap1->next == NULL) {
+            // heap1 is heap2's only child, so it is also the rightmost one
+            heap2->child_last = heap1;
+            heap1->next = NEXT_MAKE_RIGHTMOST_PARENT(heap2);
+        }
+        return heap2;
+    }
+}
+
+// amortised O(log N), stable
+// Combine a list of sibling heaps (linked via next, terminated by a
+// rightmost-parent tagged pointer) into a single heap: meld adjacent pairs
+// left-to-right, folding each result into the accumulated heap.
+mp_pairheap_t *mp_pairheap_pairing(mp_pairheap_lt_t lt, mp_pairheap_t *child) {
+    if (child == NULL) {
+        return NULL;
+    }
+    mp_pairheap_t *heap = NULL;
+    while (!NEXT_IS_RIGHTMOST_PARENT(child)) {
+        // take the first sibling off the list
+        mp_pairheap_t *n1 = child;
+        child = child->next;
+        n1->next = NULL;
+        if (!NEXT_IS_RIGHTMOST_PARENT(child)) {
+            // take a second sibling, if any, and meld the pair
+            mp_pairheap_t *n2 = child;
+            child = child->next;
+            n2->next = NULL;
+            n1 = mp_pairheap_meld(lt, n1, n2);
+        }
+        heap = mp_pairheap_meld(lt, heap, n1);
+    }
+    heap->next = NULL; // result is now a top-level heap
+    return heap;
+}
+
+// amortised O(log N), stable
+// Remove `node` from the heap and return the new top.  A node that is not
+// in the heap (next == NULL and not the top) is left alone.
+mp_pairheap_t *mp_pairheap_delete(mp_pairheap_lt_t lt, mp_pairheap_t *heap, mp_pairheap_t *node) {
+    // Simple case of the top being the node to delete
+    if (node == heap) {
+        mp_pairheap_t *child = heap->child;
+        node->child = NULL;
+        return mp_pairheap_pairing(lt, child);
+    }
+
+    // Case where node is not in the heap
+    if (node->next == NULL) {
+        return heap;
+    }
+
+    // Find parent of node: walk right along the sibling list until the
+    // tagged parent pointer is reached
+    mp_pairheap_t *parent = node;
+    while (!NEXT_IS_RIGHTMOST_PARENT(parent->next)) {
+        parent = parent->next;
+    }
+    parent = NEXT_GET_RIGHTMOST_PARENT(parent->next);
+
+    // Replace node with pairing of its children
+    mp_pairheap_t *next;
+    if (node == parent->child && node->child == NULL) {
+        // node is the first child and childless: just unlink it
+        if (NEXT_IS_RIGHTMOST_PARENT(node->next)) {
+            parent->child = NULL;
+        } else {
+            parent->child = node->next;
+        }
+        node->next = NULL;
+        return heap;
+    } else if (node == parent->child) {
+        // node is the first child and has children: pair them and put the
+        // result in node's place at the head of the child list
+        mp_pairheap_t *child = node->child;
+        next = node->next;
+        node->child = NULL;
+        node->next = NULL;
+        node = mp_pairheap_pairing(lt, child);
+        parent->child = node;
+    } else {
+        // node is mid-list: find the preceding sibling n, then splice the
+        // pairing of node's children into node's position
+        mp_pairheap_t *n = parent->child;
+        while (node != n->next) {
+            n = n->next;
+        }
+        mp_pairheap_t *child = node->child;
+        next = node->next;
+        node->child = NULL;
+        node->next = NULL;
+        node = mp_pairheap_pairing(lt, child);
+        if (node == NULL) {
+            // node had no children; the preceding sibling inherits its link
+            node = n;
+        } else {
+            n->next = node;
+        }
+    }
+    node->next = next;
+    if (NEXT_IS_RIGHTMOST_PARENT(next)) {
+        parent->child_last = node;
+    }
+    return heap;
+}
diff --git a/circuitpython/py/pairheap.h b/circuitpython/py/pairheap.h
new file mode 100644
index 0000000..68b8b0f
--- /dev/null
+++ b/circuitpython/py/pairheap.h
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PAIRHEAP_H
+#define MICROPY_INCLUDED_PY_PAIRHEAP_H
+
+// This is an implementation of a pairing heap. It is stable and has deletion
+// support. Only the less-than operation needs to be defined on elements.
+//
+// See original paper for details:
+ * Michael L. Fredman, Robert Sedgewick, Daniel D. Sleator, and Robert E. Tarjan.
+// The Pairing Heap: A New Form of Self-Adjusting Heap.
+// Algorithmica 1:111-129, 1986.
+// https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
+
+#include <assert.h>
+#include "py/obj.h"
+
+// This struct forms the nodes of the heap and is intended to be extended, by
+// placing it first in another struct, to include additional information for the
+// element stored in the heap. It includes "base" so it can be a MicroPython
+// object allocated on the heap and the GC can automatically trace all nodes by
+// following the tree structure.
+typedef struct _mp_pairheap_t {
+ mp_obj_base_t base;
+ struct _mp_pairheap_t *child;
+ struct _mp_pairheap_t *child_last;
+ struct _mp_pairheap_t *next;
+} mp_pairheap_t;
+
+// This is the function for the less-than operation on nodes/elements.
+typedef int (*mp_pairheap_lt_t)(mp_pairheap_t *, mp_pairheap_t *);
+
+// Core functions.
+mp_pairheap_t *mp_pairheap_meld(mp_pairheap_lt_t lt, mp_pairheap_t *heap1, mp_pairheap_t *heap2);
+mp_pairheap_t *mp_pairheap_pairing(mp_pairheap_lt_t lt, mp_pairheap_t *child);
+mp_pairheap_t *mp_pairheap_delete(mp_pairheap_lt_t lt, mp_pairheap_t *heap, mp_pairheap_t *node);
+
+// Create a new heap.  An empty heap is represented by a NULL pointer.
+static inline mp_pairheap_t *mp_pairheap_new(mp_pairheap_lt_t lt) {
+    (void)lt; // unused; kept so all heap functions share a uniform signature
+    return NULL;
+}
+
+// Initialise a single pairing-heap node so it is ready to push on to a heap.
+// child_last is deliberately left unset: it only becomes meaningful once the
+// node has a child (maintained by mp_pairheap_meld).
+static inline void mp_pairheap_init_node(mp_pairheap_lt_t lt, mp_pairheap_t *node) {
+    (void)lt;
+    node->child = NULL;
+    node->next = NULL;
+}
+
+// Test if the heap is empty (an empty heap is NULL).
+static inline bool mp_pairheap_is_empty(mp_pairheap_lt_t lt, mp_pairheap_t *heap) {
+    (void)lt;
+    return heap == NULL;
+}
+
+// Peek at the top of the heap without removing it.  Will return NULL if empty.
+static inline mp_pairheap_t *mp_pairheap_peek(mp_pairheap_lt_t lt, mp_pairheap_t *heap) {
+    (void)lt;
+    return heap;
+}
+
+// Push new node onto existing heap.  Returns the new heap.
+// The node must be freshly initialised (mp_pairheap_init_node) and must not
+// already be a member of a heap, as checked by the assert.
+static inline mp_pairheap_t *mp_pairheap_push(mp_pairheap_lt_t lt, mp_pairheap_t *heap, mp_pairheap_t *node) {
+    assert(node->child == NULL && node->next == NULL);
+    return mp_pairheap_meld(lt, node, heap); // node is first to be stable
+}
+
+// Pop the top off the heap, which must not be empty.  Returns the new heap,
+// formed by pairing up the removed top's children.
+static inline mp_pairheap_t *mp_pairheap_pop(mp_pairheap_lt_t lt, mp_pairheap_t *heap) {
+    assert(heap->next == NULL); // must be the top of a heap
+    mp_pairheap_t *child = heap->child;
+    heap->child = NULL;
+    return mp_pairheap_pairing(lt, child);
+}
+
+#endif // MICROPY_INCLUDED_PY_PAIRHEAP_H
diff --git a/circuitpython/py/parse.c b/circuitpython/py/parse.c
new file mode 100644
index 0000000..dee662b
--- /dev/null
+++ b/circuitpython/py/parse.c
@@ -0,0 +1,1256 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h> // for ssize_t
+#include <assert.h>
+#include <string.h>
+
+#include "py/lexer.h"
+#include "py/parse.h"
+#include "py/parsenum.h"
+#include "py/runtime.h"
+#include "py/objint.h"
+#include "py/objstr.h"
+#include "py/builtin.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+#define RULE_ACT_ARG_MASK (0x0f)
+#define RULE_ACT_KIND_MASK (0x30)
+#define RULE_ACT_ALLOW_IDENT (0x40)
+#define RULE_ACT_ADD_BLANK (0x80)
+#define RULE_ACT_OR (0x10)
+#define RULE_ACT_AND (0x20)
+#define RULE_ACT_LIST (0x30)
+
+#define RULE_ARG_KIND_MASK (0xf000)
+#define RULE_ARG_ARG_MASK (0x0fff)
+#define RULE_ARG_TOK (0x1000)
+#define RULE_ARG_RULE (0x2000)
+#define RULE_ARG_OPT_RULE (0x3000)
+
+// *FORMAT-OFF*
+
+enum {
+// define rules with a compile function
+#define DEF_RULE(rule, comp, kind, ...) RULE_##rule,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ RULE_const_object, // special node for a constant, generic Python object
+
+// define rules without a compile function
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) RULE_##rule,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+};
+
+// Define an array of actions corresponding to each rule
+STATIC const uint8_t rule_act_table[] = {
+#define or(n) (RULE_ACT_OR | n)
+#define and(n) (RULE_ACT_AND | n)
+#define and_ident(n) (RULE_ACT_AND | n | RULE_ACT_ALLOW_IDENT)
+#define and_blank(n) (RULE_ACT_AND | n | RULE_ACT_ADD_BLANK)
+#define one_or_more (RULE_ACT_LIST | 2)
+#define list (RULE_ACT_LIST | 1)
+#define list_with_end (RULE_ACT_LIST | 3)
+
+#define DEF_RULE(rule, comp, kind, ...) kind,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+
+ 0, // RULE_const_object
+
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) kind,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+
+#undef or
+#undef and
+#undef and_ident
+#undef and_blank
+#undef one_or_more
+#undef list
+#undef list_with_end
+};
+
+// Define the argument data for each rule, as a combined array
+STATIC const uint16_t rule_arg_combined_table[] = {
+#define tok(t) (RULE_ARG_TOK | MP_TOKEN_##t)
+#define rule(r) (RULE_ARG_RULE | RULE_##r)
+#define opt_rule(r) (RULE_ARG_OPT_RULE | RULE_##r)
+
+#define DEF_RULE(rule, comp, kind, ...) __VA_ARGS__,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) __VA_ARGS__,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+
+#undef tok
+#undef rule
+#undef opt_rule
+};
+
+// Macro to create a list of N identifiers where N is the number of variable arguments to the macro
+#define RULE_EXPAND(x) x
+#define RULE_PADDING(rule, ...) RULE_PADDING2(rule, __VA_ARGS__, RULE_PADDING_IDS(rule))
+#define RULE_PADDING2(rule, ...) RULE_EXPAND(RULE_PADDING3(rule, __VA_ARGS__))
+#define RULE_PADDING3(rule, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, ...) __VA_ARGS__
+#define RULE_PADDING_IDS(r) PAD13_##r, PAD12_##r, PAD11_##r, PAD10_##r, PAD9_##r, PAD8_##r, PAD7_##r, PAD6_##r, PAD5_##r, PAD4_##r, PAD3_##r, PAD2_##r, PAD1_##r,
+
+// Use an enum to create constants specifying how much room a rule takes in rule_arg_combined_table
+enum {
+#define DEF_RULE(rule, comp, kind, ...) RULE_PADDING(rule, __VA_ARGS__)
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) RULE_PADDING(rule, __VA_ARGS__)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+};
+
+// Macro to compute the start of a rule in rule_arg_combined_table
+#define RULE_ARG_OFFSET(rule, ...) RULE_ARG_OFFSET2(rule, __VA_ARGS__, RULE_ARG_OFFSET_IDS(rule))
+#define RULE_ARG_OFFSET2(rule, ...) RULE_EXPAND(RULE_ARG_OFFSET3(rule, __VA_ARGS__))
+#define RULE_ARG_OFFSET3(rule, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, ...) _14
+#define RULE_ARG_OFFSET_IDS(r) PAD13_##r, PAD12_##r, PAD11_##r, PAD10_##r, PAD9_##r, PAD8_##r, PAD7_##r, PAD6_##r, PAD5_##r, PAD4_##r, PAD3_##r, PAD2_##r, PAD1_##r, PAD0_##r,
+
+// Use the above enum values to create a table of offsets for each rule's arg
+// data, which indexes rule_arg_combined_table. The offsets require 9 bits of
+// storage but only the lower 8 bits are stored here. The 9th bit is computed
+// in get_rule_arg using the FIRST_RULE_WITH_OFFSET_ABOVE_255 constant.
+STATIC const uint8_t rule_arg_offset_table[] = {
+#define DEF_RULE(rule, comp, kind, ...) RULE_ARG_OFFSET(rule, __VA_ARGS__) & 0xff,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ 0, // RULE_const_object
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) RULE_ARG_OFFSET(rule, __VA_ARGS__) & 0xff,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+};
+
+// Define a constant that's used to determine the 9th bit of the values in rule_arg_offset_table
+static const size_t FIRST_RULE_WITH_OFFSET_ABOVE_255 =
+#define DEF_RULE(rule, comp, kind, ...) RULE_ARG_OFFSET(rule, __VA_ARGS__) >= 0x100 ? RULE_##rule :
+#define DEF_RULE_NC(rule, kind, ...)
+#include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) RULE_ARG_OFFSET(rule, __VA_ARGS__) >= 0x100 ? RULE_##rule :
+#include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ 0;
+
+#if MICROPY_DEBUG_PARSE_RULE_NAME
+// Define an array of rule names corresponding to each rule
+STATIC const char *const rule_name_table[] = {
+#define DEF_RULE(rule, comp, kind, ...) #rule,
+#define DEF_RULE_NC(rule, kind, ...)
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+ "", // RULE_const_object
+#define DEF_RULE(rule, comp, kind, ...)
+#define DEF_RULE_NC(rule, kind, ...) #rule,
+ #include "py/grammar.h"
+#undef DEF_RULE
+#undef DEF_RULE_NC
+};
+#endif
+
+// *FORMAT-ON*
+
+// One entry of the parser's rule stack: which grammar rule is being matched,
+// how far through its arguments it is, and the source line it started on.
+typedef struct _rule_stack_t {
+    size_t src_line : (8 * sizeof(size_t) - 8); // maximum bits storing source line number
+    size_t rule_id : 8; // this must be large enough to fit largest rule number
+    size_t arg_i; // this dictates the maximum nodes in a "list" of things
+} rule_stack_t;
+
+// A chunk of arena memory used by parser_alloc.  While a chunk is current,
+// union_.used tracks its fill level; once retired onto the tree's chunk list
+// the same word is reused as the union_.next link.
+typedef struct _mp_parse_chunk_t {
+    size_t alloc; // usable bytes in data[]
+    union {
+        size_t used;
+        struct _mp_parse_chunk_t *next;
+    } union_;
+    byte data[]; // flexible array member holding the allocated nodes
+} mp_parse_chunk_t;
+
+// Complete state for one parse: the rule and result stacks, the lexer being
+// consumed, and the parse tree (with its chunked node storage) being built.
+typedef struct _parser_t {
+    size_t rule_stack_alloc;
+    size_t rule_stack_top;
+    rule_stack_t *rule_stack;
+
+    size_t result_stack_alloc;
+    size_t result_stack_top;
+    mp_parse_node_t *result_stack;
+
+    mp_lexer_t *lexer;
+
+    mp_parse_tree_t tree;
+    mp_parse_chunk_t *cur_chunk; // chunk currently being filled by parser_alloc
+
+    #if MICROPY_COMP_CONST
+    mp_map_t consts; // table of dynamic constants declared via const(...)
+    #endif
+} parser_t;
+
+// Return a pointer to rule r_id's argument words in rule_arg_combined_table.
+// Offsets are stored in 8 bits; the 9th bit is reconstructed by comparing
+// against FIRST_RULE_WITH_OFFSET_ABOVE_255 (see table definitions above).
+STATIC const uint16_t *get_rule_arg(uint8_t r_id) {
+    size_t off = rule_arg_offset_table[r_id];
+    if (r_id >= FIRST_RULE_WITH_OFFSET_ABOVE_255) {
+        off |= 0x100;
+    }
+    return &rule_arg_combined_table[off];
+}
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
+
+// Allocate num_bytes for a parse node out of the parser's chunked arena.
+// The current chunk is grown in place when possible; otherwise it is shrunk
+// to its used size, retired onto the tree's chunk list, and a fresh chunk
+// is allocated.
+STATIC void *parser_alloc(parser_t *parser, size_t num_bytes) {
+    // use a custom memory allocator to store parse nodes sequentially in large chunks
+
+    mp_parse_chunk_t *chunk = parser->cur_chunk;
+
+    if (chunk != NULL && chunk->union_.used + num_bytes > chunk->alloc) {
+        // not enough room at end of previously allocated chunk so try to grow
+        mp_parse_chunk_t *new_data = (mp_parse_chunk_t *)m_renew_maybe(byte, chunk,
+            sizeof(mp_parse_chunk_t) + chunk->alloc,
+            sizeof(mp_parse_chunk_t) + chunk->alloc + num_bytes, false);
+        if (new_data == NULL) {
+            // could not grow existing memory; shrink it to fit previous
+            (void)m_renew_maybe(byte, chunk, sizeof(mp_parse_chunk_t) + chunk->alloc,
+                sizeof(mp_parse_chunk_t) + chunk->union_.used, false);
+            chunk->alloc = chunk->union_.used;
+            chunk->union_.next = parser->tree.chunk;
+            parser->tree.chunk = chunk;
+            chunk = NULL;
+        } else {
+            // could grow existing memory
+            chunk->alloc += num_bytes;
+        }
+    }
+
+    if (chunk == NULL) {
+        // no previous chunk, allocate a new chunk
+        size_t alloc = MICROPY_ALLOC_PARSE_CHUNK_INIT;
+        if (alloc < num_bytes) {
+            alloc = num_bytes;
+        }
+        chunk = (mp_parse_chunk_t *)m_new(byte, sizeof(mp_parse_chunk_t) + alloc);
+        chunk->alloc = alloc;
+        chunk->union_.used = 0;
+        parser->cur_chunk = chunk;
+    }
+
+    byte *ret = chunk->data + chunk->union_.used;
+    chunk->union_.used += num_bytes;
+    return ret;
+}
+#pragma GCC diagnostic pop
+
+// Push a rule (with its resume position arg_i and source line) onto the rule
+// stack, growing the stack by MICROPY_ALLOC_PARSE_RULE_INC entries when full.
+STATIC void push_rule(parser_t *parser, size_t src_line, uint8_t rule_id, size_t arg_i) {
+    if (parser->rule_stack_top >= parser->rule_stack_alloc) {
+        rule_stack_t *rs = m_renew(rule_stack_t, parser->rule_stack, parser->rule_stack_alloc, parser->rule_stack_alloc + MICROPY_ALLOC_PARSE_RULE_INC);
+        parser->rule_stack = rs;
+        parser->rule_stack_alloc += MICROPY_ALLOC_PARSE_RULE_INC;
+    }
+    rule_stack_t *rs = &parser->rule_stack[parser->rule_stack_top++];
+    rs->src_line = src_line;
+    rs->rule_id = rule_id;
+    rs->arg_i = arg_i;
+}
+
+// Push the rule encoded in a RULE_ARG_RULE / RULE_ARG_OPT_RULE argument word,
+// starting at its first sub-argument and tagged with the current token line.
+STATIC void push_rule_from_arg(parser_t *parser, size_t arg) {
+    assert((arg & RULE_ARG_KIND_MASK) == RULE_ARG_RULE || (arg & RULE_ARG_KIND_MASK) == RULE_ARG_OPT_RULE);
+    size_t rule_id = arg & RULE_ARG_ARG_MASK;
+    push_rule(parser, parser->lexer->tok_line, rule_id, 0);
+}
+
+// Pop the top rule off the rule stack, returning its id and writing its
+// resume position and source line through the out-parameters.
+STATIC uint8_t pop_rule(parser_t *parser, size_t *arg_i, size_t *src_line) {
+    parser->rule_stack_top -= 1;
+    uint8_t rule_id = parser->rule_stack[parser->rule_stack_top].rule_id;
+    *arg_i = parser->rule_stack[parser->rule_stack_top].arg_i;
+    *src_line = parser->rule_stack[parser->rule_stack_top].src_line;
+    return rule_id;
+}
+
+// True if the node is the False keyword token or the small-int 0.
+bool mp_parse_node_is_const_false(mp_parse_node_t pn) {
+    return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_FALSE)
+           || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) == 0);
+}
+
+// True if the node is the True keyword token or any non-zero small-int.
+bool mp_parse_node_is_const_true(mp_parse_node_t pn) {
+    return MP_PARSE_NODE_IS_TOKEN_KIND(pn, MP_TOKEN_KW_TRUE)
+           || (MP_PARSE_NODE_IS_SMALL_INT(pn) && MP_PARSE_NODE_LEAF_SMALL_INT(pn) != 0);
+}
+
+// If the node holds an integer (a small-int leaf, or a const-object node
+// wrapping an int object), store it as an object in *o and return true;
+// otherwise return false.
+bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o) {
+    if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+        *o = MP_OBJ_NEW_SMALL_INT(MP_PARSE_NODE_LEAF_SMALL_INT(pn));
+        return true;
+    } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, RULE_const_object)) {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+        // nodes are 32-bit pointers, but need to extract 64-bit object
+        *o = (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32);
+        #else
+        *o = (mp_obj_t)pns->nodes[0];
+        #endif
+        return mp_obj_is_int(*o);
+    } else {
+        return false;
+    }
+}
+
+// View a node as a list of kind pn_kind: set *nodes to the first element and
+// return the element count.  A null node is an empty list; a leaf or a
+// struct of a different kind is treated as a single-element list.
+size_t mp_parse_node_extract_list(mp_parse_node_t *pn, size_t pn_kind, mp_parse_node_t **nodes) {
+    if (MP_PARSE_NODE_IS_NULL(*pn)) {
+        *nodes = NULL;
+        return 0;
+    } else if (MP_PARSE_NODE_IS_LEAF(*pn)) {
+        *nodes = pn;
+        return 1;
+    } else {
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)(*pn);
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) != pn_kind) {
+            *nodes = pn;
+            return 1;
+        } else {
+            *nodes = pns->nodes;
+            return MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+        }
+    }
+}
+
+#if MICROPY_DEBUG_PRINTERS
+// Debug aid: recursively print a parse node and its children, indented by
+// `indent` spaces, with the source line shown for struct nodes.
+void mp_parse_node_print(const mp_print_t *print, mp_parse_node_t pn, size_t indent) {
+    if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+        mp_printf(print, "[% 4d] ", (int)((mp_parse_node_struct_t *)pn)->source_line);
+    } else {
+        mp_printf(print, "       ");
+    }
+    for (size_t i = 0; i < indent; i++) {
+        mp_printf(print, " ");
+    }
+    if (MP_PARSE_NODE_IS_NULL(pn)) {
+        mp_printf(print, "NULL\n");
+    } else if (MP_PARSE_NODE_IS_SMALL_INT(pn)) {
+        mp_int_t arg = MP_PARSE_NODE_LEAF_SMALL_INT(pn);
+        mp_printf(print, "int(" INT_FMT ")\n", arg);
+    } else if (MP_PARSE_NODE_IS_LEAF(pn)) {
+        uintptr_t arg = MP_PARSE_NODE_LEAF_ARG(pn);
+        switch (MP_PARSE_NODE_LEAF_KIND(pn)) {
+            case MP_PARSE_NODE_ID:
+                mp_printf(print, "id(%s)\n", qstr_str(arg));
+                break;
+            case MP_PARSE_NODE_STRING:
+                mp_printf(print, "str(%s)\n", qstr_str(arg));
+                break;
+            case MP_PARSE_NODE_BYTES:
+                mp_printf(print, "bytes(%s)\n", qstr_str(arg));
+                break;
+            default:
+                assert(MP_PARSE_NODE_LEAF_KIND(pn) == MP_PARSE_NODE_TOKEN);
+                mp_printf(print, "tok(%u)\n", (uint)arg);
+                break;
+        }
+    } else {
+        // node must be a mp_parse_node_struct_t
+        mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+        if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_const_object) {
+            #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+            mp_printf(print, "literal const(%016llx)\n", (uint64_t)pns->nodes[0] | ((uint64_t)pns->nodes[1] << 32));
+            #else
+            mp_printf(print, "literal const(%p)\n", (mp_obj_t)pns->nodes[0]);
+            #endif
+        } else {
+            size_t n = MP_PARSE_NODE_STRUCT_NUM_NODES(pns);
+            #if MICROPY_DEBUG_PARSE_RULE_NAME
+            mp_printf(print, "%s(%u) (n=%u)\n", rule_name_table[MP_PARSE_NODE_STRUCT_KIND(pns)], (uint)MP_PARSE_NODE_STRUCT_KIND(pns), (uint)n);
+            #else
+            mp_printf(print, "rule(%u) (n=%u)\n", (uint)MP_PARSE_NODE_STRUCT_KIND(pns), (uint)n);
+            #endif
+            for (size_t i = 0; i < n; i++) {
+                mp_parse_node_print(print, pns->nodes[i], indent + 2);
+            }
+        }
+    }
+}
+#endif // MICROPY_DEBUG_PRINTERS
+
+/*
+STATIC void result_stack_show(const mp_print_t *print, parser_t *parser) {
+ mp_printf(print, "result stack, most recent first\n");
+ for (ssize_t i = parser->result_stack_top - 1; i >= 0; i--) {
+ mp_parse_node_print(print, parser->result_stack[i], 0);
+ }
+}
+*/
+
+// Pop and return the top node of the result stack (must be non-empty).
+STATIC mp_parse_node_t pop_result(parser_t *parser) {
+    assert(parser->result_stack_top > 0);
+    return parser->result_stack[--parser->result_stack_top];
+}
+
+// Return the node `pos` entries below the top of the result stack without
+// removing it (pos == 0 is the top).
+STATIC mp_parse_node_t peek_result(parser_t *parser, size_t pos) {
+    assert(parser->result_stack_top > pos);
+    return parser->result_stack[parser->result_stack_top - 1 - pos];
+}
+
+// Push a node onto the result stack, growing the stack by
+// MICROPY_ALLOC_PARSE_RESULT_INC entries when it is full.
+STATIC void push_result_node(parser_t *parser, mp_parse_node_t pn) {
+    if (parser->result_stack_top >= parser->result_stack_alloc) {
+        mp_parse_node_t *stack = m_renew(mp_parse_node_t, parser->result_stack, parser->result_stack_alloc, parser->result_stack_alloc + MICROPY_ALLOC_PARSE_RESULT_INC);
+        parser->result_stack = stack;
+        parser->result_stack_alloc += MICROPY_ALLOC_PARSE_RESULT_INC;
+    }
+    parser->result_stack[parser->result_stack_top++] = pn;
+}
+
+// Allocate a RULE_const_object parse node wrapping an arbitrary object.
+// Under object representation D a node word is only 32 bits, so the 64-bit
+// object value is split across two node slots.
+STATIC mp_parse_node_t make_node_const_object(parser_t *parser, size_t src_line, mp_obj_t obj) {
+    mp_parse_node_struct_t *pn = parser_alloc(parser, sizeof(mp_parse_node_struct_t) + sizeof(mp_obj_t));
+    pn->source_line = src_line;
+    #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+    // nodes are 32-bit pointers, but need to store 64-bit object
+    pn->kind_num_nodes = RULE_const_object | (2 << 8);
+    pn->nodes[0] = (uint64_t)obj;
+    pn->nodes[1] = (uint64_t)obj >> 32;
+    #else
+    pn->kind_num_nodes = RULE_const_object | (1 << 8);
+    pn->nodes[0] = (uintptr_t)obj;
+    #endif
+    return (mp_parse_node_t)pn;
+}
+
+// Make a small-int parse node from a small-int object, falling back to a
+// const-object node when the value doesn't fit in a 31-bit parse-node word
+// (only possible under object representation D).
+STATIC mp_parse_node_t mp_parse_node_new_small_int_checked(parser_t *parser, mp_obj_t o_val) {
+    (void)parser;
+    mp_int_t val = MP_OBJ_SMALL_INT_VALUE(o_val);
+    #if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+    // A parse node is only 32-bits and the small-int value must fit in 31-bits
+    if (((val ^ (val << 1)) & 0xffffffff80000000) != 0) {
+        return make_node_const_object(parser, 0, o_val);
+    }
+    #endif
+    return mp_parse_node_new_small_int(val);
+}
+
+// Convert the lexer's current token (name, integer, float/imaginary,
+// string/bytes, or operator/keyword token) into a parse node and push it on
+// the result stack.  rule_id is the rule currently being matched, used only
+// for the dynamic-constant lookup when MICROPY_COMP_CONST is enabled.
+STATIC void push_result_token(parser_t *parser, uint8_t rule_id) {
+    mp_parse_node_t pn;
+    mp_lexer_t *lex = parser->lexer;
+    if (lex->tok_kind == MP_TOKEN_NAME) {
+        qstr id = qstr_from_strn(lex->vstr.buf, lex->vstr.len);
+        #if MICROPY_COMP_CONST
+        // if name is a standalone identifier, look it up in the table of dynamic constants
+        mp_map_elem_t *elem;
+        if (rule_id == RULE_atom
+            && (elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP)) != NULL) {
+            if (mp_obj_is_small_int(elem->value)) {
+                pn = mp_parse_node_new_small_int_checked(parser, elem->value);
+            } else {
+                pn = make_node_const_object(parser, lex->tok_line, elem->value);
+            }
+        } else {
+            pn = mp_parse_node_new_leaf(MP_PARSE_NODE_ID, id);
+        }
+        #else
+        (void)rule_id;
+        pn = mp_parse_node_new_leaf(MP_PARSE_NODE_ID, id);
+        #endif
+    } else if (lex->tok_kind == MP_TOKEN_INTEGER) {
+        mp_obj_t o = mp_parse_num_integer(lex->vstr.buf, lex->vstr.len, 0, lex);
+        if (mp_obj_is_small_int(o)) {
+            pn = mp_parse_node_new_small_int_checked(parser, o);
+        } else {
+            // big int: keep the object itself in a const-object node
+            pn = make_node_const_object(parser, lex->tok_line, o);
+        }
+    } else if (lex->tok_kind == MP_TOKEN_FLOAT_OR_IMAG) {
+        mp_obj_t o = mp_parse_num_decimal(lex->vstr.buf, lex->vstr.len, true, false, lex);
+        pn = make_node_const_object(parser, lex->tok_line, o);
+    } else if (lex->tok_kind == MP_TOKEN_STRING || lex->tok_kind == MP_TOKEN_BYTES) {
+        // Don't automatically intern all strings/bytes.  doc strings (which are usually large)
+        // will be discarded by the compiler, and so we shouldn't intern them.
+        qstr qst = MP_QSTRnull;
+        if (lex->vstr.len <= MICROPY_ALLOC_PARSE_INTERN_STRING_LEN) {
+            // intern short strings
+            qst = qstr_from_strn(lex->vstr.buf, lex->vstr.len);
+        } else {
+            // check if this string is already interned
+            qst = qstr_find_strn(lex->vstr.buf, lex->vstr.len);
+        }
+        if (qst != MP_QSTRnull) {
+            // qstr exists, make a leaf node
+            pn = mp_parse_node_new_leaf(lex->tok_kind == MP_TOKEN_STRING ? MP_PARSE_NODE_STRING : MP_PARSE_NODE_BYTES, qst);
+        } else {
+            // not interned, make a node holding a pointer to the string/bytes object
+            mp_obj_t o = mp_obj_new_str_copy(
+                lex->tok_kind == MP_TOKEN_STRING ? &mp_type_str : &mp_type_bytes,
+                (const byte *)lex->vstr.buf, lex->vstr.len);
+            pn = make_node_const_object(parser, lex->tok_line, o);
+        }
+    } else {
+        // any other token kind (keywords, operators, delimiters)
+        pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, lex->tok_kind);
+    }
+    push_result_node(parser, pn);
+}
+
+#if MICROPY_COMP_MODULE_CONST
+// Map of names to ROM modules treated as compile-time constants when
+// MICROPY_COMP_MODULE_CONST is enabled; ports can add entries via
+// MICROPY_PORT_CONSTANTS.
+STATIC const mp_rom_map_elem_t mp_constants_table[] = {
+    #if MICROPY_PY_UERRNO
+    { MP_ROM_QSTR(MP_QSTR_errno), MP_ROM_PTR(&mp_module_uerrno) },
+    #endif
+    #if MICROPY_PY_UCTYPES
+    { MP_ROM_QSTR(MP_QSTR_uctypes), MP_ROM_PTR(&mp_module_uctypes) },
+    #endif
+    // Extra constants as defined by a port
+    MICROPY_PORT_CONSTANTS
+};
+STATIC MP_DEFINE_CONST_MAP(mp_constants_map, mp_constants_table);
+#endif
+
+STATIC void push_result_rule(parser_t *parser, size_t src_line, uint8_t rule_id, size_t num_args);
+
+#if MICROPY_COMP_CONST_FOLDING
+// Fold constant operands of the logical operators `or`, `and` and `not`,
+// compacting the operands on the result stack in place.  Returns true if
+// the whole expression folded down to a single node.
+STATIC bool fold_logical_constants(parser_t *parser, uint8_t rule_id, size_t *num_args) {
+    if (rule_id == RULE_or_test
+        || rule_id == RULE_and_test) {
+        // folding for binary logical ops: or and
+        size_t copy_to = *num_args;
+        for (size_t i = copy_to; i > 0;) {
+            mp_parse_node_t pn = peek_result(parser, --i);
+            parser->result_stack[parser->result_stack_top - copy_to] = pn;
+            if (i == 0) {
+                // always need to keep the last value
+                break;
+            }
+            if (rule_id == RULE_or_test) {
+                if (mp_parse_node_is_const_true(pn)) {
+                    // constant-true operand short-circuits the rest of the `or`
+                    break;
+                } else if (!mp_parse_node_is_const_false(pn)) {
+                    // non-constant operand must be kept
+                    copy_to -= 1;
+                }
+            } else {
+                // RULE_and_test
+                if (mp_parse_node_is_const_false(pn)) {
+                    // constant-false operand short-circuits the rest of the `and`
+                    break;
+                } else if (!mp_parse_node_is_const_true(pn)) {
+                    copy_to -= 1;
+                }
+            }
+        }
+        copy_to -= 1; // copy_to now contains number of args to pop
+
+        // pop and discard all the short-circuited expressions
+        for (size_t i = 0; i < copy_to; ++i) {
+            pop_result(parser);
+        }
+        *num_args -= copy_to;
+
+        // we did a complete folding if there's only 1 arg left
+        return *num_args == 1;
+
+    } else if (rule_id == RULE_not_test_2) {
+        // folding for unary logical op: not
+        mp_parse_node_t pn = peek_result(parser, 0);
+        if (mp_parse_node_is_const_false(pn)) {
+            pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, MP_TOKEN_KW_TRUE);
+        } else if (mp_parse_node_is_const_true(pn)) {
+            pn = mp_parse_node_new_leaf(MP_PARSE_NODE_TOKEN, MP_TOKEN_KW_FALSE);
+        } else {
+            return false;
+        }
+        pop_result(parser);
+        push_result_node(parser, pn);
+        return true;
+    }
+
+    return false;
+}
+
+// Fold constant integer expressions at parse time, eg 1 + 2 * 3 + 4.
+// On success the rule's arguments are popped off the result stack and
+// replaced by a single constant node, and true is returned.  Returns
+// false when the rule is not foldable, in which case the caller builds
+// a normal rule node.
+STATIC bool fold_constants(parser_t *parser, uint8_t rule_id, size_t num_args) {
+    // this code does folding of arbitrary integer expressions, eg 1 + 2 * 3 + 4
+    // it does not do partial folding, eg 1 + 2 + x -> 3 + x
+
+    mp_obj_t arg0;
+    if (rule_id == RULE_expr
+        || rule_id == RULE_xor_expr
+        || rule_id == RULE_and_expr
+        || rule_id == RULE_power) {
+        // folding for binary ops: | ^ & **
+        // the deepest result-stack entry (num_args - 1) is the leftmost operand
+        mp_parse_node_t pn = peek_result(parser, num_args - 1);
+        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
+            return false;
+        }
+        mp_binary_op_t op;
+        if (rule_id == RULE_expr) {
+            op = MP_BINARY_OP_OR;
+        } else if (rule_id == RULE_xor_expr) {
+            op = MP_BINARY_OP_XOR;
+        } else if (rule_id == RULE_and_expr) {
+            op = MP_BINARY_OP_AND;
+        } else {
+            op = MP_BINARY_OP_POWER;
+        }
+        // apply the operator left-to-right across the remaining operands
+        for (ssize_t i = num_args - 2; i >= 0; --i) {
+            pn = peek_result(parser, i);
+            mp_obj_t arg1;
+            if (!mp_parse_node_get_int_maybe(pn, &arg1)) {
+                return false;
+            }
+            if (op == MP_BINARY_OP_POWER && mp_obj_int_sign(arg1) < 0) {
+                // ** can't have negative rhs
+                return false;
+            }
+            arg0 = mp_binary_op(op, arg0, arg1);
+        }
+    } else if (rule_id == RULE_shift_expr
+        || rule_id == RULE_arith_expr
+        || rule_id == RULE_term) {
+        // folding for binary ops: << >> + - * @ / % //
+        // operands and operator tokens alternate on the result stack:
+        // operand at i - 1, its operator token at i
+        mp_parse_node_t pn = peek_result(parser, num_args - 1);
+        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
+            return false;
+        }
+        for (ssize_t i = num_args - 2; i >= 1; i -= 2) {
+            pn = peek_result(parser, i - 1);
+            mp_obj_t arg1;
+            if (!mp_parse_node_get_int_maybe(pn, &arg1)) {
+                return false;
+            }
+            mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(peek_result(parser, i));
+            if (tok == MP_TOKEN_OP_AT || tok == MP_TOKEN_OP_SLASH) {
+                // Can't fold @ or /
+                return false;
+            }
+            // relies on the token and binary-op enums being ordered
+            // consistently for the operators << >> + - * @ / % //
+            mp_binary_op_t op = MP_BINARY_OP_LSHIFT + (tok - MP_TOKEN_OP_DBL_LESS);
+            int rhs_sign = mp_obj_int_sign(arg1);
+            if (op <= MP_BINARY_OP_RSHIFT) {
+                // << and >> can't have negative rhs
+                if (rhs_sign < 0) {
+                    return false;
+                }
+            } else if (op >= MP_BINARY_OP_FLOOR_DIVIDE) {
+                // % and // can't have zero rhs
+                if (rhs_sign == 0) {
+                    return false;
+                }
+            }
+            arg0 = mp_binary_op(op, arg0, arg1);
+        }
+    } else if (rule_id == RULE_factor_2) {
+        // folding for unary ops: + - ~
+        mp_parse_node_t pn = peek_result(parser, 0);
+        if (!mp_parse_node_get_int_maybe(pn, &arg0)) {
+            return false;
+        }
+        mp_token_kind_t tok = MP_PARSE_NODE_LEAF_ARG(peek_result(parser, 1));
+        mp_unary_op_t op;
+        if (tok == MP_TOKEN_OP_TILDE) {
+            op = MP_UNARY_OP_INVERT;
+        } else {
+            assert(tok == MP_TOKEN_OP_PLUS || tok == MP_TOKEN_OP_MINUS); // should be
+            op = MP_UNARY_OP_POSITIVE + (tok - MP_TOKEN_OP_PLUS);
+        }
+        arg0 = mp_unary_op(op, arg0);
+
+    #if MICROPY_COMP_CONST
+    } else if (rule_id == RULE_expr_stmt) {
+        mp_parse_node_t pn1 = peek_result(parser, 0);
+        if (!MP_PARSE_NODE_IS_NULL(pn1)
+            && !(MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_expr_stmt_augassign)
+                 || MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_expr_stmt_assign_list))) {
+            // this node is of the form <x> = <y>
+            mp_parse_node_t pn0 = peek_result(parser, 1);
+            if (MP_PARSE_NODE_IS_ID(pn0)
+                && MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_atom_expr_normal)
+                && MP_PARSE_NODE_IS_ID(((mp_parse_node_struct_t *)pn1)->nodes[0])
+                && MP_PARSE_NODE_LEAF_ARG(((mp_parse_node_struct_t *)pn1)->nodes[0]) == MP_QSTR_const
+                && MP_PARSE_NODE_IS_STRUCT_KIND(((mp_parse_node_struct_t *)pn1)->nodes[1], RULE_trailer_paren)
+                ) {
+                // code to assign dynamic constants: id = const(value)
+
+                // get the id
+                qstr id = MP_PARSE_NODE_LEAF_ARG(pn0);
+
+                // get the value
+                mp_parse_node_t pn_value = ((mp_parse_node_struct_t *)((mp_parse_node_struct_t *)pn1)->nodes[1])->nodes[0];
+                mp_obj_t value;
+                if (!mp_parse_node_get_int_maybe(pn_value, &value)) {
+                    mp_obj_t exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
+                        MP_ERROR_TEXT("constant must be an integer"));
+                    mp_obj_exception_add_traceback(exc, parser->lexer->source_name,
+                        ((mp_parse_node_struct_t *)pn1)->source_line, MP_QSTRnull);
+                    nlr_raise(exc);
+                }
+
+                // store the value in the table of dynamic constants
+                mp_map_elem_t *elem = mp_map_lookup(&parser->consts, MP_OBJ_NEW_QSTR(id), MP_MAP_LOOKUP_ADD_IF_NOT_FOUND);
+                assert(elem->value == MP_OBJ_NULL);
+                elem->value = value;
+
+                // If the constant starts with an underscore then treat it as a private
+                // variable and don't emit any code to store the value to the id.
+                if (qstr_str(id)[0] == '_') {
+                    pop_result(parser); // pop const(value)
+                    pop_result(parser); // pop id
+                    push_result_rule(parser, 0, RULE_pass_stmt, 0); // replace with "pass"
+                    return true;
+                }
+
+                // replace const(value) with value
+                pop_result(parser);
+                push_result_node(parser, pn_value);
+
+                // finished folding this assignment, but we still want it to be part of the tree
+                return false;
+            }
+        }
+        return false;
+    #endif
+
+    #if MICROPY_COMP_MODULE_CONST
+    } else if (rule_id == RULE_atom_expr_normal) {
+        mp_parse_node_t pn0 = peek_result(parser, 1);
+        mp_parse_node_t pn1 = peek_result(parser, 0);
+        if (!(MP_PARSE_NODE_IS_ID(pn0)
+              && MP_PARSE_NODE_IS_STRUCT_KIND(pn1, RULE_trailer_period))) {
+            return false;
+        }
+        // id1.id2
+        // look it up in constant table, see if it can be replaced with an integer
+        mp_parse_node_struct_t *pns1 = (mp_parse_node_struct_t *)pn1;
+        assert(MP_PARSE_NODE_IS_ID(pns1->nodes[0]));
+        qstr q_base = MP_PARSE_NODE_LEAF_ARG(pn0);
+        qstr q_attr = MP_PARSE_NODE_LEAF_ARG(pns1->nodes[0]);
+        mp_map_elem_t *elem = mp_map_lookup((mp_map_t *)&mp_constants_map, MP_OBJ_NEW_QSTR(q_base), MP_MAP_LOOKUP);
+        if (elem == NULL) {
+            return false;
+        }
+        mp_obj_t dest[2];
+        mp_load_method_maybe(elem->value, q_attr, dest);
+        // only fold a plain int attribute (no bound-method state in dest[1])
+        if (!(dest[0] != MP_OBJ_NULL && mp_obj_is_int(dest[0]) && dest[1] == MP_OBJ_NULL)) {
+            return false;
+        }
+        arg0 = dest[0];
+    #endif
+
+    } else {
+        return false;
+    }
+
+    // success folding this rule
+
+    // pop the rule's arguments and push the folded constant in their place
+    for (size_t i = num_args; i > 0; i--) {
+        pop_result(parser);
+    }
+    if (mp_obj_is_small_int(arg0)) {
+        push_result_node(parser, mp_parse_node_new_small_int_checked(parser, arg0));
+    } else {
+        // TODO reuse memory for parse node struct?
+        push_result_node(parser, make_node_const_object(parser, 0, arg0));
+    }
+
+    return true;
+}
+#endif
+
+// Build a parse node for `rule_id` from the top `num_args` results and push
+// it on the result stack, after applying tree simplifications (dropping
+// redundant parentheses, normalising tuples) and, when enabled, constant
+// folding.
+STATIC void push_result_rule(parser_t *parser, size_t src_line, uint8_t rule_id, size_t num_args) {
+    // Simplify and optimise certain rules, to reduce memory usage and simplify the compiler.
+    if (rule_id == RULE_atom_paren) {
+        // Remove parenthesis around a single expression if possible.
+        // This atom_paren rule always has a single argument, and after this
+        // optimisation that argument is either NULL or testlist_comp.
+        mp_parse_node_t pn = peek_result(parser, 0);
+        if (MP_PARSE_NODE_IS_NULL(pn)) {
+            // need to keep parenthesis for ()
+        } else if (MP_PARSE_NODE_IS_STRUCT_KIND(pn, RULE_testlist_comp)) {
+            // need to keep parenthesis for (a, b, ...)
+        } else {
+            // parenthesis around a single expression, so it's just the expression
+            return;
+        }
+    } else if (rule_id == RULE_testlist_comp) {
+        // The testlist_comp rule can be the sole argument to either atom_paren
+        // or atom_bracket, for (...) and [...] respectively.
+        assert(num_args == 2);
+        mp_parse_node_t pn = peek_result(parser, 0);
+        if (MP_PARSE_NODE_IS_STRUCT(pn)) {
+            mp_parse_node_struct_t *pns = (mp_parse_node_struct_t *)pn;
+            if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_testlist_comp_3b) {
+                // tuple of one item, with trailing comma
+                pop_result(parser);
+                --num_args;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_testlist_comp_3c) {
+                // tuple of many items, convert testlist_comp_3c to testlist_comp
+                // by rewriting the node's kind in place and reusing it
+                pop_result(parser);
+                assert(pn == peek_result(parser, 0));
+                pns->kind_num_nodes = rule_id | MP_PARSE_NODE_STRUCT_NUM_NODES(pns) << 8;
+                return;
+            } else if (MP_PARSE_NODE_STRUCT_KIND(pns) == RULE_comp_for) {
+                // generator expression
+            } else {
+                // tuple with 2 items
+            }
+        } else {
+            // tuple with 2 items
+        }
+    } else if (rule_id == RULE_testlist_comp_3c) {
+        // steal first arg of outer testlist_comp rule
+        ++num_args;
+    }
+
+    #if MICROPY_COMP_CONST_FOLDING
+    if (fold_logical_constants(parser, rule_id, &num_args)) {
+        // we folded this rule so return straight away
+        return;
+    }
+    if (fold_constants(parser, rule_id, num_args)) {
+        // we folded this rule so return straight away
+        return;
+    }
+    #endif
+
+    // allocate the parse node with inline space for num_args child nodes
+    mp_parse_node_struct_t *pn = parser_alloc(parser, sizeof(mp_parse_node_struct_t) + sizeof(mp_parse_node_t) * num_args);
+    pn->source_line = src_line;
+    pn->kind_num_nodes = (rule_id & 0xff) | (num_args << 8);
+    for (size_t i = num_args; i > 0; i--) {
+        pn->nodes[i - 1] = pop_result(parser);
+    }
+    if (rule_id == RULE_testlist_comp_3c) {
+        // need to push something non-null to replace stolen first arg of testlist_comp
+        push_result_node(parser, (mp_parse_node_t)pn);
+    }
+    push_result_node(parser, (mp_parse_node_t)pn);
+}
+
+// Top-level entry: parse the token stream from `lex` into a parse tree.
+// Uses explicit rule/result stacks rather than recursion.  Raises
+// MemoryError if the stacks can't be allocated, and SyntaxError (or
+// IndentationError) on bad input.  The lexer is freed before returning.
+mp_parse_tree_t mp_parse(mp_lexer_t *lex, mp_parse_input_kind_t input_kind) {
+
+    // initialise parser and allocate memory for its stacks
+
+    parser_t parser;
+
+    // allocate the rule stack, halving the requested size until it fits
+    parser.rule_stack_alloc = MICROPY_ALLOC_PARSE_RULE_INIT;
+    parser.rule_stack_top = 0;
+    parser.rule_stack = NULL;
+    while (parser.rule_stack_alloc > 1) {
+        parser.rule_stack = m_new_maybe(rule_stack_t, parser.rule_stack_alloc);
+        if (parser.rule_stack != NULL) {
+            break;
+        } else {
+            parser.rule_stack_alloc /= 2;
+        }
+    }
+
+    // allocate the result stack in the same way
+    parser.result_stack_alloc = MICROPY_ALLOC_PARSE_RESULT_INIT;
+    parser.result_stack_top = 0;
+    parser.result_stack = NULL;
+    while (parser.result_stack_alloc > 1) {
+        parser.result_stack = m_new_maybe(mp_parse_node_t, parser.result_stack_alloc);
+        if (parser.result_stack != NULL) {
+            break;
+        } else {
+            parser.result_stack_alloc /= 2;
+        }
+    }
+    if (parser.rule_stack == NULL || parser.result_stack == NULL) {
+        mp_raise_msg(&mp_type_MemoryError, MP_ERROR_TEXT("Unable to init parser"));
+    }
+
+    parser.lexer = lex;
+
+    parser.tree.chunk = NULL;
+    parser.cur_chunk = NULL;
+
+    #if MICROPY_COMP_CONST
+    mp_map_init(&parser.consts, 0);
+    #endif
+
+    // work out the top-level rule to use, and push it on the stack
+    size_t top_level_rule;
+    switch (input_kind) {
+        case MP_PARSE_SINGLE_INPUT:
+            top_level_rule = RULE_single_input;
+            break;
+        case MP_PARSE_EVAL_INPUT:
+            top_level_rule = RULE_eval_input;
+            break;
+        default:
+            top_level_rule = RULE_file_input;
+    }
+    push_rule(&parser, lex->tok_line, top_level_rule, 0);
+
+    // parse!
+
+    // backtrack is true when the rule just processed failed to match and
+    // the rule now on top of the stack must try its next alternative
+    bool backtrack = false;
+
+    for (;;) {
+    next_rule:
+        if (parser.rule_stack_top == 0) {
+            break;
+        }
+
+        // Pop the next rule to process it
+        size_t i; // state for the current rule
+        size_t rule_src_line; // source line for the first token matched by the current rule
+        uint8_t rule_id = pop_rule(&parser, &i, &rule_src_line);
+        uint8_t rule_act = rule_act_table[rule_id];
+        const uint16_t *rule_arg = get_rule_arg(rule_id);
+        size_t n = rule_act & RULE_ACT_ARG_MASK;
+
+        #if 0
+        // debugging
+        printf("depth=" UINT_FMT " ", parser.rule_stack_top);
+        for (int j = 0; j < parser.rule_stack_top; ++j) {
+            printf(" ");
+        }
+        printf("%s n=" UINT_FMT " i=" UINT_FMT " bt=%d\n", rule_name_table[rule_id], n, i, backtrack);
+        #endif
+
+        switch (rule_act & RULE_ACT_KIND_MASK) {
+            case RULE_ACT_OR:
+                if (i > 0 && !backtrack) {
+                    goto next_rule;
+                } else {
+                    backtrack = false;
+                }
+                // try each alternative in turn, starting from state i
+                for (; i < n; ++i) {
+                    uint16_t kind = rule_arg[i] & RULE_ARG_KIND_MASK;
+                    if (kind == RULE_ARG_TOK) {
+                        if (lex->tok_kind == (rule_arg[i] & RULE_ARG_ARG_MASK)) {
+                            push_result_token(&parser, rule_id);
+                            mp_lexer_to_next(lex);
+                            goto next_rule;
+                        }
+                    } else {
+                        assert(kind == RULE_ARG_RULE);
+                        if (i + 1 < n) {
+                            push_rule(&parser, rule_src_line, rule_id, i + 1); // save this or-rule
+                        }
+                        push_rule_from_arg(&parser, rule_arg[i]); // push child of or-rule
+                        goto next_rule;
+                    }
+                }
+                // no alternative matched; propagate failure to the parent rule
+                backtrack = true;
+                break;
+
+            case RULE_ACT_AND: {
+
+                // failed, backtrack if we can, else syntax error
+                if (backtrack) {
+                    assert(i > 0);
+                    if ((rule_arg[i - 1] & RULE_ARG_KIND_MASK) == RULE_ARG_OPT_RULE) {
+                        // an optional rule that failed, so continue with next arg
+                        push_result_node(&parser, MP_PARSE_NODE_NULL);
+                        backtrack = false;
+                    } else {
+                        // a mandatory rule that failed, so propagate backtrack
+                        if (i > 1) {
+                            // already eaten tokens so can't backtrack
+                            goto syntax_error;
+                        } else {
+                            goto next_rule;
+                        }
+                    }
+                }
+
+                // progress through the rule
+                for (; i < n; ++i) {
+                    if ((rule_arg[i] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
+                        // need to match a token
+                        mp_token_kind_t tok_kind = rule_arg[i] & RULE_ARG_ARG_MASK;
+                        if (lex->tok_kind == tok_kind) {
+                            // matched token
+                            if (tok_kind == MP_TOKEN_NAME) {
+                                push_result_token(&parser, rule_id);
+                            }
+                            mp_lexer_to_next(lex);
+                        } else {
+                            // failed to match token
+                            if (i > 0) {
+                                // already eaten tokens so can't backtrack
+                                goto syntax_error;
+                            } else {
+                                // this rule failed, so backtrack
+                                backtrack = true;
+                                goto next_rule;
+                            }
+                        }
+                    } else {
+                        push_rule(&parser, rule_src_line, rule_id, i + 1); // save this and-rule
+                        push_rule_from_arg(&parser, rule_arg[i]); // push child of and-rule
+                        goto next_rule;
+                    }
+                }
+
+                assert(i == n);
+
+                // matched the rule, so now build the corresponding parse_node
+
+                #if !MICROPY_ENABLE_DOC_STRING
+                // this code discards lonely statements, such as doc strings
+                if (input_kind != MP_PARSE_SINGLE_INPUT && rule_id == RULE_expr_stmt && peek_result(&parser, 0) == MP_PARSE_NODE_NULL) {
+                    mp_parse_node_t p = peek_result(&parser, 1);
+                    if ((MP_PARSE_NODE_IS_LEAF(p) && !MP_PARSE_NODE_IS_ID(p))
+                        || MP_PARSE_NODE_IS_STRUCT_KIND(p, RULE_const_object)) {
+                        pop_result(&parser); // MP_PARSE_NODE_NULL
+                        pop_result(&parser); // const expression (leaf or RULE_const_object)
+                        // Pushing the "pass" rule here will overwrite any RULE_const_object
+                        // entry that was on the result stack, allowing the GC to reclaim
+                        // the memory from the const object when needed.
+                        push_result_rule(&parser, rule_src_line, RULE_pass_stmt, 0);
+                        break;
+                    }
+                }
+                #endif
+
+                // count number of arguments for the parse node
+                i = 0;
+                size_t num_not_nil = 0;
+                for (size_t x = n; x > 0;) {
+                    --x;
+                    if ((rule_arg[x] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
+                        mp_token_kind_t tok_kind = rule_arg[x] & RULE_ARG_ARG_MASK;
+                        if (tok_kind == MP_TOKEN_NAME) {
+                            // only tokens which were names are pushed to stack
+                            i += 1;
+                            num_not_nil += 1;
+                        }
+                    } else {
+                        // rules are always pushed
+                        if (peek_result(&parser, i) != MP_PARSE_NODE_NULL) {
+                            num_not_nil += 1;
+                        }
+                        i += 1;
+                    }
+                }
+
+                if (num_not_nil == 1 && (rule_act & RULE_ACT_ALLOW_IDENT)) {
+                    // this rule has only 1 argument and should not be emitted
+                    mp_parse_node_t pn = MP_PARSE_NODE_NULL;
+                    for (size_t x = 0; x < i; ++x) {
+                        mp_parse_node_t pn2 = pop_result(&parser);
+                        if (pn2 != MP_PARSE_NODE_NULL) {
+                            pn = pn2;
+                        }
+                    }
+                    push_result_node(&parser, pn);
+                } else {
+                    // this rule must be emitted
+
+                    if (rule_act & RULE_ACT_ADD_BLANK) {
+                        // and add an extra blank node at the end (used by the compiler to store data)
+                        push_result_node(&parser, MP_PARSE_NODE_NULL);
+                        i += 1;
+                    }
+
+                    push_result_rule(&parser, rule_src_line, rule_id, i);
+                }
+                break;
+            }
+
+            default: {
+                assert((rule_act & RULE_ACT_KIND_MASK) == RULE_ACT_LIST);
+
+                // n=2 is: item item*
+                // n=1 is: item (sep item)*
+                // n=3 is: item (sep item)* [sep]
+                bool had_trailing_sep;
+                if (backtrack) {
+                list_backtrack:
+                    had_trailing_sep = false;
+                    if (n == 2) {
+                        if (i == 1) {
+                            // fail on item, first time round; propagate backtrack
+                            goto next_rule;
+                        } else {
+                            // fail on item, in later rounds; finish with this rule
+                            backtrack = false;
+                        }
+                    } else {
+                        if (i == 1) {
+                            // fail on item, first time round; propagate backtrack
+                            goto next_rule;
+                        } else if ((i & 1) == 1) {
+                            // fail on item, in later rounds; have eaten tokens so can't backtrack
+                            if (n == 3) {
+                                // list allows trailing separator; finish parsing list
+                                had_trailing_sep = true;
+                                backtrack = false;
+                            } else {
+                                // list doesn't allow a trailing separator; fail
+                                goto syntax_error;
+                            }
+                        } else {
+                            // fail on separator; finish parsing list
+                            backtrack = false;
+                        }
+                    }
+                } else {
+                    for (;;) {
+                        // (i & 1 & n) selects the separator arg on odd
+                        // iterations of a separated list, else the item arg
+                        size_t arg = rule_arg[i & 1 & n];
+                        if ((arg & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
+                            if (lex->tok_kind == (arg & RULE_ARG_ARG_MASK)) {
+                                if (i & 1 & n) {
+                                    // separators which are tokens are not pushed to result stack
+                                } else {
+                                    push_result_token(&parser, rule_id);
+                                }
+                                mp_lexer_to_next(lex);
+                                // got element of list, so continue parsing list
+                                i += 1;
+                            } else {
+                                // couldn't get element of list
+                                i += 1;
+                                backtrack = true;
+                                goto list_backtrack;
+                            }
+                        } else {
+                            assert((arg & RULE_ARG_KIND_MASK) == RULE_ARG_RULE);
+                            push_rule(&parser, rule_src_line, rule_id, i + 1); // save this list-rule
+                            push_rule_from_arg(&parser, arg); // push child of list-rule
+                            goto next_rule;
+                        }
+                    }
+                }
+                assert(i >= 1);
+
+                // compute number of elements in list, result in i
+                i -= 1;
+                if ((n & 1) && (rule_arg[1] & RULE_ARG_KIND_MASK) == RULE_ARG_TOK) {
+                    // don't count separators when they are tokens
+                    i = (i + 1) / 2;
+                }
+
+                if (i == 1) {
+                    // list matched single item
+                    if (had_trailing_sep) {
+                        // if there was a trailing separator, make a list of a single item
+                        push_result_rule(&parser, rule_src_line, rule_id, i);
+                    } else {
+                        // just leave single item on stack (ie don't wrap in a list)
+                    }
+                } else {
+                    push_result_rule(&parser, rule_src_line, rule_id, i);
+                }
+                break;
+            }
+        }
+    }
+
+    #if MICROPY_COMP_CONST
+    mp_map_deinit(&parser.consts);
+    #endif
+
+    // truncate final chunk and link into chain of chunks
+    if (parser.cur_chunk != NULL) {
+        (void)m_renew_maybe(byte, parser.cur_chunk,
+            sizeof(mp_parse_chunk_t) + parser.cur_chunk->alloc,
+            sizeof(mp_parse_chunk_t) + parser.cur_chunk->union_.used,
+            false);
+        parser.cur_chunk->alloc = parser.cur_chunk->union_.used;
+        parser.cur_chunk->union_.next = parser.tree.chunk;
+        parser.tree.chunk = parser.cur_chunk;
+    }
+
+    if (
+        lex->tok_kind != MP_TOKEN_END // check we are at the end of the token stream
+        || parser.result_stack_top == 0 // check that we got a node (can fail on empty input)
+        ) {
+    syntax_error:;
+        mp_obj_t exc;
+        if (lex->tok_kind == MP_TOKEN_INDENT) {
+            exc = mp_obj_new_exception_msg(&mp_type_IndentationError,
+                MP_ERROR_TEXT("unexpected indent"));
+        } else if (lex->tok_kind == MP_TOKEN_DEDENT_MISMATCH) {
+            exc = mp_obj_new_exception_msg(&mp_type_IndentationError,
+                MP_ERROR_TEXT("unindent doesn't match any outer indent level"));
+        #if MICROPY_PY_FSTRINGS
+        } else if (lex->tok_kind == MP_TOKEN_MALFORMED_FSTRING) {
+            exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
+                MP_ERROR_TEXT("malformed f-string"));
+        } else if (lex->tok_kind == MP_TOKEN_FSTRING_RAW) {
+            exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
+                MP_ERROR_TEXT("raw f-strings are not supported"));
+        #endif
+        } else {
+            exc = mp_obj_new_exception_msg(&mp_type_SyntaxError,
+                MP_ERROR_TEXT("invalid syntax"));
+        }
+        // add traceback to give info about file name and location
+        // we don't have a 'block' name, so just pass the NULL qstr to indicate this
+        mp_obj_exception_add_traceback(exc, lex->source_name, lex->tok_line, MP_QSTRnull);
+        nlr_raise(exc);
+    }
+
+    // get the root parse node that we created
+    assert(parser.result_stack_top == 1);
+    parser.tree.root = parser.result_stack[0];
+
+    // free the memory that we don't need anymore
+    m_del(rule_stack_t, parser.rule_stack, parser.rule_stack_alloc);
+    m_del(mp_parse_node_t, parser.result_stack, parser.result_stack_alloc);
+
+    // we also free the lexer on behalf of the caller
+    mp_lexer_free(lex);
+
+    return parser.tree;
+}
+
+// Free all memory chunks owned by a parse tree.
+void mp_parse_tree_clear(mp_parse_tree_t *tree) {
+    // Walk the singly linked chunk list, releasing each chunk
+    // (header plus its payload of `alloc` bytes).
+    for (mp_parse_chunk_t *cur = tree->chunk; cur != NULL;) {
+        mp_parse_chunk_t *following = cur->union_.next;
+        m_del(byte, cur, sizeof(mp_parse_chunk_t) + cur->alloc);
+        cur = following;
+    }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/parse.h b/circuitpython/py/parse.h
new file mode 100644
index 0000000..5f1e30c
--- /dev/null
+++ b/circuitpython/py/parse.h
@@ -0,0 +1,107 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PARSE_H
+#define MICROPY_INCLUDED_PY_PARSE_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "py/obj.h"
+
+struct _mp_lexer_t;
+
+// a mp_parse_node_t is:
+// - 0000...0000: no node
+// - xxxx...xxx1: a small integer; bits 1 and above are the signed value, 2's complement
+// - xxxx...xx00: pointer to mp_parse_node_struct_t
+// - xx...xx0010: an identifier; bits 4 and above are the qstr
+// - xx...xx0110: a string; bits 4 and above are the qstr holding the value
+// - xx...xx1010: a string of bytes; bits 4 and above are the qstr holding the value
+// - xx...xx1110: a token; bits 4 and above are mp_token_kind_t
+
+#define MP_PARSE_NODE_NULL (0)
+#define MP_PARSE_NODE_SMALL_INT (0x1)
+#define MP_PARSE_NODE_ID (0x02)
+#define MP_PARSE_NODE_STRING (0x06)
+#define MP_PARSE_NODE_BYTES (0x0a)
+#define MP_PARSE_NODE_TOKEN (0x0e)
+
+typedef uintptr_t mp_parse_node_t; // must be pointer size
+
+// An interior (non-leaf) parse node, allocated with its children inline.
+typedef struct _mp_parse_node_struct_t {
+    uint32_t source_line; // line number in source file
+    uint32_t kind_num_nodes; // parse node kind (low 8 bits) and number of nodes (upper bits)
+    mp_parse_node_t nodes[]; // child nodes, stored inline (flexible array member)
+} mp_parse_node_struct_t;
+
+// macros for mp_parse_node_t usage
+// some of these evaluate their argument more than once
+
+#define MP_PARSE_NODE_IS_NULL(pn) ((pn) == MP_PARSE_NODE_NULL)
+#define MP_PARSE_NODE_IS_LEAF(pn) ((pn) & 3)
+#define MP_PARSE_NODE_IS_STRUCT(pn) ((pn) != MP_PARSE_NODE_NULL && ((pn) & 3) == 0)
+#define MP_PARSE_NODE_IS_STRUCT_KIND(pn, k) ((pn) != MP_PARSE_NODE_NULL && ((pn) & 3) == 0 && MP_PARSE_NODE_STRUCT_KIND((mp_parse_node_struct_t *)(pn)) == (k))
+
+#define MP_PARSE_NODE_IS_SMALL_INT(pn) (((pn) & 0x1) == MP_PARSE_NODE_SMALL_INT)
+#define MP_PARSE_NODE_IS_ID(pn) (((pn) & 0x0f) == MP_PARSE_NODE_ID)
+#define MP_PARSE_NODE_IS_TOKEN(pn) (((pn) & 0x0f) == MP_PARSE_NODE_TOKEN)
+#define MP_PARSE_NODE_IS_TOKEN_KIND(pn, k) ((pn) == (MP_PARSE_NODE_TOKEN | ((k) << 4)))
+
+#define MP_PARSE_NODE_LEAF_KIND(pn) ((pn) & 0x0f)
+#define MP_PARSE_NODE_LEAF_ARG(pn) (((uintptr_t)(pn)) >> 4)
+#define MP_PARSE_NODE_LEAF_SMALL_INT(pn) (((mp_int_t)(intptr_t)(pn)) >> 1)
+#define MP_PARSE_NODE_STRUCT_KIND(pns) ((pns)->kind_num_nodes & 0xff)
+#define MP_PARSE_NODE_STRUCT_NUM_NODES(pns) ((pns)->kind_num_nodes >> 8)
+
+// Create a small-int leaf node: the signed value occupies bits 1 and above,
+// bit 0 is the small-int tag.
+static inline mp_parse_node_t mp_parse_node_new_small_int(mp_int_t val) {
+    return (mp_parse_node_t)(MP_PARSE_NODE_SMALL_INT | ((mp_uint_t)val << 1));
+}
+// Create a leaf node of the given kind (id/string/bytes/token); the
+// argument (qstr or token kind) occupies bits 4 and above.
+static inline mp_parse_node_t mp_parse_node_new_leaf(size_t kind, mp_int_t arg) {
+    return (mp_parse_node_t)(kind | ((mp_uint_t)arg << 4));
+}
+bool mp_parse_node_is_const_false(mp_parse_node_t pn);
+bool mp_parse_node_is_const_true(mp_parse_node_t pn);
+bool mp_parse_node_get_int_maybe(mp_parse_node_t pn, mp_obj_t *o);
+size_t mp_parse_node_extract_list(mp_parse_node_t *pn, size_t pn_kind, mp_parse_node_t **nodes);
+void mp_parse_node_print(const mp_print_t *print, mp_parse_node_t pn, size_t indent);
+
+// Kind of top-level input; mp_parse() maps this to the grammar's start rule.
+typedef enum {
+    MP_PARSE_SINGLE_INPUT, // parsed with RULE_single_input
+    MP_PARSE_FILE_INPUT, // parsed with RULE_file_input (also the default)
+    MP_PARSE_EVAL_INPUT, // parsed with RULE_eval_input
+} mp_parse_input_kind_t;
+
+// A complete parse result: the root node plus the linked list of memory
+// chunks that own all the nodes (freed by mp_parse_tree_clear).
+typedef struct _mp_parse_t {
+    mp_parse_node_t root; // root node of the parse tree
+    struct _mp_parse_chunk_t *chunk; // linked list of chunks holding the nodes
+} mp_parse_tree_t;
+
+// the parser will raise an exception if an error occurred
+// the parser will free the lexer before it returns
+mp_parse_tree_t mp_parse(struct _mp_lexer_t *lex, mp_parse_input_kind_t input_kind);
+void mp_parse_tree_clear(mp_parse_tree_t *tree);
+
+#endif // MICROPY_INCLUDED_PY_PARSE_H
diff --git a/circuitpython/py/parsenum.c b/circuitpython/py/parsenum.c
new file mode 100644
index 0000000..bd41488
--- /dev/null
+++ b/circuitpython/py/parsenum.c
@@ -0,0 +1,362 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "py/runtime.h"
+#include "py/parsenumbase.h"
+#include "py/parsenum.h"
+#include "py/smallint.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PY_BUILTINS_FLOAT
+#include <math.h>
+#endif
+
+// Raise `exc`.  When called on behalf of the parser (lex != NULL) the
+// exception is first converted to SyntaxError and a source-location
+// traceback is attached.  Never returns.
+STATIC NORETURN void raise_exc(mp_obj_t exc, mp_lexer_t *lex) {
+    // if lex!=NULL then the parser called us and we need to convert the
+    // exception's type from ValueError to SyntaxError and add traceback info
+    if (lex != NULL) {
+        ((mp_obj_base_t *)MP_OBJ_TO_PTR(exc))->type = &mp_type_SyntaxError;
+        mp_obj_exception_add_traceback(exc, lex->source_name, lex->tok_line, MP_QSTRnull);
+    }
+    nlr_raise(exc);
+}
+
+// Parse str_/len as an integer in the given radix `base` (base 0 is
+// resolved by mp_parse_num_base from any radix prefix).  Leading and
+// trailing whitespace, an optional sign and '_' digit separators are
+// accepted.  Returns a small int when the value fits, otherwise falls
+// back to a big int.  Errors raise ValueError, or SyntaxError when
+// lex != NULL (see raise_exc).
+mp_obj_t mp_parse_num_integer(const char *restrict str_, size_t len, int base, mp_lexer_t *lex) {
+    const byte *restrict str = (const byte *)str_;
+    const byte *restrict top = str + len;
+    bool neg = false;
+    mp_obj_t ret_val;
+
+    // check radix base
+    if ((base != 0 && base < 2) || base > 36) {
+        // this won't be reached if lex!=NULL
+        mp_raise_ValueError(MP_ERROR_TEXT("int() arg 2 must be >= 2 and <= 36"));
+    }
+
+    // skip leading space
+    for (; str < top && unichar_isspace(*str); str++) {
+    }
+
+    // parse optional sign
+    if (str < top) {
+        if (*str == '+') {
+            str++;
+        } else if (*str == '-') {
+            str++;
+            neg = true;
+        }
+    }
+
+    // parse optional base prefix
+    str += mp_parse_num_base((const char *)str, top - str, &base);
+
+    // string should be an integer number
+    mp_int_t int_val = 0;
+    const byte *restrict str_val_start = str;
+    for (; str < top; str++) {
+        // get next digit as a value
+        mp_uint_t dig = *str;
+        if ('0' <= dig && dig <= '9') {
+            dig -= '0';
+        } else if (dig == '_') {
+            // underscore digit separators are skipped
+            continue;
+        } else {
+            dig |= 0x20; // make digit lower-case
+            if ('a' <= dig && dig <= 'z') {
+                dig -= 'a' - 10;
+            } else {
+                // unknown character
+                break;
+            }
+        }
+        if (dig >= (mp_uint_t)base) {
+            break;
+        }
+
+        // add next digit and check for overflow
+        if (mp_small_int_mul_overflow(int_val, base)) {
+            goto overflow;
+        }
+        int_val = int_val * base + dig;
+        if (!MP_SMALL_INT_FITS(int_val)) {
+            goto overflow;
+        }
+    }
+
+    // negate value if needed
+    if (neg) {
+        int_val = -int_val;
+    }
+
+    // create the small int
+    ret_val = MP_OBJ_NEW_SMALL_INT(int_val);
+
+have_ret_val:
+    // check we parsed something
+    if (str == str_val_start) {
+        goto value_error;
+    }
+
+    // skip trailing space
+    for (; str < top && unichar_isspace(*str); str++) {
+    }
+
+    // check we reached the end of the string
+    if (str != top) {
+        goto value_error;
+    }
+
+    // return the object
+    return ret_val;
+
+overflow:
+    // value doesn't fit in a small int: reparse the digits using a big int
+    {
+        const char *s2 = (const char *)str_val_start;
+        ret_val = mp_obj_new_int_from_str_len(&s2, top - str_val_start, neg, base);
+        str = (const byte *)s2;
+        goto have_ret_val;
+    }
+
+value_error:
+    {
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        mp_obj_t exc = mp_obj_new_exception_msg(&mp_type_ValueError,
+            MP_ERROR_TEXT("invalid syntax for integer"));
+        raise_exc(exc, lex);
+        #elif MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NORMAL
+        mp_obj_t exc = mp_obj_new_exception_msg_varg(&mp_type_ValueError,
+            MP_ERROR_TEXT("invalid syntax for integer with base %d"), base);
+        raise_exc(exc, lex);
+        #else
+        vstr_t vstr;
+        mp_print_t print;
+        vstr_init_print(&vstr, 50, &print);
+        mp_printf(&print, "invalid syntax for integer with base %d: ", base);
+        mp_str_print_quoted(&print, str_val_start, top - str_val_start, true);
+        mp_obj_t exc = mp_obj_new_exception_arg1(&mp_type_ValueError,
+            mp_obj_new_str_from_vstr(&mp_type_str, &vstr));
+        raise_exc(exc, lex);
+        #endif
+    }
+}
+
+// State of the decimal-number scanner in mp_parse_num_decimal.
+typedef enum {
+    PARSE_DEC_IN_INTG, // scanning the integer part
+    PARSE_DEC_IN_FRAC, // scanning digits after the decimal point
+    PARSE_DEC_IN_EXP, // scanning the exponent digits
+} parse_dec_in_t;
+
+// Parse str/len as a float, or a complex when allow_imag/force_complex is
+// set.  Accepts optional surrounding whitespace, an optional sign, 'inf',
+// 'infinity' and 'nan' (case insensitive), decimal digits with optional
+// fraction, exponent and '_' separators, and a trailing 'j'/'J' imaginary
+// suffix when allow_imag is true.  Errors are reported via raise_exc(),
+// ie ValueError, or SyntaxError when lex != NULL.
+mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool force_complex, mp_lexer_t *lex) {
+    #if MICROPY_PY_BUILTINS_FLOAT
+
+// DEC_VAL_MAX only needs to be rough and is used to retain precision while not overflowing
+// SMALL_NORMAL_VAL is the smallest power of 10 that is still a normal float
+// EXACT_POWER_OF_10 is the largest value of x so that 10^x can be stored exactly in a float
+// Note: EXACT_POWER_OF_10 is at least floor(log_5(2^mantissa_length)). Indeed, 10^n = 2^n * 5^n
+// so we only have to store the 5^n part in the mantissa (the 2^n part will go into the float's
+// exponent).
+    #if MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_FLOAT
+#define DEC_VAL_MAX 1e20F
+#define SMALL_NORMAL_VAL (1e-37F)
+#define SMALL_NORMAL_EXP (-37)
+#define EXACT_POWER_OF_10 (9)
+    #elif MICROPY_FLOAT_IMPL == MICROPY_FLOAT_IMPL_DOUBLE
+#define DEC_VAL_MAX 1e200
+#define SMALL_NORMAL_VAL (1e-307)
+#define SMALL_NORMAL_EXP (-307)
+#define EXACT_POWER_OF_10 (22)
+    #endif
+
+    const char *top = str + len;
+    mp_float_t dec_val = 0;
+    bool dec_neg = false;
+    bool imag = false;
+
+    // skip leading space
+    for (; str < top && unichar_isspace(*str); str++) {
+    }
+
+    // parse optional sign
+    if (str < top) {
+        if (*str == '+') {
+            str++;
+        } else if (*str == '-') {
+            str++;
+            dec_neg = true;
+        }
+    }
+
+    const char *str_val_start = str;
+
+    // determine what the string is
+    if (str < top && (str[0] | 0x20) == 'i') {
+        // string starts with 'i', should be 'inf' or 'infinity' (case insensitive)
+        if (str + 2 < top && (str[1] | 0x20) == 'n' && (str[2] | 0x20) == 'f') {
+            // inf
+            str += 3;
+            dec_val = (mp_float_t)INFINITY;
+            if (str + 4 < top && (str[0] | 0x20) == 'i' && (str[1] | 0x20) == 'n' && (str[2] | 0x20) == 'i' && (str[3] | 0x20) == 't' && (str[4] | 0x20) == 'y') {
+                // infinity
+                str += 5;
+            }
+        }
+    } else if (str < top && (str[0] | 0x20) == 'n') {
+        // string starts with 'n', should be 'nan' (case insensitive)
+        if (str + 2 < top && (str[1] | 0x20) == 'a' && (str[2] | 0x20) == 'n') {
+            // NaN
+            str += 3;
+            dec_val = MICROPY_FLOAT_C_FUN(nan)("");
+        }
+    } else {
+        // string should be a decimal number
+        parse_dec_in_t in = PARSE_DEC_IN_INTG;
+        bool exp_neg = false;
+        int exp_val = 0;
+        int exp_extra = 0; // exponent adjustment for digits dropped/shifted below
+        while (str < top) {
+            unsigned int dig = *str++;
+            if ('0' <= dig && dig <= '9') {
+                dig -= '0';
+                if (in == PARSE_DEC_IN_EXP) {
+                    // don't overflow exp_val when adding next digit, instead just truncate
+                    // it and the resulting float will still be correct, either inf or 0.0
+                    // (use INT_MAX/2 to allow adding exp_extra at the end without overflow)
+                    if (exp_val < (INT_MAX / 2 - 9) / 10) {
+                        exp_val = 10 * exp_val + dig;
+                    }
+                } else {
+                    if (dec_val < DEC_VAL_MAX) {
+                        // dec_val won't overflow so keep accumulating
+                        dec_val = 10 * dec_val + dig;
+                        if (in == PARSE_DEC_IN_FRAC) {
+                            --exp_extra;
+                        }
+                    } else {
+                        // dec_val might overflow and we anyway can't represent more digits
+                        // of precision, so ignore the digit and just adjust the exponent
+                        if (in == PARSE_DEC_IN_INTG) {
+                            ++exp_extra;
+                        }
+                    }
+                }
+            } else if (in == PARSE_DEC_IN_INTG && dig == '.') {
+                in = PARSE_DEC_IN_FRAC;
+            } else if (in != PARSE_DEC_IN_EXP && ((dig | 0x20) == 'e')) {
+                in = PARSE_DEC_IN_EXP;
+                if (str < top) {
+                    if (str[0] == '+') {
+                        str++;
+                    } else if (str[0] == '-') {
+                        str++;
+                        exp_neg = true;
+                    }
+                }
+                if (str == top) {
+                    // 'e' (with optional sign) but nothing after it is malformed
+                    goto value_error;
+                }
+            } else if (allow_imag && (dig | 0x20) == 'j') {
+                // imaginary literal suffix
+                imag = true;
+                break;
+            } else if (dig == '_') {
+                // underscore digit separators are skipped
+                continue;
+            } else {
+                // unknown character
+                str--;
+                break;
+            }
+        }
+
+        // work out the exponent
+        if (exp_neg) {
+            exp_val = -exp_val;
+        }
+
+        // apply the exponent, making sure it's not a subnormal value
+        exp_val += exp_extra;
+        if (exp_val < SMALL_NORMAL_EXP) {
+            exp_val -= SMALL_NORMAL_EXP;
+            dec_val *= SMALL_NORMAL_VAL;
+        }
+
+        // At this point, we need to multiply the mantissa by its base 10 exponent. If possible,
+        // we would rather manipulate numbers that have an exact representation in IEEE754. It
+        // turns out small positive powers of 10 do, whereas small negative powers of 10 don't.
+        // So in that case, we'll yield a division of exact values rather than a multiplication
+        // of slightly erroneous values.
+        if (exp_val < 0 && exp_val >= -EXACT_POWER_OF_10) {
+            dec_val /= MICROPY_FLOAT_C_FUN(pow)(10, -exp_val);
+        } else {
+            dec_val *= MICROPY_FLOAT_C_FUN(pow)(10, exp_val);
+        }
+    }
+
+    // negate value if needed
+    if (dec_neg) {
+        dec_val = -dec_val;
+    }
+
+    // check we parsed something
+    if (str == str_val_start) {
+        goto value_error;
+    }
+
+    // skip trailing space
+    for (; str < top && unichar_isspace(*str); str++) {
+    }
+
+    // check we reached the end of the string
+    if (str != top) {
+        goto value_error;
+    }
+
+    // return the object
+    // note: the if/else chain below spans the preprocessor branches; the
+    // trailing `else` pairs with the last `if` of whichever branch compiled
+    #if MICROPY_PY_BUILTINS_COMPLEX
+    if (imag) {
+        return mp_obj_new_complex(0, dec_val);
+    } else if (force_complex) {
+        return mp_obj_new_complex(dec_val, 0);
+    }
+    #else
+    if (imag || force_complex) {
+        raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, MP_ERROR_TEXT("complex values not supported")), lex);
+    }
+    #endif
+    else {
+        return mp_obj_new_float(dec_val);
+    }
+
+value_error:
+    raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, MP_ERROR_TEXT("invalid syntax for number")), lex);
+
+    #else
+    raise_exc(mp_obj_new_exception_msg(&mp_type_ValueError, MP_ERROR_TEXT("decimal numbers not supported")), lex);
+    #endif
+}
diff --git a/circuitpython/py/parsenum.h b/circuitpython/py/parsenum.h
new file mode 100644
index 0000000..a91ca53
--- /dev/null
+++ b/circuitpython/py/parsenum.h
@@ -0,0 +1,37 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PARSENUM_H
+#define MICROPY_INCLUDED_PY_PARSENUM_H
+
+#include "py/mpconfig.h"
+#include "py/lexer.h"
+#include "py/obj.h"
+
+// these functions raise a SyntaxError if lex!=NULL, else a ValueError
+mp_obj_t mp_parse_num_integer(const char *restrict str, size_t len, int base, mp_lexer_t *lex);
+mp_obj_t mp_parse_num_decimal(const char *str, size_t len, bool allow_imag, bool force_complex, mp_lexer_t *lex);
+
+#endif // MICROPY_INCLUDED_PY_PARSENUM_H
diff --git a/circuitpython/py/parsenumbase.c b/circuitpython/py/parsenumbase.c
new file mode 100644
index 0000000..0802b43
--- /dev/null
+++ b/circuitpython/py/parsenumbase.c
@@ -0,0 +1,71 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/parsenumbase.h"
+
+// find real radix base, and strip preceding '0x', '0o' and '0b'
+// puts base in *base, and returns number of bytes to skip the prefix
+size_t mp_parse_num_base(const char *str, size_t len, int *base) {
+ const byte *p = (const byte *)str;
+ if (len <= 1) {
+ goto no_prefix;
+ }
+ unichar c = *(p++);
+ if ((*base == 0 || *base == 16) && c == '0') {
+ c = *(p++);
+ if ((c | 32) == 'x') {
+ *base = 16;
+ } else if (*base == 0 && (c | 32) == 'o') {
+ *base = 8;
+ } else if (*base == 0 && (c | 32) == 'b') {
+ *base = 2;
+ } else {
+ if (*base == 0) {
+ *base = 10;
+ }
+ p -= 2;
+ }
+ } else if (*base == 8 && c == '0') {
+ c = *(p++);
+ if ((c | 32) != 'o') {
+ p -= 2;
+ }
+ } else if (*base == 2 && c == '0') {
+ c = *(p++);
+ if ((c | 32) != 'b') {
+ p -= 2;
+ }
+ } else {
+ p--;
+ no_prefix:
+ if (*base == 0) {
+ *base = 10;
+ }
+ }
+ return p - (const byte *)str;
+}
diff --git a/circuitpython/py/parsenumbase.h b/circuitpython/py/parsenumbase.h
new file mode 100644
index 0000000..43dcc23
--- /dev/null
+++ b/circuitpython/py/parsenumbase.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PARSENUMBASE_H
+#define MICROPY_INCLUDED_PY_PARSENUMBASE_H
+
+#include "py/mpconfig.h"
+
+size_t mp_parse_num_base(const char *str, size_t len, int *base);
+
+#endif // MICROPY_INCLUDED_PY_PARSENUMBASE_H
diff --git a/circuitpython/py/persistentcode.c b/circuitpython/py/persistentcode.c
new file mode 100644
index 0000000..e604569
--- /dev/null
+++ b/circuitpython/py/persistentcode.c
@@ -0,0 +1,883 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2020 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/reader.h"
+#include "py/nativeglue.h"
+#include "py/persistentcode.h"
+#include "py/bc0.h"
+#include "py/objstr.h"
+#include "py/mpthread.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
+
+#include "py/smallint.h"
+
+#define QSTR_LAST_STATIC MP_QSTR_zip
+
+#if MICROPY_DYNAMIC_COMPILER
+#define MPY_FEATURE_ARCH_DYNAMIC mp_dynamic_compiler.native_arch
+#else
+#define MPY_FEATURE_ARCH_DYNAMIC MPY_FEATURE_ARCH
+#endif
+
+#if MICROPY_PERSISTENT_CODE_LOAD || (MICROPY_PERSISTENT_CODE_SAVE && !MICROPY_DYNAMIC_COMPILER)
+// The bytecode will depend on the number of bits in a small-int, and
+// this function computes that (could make it a fixed constant, but it
+// would need to be defined in mpconfigport.h).
+STATIC int mp_small_int_bits(void) {
+ mp_int_t i = MP_SMALL_INT_MAX;
+ int n = 1;
+ while (i != 0) {
+ i >>= 1;
+ ++n;
+ }
+ return n;
+}
+#endif
+
+#define QSTR_WINDOW_SIZE (32)
+
+typedef struct _qstr_window_t {
+ uint16_t idx; // indexes the head of the window
+ uint16_t window[QSTR_WINDOW_SIZE];
+} qstr_window_t;
+
+// Push a qstr to the head of the window, and the tail qstr is overwritten
+STATIC void qstr_window_push(qstr_window_t *qw, qstr qst) {
+ qw->idx = (qw->idx + 1) % QSTR_WINDOW_SIZE;
+ qw->window[qw->idx] = qst;
+}
+
+// Pull an existing qstr from within the window to the head of the window
+STATIC qstr qstr_window_pull(qstr_window_t *qw, size_t idx) {
+ qstr qst = qw->window[idx];
+ if (idx > qw->idx) {
+ memmove(&qw->window[idx], &qw->window[idx + 1], (QSTR_WINDOW_SIZE - idx - 1) * sizeof(uint16_t));
+ qw->window[QSTR_WINDOW_SIZE - 1] = qw->window[0];
+ idx = 0;
+ }
+ memmove(&qw->window[idx], &qw->window[idx + 1], (qw->idx - idx) * sizeof(uint16_t));
+ qw->window[qw->idx] = qst;
+ return qst;
+}
+
+#if MICROPY_PERSISTENT_CODE_LOAD
+
+// Access a qstr at the given index, relative to the head of the window (0=head)
+STATIC qstr qstr_window_access(qstr_window_t *qw, size_t idx) {
+ return qstr_window_pull(qw, (qw->idx + QSTR_WINDOW_SIZE - idx) % QSTR_WINDOW_SIZE);
+}
+
+#endif
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+
+// Insert a qstr at the head of the window, either by pulling an existing one or pushing a new one
+STATIC size_t qstr_window_insert(qstr_window_t *qw, qstr qst) {
+ for (size_t idx = 0; idx < QSTR_WINDOW_SIZE; ++idx) {
+ if (qw->window[idx] == qst) {
+ qstr_window_pull(qw, idx);
+ return (qw->idx + QSTR_WINDOW_SIZE - idx) % QSTR_WINDOW_SIZE;
+ }
+ }
+ qstr_window_push(qw, qst);
+ return QSTR_WINDOW_SIZE;
+}
+
+#endif
+
+typedef struct _bytecode_prelude_t {
+ uint n_state;
+ uint n_exc_stack;
+ uint scope_flags;
+ uint n_pos_args;
+ uint n_kwonly_args;
+ uint n_def_pos_args;
+ uint code_info_size;
+} bytecode_prelude_t;
+
+// ip will point to start of opcodes
+// return value will point to simple_name, source_file qstrs
+STATIC byte *extract_prelude(const byte **ip, bytecode_prelude_t *prelude) {
+ MP_BC_PRELUDE_SIG_DECODE(*ip);
+ prelude->n_state = n_state;
+ prelude->n_exc_stack = n_exc_stack;
+ prelude->scope_flags = scope_flags;
+ prelude->n_pos_args = n_pos_args;
+ prelude->n_kwonly_args = n_kwonly_args;
+ prelude->n_def_pos_args = n_def_pos_args;
+ MP_BC_PRELUDE_SIZE_DECODE(*ip);
+ byte *ip_info = (byte *)*ip;
+ *ip += n_info;
+ *ip += n_cell;
+ return ip_info;
+}
+
+#endif // MICROPY_PERSISTENT_CODE_LOAD || MICROPY_PERSISTENT_CODE_SAVE
+
+#if MICROPY_PERSISTENT_CODE_LOAD
+
+#include "py/parsenum.h"
+
+STATIC void raise_corrupt_mpy(void) {
+ mp_raise_RuntimeError(MP_ERROR_TEXT("Corrupt .mpy file"));
+}
+
+STATIC int read_byte(mp_reader_t *reader);
+STATIC size_t read_uint(mp_reader_t *reader, byte **out);
+
+#if MICROPY_EMIT_MACHINE_CODE
+
+typedef struct _reloc_info_t {
+ mp_reader_t *reader;
+ mp_uint_t *const_table;
+} reloc_info_t;
+
+#if MICROPY_EMIT_THUMB
+STATIC void asm_thumb_rewrite_mov(uint8_t *pc, uint16_t val) {
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wcast-align"
+ // high part
+ *(uint16_t *)pc = (*(uint16_t *)pc & 0xfbf0) | (val >> 1 & 0x0400) | (val >> 12);
+ // low part
+ *(uint16_t *)(pc + 2) = (*(uint16_t *)(pc + 2) & 0x0f00) | (val << 4 & 0x7000) | (val & 0x00ff);
+ #pragma GCC diagnostic pop
+}
+#endif
+
+STATIC void arch_link_qstr(uint8_t *pc, bool is_obj, qstr qst) {
+ mp_uint_t val = qst;
+ if (is_obj) {
+ val = (mp_uint_t)MP_OBJ_NEW_QSTR(qst);
+ }
+ #if MICROPY_EMIT_X86 || MICROPY_EMIT_X64 || MICROPY_EMIT_ARM || MICROPY_EMIT_XTENSA || MICROPY_EMIT_XTENSAWIN
+ pc[0] = val & 0xff;
+ pc[1] = (val >> 8) & 0xff;
+ pc[2] = (val >> 16) & 0xff;
+ pc[3] = (val >> 24) & 0xff;
+ #elif MICROPY_EMIT_THUMB
+ if (is_obj) {
+ // qstr object, movw and movt
+ asm_thumb_rewrite_mov(pc, val); // movw
+ asm_thumb_rewrite_mov(pc + 4, val >> 16); // movt
+ } else {
+ // qstr number, movw instruction
+ asm_thumb_rewrite_mov(pc, val); // movw
+ }
+ #endif
+}
+
+void mp_native_relocate(void *ri_in, uint8_t *text, uintptr_t reloc_text) {
+ // Relocate native code
+ reloc_info_t *ri = ri_in;
+ uintptr_t *addr_to_adjust = NULL;
+
+ // Read the byte directly so that we don't error on EOF.
+ mp_uint_t op = ri->reader->readbyte(ri->reader->data);
+ while (op != 0xff && op != MP_READER_EOF) {
+ if (op & 1) {
+ // Point to new location to make adjustments
+ size_t addr = read_uint(ri->reader, NULL);
+ if ((addr & 1) == 0) {
+ // Point to somewhere in text
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wcast-align"
+ addr_to_adjust = &((uintptr_t *)text)[addr >> 1];
+ #pragma GCC diagnostic pop
+ } else {
+ // Point to somewhere in rodata
+ addr_to_adjust = &((uintptr_t *)ri->const_table[1])[addr >> 1];
+ }
+ }
+ op >>= 1;
+ uintptr_t dest;
+ size_t n = 1;
+ if (op <= 5) {
+ if (op & 1) {
+ // Read in number of adjustments to make
+ n = read_uint(ri->reader, NULL);
+ }
+ op >>= 1;
+ if (op == 0) {
+ // Destination is text
+ dest = reloc_text;
+ } else {
+ // Destination is rodata (op=1) or bss (op=1 if no rodata, else op=2)
+ dest = ri->const_table[op];
+ }
+ } else if (op == 6) {
+ // Destination is mp_fun_table itself
+ dest = (uintptr_t)&mp_fun_table;
+ } else {
+ // Destination is an entry in mp_fun_table
+ dest = ((uintptr_t *)&mp_fun_table)[op - 7];
+ }
+ while (n--) {
+ *addr_to_adjust++ += dest;
+ }
+ op = ri->reader->readbyte(ri->reader->data);
+ }
+}
+
+#endif
+
+STATIC int read_byte(mp_reader_t *reader) {
+ mp_uint_t b = reader->readbyte(reader->data);
+ if (b == MP_READER_EOF) {
+ raise_corrupt_mpy();
+ }
+ return b;
+}
+
+STATIC void read_bytes(mp_reader_t *reader, byte *buf, size_t len) {
+ while (len-- > 0) {
+ mp_uint_t b = reader->readbyte(reader->data);
+ if (b == MP_READER_EOF) {
+ raise_corrupt_mpy();
+ }
+ *buf++ = b;
+ }
+}
+
+STATIC size_t read_uint(mp_reader_t *reader, byte **out) {
+ size_t unum = 0;
+ for (;;) {
+ mp_uint_t b = reader->readbyte(reader->data);
+ if (b == MP_READER_EOF) {
+ raise_corrupt_mpy();
+ }
+ if (out != NULL) {
+ **out = b;
+ ++*out;
+ }
+ unum = (unum << 7) | (b & 0x7f);
+ if ((b & 0x80) == 0) {
+ break;
+ }
+ }
+ return unum;
+}
+
+STATIC qstr load_qstr(mp_reader_t *reader, qstr_window_t *qw) {
+ size_t len = read_uint(reader, NULL);
+ if (len == 0) {
+ // static qstr
+ return read_byte(reader);
+ }
+ if (len & 1) {
+ // qstr in window
+ return qstr_window_access(qw, len >> 1);
+ }
+ len >>= 1;
+ char *str = m_new(char, len);
+ read_bytes(reader, (byte *)str, len);
+ qstr qst = qstr_from_strn(str, len);
+ m_del(char, str, len);
+ qstr_window_push(qw, qst);
+ return qst;
+}
+
+STATIC mp_obj_t load_obj(mp_reader_t *reader) {
+ byte obj_type = read_byte(reader);
+ if (obj_type == 'e') {
+ return MP_OBJ_FROM_PTR(&mp_const_ellipsis_obj);
+ } else {
+ size_t len = read_uint(reader, NULL);
+ vstr_t vstr;
+ vstr_init_len(&vstr, len);
+ read_bytes(reader, (byte *)vstr.buf, len);
+ if (obj_type == 's' || obj_type == 'b') {
+ return mp_obj_new_str_from_vstr(obj_type == 's' ? &mp_type_str : &mp_type_bytes, &vstr);
+ } else if (obj_type == 'i') {
+ return mp_parse_num_integer(vstr.buf, vstr.len, 10, NULL);
+ } else {
+ assert(obj_type == 'f' || obj_type == 'c');
+ return mp_parse_num_decimal(vstr.buf, vstr.len, obj_type == 'c', false, NULL);
+ }
+ }
+}
+
+STATIC void load_prelude_qstrs(mp_reader_t *reader, qstr_window_t *qw, byte *ip) {
+ qstr simple_name = load_qstr(reader, qw);
+ ip[0] = simple_name;
+ ip[1] = simple_name >> 8;
+ qstr source_file = load_qstr(reader, qw);
+ ip[2] = source_file;
+ ip[3] = source_file >> 8;
+}
+
+STATIC void load_prelude(mp_reader_t *reader, qstr_window_t *qw, byte **ip, bytecode_prelude_t *prelude) {
+ // Read in the prelude header
+ byte *ip_read = *ip;
+ read_uint(reader, &ip_read); // read in n_state/etc (is effectively a var-uint)
+ read_uint(reader, &ip_read); // read in n_info/n_cell (is effectively a var-uint)
+
+ // Prelude header has been read into *ip, now decode and extract values from it
+ extract_prelude((const byte **)ip, prelude);
+
+ // Load qstrs in prelude
+ load_prelude_qstrs(reader, qw, ip_read);
+ ip_read += 4;
+
+ // Read remaining code info
+ read_bytes(reader, ip_read, *ip - ip_read);
+}
+
+STATIC void load_bytecode(mp_reader_t *reader, qstr_window_t *qw, byte *ip, byte *ip_top) {
+ while (ip < ip_top) {
+ *ip = read_byte(reader);
+ size_t sz;
+ uint f = mp_opcode_format(ip, &sz, false);
+ ++ip;
+ --sz;
+ if (f == MP_BC_FORMAT_QSTR) {
+ qstr qst = load_qstr(reader, qw);
+ *ip++ = qst;
+ *ip++ = qst >> 8;
+ sz -= 2;
+ } else if (f == MP_BC_FORMAT_VAR_UINT) {
+ while ((*ip++ = read_byte(reader)) & 0x80) {
+ }
+ }
+ read_bytes(reader, ip, sz);
+ ip += sz;
+ }
+}
+
+STATIC mp_raw_code_t *load_raw_code(mp_reader_t *reader, qstr_window_t *qw) {
+ // Load function kind and data length
+ size_t kind_len = read_uint(reader, NULL);
+ int kind = (kind_len & 3) + MP_CODE_BYTECODE;
+ size_t fun_data_len = kind_len >> 2;
+
+ #if !MICROPY_EMIT_MACHINE_CODE
+ if (kind != MP_CODE_BYTECODE) {
+
+ }
+ #endif
+
+ uint8_t *fun_data = NULL;
+ bytecode_prelude_t prelude = {0};
+ #if MICROPY_EMIT_MACHINE_CODE
+ size_t prelude_offset = 0;
+ mp_uint_t type_sig = 0;
+ size_t n_qstr_link = 0;
+ #endif
+
+ if (kind == MP_CODE_BYTECODE) {
+ // Allocate memory for the bytecode
+ fun_data = m_new(uint8_t, fun_data_len);
+
+ // Load prelude
+ byte *ip = fun_data;
+ load_prelude(reader, qw, &ip, &prelude);
+
+ // Load bytecode
+ load_bytecode(reader, qw, ip, fun_data + fun_data_len);
+
+ #if MICROPY_EMIT_MACHINE_CODE
+ } else {
+ // Allocate memory for native data and load it
+ size_t fun_alloc;
+ MP_PLAT_ALLOC_EXEC(fun_data_len, (void **)&fun_data, &fun_alloc);
+ read_bytes(reader, fun_data, fun_data_len);
+
+ if (kind == MP_CODE_NATIVE_PY || kind == MP_CODE_NATIVE_VIPER) {
+ // Parse qstr link table and link native code
+ n_qstr_link = read_uint(reader, NULL);
+ for (size_t i = 0; i < n_qstr_link; ++i) {
+ size_t off = read_uint(reader, NULL);
+ qstr qst = load_qstr(reader, qw);
+ uint8_t *dest = fun_data + (off >> 2);
+ if ((off & 3) == 0) {
+ // Generic 16-bit link
+ dest[0] = qst & 0xff;
+ dest[1] = (qst >> 8) & 0xff;
+ } else if ((off & 3) == 3) {
+ // Generic, aligned qstr-object link
+ #pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wcast-align"
+ *(mp_obj_t *)dest = MP_OBJ_NEW_QSTR(qst);
+ #pragma GCC diagnostic pop
+ } else {
+ // Architecture-specific link
+ arch_link_qstr(dest, (off & 3) == 2, qst);
+ }
+ }
+ }
+
+ if (kind == MP_CODE_NATIVE_PY) {
+ // Extract prelude for later use
+ prelude_offset = read_uint(reader, NULL);
+ const byte *ip = fun_data + prelude_offset;
+ byte *ip_info = extract_prelude(&ip, &prelude);
+ // Load qstrs in prelude
+ load_prelude_qstrs(reader, qw, ip_info);
+ } else {
+ // Load basic scope info for viper and asm
+ prelude.scope_flags = read_uint(reader, NULL);
+ prelude.n_pos_args = 0;
+ prelude.n_kwonly_args = 0;
+ if (kind == MP_CODE_NATIVE_ASM) {
+ prelude.n_pos_args = read_uint(reader, NULL);
+ type_sig = read_uint(reader, NULL);
+ }
+ }
+ #endif
+ }
+
+ size_t n_obj = 0;
+ size_t n_raw_code = 0;
+ mp_uint_t *const_table = NULL;
+
+ if (kind != MP_CODE_NATIVE_ASM) {
+ // Load constant table for bytecode, native and viper
+
+ // Number of entries in constant table
+ n_obj = read_uint(reader, NULL);
+ n_raw_code = read_uint(reader, NULL);
+
+ // Allocate constant table
+ size_t n_alloc = prelude.n_pos_args + prelude.n_kwonly_args + n_obj + n_raw_code;
+ #if MICROPY_EMIT_MACHINE_CODE
+ if (kind != MP_CODE_BYTECODE) {
+ ++n_alloc; // additional entry for mp_fun_table
+ if (prelude.scope_flags & MP_SCOPE_FLAG_VIPERRODATA) {
+ ++n_alloc; // additional entry for rodata
+ }
+ if (prelude.scope_flags & MP_SCOPE_FLAG_VIPERBSS) {
+ ++n_alloc; // additional entry for BSS
+ }
+ }
+ #endif
+
+ const_table = m_new(mp_uint_t, n_alloc);
+ mp_uint_t *ct = const_table;
+
+ // Load function argument names (initial entries in const_table)
+ // (viper has n_pos_args=n_kwonly_args=0 so doesn't load any qstrs here)
+ for (size_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) {
+ *ct++ = (mp_uint_t)MP_OBJ_NEW_QSTR(load_qstr(reader, qw));
+ }
+
+ #if MICROPY_EMIT_MACHINE_CODE
+ if (kind != MP_CODE_BYTECODE) {
+ // Populate mp_fun_table entry
+ *ct++ = (mp_uint_t)(uintptr_t)&mp_fun_table;
+
+ // Allocate and load rodata if needed
+ if (prelude.scope_flags & MP_SCOPE_FLAG_VIPERRODATA) {
+ size_t size = read_uint(reader, NULL);
+ uint8_t *rodata = m_new(uint8_t, size);
+ read_bytes(reader, rodata, size);
+ *ct++ = (uintptr_t)rodata;
+ }
+
+ // Allocate BSS if needed
+ if (prelude.scope_flags & MP_SCOPE_FLAG_VIPERBSS) {
+ size_t size = read_uint(reader, NULL);
+ uint8_t *bss = m_new0(uint8_t, size);
+ *ct++ = (uintptr_t)bss;
+ }
+ }
+ #endif
+
+ // Load constant objects and raw code children
+ for (size_t i = 0; i < n_obj; ++i) {
+ *ct++ = (mp_uint_t)load_obj(reader);
+ }
+ for (size_t i = 0; i < n_raw_code; ++i) {
+ *ct++ = (mp_uint_t)(uintptr_t)load_raw_code(reader, qw);
+ }
+ }
+
+ // Create raw_code and return it
+ mp_raw_code_t *rc = mp_emit_glue_new_raw_code();
+ if (kind == MP_CODE_BYTECODE) {
+ // Assign bytecode to raw code object
+ mp_emit_glue_assign_bytecode(rc, fun_data,
+ #if MICROPY_PERSISTENT_CODE_SAVE || MICROPY_DEBUG_PRINTERS
+ fun_data_len,
+ #endif
+ const_table,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ n_obj, n_raw_code,
+ #endif
+ prelude.scope_flags);
+
+ #if MICROPY_EMIT_MACHINE_CODE
+ } else {
+ // Relocate and commit code to executable address space
+ reloc_info_t ri = {reader, const_table};
+ #if defined(MP_PLAT_COMMIT_EXEC)
+ void *opt_ri = (prelude.scope_flags & MP_SCOPE_FLAG_VIPERRELOC) ? &ri : NULL;
+ fun_data = MP_PLAT_COMMIT_EXEC(fun_data, fun_data_len, opt_ri);
+ #else
+ if (prelude.scope_flags & MP_SCOPE_FLAG_VIPERRELOC) {
+ #if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
+ // If native code needs relocations then it's not guaranteed that a pointer to
+ // the head of `buf` (containing the machine code) will be retained for the GC
+ // to trace. This is because native functions can start inside `buf` and so
+ // it's possible that the only GC-reachable pointers are pointers inside `buf`.
+ // So put this `buf` on a list of reachable root pointers.
+ if (MP_STATE_PORT(track_reloc_code_list) == MP_OBJ_NULL) {
+ MP_STATE_PORT(track_reloc_code_list) = mp_obj_new_list(0, NULL);
+ }
+ mp_obj_list_append(MP_STATE_PORT(track_reloc_code_list), MP_OBJ_FROM_PTR(fun_data));
+ #endif
+ // Do the relocations.
+ mp_native_relocate(&ri, fun_data, (uintptr_t)fun_data);
+ }
+ #endif
+
+ // Assign native code to raw code object
+ mp_emit_glue_assign_native(rc, kind,
+ fun_data, fun_data_len, const_table,
+ #if MICROPY_PERSISTENT_CODE_SAVE
+ prelude_offset,
+ n_obj, n_raw_code,
+ n_qstr_link, NULL,
+ #endif
+ prelude.n_pos_args, prelude.scope_flags, type_sig);
+ #endif
+ }
+ return rc;
+}
+
+mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader) {
+ byte header[4];
+ read_bytes(reader, header, sizeof(header));
+ if (header[0] != 'C'
+ || header[1] != MPY_VERSION
+ || MPY_FEATURE_DECODE_FLAGS(header[2]) != MPY_FEATURE_FLAGS
+ || header[3] > mp_small_int_bits()
+ || read_uint(reader, NULL) > QSTR_WINDOW_SIZE) {
+ mp_raise_MpyError(MP_ERROR_TEXT("Incompatible .mpy file. Please update all .mpy files. See http://adafru.it/mpy-update for more info."));
+ }
+ if (MPY_FEATURE_DECODE_ARCH(header[2]) != MP_NATIVE_ARCH_NONE) {
+ byte arch = MPY_FEATURE_DECODE_ARCH(header[2]);
+ if (!MPY_FEATURE_ARCH_TEST(arch)) {
+ mp_raise_ValueError(MP_ERROR_TEXT("incompatible native .mpy architecture"));
+ }
+ }
+ qstr_window_t qw;
+ qw.idx = 0;
+ mp_raw_code_t *rc = load_raw_code(reader, &qw);
+ reader->close(reader->data);
+ return rc;
+}
+
+mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len) {
+ mp_reader_t reader;
+ mp_reader_new_mem(&reader, buf, len, 0);
+ return mp_raw_code_load(&reader);
+}
+
+#if MICROPY_HAS_FILE_READER
+
+mp_raw_code_t *mp_raw_code_load_file(const char *filename) {
+ mp_reader_t reader;
+ mp_reader_new_file(&reader, filename);
+ return mp_raw_code_load(&reader);
+}
+
+#endif // MICROPY_HAS_FILE_READER
+
+#endif // MICROPY_PERSISTENT_CODE_LOAD
+
+#if MICROPY_PERSISTENT_CODE_SAVE
+
+#include "py/objstr.h"
+
+STATIC void mp_print_bytes(mp_print_t *print, const byte *data, size_t len) {
+ print->print_strn(print->data, (const char *)data, len);
+}
+
+#define BYTES_FOR_INT ((MP_BYTES_PER_OBJ_WORD * 8 + 6) / 7)
+STATIC void mp_print_uint(mp_print_t *print, size_t n) {
+ byte buf[BYTES_FOR_INT];
+ byte *p = buf + sizeof(buf);
+ *--p = n & 0x7f;
+ n >>= 7;
+ for (; n != 0; n >>= 7) {
+ *--p = 0x80 | (n & 0x7f);
+ }
+ print->print_strn(print->data, (char *)p, buf + sizeof(buf) - p);
+}
+
+STATIC void save_qstr(mp_print_t *print, qstr_window_t *qw, qstr qst) {
+ if (qst <= QSTR_LAST_STATIC) {
+ // encode static qstr
+ byte buf[2] = {0, qst & 0xff};
+ mp_print_bytes(print, buf, 2);
+ return;
+ }
+ size_t idx = qstr_window_insert(qw, qst);
+ if (idx < QSTR_WINDOW_SIZE) {
+ // qstr found in window, encode index to it
+ mp_print_uint(print, idx << 1 | 1);
+ return;
+ }
+ size_t len;
+ const byte *str = qstr_data(qst, &len);
+ mp_print_uint(print, len << 1);
+ mp_print_bytes(print, str, len);
+}
+
+STATIC void save_obj(mp_print_t *print, mp_obj_t o) {
+ if (mp_obj_is_str_or_bytes(o)) {
+ byte obj_type;
+ if (mp_obj_is_str(o)) {
+ obj_type = 's';
+ } else {
+ obj_type = 'b';
+ }
+ size_t len;
+ const char *str = mp_obj_str_get_data(o, &len);
+ mp_print_bytes(print, &obj_type, 1);
+ mp_print_uint(print, len);
+ mp_print_bytes(print, (const byte *)str, len);
+ } else if (MP_OBJ_TO_PTR(o) == &mp_const_ellipsis_obj) {
+ byte obj_type = 'e';
+ mp_print_bytes(print, &obj_type, 1);
+ } else {
+ // we save numbers using a simplistic text representation
+ // TODO could be improved
+ byte obj_type;
+ if (mp_obj_is_type(o, &mp_type_int)) {
+ obj_type = 'i';
+ #if MICROPY_PY_BUILTINS_COMPLEX
+ } else if (mp_obj_is_type(o, &mp_type_complex)) {
+ obj_type = 'c';
+ #endif
+ } else {
+ assert(mp_obj_is_float(o));
+ obj_type = 'f';
+ }
+ vstr_t vstr;
+ mp_print_t pr;
+ vstr_init_print(&vstr, 10, &pr);
+ mp_obj_print_helper(&pr, o, PRINT_REPR);
+ mp_print_bytes(print, &obj_type, 1);
+ mp_print_uint(print, vstr.len);
+ mp_print_bytes(print, (const byte *)vstr.buf, vstr.len);
+ vstr_clear(&vstr);
+ }
+}
+
+STATIC void save_prelude_qstrs(mp_print_t *print, qstr_window_t *qw, const byte *ip) {
+ save_qstr(print, qw, ip[0] | (ip[1] << 8)); // simple_name
+ save_qstr(print, qw, ip[2] | (ip[3] << 8)); // source_file
+}
+
+STATIC void save_bytecode(mp_print_t *print, qstr_window_t *qw, const byte *ip, const byte *ip_top) {
+ while (ip < ip_top) {
+ size_t sz;
+ uint f = mp_opcode_format(ip, &sz, true);
+ if (f == MP_BC_FORMAT_QSTR) {
+ mp_print_bytes(print, ip, 1);
+ qstr qst = ip[1] | (ip[2] << 8);
+ save_qstr(print, qw, qst);
+ ip += 3;
+ sz -= 3;
+ }
+ mp_print_bytes(print, ip, sz);
+ ip += sz;
+ }
+}
+
+STATIC void save_raw_code(mp_print_t *print, mp_raw_code_t *rc, qstr_window_t *qstr_window) {
+ // Save function kind and data length
+ mp_print_uint(print, (rc->fun_data_len << 2) | (rc->kind - MP_CODE_BYTECODE));
+
+ bytecode_prelude_t prelude;
+
+ if (rc->kind == MP_CODE_BYTECODE) {
+ // Extract prelude
+ const byte *ip = rc->fun_data;
+ const byte *ip_info = extract_prelude(&ip, &prelude);
+
+ // Save prelude
+ mp_print_bytes(print, rc->fun_data, ip_info - (const byte *)rc->fun_data);
+ save_prelude_qstrs(print, qstr_window, ip_info);
+ ip_info += 4;
+ mp_print_bytes(print, ip_info, ip - ip_info);
+
+ // Save bytecode
+ const byte *ip_top = (const byte *)rc->fun_data + rc->fun_data_len;
+ save_bytecode(print, qstr_window, ip, ip_top);
+ #if MICROPY_EMIT_MACHINE_CODE
+ } else {
+ // Save native code
+ mp_print_bytes(print, rc->fun_data, rc->fun_data_len);
+
+ if (rc->kind == MP_CODE_NATIVE_PY || rc->kind == MP_CODE_NATIVE_VIPER) {
+ // Save qstr link table for native code
+ mp_print_uint(print, rc->n_qstr);
+ for (size_t i = 0; i < rc->n_qstr; ++i) {
+ mp_print_uint(print, rc->qstr_link[i].off);
+ save_qstr(print, qstr_window, rc->qstr_link[i].qst);
+ }
+ }
+
+ if (rc->kind == MP_CODE_NATIVE_PY) {
+ // Save prelude size
+ mp_print_uint(print, rc->prelude_offset);
+
+ // Extract prelude and save qstrs in prelude
+ const byte *ip = (const byte *)rc->fun_data + rc->prelude_offset;
+ const byte *ip_info = extract_prelude(&ip, &prelude);
+ save_prelude_qstrs(print, qstr_window, ip_info);
+ } else {
+ // Save basic scope info for viper and asm
+ mp_print_uint(print, rc->scope_flags & MP_SCOPE_FLAG_ALL_SIG);
+ prelude.n_pos_args = 0;
+ prelude.n_kwonly_args = 0;
+ if (rc->kind == MP_CODE_NATIVE_ASM) {
+ mp_print_uint(print, rc->n_pos_args);
+ mp_print_uint(print, rc->type_sig);
+ }
+ }
+ #endif
+ }
+
+ if (rc->kind != MP_CODE_NATIVE_ASM) {
+ // Save constant table for bytecode, native and viper
+
+ // Number of entries in constant table
+ mp_print_uint(print, rc->n_obj);
+ mp_print_uint(print, rc->n_raw_code);
+
+ const mp_uint_t *const_table = rc->const_table;
+
+ // Save function argument names (initial entries in const_table)
+ // (viper has n_pos_args=n_kwonly_args=0 so doesn't save any qstrs here)
+ for (size_t i = 0; i < prelude.n_pos_args + prelude.n_kwonly_args; ++i) {
+ mp_obj_t o = (mp_obj_t)*const_table++;
+ save_qstr(print, qstr_window, MP_OBJ_QSTR_VALUE(o));
+ }
+
+ if (rc->kind != MP_CODE_BYTECODE) {
+ // Skip saving mp_fun_table entry
+ ++const_table;
+ }
+
+ // Save constant objects and raw code children
+ for (size_t i = 0; i < rc->n_obj; ++i) {
+ save_obj(print, (mp_obj_t)*const_table++);
+ }
+ for (size_t i = 0; i < rc->n_raw_code; ++i) {
+ save_raw_code(print, (mp_raw_code_t *)(uintptr_t)*const_table++, qstr_window);
+ }
+ }
+}
+
+STATIC bool mp_raw_code_has_native(mp_raw_code_t *rc) {
+ if (rc->kind != MP_CODE_BYTECODE) {
+ return true;
+ }
+
+ const byte *ip = rc->fun_data;
+ bytecode_prelude_t prelude;
+ extract_prelude(&ip, &prelude);
+
+ const mp_uint_t *const_table = rc->const_table
+ + prelude.n_pos_args + prelude.n_kwonly_args
+ + rc->n_obj;
+
+ for (size_t i = 0; i < rc->n_raw_code; ++i) {
+ if (mp_raw_code_has_native((mp_raw_code_t *)(uintptr_t)*const_table++)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print) {
+ // header contains:
+ // byte 'C'
+ // byte version
+ // byte feature flags
+ // byte number of bits in a small int
+ // uint size of qstr window
+ byte header[4] = {
+ 'C',
+ MPY_VERSION,
+ MPY_FEATURE_ENCODE_FLAGS(MPY_FEATURE_FLAGS_DYNAMIC),
+ #if MICROPY_DYNAMIC_COMPILER
+ mp_dynamic_compiler.small_int_bits,
+ #else
+ mp_small_int_bits(),
+ #endif
+ };
+ if (mp_raw_code_has_native(rc)) {
+ header[2] |= MPY_FEATURE_ENCODE_ARCH(MPY_FEATURE_ARCH_DYNAMIC);
+ }
+ mp_print_bytes(print, header, sizeof(header));
+ mp_print_uint(print, QSTR_WINDOW_SIZE);
+
+ qstr_window_t qw;
+ qw.idx = 0;
+ memset(qw.window, 0, sizeof(qw.window));
+ save_raw_code(print, rc, &qw);
+}
+
+#if MICROPY_PERSISTENT_CODE_SAVE_FILE
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+STATIC void fd_print_strn(void *env, const char *str, size_t len) {
+ int fd = (intptr_t)env;
+ MP_THREAD_GIL_EXIT();
+ ssize_t ret = write(fd, str, len);
+ MP_THREAD_GIL_ENTER();
+ (void)ret;
+}
+
+void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename) {
+ MP_THREAD_GIL_EXIT();
+ int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ MP_THREAD_GIL_ENTER();
+ mp_print_t fd_print = {(void *)(intptr_t)fd, fd_print_strn};
+ mp_raw_code_save(rc, &fd_print);
+ MP_THREAD_GIL_EXIT();
+ close(fd);
+ MP_THREAD_GIL_ENTER();
+}
+
+#endif // MICROPY_PERSISTENT_CODE_SAVE_FILE
+
+#endif // MICROPY_PERSISTENT_CODE_SAVE
diff --git a/circuitpython/py/persistentcode.h b/circuitpython/py/persistentcode.h
new file mode 100644
index 0000000..1c53ca1
--- /dev/null
+++ b/circuitpython/py/persistentcode.h
@@ -0,0 +1,114 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PERSISTENTCODE_H
+#define MICROPY_INCLUDED_PY_PERSISTENTCODE_H
+
+#include "py/mpprint.h"
+#include "py/reader.h"
+#include "py/emitglue.h"
+
+// The current version of .mpy files
+#define MPY_VERSION 5
+
+// Macros to encode/decode flags to/from the feature byte
+#define MPY_FEATURE_ENCODE_FLAGS(flags) (flags)
+#define MPY_FEATURE_DECODE_FLAGS(feat) ((feat) & 3)
+
+// Macros to encode/decode native architecture to/from the feature byte
+#define MPY_FEATURE_ENCODE_ARCH(arch) ((arch) << 2)
+#define MPY_FEATURE_DECODE_ARCH(feat) ((feat) >> 2)
+
+// The feature flag bits encode the compile-time config options that affect
+// the generated bytecode. Note: position 0 is now unused
+// (formerly MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE).
+#define MPY_FEATURE_FLAGS ( \
+ ((MICROPY_PY_BUILTINS_STR_UNICODE) << 1) \
+ )
+// This is a version of the flags that can be configured at runtime.
+#define MPY_FEATURE_FLAGS_DYNAMIC ( \
+ ((MICROPY_PY_BUILTINS_STR_UNICODE_DYNAMIC) << 1) \
+ )
+
+// Define the host architecture
+#if MICROPY_EMIT_X86
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_X86)
+#elif MICROPY_EMIT_X64
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_X64)
+#elif MICROPY_EMIT_THUMB
+ #if defined(__thumb2__)
+ #if defined(__ARM_FP) && (__ARM_FP & 8) == 8
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV7EMDP)
+ #elif defined(__ARM_FP) && (__ARM_FP & 4) == 4
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV7EMSP)
+ #else
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV7EM)
+ #endif
+ #else
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV7M)
+ #endif
+ #define MPY_FEATURE_ARCH_TEST(x) (MP_NATIVE_ARCH_ARMV6M <= (x) && (x) <= MPY_FEATURE_ARCH)
+#elif MICROPY_EMIT_ARM
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_ARMV6)
+#elif MICROPY_EMIT_XTENSA
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_XTENSA)
+#elif MICROPY_EMIT_XTENSAWIN
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_XTENSAWIN)
+#else
+ #define MPY_FEATURE_ARCH (MP_NATIVE_ARCH_NONE)
+#endif
+
+#ifndef MPY_FEATURE_ARCH_TEST
+#define MPY_FEATURE_ARCH_TEST(x) ((x) == MPY_FEATURE_ARCH)
+#endif
+
+// 16-bit little-endian integer with the second and third bytes of supported .mpy files
+#define MPY_FILE_HEADER_INT (MPY_VERSION \
+ | (MPY_FEATURE_ENCODE_FLAGS(MPY_FEATURE_FLAGS) | MPY_FEATURE_ENCODE_ARCH(MPY_FEATURE_ARCH)) << 8)
+
+enum {
+ MP_NATIVE_ARCH_NONE = 0,
+ MP_NATIVE_ARCH_X86,
+ MP_NATIVE_ARCH_X64,
+ MP_NATIVE_ARCH_ARMV6,
+ MP_NATIVE_ARCH_ARMV6M,
+ MP_NATIVE_ARCH_ARMV7M,
+ MP_NATIVE_ARCH_ARMV7EM,
+ MP_NATIVE_ARCH_ARMV7EMSP,
+ MP_NATIVE_ARCH_ARMV7EMDP,
+ MP_NATIVE_ARCH_XTENSA,
+ MP_NATIVE_ARCH_XTENSAWIN,
+};
+
+mp_raw_code_t *mp_raw_code_load(mp_reader_t *reader);
+mp_raw_code_t *mp_raw_code_load_mem(const byte *buf, size_t len);
+mp_raw_code_t *mp_raw_code_load_file(const char *filename);
+
+void mp_raw_code_save(mp_raw_code_t *rc, mp_print_t *print);
+void mp_raw_code_save_file(mp_raw_code_t *rc, const char *filename);
+
+void mp_native_relocate(void *reloc, uint8_t *text, uintptr_t reloc_text);
+
+#endif // MICROPY_INCLUDED_PY_PERSISTENTCODE_H
diff --git a/circuitpython/py/profile.c b/circuitpython/py/profile.c
new file mode 100644
index 0000000..81c80f1
--- /dev/null
+++ b/circuitpython/py/profile.c
@@ -0,0 +1,970 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) SatoshiLabs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/profile.h"
+#include "py/bc0.h"
+#include "py/gc.h"
+
+#if MICROPY_PY_SYS_SETTRACE
+
+#define prof_trace_cb MP_STATE_THREAD(prof_trace_callback)
+
+// Map bytecode offset `bc` within `rc` to its source line number using the
+// prelude's compressed line-info table.
+STATIC uint mp_prof_bytecode_lineno(const mp_raw_code_t *rc, size_t bc) {
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ return mp_bytecode_get_source_line(prelude->line_info, bc);
+}
+
+// Decode the bytecode prelude at `bytecode` into `prelude`: the signature
+// (state/exception-stack sizes, scope flags, argument counts), the size
+// fields, pointers to the line-info table and the first opcode, and the
+// block-name / source-file qstrs stored in the first four info bytes.
+void mp_prof_extract_prelude(const byte *bytecode, mp_bytecode_prelude_t *prelude) {
+ const byte *ip = bytecode;
+
+ // The SIG/SIZE decode macros declare the locals they fill in.
+ MP_BC_PRELUDE_SIG_DECODE(ip);
+ prelude->n_state = n_state;
+ prelude->n_exc_stack = n_exc_stack;
+ prelude->scope_flags = scope_flags;
+ prelude->n_pos_args = n_pos_args;
+ prelude->n_kwonly_args = n_kwonly_args;
+ prelude->n_def_pos_args = n_def_pos_args;
+
+ MP_BC_PRELUDE_SIZE_DECODE(ip);
+
+ // Line info begins after the 4 qstr bytes; the opcodes follow the whole
+ // info section plus the cell-initialisation bytes.
+ prelude->line_info = ip + 4;
+ prelude->opcodes = ip + n_info + n_cell;
+
+ // 16-bit little-endian qstrs: block name, then source file.
+ qstr block_name = ip[0] | (ip[1] << 8);
+ qstr source_file = ip[2] | (ip[3] << 8);
+ prelude->qstr_block_name = block_name;
+ prelude->qstr_source_file = source_file;
+}
+
+/******************************************************************************/
+// code object
+
+// Print a code object as
+// "<code object NAME at PTR, file "FILE", line N>", mirroring CPython.
+STATIC void code_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_code_t *o = MP_OBJ_TO_PTR(o_in);
+ const mp_raw_code_t *rc = o->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ mp_printf(print,
+ "<code object %q at %p, file \"%q\", line %d>",
+ prelude->qstr_block_name,
+ o,
+ prelude->qstr_source_file,
+ rc->line_of_definition
+ );
+}
+
+// Build the co_consts tuple: one child code object per nested raw_code in
+// the const table, plus a trailing None (as CPython includes None).
+STATIC mp_obj_tuple_t *code_consts(const mp_raw_code_t *rc) {
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ // Child raw_codes sit in const_table after the argument-name qstrs and
+ // the object constants.
+ int start = prelude->n_pos_args + prelude->n_kwonly_args + rc->n_obj;
+ int stop = prelude->n_pos_args + prelude->n_kwonly_args + rc->n_obj + rc->n_raw_code;
+ mp_obj_tuple_t *consts = MP_OBJ_TO_PTR(mp_obj_new_tuple(stop - start + 1, NULL));
+
+ size_t const_no = 0;
+ for (int i = start; i < stop; ++i) {
+ mp_obj_t code = mp_obj_new_code((const mp_raw_code_t *)MP_OBJ_TO_PTR(rc->const_table[i]));
+ if (code == MP_OBJ_NULL) {
+ // mp_obj_new_code returns NULL only on allocation failure.
+ m_malloc_fail(sizeof(mp_obj_code_t));
+ }
+ consts->items[const_no++] = code;
+ }
+ consts->items[const_no++] = mp_const_none;
+
+ return consts;
+}
+
+// Build a CPython-style co_lnotab: a bytes object of
+// (bytecode-offset delta, line-number delta) byte pairs derived from the
+// prelude's line-info table, covering the whole function body.
+STATIC mp_obj_t raw_code_lnotab(const mp_raw_code_t *rc) {
+ // const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ uint start = 0;
+ uint stop = rc->fun_data_len - start;
+
+ uint last_lineno = mp_prof_bytecode_lineno(rc, start);
+ uint lasti = 0;
+
+ const uint buffer_chunk_size = (stop - start) >> 2; // heuristic magic
+ uint buffer_size = buffer_chunk_size;
+ byte *buffer = m_new(byte, buffer_size);
+ uint buffer_index = 0;
+
+ for (uint i = start; i < stop; ++i) {
+ uint lineno = mp_prof_bytecode_lineno(rc, i);
+ size_t line_diff = lineno - last_lineno;
+ if (line_diff > 0) {
+ uint instr_diff = (i - start) - lasti;
+
+ // NOTE(review): deltas >= 256 would be truncated when stored in a
+ // single byte; these asserts only guard debug builds. A decreasing
+ // line number would also wrap line_diff (size_t) — TODO confirm
+ // line numbers are non-decreasing in the line-info table.
+ assert(instr_diff < 256);
+ assert(line_diff < 256);
+
+ if (buffer_index + 2 > buffer_size) {
+ // Grow the output buffer by another chunk.
+ buffer = m_renew(byte, buffer, buffer_size, buffer_size + buffer_chunk_size);
+ buffer_size = buffer_size + buffer_chunk_size;
+ }
+ last_lineno = lineno;
+ lasti = i - start;
+ buffer[buffer_index++] = instr_diff;
+ buffer[buffer_index++] = line_diff;
+ }
+ }
+
+ mp_obj_t o = mp_obj_new_bytes(buffer, buffer_index);
+ m_del(byte, buffer, buffer_size);
+ return o;
+}
+
+// Attribute loader for code objects: implements the read-only CPython
+// subset co_code, co_consts, co_filename, co_firstlineno, co_name,
+// co_names and co_lnotab (the last built lazily and cached on the object).
+STATIC void code_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] != MP_OBJ_NULL) {
+ // not load attribute
+ return;
+ }
+ mp_obj_code_t *o = MP_OBJ_TO_PTR(self_in);
+ const mp_raw_code_t *rc = o->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ switch (attr) {
+ case MP_QSTR_co_code:
+ // Raw opcode bytes only — the prelude is excluded.
+ dest[0] = mp_obj_new_bytes(
+ (void *)prelude->opcodes,
+ rc->fun_data_len - (prelude->opcodes - (const byte *)rc->fun_data)
+ );
+ break;
+ case MP_QSTR_co_consts:
+ dest[0] = MP_OBJ_FROM_PTR(code_consts(rc));
+ break;
+ case MP_QSTR_co_filename:
+ dest[0] = MP_OBJ_NEW_QSTR(prelude->qstr_source_file);
+ break;
+ case MP_QSTR_co_firstlineno:
+ dest[0] = MP_OBJ_NEW_SMALL_INT(mp_prof_bytecode_lineno(rc, 0));
+ break;
+ case MP_QSTR_co_name:
+ dest[0] = MP_OBJ_NEW_QSTR(prelude->qstr_block_name);
+ break;
+ case MP_QSTR_co_names:
+ dest[0] = MP_OBJ_FROM_PTR(o->dict_locals);
+ break;
+ case MP_QSTR_co_lnotab:
+ // Built on first access, then cached.
+ if (!o->lnotab) {
+ o->lnotab = raw_code_lnotab(rc);
+ }
+ dest[0] = o->lnotab;
+ break;
+ }
+}
+
+// Type object for settrace code objects (analogue of CPython's CodeType).
+const mp_obj_type_t mp_type_settrace_codeobj = {
+ { &mp_type_type },
+ .name = MP_QSTR_code,
+ .print = code_print,
+ .unary_op = mp_generic_unary_op,
+ .attr = code_attr,
+};
+
+// Wrap raw code `rc` in a new code object. Returns MP_OBJ_NULL on
+// allocation failure; callers check for it.
+mp_obj_t mp_obj_new_code(const mp_raw_code_t *rc) {
+ mp_obj_code_t *o = m_new_obj_maybe(mp_obj_code_t);
+ if (o == NULL) {
+ return MP_OBJ_NULL;
+ }
+ o->base.type = &mp_type_settrace_codeobj;
+ o->rc = rc;
+ o->dict_locals = mp_locals_get(); // NOTE(review): wrong — captures the caller's current locals, not the code object's own namespace; how to do this properly?
+ o->lnotab = MP_OBJ_NULL;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+/******************************************************************************/
+// frame object
+
+// Print a frame object as
+// "<frame at PTR, file 'FILE', line N, code NAME>", mirroring CPython.
+STATIC void frame_print(const mp_print_t *print, mp_obj_t o_in, mp_print_kind_t kind) {
+ (void)kind;
+ mp_obj_frame_t *frame = MP_OBJ_TO_PTR(o_in);
+ mp_obj_code_t *code = frame->code;
+ const mp_raw_code_t *rc = code->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ mp_printf(print,
+ "<frame at %p, file '%q', line %d, code %q>",
+ frame,
+ prelude->qstr_source_file,
+ frame->lineno,
+ prelude->qstr_block_name
+ );
+}
+
+// Attribute loader for frame objects: read-only subset of CPython's frame
+// attributes — f_back, f_code, f_globals, f_lasti, f_lineno.
+STATIC void frame_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
+ if (dest[0] != MP_OBJ_NULL) {
+ // not load attribute
+ return;
+ }
+
+ mp_obj_frame_t *o = MP_OBJ_TO_PTR(self_in);
+
+ switch (attr) {
+ case MP_QSTR_f_back:
+ // Resolved through the VM's code_state chain, not o->back;
+ // None when this is the outermost traced frame.
+ dest[0] = mp_const_none;
+ if (o->code_state->prev_state) {
+ dest[0] = MP_OBJ_FROM_PTR(o->code_state->prev_state->frame);
+ }
+ break;
+ case MP_QSTR_f_code:
+ dest[0] = MP_OBJ_FROM_PTR(o->code);
+ break;
+ case MP_QSTR_f_globals:
+ dest[0] = MP_OBJ_FROM_PTR(o->code_state->fun_bc->globals);
+ break;
+ case MP_QSTR_f_lasti:
+ dest[0] = MP_OBJ_NEW_SMALL_INT(o->lasti);
+ break;
+ case MP_QSTR_f_lineno:
+ dest[0] = MP_OBJ_NEW_SMALL_INT(o->lineno);
+ break;
+ }
+}
+
+// Type object for settrace frame objects (analogue of CPython's FrameType).
+const mp_obj_type_t mp_type_frame = {
+ { &mp_type_type },
+ .name = MP_QSTR_frame,
+ .print = frame_print,
+ .unary_op = mp_generic_unary_op,
+ .attr = frame_attr,
+};
+
+// Allocate a frame object mirroring `code_state`. Returns MP_OBJ_NULL when
+// the GC is locked or allocation fails, so tracing callers must tolerate a
+// missing frame.
+mp_obj_t mp_obj_new_frame(const mp_code_state_t *code_state) {
+ if (gc_is_locked()) {
+ return MP_OBJ_NULL;
+ }
+
+ mp_obj_frame_t *o = m_new_obj_maybe(mp_obj_frame_t);
+ if (o == NULL) {
+ return MP_OBJ_NULL;
+ }
+
+ mp_obj_code_t *code = o->code = MP_OBJ_TO_PTR(mp_obj_new_code(code_state->fun_bc->rc));
+ if (code == NULL) {
+ return MP_OBJ_NULL;
+ }
+
+ const mp_raw_code_t *rc = code->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ o->code_state = code_state;
+ o->base.type = &mp_type_frame;
+ o->back = NULL;
+ o->code = code;
+ // f_lasti / f_lineno derived from the current instruction pointer.
+ o->lasti = code_state->ip - prelude->opcodes;
+ o->lineno = mp_prof_bytecode_lineno(rc, o->lasti);
+ o->trace_opcodes = false;
+ o->callback = MP_OBJ_NULL;
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+
+/******************************************************************************/
+// Trace logic
+
+// Bundled (frame, event, arg) triple passed to a trace callback via
+// mp_prof_callback_invoke.
+typedef struct {
+ struct _mp_obj_frame_t *frame;
+ mp_obj_t event; // qstr: call / line / return / exception / opcode
+ mp_obj_t arg;
+} prof_callback_args_t;
+
+// Invoke a trace callback with (frame, event, arg). Sets the
+// mp_prof_is_executing flag for the duration so the VM does not re-enter
+// tracing from inside the callback, and services any pending exception
+// the callback raised before returning its result.
+STATIC mp_obj_t mp_prof_callback_invoke(mp_obj_t callback, prof_callback_args_t *args) {
+ assert(mp_obj_is_callable(callback));
+
+ mp_prof_is_executing = true;
+
+ mp_obj_t a[3] = {MP_OBJ_FROM_PTR(args->frame), args->event, args->arg};
+ mp_obj_t top = mp_call_function_n_kw(callback, 3, 0, a);
+
+ mp_prof_is_executing = false;
+
+ if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
+ mp_handle_pending(true);
+ }
+ return top;
+}
+
+// Install (or clear) the global trace callback, as sys.settrace does:
+// any non-callable value uninstalls tracing. Always returns None.
+mp_obj_t mp_prof_settrace(mp_obj_t callback) {
+ if (mp_obj_is_callable(callback)) {
+ prof_trace_cb = callback;
+ } else {
+ prof_trace_cb = MP_OBJ_NULL;
+ }
+ return mp_const_none;
+}
+
+// Called when the VM enters a code_state: allocate and attach a frame
+// object and fire the settrace CALL event. Returns the callback's result,
+// or MP_OBJ_NULL when no frame could be allocated or no global trace
+// callback is installed.
+mp_obj_t mp_prof_frame_enter(mp_code_state_t *code_state) {
+ assert(!mp_prof_is_executing);
+
+ mp_obj_frame_t *frame = MP_OBJ_TO_PTR(mp_obj_new_frame(code_state));
+ if (frame == NULL) {
+ // Couldn't allocate a frame object
+ return MP_OBJ_NULL;
+ }
+
+ if (code_state->prev_state && code_state->frame == NULL) {
+ // We are entering not-yet-traced frame
+ // which means it's a CALL event (not a GENERATOR)
+ // so set the function definition line.
+ const mp_raw_code_t *rc = code_state->fun_bc->rc;
+ frame->lineno = rc->line_of_definition;
+ if (!rc->line_of_definition) {
+ // No recorded definition line — fall back to the first bytecode line.
+ frame->lineno = mp_prof_bytecode_lineno(rc, 0);
+ }
+ }
+ code_state->frame = frame;
+
+ if (!prof_trace_cb) {
+ return MP_OBJ_NULL;
+ }
+
+ mp_obj_t top;
+ prof_callback_args_t _args, *args = &_args;
+ args->frame = code_state->frame;
+
+ // SETTRACE event CALL
+ args->event = MP_OBJ_NEW_QSTR(MP_QSTR_call);
+ args->arg = mp_const_none;
+ top = mp_prof_callback_invoke(prof_trace_cb, args);
+
+ // As with CPython's sys.settrace, the callback's return value (if
+ // callable) becomes this frame's local trace function.
+ code_state->frame->callback = mp_obj_is_callable(top) ? top : MP_OBJ_NULL;
+
+ // Invalidate the last executed line number so the LINE trace can trigger after this CALL.
+ frame->lineno = 0;
+
+ return top;
+}
+
+// Refresh a frame's f_lasti / f_lineno from the live code_state (called
+// before delivering trace events). Returns the frame, or MP_OBJ_NULL if no
+// frame was ever allocated for this code_state.
+mp_obj_t mp_prof_frame_update(const mp_code_state_t *code_state) {
+ mp_obj_frame_t *frame = code_state->frame;
+ if (frame == NULL) {
+ // Frame was not allocated (eg because there was no memory available)
+ return MP_OBJ_NULL;
+ }
+
+ mp_obj_frame_t *o = frame;
+ mp_obj_code_t *code = o->code;
+ const mp_raw_code_t *rc = code->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+
+ assert(o->code_state == code_state);
+
+ o->lasti = code_state->ip - prelude->opcodes;
+ o->lineno = mp_prof_bytecode_lineno(rc, o->lasti);
+
+ return MP_OBJ_FROM_PTR(o);
+}
+
+// Per-instruction trace hook: dispatches EXCEPTION, LINE and RETURN events
+// to the frame's local trace callback. Returns the last callback result
+// (None when no event fired).
+mp_obj_t mp_prof_instr_tick(mp_code_state_t *code_state, bool is_exception) {
+ // Detect execution recursion
+ assert(!mp_prof_is_executing);
+ assert(code_state->frame);
+ assert(mp_obj_get_type(code_state->frame) == &mp_type_frame);
+
+ // Detect data recursion
+ assert(code_state != code_state->prev_state);
+
+ mp_obj_t top = mp_const_none;
+ mp_obj_t callback = code_state->frame->callback;
+
+ prof_callback_args_t _args, *args = &_args;
+ args->frame = code_state->frame;
+ args->event = mp_const_none;
+ args->arg = mp_const_none;
+
+ // Call events are handled inside mp_prof_frame_enter
+
+ // SETTRACE event EXCEPTION
+ if (is_exception) {
+ args->event = MP_OBJ_NEW_QSTR(MP_QSTR_exception);
+ top = mp_prof_callback_invoke(callback, args);
+ return top;
+ }
+
+ // SETTRACE event LINE — fires only when the line number changes.
+ const mp_raw_code_t *rc = code_state->fun_bc->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+ size_t prev_line_no = args->frame->lineno;
+ size_t current_line_no = mp_prof_bytecode_lineno(rc, code_state->ip - prelude->opcodes);
+ if (prev_line_no != current_line_no) {
+ args->frame->lineno = current_line_no;
+ args->event = MP_OBJ_NEW_QSTR(MP_QSTR_line);
+ top = mp_prof_callback_invoke(callback, args);
+ }
+
+ // SETTRACE event RETURN
+ const byte *ip = code_state->ip;
+ if (*ip == MP_BC_RETURN_VALUE || *ip == MP_BC_YIELD_VALUE) {
+ args->event = MP_OBJ_NEW_QSTR(MP_QSTR_return);
+ top = mp_prof_callback_invoke(callback, args);
+ if (code_state->prev_state && *ip == MP_BC_RETURN_VALUE) {
+ // A genuine return deactivates this frame's local trace.
+ code_state->frame->callback = MP_OBJ_NULL;
+ }
+ }
+
+ // SETTRACE event OPCODE
+ // TODO: frame.f_trace_opcodes=True
+ if (false) {
+ args->event = MP_OBJ_NEW_QSTR(MP_QSTR_opcode);
+ }
+
+ return top;
+}
+
+/******************************************************************************/
+// DEBUG
+
+// This section is for debugging the settrace feature itself, and is not intended
+// to be included in production/release builds. The code structure for this block
+// was taken from py/showbc.c and should not be used as a reference. To enable
+// this debug feature enable MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE in py/profile.h.
+#if MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE
+
+#include "runtime0.h"
+
+#define DECODE_UINT { \
+ unum = 0; \
+ do { \
+ unum = (unum << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0); \
+}
+#define DECODE_ULABEL do { unum = (ip[0] | (ip[1] << 8)); ip += 2; } while (0)
+#define DECODE_SLABEL do { unum = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2; } while (0)
+
+#define DECODE_QSTR \
+ qst = ip[0] | ip[1] << 8; \
+ ip += 2;
+#define DECODE_PTR \
+ DECODE_UINT; \
+ ptr = (const byte *)const_table[unum]
+#define DECODE_OBJ \
+ DECODE_UINT; \
+ obj = (mp_obj_t)const_table[unum]
+
+typedef struct _mp_dis_instruction_t {
+ mp_uint_t qstr_opname;
+ mp_uint_t arg;
+ mp_obj_t argobj;
+ mp_obj_t argobjex_cache;
+} mp_dis_instruction_t;
+
+STATIC const byte *mp_prof_opcode_decode(const byte *ip, const mp_uint_t *const_table, mp_dis_instruction_t *instruction) {
+ mp_uint_t unum;
+ const byte *ptr;
+ mp_obj_t obj;
+ qstr qst;
+
+ instruction->qstr_opname = MP_QSTR_;
+ instruction->arg = 0;
+ instruction->argobj = mp_const_none;
+ instruction->argobjex_cache = mp_const_none;
+
+ switch (*ip++) {
+ case MP_BC_LOAD_CONST_FALSE:
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_FALSE;
+ break;
+
+ case MP_BC_LOAD_CONST_NONE:
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_NONE;
+ break;
+
+ case MP_BC_LOAD_CONST_TRUE:
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_TRUE;
+ break;
+
+ case MP_BC_LOAD_CONST_SMALL_INT: {
+ mp_int_t num = 0;
+ if ((ip[0] & 0x40) != 0) {
+ // Number is negative
+ num--;
+ }
+ do {
+ num = (num << 7) | (*ip & 0x7f);
+ } while ((*ip++ & 0x80) != 0);
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_SMALL_INT;
+ instruction->arg = num;
+ break;
+ }
+
+ case MP_BC_LOAD_CONST_STRING:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_STRING;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_CONST_OBJ:
+ DECODE_OBJ;
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_OBJ;
+ instruction->arg = unum;
+ instruction->argobj = obj;
+ break;
+
+ case MP_BC_LOAD_NULL:
+ instruction->qstr_opname = MP_QSTR_LOAD_NULL;
+ break;
+
+ case MP_BC_LOAD_FAST_N:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_LOAD_FAST_N;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_LOAD_DEREF:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_LOAD_DEREF;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_LOAD_NAME:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_NAME;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_GLOBAL:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_GLOBAL;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_ATTR:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_ATTR;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_METHOD:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_METHOD;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_SUPER_METHOD:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_LOAD_SUPER_METHOD;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_LOAD_BUILD_CLASS:
+ instruction->qstr_opname = MP_QSTR_LOAD_BUILD_CLASS;
+ break;
+
+ case MP_BC_LOAD_SUBSCR:
+ instruction->qstr_opname = MP_QSTR_LOAD_SUBSCR;
+ break;
+
+ case MP_BC_STORE_FAST_N:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_STORE_FAST_N;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_STORE_DEREF:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_STORE_DEREF;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_STORE_NAME:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_STORE_NAME;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_STORE_GLOBAL:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_STORE_GLOBAL;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_STORE_ATTR:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_STORE_ATTR;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_STORE_SUBSCR:
+ instruction->qstr_opname = MP_QSTR_STORE_SUBSCR;
+ break;
+
+ case MP_BC_DELETE_FAST:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_DELETE_FAST;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_DELETE_DEREF:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_DELETE_DEREF;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_DELETE_NAME:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_DELETE_NAME;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_DELETE_GLOBAL:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_DELETE_GLOBAL;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_DUP_TOP:
+ instruction->qstr_opname = MP_QSTR_DUP_TOP;
+ break;
+
+ case MP_BC_DUP_TOP_TWO:
+ instruction->qstr_opname = MP_QSTR_DUP_TOP_TWO;
+ break;
+
+ case MP_BC_POP_TOP:
+ instruction->qstr_opname = MP_QSTR_POP_TOP;
+ break;
+
+ case MP_BC_ROT_TWO:
+ instruction->qstr_opname = MP_QSTR_ROT_TWO;
+ break;
+
+ case MP_BC_ROT_THREE:
+ instruction->qstr_opname = MP_QSTR_ROT_THREE;
+ break;
+
+ case MP_BC_JUMP:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_JUMP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_POP_JUMP_IF_TRUE:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_POP_JUMP_IF_TRUE;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_POP_JUMP_IF_FALSE:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_POP_JUMP_IF_FALSE;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_JUMP_IF_TRUE_OR_POP:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_JUMP_IF_TRUE_OR_POP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_JUMP_IF_FALSE_OR_POP:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_JUMP_IF_FALSE_OR_POP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_SETUP_WITH:
+ DECODE_ULABEL; // loop-like labels are always forward
+ instruction->qstr_opname = MP_QSTR_SETUP_WITH;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_WITH_CLEANUP:
+ instruction->qstr_opname = MP_QSTR_WITH_CLEANUP;
+ break;
+
+ case MP_BC_UNWIND_JUMP:
+ DECODE_SLABEL;
+ instruction->qstr_opname = MP_QSTR_UNWIND_JUMP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_SETUP_EXCEPT:
+ DECODE_ULABEL; // except labels are always forward
+ instruction->qstr_opname = MP_QSTR_SETUP_EXCEPT;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_SETUP_FINALLY:
+ DECODE_ULABEL; // except labels are always forward
+ instruction->qstr_opname = MP_QSTR_SETUP_FINALLY;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_END_FINALLY:
+ // if TOS is an exception, reraises the exception (3 values on TOS)
+ // if TOS is an integer, does something else
+ // if TOS is None, just pops it and continues
+ // else error
+ instruction->qstr_opname = MP_QSTR_END_FINALLY;
+ break;
+
+ case MP_BC_GET_ITER:
+ instruction->qstr_opname = MP_QSTR_GET_ITER;
+ break;
+
+ case MP_BC_GET_ITER_STACK:
+ instruction->qstr_opname = MP_QSTR_GET_ITER_STACK;
+ break;
+
+ case MP_BC_FOR_ITER:
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ instruction->qstr_opname = MP_QSTR_FOR_ITER;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_BUILD_TUPLE:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_BUILD_TUPLE;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_BUILD_LIST:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_BUILD_LIST;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_BUILD_MAP:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_BUILD_MAP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_STORE_MAP:
+ instruction->qstr_opname = MP_QSTR_STORE_MAP;
+ break;
+
+ case MP_BC_BUILD_SET:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_BUILD_SET;
+ instruction->arg = unum;
+ break;
+
+ #if MICROPY_PY_BUILTINS_SLICE
+ case MP_BC_BUILD_SLICE:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_BUILD_SLICE;
+ instruction->arg = unum;
+ break;
+ #endif
+
+ case MP_BC_STORE_COMP:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_STORE_COMP;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_UNPACK_SEQUENCE:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_UNPACK_SEQUENCE;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_UNPACK_EX:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_UNPACK_EX;
+ instruction->arg = unum;
+ break;
+
+ case MP_BC_MAKE_FUNCTION:
+ DECODE_PTR;
+ instruction->qstr_opname = MP_QSTR_MAKE_FUNCTION;
+ instruction->arg = unum;
+ instruction->argobj = mp_obj_new_int_from_ull((uint64_t)ptr);
+ break;
+
+ case MP_BC_MAKE_FUNCTION_DEFARGS:
+ DECODE_PTR;
+ instruction->qstr_opname = MP_QSTR_MAKE_FUNCTION_DEFARGS;
+ instruction->arg = unum;
+ instruction->argobj = mp_obj_new_int_from_ull((uint64_t)ptr);
+ break;
+
+ case MP_BC_MAKE_CLOSURE: {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++;
+ instruction->qstr_opname = MP_QSTR_MAKE_CLOSURE;
+ instruction->arg = unum;
+ instruction->argobj = mp_obj_new_int_from_ull((uint64_t)ptr);
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(n_closed_over);
+ break;
+ }
+
+ case MP_BC_MAKE_CLOSURE_DEFARGS: {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++;
+ instruction->qstr_opname = MP_QSTR_MAKE_CLOSURE_DEFARGS;
+ instruction->arg = unum;
+ instruction->argobj = mp_obj_new_int_from_ull((uint64_t)ptr);
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT(n_closed_over);
+ break;
+ }
+
+ case MP_BC_CALL_FUNCTION:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_CALL_FUNCTION;
+ instruction->arg = unum & 0xff;
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT((unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_FUNCTION_VAR_KW:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_CALL_FUNCTION_VAR_KW;
+ instruction->arg = unum & 0xff;
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT((unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_METHOD:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_CALL_METHOD;
+ instruction->arg = unum & 0xff;
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT((unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_METHOD_VAR_KW:
+ DECODE_UINT;
+ instruction->qstr_opname = MP_QSTR_CALL_METHOD_VAR_KW;
+ instruction->arg = unum & 0xff;
+ instruction->argobjex_cache = MP_OBJ_NEW_SMALL_INT((unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_RETURN_VALUE:
+ instruction->qstr_opname = MP_QSTR_RETURN_VALUE;
+ break;
+
+ case MP_BC_RAISE_LAST:
+ instruction->qstr_opname = MP_QSTR_RAISE_LAST;
+ break;
+
+ case MP_BC_RAISE_OBJ:
+ instruction->qstr_opname = MP_QSTR_RAISE_OBJ;
+ break;
+
+ case MP_BC_RAISE_FROM:
+ instruction->qstr_opname = MP_QSTR_RAISE_FROM;
+ break;
+
+ case MP_BC_YIELD_VALUE:
+ instruction->qstr_opname = MP_QSTR_YIELD_VALUE;
+ break;
+
+ case MP_BC_YIELD_FROM:
+ instruction->qstr_opname = MP_QSTR_YIELD_FROM;
+ break;
+
+ case MP_BC_IMPORT_NAME:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_IMPORT_NAME;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_IMPORT_FROM:
+ DECODE_QSTR;
+ instruction->qstr_opname = MP_QSTR_IMPORT_FROM;
+ instruction->arg = qst;
+ instruction->argobj = MP_OBJ_NEW_QSTR(qst);
+ break;
+
+ case MP_BC_IMPORT_STAR:
+ instruction->qstr_opname = MP_QSTR_IMPORT_STAR;
+ break;
+
+ default:
+ if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
+ instruction->qstr_opname = MP_QSTR_LOAD_CONST_SMALL_INT;
+ instruction->arg = (mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16;
+ } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
+ instruction->qstr_opname = MP_QSTR_LOAD_FAST;
+ instruction->arg = (mp_uint_t)ip[-1] - MP_BC_LOAD_FAST_MULTI;
+ } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
+ instruction->qstr_opname = MP_QSTR_STORE_FAST;
+ instruction->arg = (mp_uint_t)ip[-1] - MP_BC_STORE_FAST_MULTI;
+ } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NUM_BYTECODE) {
+ instruction->qstr_opname = MP_QSTR_UNARY_OP;
+ instruction->arg = (mp_uint_t)ip[-1] - MP_BC_UNARY_OP_MULTI;
+ } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BINARY_OP_NUM_BYTECODE) {
+ mp_uint_t op = ip[-1] - MP_BC_BINARY_OP_MULTI;
+ instruction->qstr_opname = MP_QSTR_BINARY_OP;
+ instruction->arg = op;
+ } else {
+ mp_printf(&mp_plat_print, "code %p, opcode 0x%02x not implemented\n", ip - 1, ip[-1]);
+ assert(0);
+ return ip;
+ }
+ break;
+ }
+
+ return ip;
+}
+
+// Debug helper: decode and pretty-print the single instruction at `ip`
+// (source location, opcode name, argument, and any cached arg objects).
+// Only compiled when MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE is set.
+void mp_prof_print_instr(const byte *ip, mp_code_state_t *code_state) {
+ mp_dis_instruction_t _instruction, *instruction = &_instruction;
+ mp_prof_opcode_decode(ip, code_state->fun_bc->rc->const_table, instruction);
+ const mp_raw_code_t *rc = code_state->fun_bc->rc;
+ const mp_bytecode_prelude_t *prelude = &rc->prelude;
+
+ mp_uint_t offset = ip - prelude->opcodes;
+ mp_printf(&mp_plat_print, "instr");
+
+ /* long path */ if (1) {
+ mp_printf(&mp_plat_print,
+ "@%p:%q:%q+0x%04x:%d",
+ ip,
+ prelude->qstr_source_file,
+ prelude->qstr_block_name,
+ offset,
+ mp_prof_bytecode_lineno(rc, offset)
+ );
+ }
+
+ /* bytecode */ if (0) {
+ mp_printf(&mp_plat_print, " %02x %02x %02x %02x", ip[0], ip[1], ip[2], ip[3]);
+ }
+
+ mp_printf(&mp_plat_print, " 0x%02x %q [%d]", *ip, instruction->qstr_opname, instruction->arg);
+
+ if (instruction->argobj != mp_const_none) {
+ mp_printf(&mp_plat_print, " $");
+ mp_obj_print_helper(&mp_plat_print, instruction->argobj, PRINT_REPR);
+ }
+ if (instruction->argobjex_cache != mp_const_none) {
+ mp_printf(&mp_plat_print, " #");
+ mp_obj_print_helper(&mp_plat_print, instruction->argobjex_cache, PRINT_REPR);
+ }
+
+ mp_printf(&mp_plat_print, "\n");
+}
+
+#endif // MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE
+
+#endif // MICROPY_PY_SYS_SETTRACE
diff --git a/circuitpython/py/profile.h b/circuitpython/py/profile.h
new file mode 100644
index 0000000..64e207d
--- /dev/null
+++ b/circuitpython/py/profile.h
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) SatoshiLabs
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MICROPY_INCLUDED_PY_PROFILING_H
+#define MICROPY_INCLUDED_PY_PROFILING_H
+
+#include "py/emitglue.h"
+
+#if MICROPY_PY_SYS_SETTRACE
+
+// Per-thread flag that is set while a trace callback is executing.
+// NOTE(review): presumably used to avoid re-entering the user callback from
+// within itself — confirm against the checks in py/profile.c.
+#define mp_prof_is_executing MP_STATE_THREAD(prof_callback_is_executing)
+
+// Python-level "code object" wrapper around a raw (byte)code structure.
+typedef struct _mp_obj_code_t {
+ mp_obj_base_t base;
+ // Underlying raw code this object describes.
+ const mp_raw_code_t *rc;
+ mp_obj_dict_t *dict_locals;
+ mp_obj_t lnotab;
+} mp_obj_code_t;
+
+// Python-level "frame object" mirroring an interpreter code state,
+// analogous to CPython's frame objects as seen by sys.settrace callbacks.
+typedef struct _mp_obj_frame_t {
+ mp_obj_base_t base;
+ const mp_code_state_t *code_state;
+ // Caller's frame (next outer frame), or NULL/none for the outermost frame.
+ struct _mp_obj_frame_t *back;
+ mp_obj_t callback;
+ mp_obj_code_t *code;
+ mp_uint_t lasti;
+ mp_uint_t lineno;
+ bool trace_opcodes;
+} mp_obj_frame_t;
+
+void mp_prof_extract_prelude(const byte *bytecode, mp_bytecode_prelude_t *prelude);
+
+// Constructors for the code and frame wrapper objects above.
+mp_obj_t mp_obj_new_code(const mp_raw_code_t *rc);
+mp_obj_t mp_obj_new_frame(const mp_code_state_t *code_state);
+
+// This is the implementation of sys.settrace: installs `callback` as the
+// per-thread trace function.
+mp_obj_t mp_prof_settrace(mp_obj_t callback);
+
+mp_obj_t mp_prof_frame_enter(mp_code_state_t *code_state);
+mp_obj_t mp_prof_frame_update(const mp_code_state_t *code_state);
+
+// For every VM instruction tick this function deduces events from the state
+mp_obj_t mp_prof_instr_tick(mp_code_state_t *code_state, bool is_exception);
+
+// This section is for debugging the settrace feature itself, and is not intended
+// to be included in production/release builds.
+#define MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE 0
+#if MICROPY_PROF_INSTR_DEBUG_PRINT_ENABLE
+void mp_prof_print_instr(const byte *ip, mp_code_state_t *code_state);
+#define MP_PROF_INSTR_DEBUG_PRINT(current_ip) mp_prof_print_instr((current_ip), code_state)
+#else
+#define MP_PROF_INSTR_DEBUG_PRINT(current_ip)
+#endif
+
+#endif // MICROPY_PY_SYS_SETTRACE
+#endif // MICROPY_INCLUDED_PY_PROFILING_H
diff --git a/circuitpython/py/proto.c b/circuitpython/py/proto.c
new file mode 100644
index 0000000..9f110b9
--- /dev/null
+++ b/circuitpython/py/proto.c
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019 Jeff Epler for Adafruit Industries
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/obj.h"
+#include "py/proto.h"
+#include "py/runtime.h"
+
+#ifndef MICROPY_UNSAFE_PROTO
+// Look up the protocol struct attached to obj's type and return it only if
+// its name matches `name` (a qstr packed into 16 bits); NULL otherwise.
+// The "safe" variant: callers get NULL instead of a mistyped protocol.
+const void *mp_proto_get(uint16_t name, mp_const_obj_t obj) {
+ const mp_obj_type_t *type = mp_obj_get_type(obj);
+ const void *protocol = mp_type_get_protocol_slot(type);
+ if (!protocol) {
+ return NULL;
+ }
+ // Every protocol struct starts with MP_PROTOCOL_HEAD, i.e. a uint16_t name
+ // field (see py/proto.h), so the leading bytes identify the protocol.
+ uint16_t proto_name = *(const uint16_t *)protocol;
+ if (proto_name == name) {
+ return protocol;
+ }
+ return NULL;
+}
+#endif
+
+// Like mp_proto_get(), but raises TypeError when obj's type does not
+// implement the protocol named by `name`.
+const void *mp_proto_get_or_throw(uint16_t name, mp_const_obj_t obj) {
+ const void *proto = mp_proto_get(name, obj);
+ if (proto) {
+ return proto;
+ }
+ // No return statement after this call: mp_raise_TypeError_varg raises via
+ // nlr and does not return (NORETURN in py/runtime.h).
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' object does not support '%q'"),
+ mp_obj_get_type_qstr(obj), name);
+}
diff --git a/circuitpython/py/proto.h b/circuitpython/py/proto.h
new file mode 100644
index 0000000..a7b9b52
--- /dev/null
+++ b/circuitpython/py/proto.h
@@ -0,0 +1,44 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2019 Jeff Epler for Adafruit Industries
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef MICROPY_INCLUDED_PY_PROTO_H
+#define MICROPY_INCLUDED_PY_PROTO_H
+
+// Protocol lookup for CircuitPython "protocol" structs (stream-like vtables).
+// Two build modes:
+//  - MICROPY_UNSAFE_PROTO: protocols carry no name tag; lookup is an
+//    unchecked slot read (smaller, but a wrong-protocol cast is possible).
+//  - default (safe): each protocol starts with a uint16_t qstr name that
+//    mp_proto_get() verifies before returning the struct.
+#ifdef MICROPY_UNSAFE_PROTO
+#define MP_PROTOCOL_HEAD /* NOTHING */
+#define MP_PROTO_IMPLEMENT(name) /* NOTHING */
+// NOTE(review): parameter type mp_const_obj_type_t differs from the safe
+// variant's mp_const_obj_t — confirm this is intentional for unsafe builds.
+static inline void *mp_proto_get(uint16_t name, mp_const_obj_type_t obj) {
+ return mp_type_get_protocol_slot(mp_obj_get_type(obj));
+}
+#else
+#define MP_PROTOCOL_HEAD \
+ uint16_t name; // The name of this protocol, a qstr
+#define MP_PROTO_IMPLEMENT(n) .name = n,
+const void *mp_proto_get(uint16_t name, mp_const_obj_t obj);
+const void *mp_proto_get_or_throw(uint16_t name, mp_const_obj_t obj);
+#endif
+
+#endif
diff --git a/circuitpython/py/py.cmake b/circuitpython/py/py.cmake
new file mode 100644
index 0000000..6f0514e
--- /dev/null
+++ b/circuitpython/py/py.cmake
@@ -0,0 +1,149 @@
+# CMake fragment for MicroPython core py component
+
+set(MICROPY_PY_DIR "${MICROPY_DIR}/py")
+
+list(APPEND MICROPY_INC_CORE "${MICROPY_DIR}")
+
+# All py/ source files
+set(MICROPY_SOURCE_PY
+ ${MICROPY_PY_DIR}/argcheck.c
+ ${MICROPY_PY_DIR}/asmarm.c
+ ${MICROPY_PY_DIR}/asmbase.c
+ ${MICROPY_PY_DIR}/asmthumb.c
+ ${MICROPY_PY_DIR}/asmx64.c
+ ${MICROPY_PY_DIR}/asmx86.c
+ ${MICROPY_PY_DIR}/asmxtensa.c
+ ${MICROPY_PY_DIR}/bc.c
+ ${MICROPY_PY_DIR}/binary.c
+ ${MICROPY_PY_DIR}/builtinevex.c
+ ${MICROPY_PY_DIR}/builtinhelp.c
+ ${MICROPY_PY_DIR}/builtinimport.c
+ ${MICROPY_PY_DIR}/compile.c
+ ${MICROPY_PY_DIR}/emitbc.c
+ ${MICROPY_PY_DIR}/emitcommon.c
+ ${MICROPY_PY_DIR}/emitglue.c
+ ${MICROPY_PY_DIR}/emitinlinethumb.c
+ ${MICROPY_PY_DIR}/emitinlinextensa.c
+ ${MICROPY_PY_DIR}/emitnarm.c
+ ${MICROPY_PY_DIR}/emitnthumb.c
+ ${MICROPY_PY_DIR}/emitnx64.c
+ ${MICROPY_PY_DIR}/emitnx86.c
+ ${MICROPY_PY_DIR}/emitnxtensa.c
+ ${MICROPY_PY_DIR}/emitnxtensawin.c
+ ${MICROPY_PY_DIR}/formatfloat.c
+ ${MICROPY_PY_DIR}/frozenmod.c
+ ${MICROPY_PY_DIR}/gc.c
+ ${MICROPY_PY_DIR}/lexer.c
+ ${MICROPY_PY_DIR}/malloc.c
+ ${MICROPY_PY_DIR}/map.c
+ ${MICROPY_PY_DIR}/modarray.c
+ ${MICROPY_PY_DIR}/modbuiltins.c
+ ${MICROPY_PY_DIR}/modcmath.c
+ ${MICROPY_PY_DIR}/modcollections.c
+ ${MICROPY_PY_DIR}/modgc.c
+ ${MICROPY_PY_DIR}/modio.c
+ ${MICROPY_PY_DIR}/modmath.c
+ ${MICROPY_PY_DIR}/modmicropython.c
+ ${MICROPY_PY_DIR}/modstruct.c
+ ${MICROPY_PY_DIR}/modsys.c
+ ${MICROPY_PY_DIR}/modthread.c
+ ${MICROPY_PY_DIR}/moduerrno.c
+ ${MICROPY_PY_DIR}/mpprint.c
+ ${MICROPY_PY_DIR}/mpstate.c
+ ${MICROPY_PY_DIR}/mpz.c
+ ${MICROPY_PY_DIR}/nativeglue.c
+ ${MICROPY_PY_DIR}/nlr.c
+ ${MICROPY_PY_DIR}/nlrpowerpc.c
+ ${MICROPY_PY_DIR}/nlrsetjmp.c
+ ${MICROPY_PY_DIR}/nlrthumb.c
+ ${MICROPY_PY_DIR}/nlrx64.c
+ ${MICROPY_PY_DIR}/nlrx86.c
+ ${MICROPY_PY_DIR}/nlrxtensa.c
+ ${MICROPY_PY_DIR}/obj.c
+ ${MICROPY_PY_DIR}/objarray.c
+ ${MICROPY_PY_DIR}/objattrtuple.c
+ ${MICROPY_PY_DIR}/objbool.c
+ ${MICROPY_PY_DIR}/objboundmeth.c
+ ${MICROPY_PY_DIR}/objcell.c
+ ${MICROPY_PY_DIR}/objclosure.c
+ ${MICROPY_PY_DIR}/objcomplex.c
+ ${MICROPY_PY_DIR}/objdeque.c
+ ${MICROPY_PY_DIR}/objdict.c
+ ${MICROPY_PY_DIR}/objenumerate.c
+ ${MICROPY_PY_DIR}/objexcept.c
+ ${MICROPY_PY_DIR}/objfilter.c
+ ${MICROPY_PY_DIR}/objfloat.c
+ ${MICROPY_PY_DIR}/objfun.c
+ ${MICROPY_PY_DIR}/objgenerator.c
+ ${MICROPY_PY_DIR}/objgetitemiter.c
+ ${MICROPY_PY_DIR}/objint.c
+ ${MICROPY_PY_DIR}/objint_longlong.c
+ ${MICROPY_PY_DIR}/objint_mpz.c
+ ${MICROPY_PY_DIR}/objlist.c
+ ${MICROPY_PY_DIR}/objmap.c
+ ${MICROPY_PY_DIR}/objmodule.c
+ ${MICROPY_PY_DIR}/objnamedtuple.c
+ ${MICROPY_PY_DIR}/objnone.c
+ ${MICROPY_PY_DIR}/objobject.c
+ ${MICROPY_PY_DIR}/objpolyiter.c
+ ${MICROPY_PY_DIR}/objproperty.c
+ ${MICROPY_PY_DIR}/objrange.c
+ ${MICROPY_PY_DIR}/objreversed.c
+ ${MICROPY_PY_DIR}/objset.c
+ ${MICROPY_PY_DIR}/objsingleton.c
+ ${MICROPY_PY_DIR}/objslice.c
+ ${MICROPY_PY_DIR}/objstr.c
+ ${MICROPY_PY_DIR}/objstringio.c
+ ${MICROPY_PY_DIR}/objstrunicode.c
+ ${MICROPY_PY_DIR}/objtraceback.c
+ ${MICROPY_PY_DIR}/objtuple.c
+ ${MICROPY_PY_DIR}/objtype.c
+ ${MICROPY_PY_DIR}/objzip.c
+ ${MICROPY_PY_DIR}/opmethods.c
+ ${MICROPY_PY_DIR}/pairheap.c
+ ${MICROPY_PY_DIR}/parse.c
+ ${MICROPY_PY_DIR}/parsenum.c
+ ${MICROPY_PY_DIR}/parsenumbase.c
+ ${MICROPY_PY_DIR}/persistentcode.c
+ ${MICROPY_PY_DIR}/profile.c
+ ${MICROPY_PY_DIR}/pystack.c
+ ${MICROPY_PY_DIR}/qstr.c
+ ${MICROPY_PY_DIR}/reader.c
+ ${MICROPY_PY_DIR}/repl.c
+ ${MICROPY_PY_DIR}/ringbuf.c
+ ${MICROPY_PY_DIR}/runtime.c
+ ${MICROPY_PY_DIR}/runtime_utils.c
+ ${MICROPY_PY_DIR}/scheduler.c
+ ${MICROPY_PY_DIR}/scope.c
+ ${MICROPY_PY_DIR}/sequence.c
+ ${MICROPY_PY_DIR}/showbc.c
+ ${MICROPY_PY_DIR}/smallint.c
+ ${MICROPY_PY_DIR}/stackctrl.c
+ ${MICROPY_PY_DIR}/stream.c
+ ${MICROPY_PY_DIR}/unicode.c
+ ${MICROPY_PY_DIR}/vm.c
+ ${MICROPY_PY_DIR}/vstr.c
+ ${MICROPY_PY_DIR}/warning.c
+)
+
+# Helper macro to collect include directories and compile definitions for qstr processing.
+# Helper macro to collect include directories and compile definitions for qstr processing.
+# Appends the target's (interface) include dirs to MICROPY_CPP_INC_EXTRA and its
+# (interface) compile definitions to MICROPY_CPP_DEF_EXTRA, so the qstr
+# preprocessing step sees the same flags as the real compile. Silently does
+# nothing for targets that don't exist or are neither static nor interface libs.
+macro(micropy_gather_target_properties targ)
+ if(TARGET ${targ})
+ get_target_property(type ${targ} TYPE)
+ # get_target_property() returns <var>-NOTFOUND on missing properties;
+ # pre-set to OFF so the if(_inc)/if(_def) guards below stay false then.
+ set(_inc OFF)
+ set(_def OFF)
+ if(${type} STREQUAL STATIC_LIBRARY)
+ get_target_property(_inc ${targ} INCLUDE_DIRECTORIES)
+ get_target_property(_def ${targ} COMPILE_DEFINITIONS)
+ elseif(${type} STREQUAL INTERFACE_LIBRARY)
+ # Interface libraries only carry INTERFACE_* properties.
+ get_target_property(_inc ${targ} INTERFACE_INCLUDE_DIRECTORIES)
+ get_target_property(_def ${targ} INTERFACE_COMPILE_DEFINITIONS)
+ endif()
+ if(_inc)
+ list(APPEND MICROPY_CPP_INC_EXTRA ${_inc})
+ endif()
+ if(_def)
+ list(APPEND MICROPY_CPP_DEF_EXTRA ${_def})
+ endif()
+ endif()
+endmacro()
diff --git a/circuitpython/py/py.mk b/circuitpython/py/py.mk
new file mode 100644
index 0000000..f1864e5
--- /dev/null
+++ b/circuitpython/py/py.mk
@@ -0,0 +1,310 @@
+# where py object files go (they have a name prefix to prevent filename clashes)
+PY_BUILD = $(BUILD)/py
+
+# where autogenerated header files go
+HEADER_BUILD = $(BUILD)/genhdr
+
+# file containing qstr defs for the core Python bit
+PY_QSTR_DEFS = $(PY_SRC)/qstrdefs.h
+
+TRANSLATION ?= en_US
+
+# If qstr autogeneration is not disabled we specify the output header
+# for all collected qstrings.
+ifneq ($(QSTR_AUTOGEN_DISABLE),1)
+QSTR_DEFS_COLLECTED = $(HEADER_BUILD)/qstrdefs.collected.h
+endif
+
+# Any files listed by these variables will cause a full regeneration of qstrs
+# DEPENDENCIES: included in qstr processing; REQUIREMENTS: not included
+QSTR_GLOBAL_DEPENDENCIES += $(PY_SRC)/mpconfig.h mpconfigport.h
+QSTR_GLOBAL_REQUIREMENTS += $(HEADER_BUILD)/mpversion.h
+
+# some code is performance bottleneck and compiled with other optimization options
+CSUPEROPT = -O3
+
+# Enable building 32-bit code on 64-bit host.
+ifeq ($(MICROPY_FORCE_32BIT),1)
+CC += -m32
+CXX += -m32
+LD += -m32
+endif
+
+# External modules written in C.
+ifneq ($(USER_C_MODULES),)
+# pre-define USERMOD variables as expanded so that variables are immediate
+# expanded as they're added to them
+SRC_USERMOD :=
+SRC_USERMOD_CXX :=
+CFLAGS_USERMOD :=
+CXXFLAGS_USERMOD :=
+LDFLAGS_USERMOD :=
+$(foreach module, $(wildcard $(USER_C_MODULES)/*/micropython.mk), \
+ $(eval USERMOD_DIR = $(patsubst %/,%,$(dir $(module))))\
+ $(info Including User C Module from $(USERMOD_DIR))\
+ $(eval include $(module))\
+)
+
+SRC_MOD += $(patsubst $(USER_C_MODULES)/%.c,%.c,$(SRC_USERMOD))
+SRC_MOD_CXX += $(patsubst $(USER_C_MODULES)/%.cpp,%.cpp,$(SRC_USERMOD_CXX))
+CFLAGS_MOD += $(CFLAGS_USERMOD)
+CXXFLAGS_MOD += $(CXXFLAGS_USERMOD)
+LDFLAGS_MOD += $(LDFLAGS_USERMOD)
+endif
+
+ifeq ($(CIRCUITPY_ULAB),1)
+ULAB_SRCS := $(shell find $(TOP)/extmod/ulab/code -type f -name "*.c")
+SRC_MOD += $(patsubst $(TOP)/%,%,$(ULAB_SRCS))
+CFLAGS_MOD += -DCIRCUITPY_ULAB=1 -DMODULE_ULAB_ENABLED=1 -DULAB_HAS_USER_MODULE=0 -iquote $(TOP)/extmod/ulab/code
+$(BUILD)/extmod/ulab/code/%.o: CFLAGS += -Wno-missing-declarations -Wno-missing-prototypes -Wno-unused-parameter -Wno-float-equal -Wno-sign-compare -Wno-cast-align -Wno-shadow -DCIRCUITPY
+endif
+
+# py object files
+PY_CORE_O_BASENAME = $(addprefix py/,\
+ mpstate.o \
+ nlr.o \
+ nlrx86.o \
+ nlrx64.o \
+ nlrthumb.o \
+ nlraarch64.o \
+ nlrpowerpc.o \
+ nlrxtensa.o \
+ nlrsetjmp.o \
+ malloc.o \
+ gc.o \
+ gc_long_lived.o \
+ pystack.o \
+ qstr.o \
+ vstr.o \
+ mpprint.o \
+ unicode.o \
+ mpz.o \
+ reader.o \
+ lexer.o \
+ parse.o \
+ scope.o \
+ compile.o \
+ emitcommon.o \
+ emitbc.o \
+ asmbase.o \
+ asmx64.o \
+ emitnx64.o \
+ asmx86.o \
+ emitnx86.o \
+ asmthumb.o \
+ emitnthumb.o \
+ emitinlinethumb.o \
+ asmarm.o \
+ emitnarm.o \
+ asmxtensa.o \
+ emitnxtensa.o \
+ emitinlinextensa.o \
+ emitnxtensawin.o \
+ formatfloat.o \
+ parsenumbase.o \
+ parsenum.o \
+ emitglue.o \
+ persistentcode.o \
+ runtime.o \
+ runtime_utils.o \
+ scheduler.o \
+ nativeglue.o \
+ pairheap.o \
+ ringbuf.o \
+ stackctrl.o \
+ argcheck.o \
+ warning.o \
+ profile.o \
+ map.o \
+ enum.o \
+ obj.o \
+ objarray.o \
+ objattrtuple.o \
+ objbool.o \
+ objboundmeth.o \
+ objcell.o \
+ objclosure.o \
+ objcomplex.o \
+ objdeque.o \
+ objdict.o \
+ objenumerate.o \
+ objexcept.o \
+ objfilter.o \
+ objfloat.o \
+ objfun.o \
+ objgenerator.o \
+ objgetitemiter.o \
+ objint.o \
+ objint_longlong.o \
+ objint_mpz.o \
+ objlist.o \
+ objmap.o \
+ objmodule.o \
+ objobject.o \
+ objpolyiter.o \
+ objproperty.o \
+ objnone.o \
+ objnamedtuple.o \
+ objrange.o \
+ objreversed.o \
+ objset.o \
+ objsingleton.o \
+ objslice.o \
+ objstr.o \
+ objstrunicode.o \
+ objstringio.o \
+ objtraceback.o \
+ objtuple.o \
+ objtype.o \
+ objzip.o \
+ opmethods.o \
+ proto.o \
+ sequence.o \
+ stream.o \
+ binary.o \
+ builtinimport.o \
+ builtinevex.o \
+ builtinhelp.o \
+ modarray.o \
+ modbuiltins.o \
+ modcollections.o \
+ modgc.o \
+ modio.o \
+ modmath.o \
+ modcmath.o \
+ modmicropython.o \
+ modstruct.o \
+ modsys.o \
+ moduerrno.o \
+ modthread.o \
+ vm.o \
+ bc.o \
+ showbc.o \
+ repl.o \
+ smallint.o \
+ frozenmod.o \
+ )
+
+PY_EXTMOD_O_BASENAME = \
+ extmod/moduasyncio.o \
+ extmod/moductypes.o \
+ extmod/modujson.o \
+ extmod/modure.o \
+ extmod/moduzlib.o \
+ extmod/moduheapq.o \
+ extmod/modutimeq.o \
+ extmod/moduhashlib.o \
+ extmod/modubinascii.o \
+ extmod/modurandom.o \
+ extmod/moduselect.o \
+ extmod/modframebuf.o \
+ extmod/vfs.o \
+ extmod/vfs_blockdev.o \
+ extmod/vfs_reader.o \
+ extmod/vfs_posix.o \
+ extmod/vfs_posix_file.o \
+ extmod/vfs_fat.o \
+ extmod/vfs_fat_diskio.o \
+ extmod/vfs_fat_file.o \
+ extmod/vfs_lfs.o \
+ extmod/utime_mphal.o \
+ shared/libc/abort_.o \
+ shared/libc/printf.o \
+
+# prepend the build destination prefix to the py object files
+PY_CORE_O = $(addprefix $(BUILD)/, $(PY_CORE_O_BASENAME))
+PY_EXTMOD_O = $(addprefix $(BUILD)/, $(PY_EXTMOD_O_BASENAME))
+
+# this is a convenience variable for ports that want core, extmod and frozen code
+PY_O = $(PY_CORE_O) $(PY_EXTMOD_O)
+
+# object file for frozen code specified via a manifest
+ifneq ($(FROZEN_MANIFEST),)
+PY_O += $(BUILD)/$(BUILD)/frozen_content.o
+endif
+
+# Sources that may contain qstrings
+SRC_QSTR_IGNORE = py/nlr%
+SRC_QSTR_EMITNATIVE = py/emitn%
+SRC_QSTR += $(SRC_MOD) $(filter-out $(SRC_QSTR_IGNORE),$(PY_CORE_O_BASENAME:.o=.c)) $(PY_EXTMOD_O_BASENAME:.o=.c)
+# Sources that only hold QSTRs after pre-processing.
+SRC_QSTR_PREPROCESSOR = $(addprefix $(TOP)/, $(filter $(SRC_QSTR_EMITNATIVE),$(PY_CORE_O_BASENAME:.o=.c)))
+
+# Anything that depends on FORCE will be considered out-of-date
+FORCE:
+.PHONY: FORCE
+
+$(HEADER_BUILD)/mpversion.h: FORCE | $(HEADER_BUILD)
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/makeversionhdr.py $@
+
+# mpconfigport.mk is optional, but changes to it may drastically change
+# overall config, so they need to be caught
+MPCONFIGPORT_MK = $(wildcard mpconfigport.mk)
+
+$(HEADER_BUILD)/$(TRANSLATION).mo: $(TOP)/locale/$(TRANSLATION).po | $(HEADER_BUILD)
+ $(Q)msgfmt -o $@ $^
+
+$(HEADER_BUILD)/qstrdefs.preprocessed.h: $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) mpconfigport.h $(MPCONFIGPORT_MK) $(PY_SRC)/mpconfig.h | $(HEADER_BUILD)
+ $(STEPECHO) "GEN $@"
+ $(Q)cat $(PY_QSTR_DEFS) $(QSTR_DEFS) $(QSTR_DEFS_COLLECTED) | $(SED) 's/^Q(.*)/"&"/' | $(CPP) $(CFLAGS) - | $(SED) 's/^"\(Q(.*)\)"/\1/' > $@
+
+# qstr data
+$(HEADER_BUILD)/qstrdefs.enum.h: $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@
+
+# Adding an order-only dependency on $(HEADER_BUILD) causes $(HEADER_BUILD) to get
+# created before we run the script to generate the .h
+# Note: we need to protect the qstr names from the preprocessor, so we wrap
+# the lines in "" and then unwrap after the preprocessor is finished.
+$(HEADER_BUILD)/qstrdefs.generated.h: $(PY_SRC)/makeqstrdata.py $(HEADER_BUILD)/$(TRANSLATION).mo $(HEADER_BUILD)/qstrdefs.preprocessed.h
+ $(STEPECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/makeqstrdata.py --compression_filename $(HEADER_BUILD)/compression.generated.h --translation $(HEADER_BUILD)/$(TRANSLATION).mo $(HEADER_BUILD)/qstrdefs.preprocessed.h > $@
+
+$(PY_BUILD)/qstr.o: $(HEADER_BUILD)/qstrdefs.generated.h
+
+
+# build a list of registered modules for py/objmodule.c.
+$(HEADER_BUILD)/moduledefs.h: $(SRC_QSTR) $(QSTR_GLOBAL_DEPENDENCIES) | $(HEADER_BUILD)/mpversion.h
+ @$(ECHO) "GEN $@"
+ $(Q)$(PYTHON) $(PY_SRC)/makemoduledefs.py --vpath="., $(TOP), $(USER_C_MODULES)" $(SRC_QSTR) > $@
+
+# Standard C functions like memset need to be compiled with special flags so
+# the compiler does not optimise these functions in terms of themselves.
+CFLAGS_BUILTIN ?= -ffreestanding -fno-builtin -fno-lto
+$(BUILD)/shared/libc/string0.o: CFLAGS += $(CFLAGS_BUILTIN)
+
+# Force nlr code to always be compiled with space-saving optimisation so
+# that the function preludes are of a minimal and predictable form.
+$(PY_BUILD)/nlr%.o: CFLAGS += -Os
+
+# optimising gc for speed; 5ms down to 4ms on pybv2
+ifndef SUPEROPT_GC
+ SUPEROPT_GC = 1
+endif
+
+ifeq ($(SUPEROPT_GC),1)
+$(PY_BUILD)/gc.o: CFLAGS += $(CSUPEROPT)
+endif
+
+# optimising vm for speed, adds only a small amount to code size but makes a huge difference to speed (20% faster)
+ifndef SUPEROPT_VM
+ SUPEROPT_VM = 1
+endif
+
+ifeq ($(SUPEROPT_VM),1)
+$(PY_BUILD)/vm.o: CFLAGS += $(CSUPEROPT)
+endif
+
+# Optimizing vm.o for modern deeply pipelined CPUs with branch predictors
+# may require disabling tail jump optimization. This will make sure that
+# each opcode has its own dispatching jump which will improve branch
+# predictor efficiency.
+# https://marc.info/?l=lua-l&m=129778596120851
+# http://hg.python.org/cpython/file/b127046831e2/Python/ceval.c#l828
+# http://www.emulators.com/docs/nx25_nostradamus.htm
+#-fno-crossjumping
+
+# Include rules for extmod related code
+include $(TOP)/extmod/extmod.mk
diff --git a/circuitpython/py/pystack.c b/circuitpython/py/pystack.c
new file mode 100644
index 0000000..43dfd4e
--- /dev/null
+++ b/circuitpython/py/pystack.c
@@ -0,0 +1,56 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/runtime.h"
+
+#if MICROPY_ENABLE_PYSTACK
+
+// Initialise the current thread's pystack to the region [start, end);
+// the allocation cursor begins at `start`.
+void mp_pystack_init(void *start, void *end) {
+ MP_STATE_THREAD(pystack_start) = start;
+ MP_STATE_THREAD(pystack_end) = end;
+ MP_STATE_THREAD(pystack_cur) = start;
+}
+
+// Bump-allocate n_bytes from the pystack, rounded up to MICROPY_PYSTACK_ALIGN.
+// Raises RuntimeError when the region is exhausted. In MP_PYSTACK_DEBUG builds
+// the rounded size is recorded just below the new cursor so mp_pystack_free()
+// can verify matching free sizes.
+void *mp_pystack_alloc(size_t n_bytes) {
+ // Round up to the alignment granularity (MICROPY_PYSTACK_ALIGN is a power of 2).
+ n_bytes = (n_bytes + (MICROPY_PYSTACK_ALIGN - 1)) & ~(MICROPY_PYSTACK_ALIGN - 1);
+ #if MP_PYSTACK_DEBUG
+ // Reserve one extra slot to store this allocation's size.
+ n_bytes += MICROPY_PYSTACK_ALIGN;
+ #endif
+ // NOTE(review): assumes pystack_cur + n_bytes does not overflow the pointer
+ // range — holds for realistic region sizes, but worth confirming.
+ if (MP_STATE_THREAD(pystack_cur) + n_bytes > MP_STATE_THREAD(pystack_end)) {
+ // out of memory in the pystack
+ mp_raise_type_arg(&mp_type_RuntimeError, MP_OBJ_NEW_QSTR(MP_QSTR_pystack_space_exhausted));
+ }
+ void *ptr = MP_STATE_THREAD(pystack_cur);
+ MP_STATE_THREAD(pystack_cur) += n_bytes;
+ #if MP_PYSTACK_DEBUG
+ // Stash the rounded size in the last alignment slot of this allocation.
+ *(size_t *)(MP_STATE_THREAD(pystack_cur) - MICROPY_PYSTACK_ALIGN) = n_bytes;
+ #endif
+ return ptr;
+}
+
+#endif
diff --git a/circuitpython/py/pystack.h b/circuitpython/py/pystack.h
new file mode 100644
index 0000000..ed51e0c
--- /dev/null
+++ b/circuitpython/py/pystack.h
@@ -0,0 +1,124 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_PYSTACK_H
+#define MICROPY_INCLUDED_PY_PYSTACK_H
+
+#include "py/mpconfig.h"
+#include "py/mpstate.h"
+
+// Enable this debugging option to check that the amount of memory freed is
+// consistent with amounts that were previously allocated.
+#define MP_PYSTACK_DEBUG (0)
+
+#if MICROPY_ENABLE_PYSTACK
+
+void mp_pystack_init(void *start, void *end);
+void *mp_pystack_alloc(size_t n_bytes);
+
+// This function can free multiple continuous blocks at once: just pass the
+// pointer to the block that was allocated first and it and all subsequently
+// allocated blocks will be freed.
+static inline void mp_pystack_free(void *ptr) {
+ // ptr must lie within the currently-allocated part of the pystack.
+ assert((uint8_t *)ptr >= MP_STATE_THREAD(pystack_start));
+ assert((uint8_t *)ptr <= MP_STATE_THREAD(pystack_cur));
+ #if MP_PYSTACK_DEBUG
+ // Walk the per-allocation size records (written by mp_pystack_alloc) from
+ // the top of the stack downwards and check they sum exactly to the amount
+ // being freed; any mismatch means unbalanced alloc/free calls.
+ size_t n_bytes_to_free = MP_STATE_THREAD(pystack_cur) - (uint8_t *)ptr;
+ size_t n_bytes = *(size_t *)(MP_STATE_THREAD(pystack_cur) - MICROPY_PYSTACK_ALIGN);
+ while (n_bytes < n_bytes_to_free) {
+ n_bytes += *(size_t *)(MP_STATE_THREAD(pystack_cur) - n_bytes - MICROPY_PYSTACK_ALIGN);
+ }
+ if (n_bytes != n_bytes_to_free) {
+ mp_printf(&mp_plat_print, "mp_pystack_free() failed: %u != %u\n", (uint)n_bytes_to_free,
+ (uint)*(size_t *)(MP_STATE_THREAD(pystack_cur) - MICROPY_PYSTACK_ALIGN));
+ assert(0);
+ }
+ #endif
+ // Freeing is just rewinding the bump cursor.
+ MP_STATE_THREAD(pystack_cur) = (uint8_t *)ptr;
+}
+
+// Resize the top-most pystack allocation in place: rewinding to ptr and
+// re-allocating hands back the same region, so no data is moved. Only valid
+// when ptr is the most recent (or a trailing run of) allocation(s).
+static inline void mp_pystack_realloc(void *ptr, size_t n_bytes) {
+ mp_pystack_free(ptr);
+ mp_pystack_alloc(n_bytes);
+}
+
+// Number of bytes currently allocated from the pystack.
+static inline size_t mp_pystack_usage(void) {
+ return MP_STATE_THREAD(pystack_cur) - MP_STATE_THREAD(pystack_start);
+}
+
+// Total capacity of the pystack region in bytes.
+static inline size_t mp_pystack_limit(void) {
+ return MP_STATE_THREAD(pystack_end) - MP_STATE_THREAD(pystack_start);
+}
+
+#endif
+
+// Allocation shims used by the VM/compiler for short-lived buffers.
+// "local" = freed before the enclosing C function returns; "nonlocal" = may
+// outlive it. Without a pystack these map to the C stack (alloca) and the GC
+// heap; with a pystack everything comes from the pystack region.
+#if !MICROPY_ENABLE_PYSTACK
+
+// C-stack allocation: implicitly freed on function return.
+#define mp_local_alloc(n_bytes) alloca(n_bytes)
+
+static inline void mp_local_free(void *ptr) {
+ (void)ptr;
+}
+
+static inline void *mp_nonlocal_alloc(size_t n_bytes) {
+ return m_new(uint8_t, n_bytes);
+}
+
+static inline void *mp_nonlocal_realloc(void *ptr, size_t old_n_bytes, size_t new_n_bytes) {
+ return m_renew(uint8_t, ptr, old_n_bytes, new_n_bytes);
+}
+
+static inline void mp_nonlocal_free(void *ptr, size_t n_bytes) {
+ m_del(uint8_t, ptr, n_bytes);
+}
+
+#else
+
+static inline void *mp_local_alloc(size_t n_bytes) {
+ return mp_pystack_alloc(n_bytes);
+}
+
+static inline void mp_local_free(void *ptr) {
+ mp_pystack_free(ptr);
+}
+
+static inline void *mp_nonlocal_alloc(size_t n_bytes) {
+ return mp_pystack_alloc(n_bytes);
+}
+
+static inline void *mp_nonlocal_realloc(void *ptr, size_t old_n_bytes, size_t new_n_bytes) {
+ (void)old_n_bytes;
+ // Pystack realloc resizes in place (see mp_pystack_realloc), so the
+ // original pointer stays valid and is returned unchanged.
+ mp_pystack_realloc(ptr, new_n_bytes);
+ return ptr;
+}
+
+static inline void mp_nonlocal_free(void *ptr, size_t n_bytes) {
+ (void)n_bytes;
+ mp_pystack_free(ptr);
+}
+
+#endif
+
+#endif // MICROPY_INCLUDED_PY_PYSTACK_H
diff --git a/circuitpython/py/qstr.c b/circuitpython/py/qstr.c
new file mode 100644
index 0000000..41176ec
--- /dev/null
+++ b/circuitpython/py/qstr.c
@@ -0,0 +1,342 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "py/gc.h"
+#include "py/mpstate.h"
+#include "py/qstr.h"
+#include "py/gc.h"
+#include "py/runtime.h"
+
+#include "supervisor/linker.h"
+
+// NOTE: we are using linear arrays to store and search for qstr's (unique strings, interned strings)
+// ultimately we will replace this with a static hash table of some kind
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_printf DEBUG_printf
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#endif
+
+// A qstr is an index into the qstr pool.
+// The data for a qstr is \0 terminated (so they can be printed using printf)
+
+#define Q_HASH_MASK ((1 << (8 * MICROPY_QSTR_BYTES_IN_HASH)) - 1)
+
+#if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+#define QSTR_ENTER() mp_thread_mutex_lock(&MP_STATE_VM(qstr_mutex), 1)
+#define QSTR_EXIT() mp_thread_mutex_unlock(&MP_STATE_VM(qstr_mutex))
+#else
+#define QSTR_ENTER()
+#define QSTR_EXIT()
+#endif
+
+// Initial number of entries for qstr pool, set so that the first dynamically
+// allocated pool is twice this size. The value here must be <= MP_QSTRnumber_of.
+#define MICROPY_ALLOC_QSTR_ENTRIES_INIT (10)
+
+// this must match the equivalent function in makeqstrdata.py
+// Hashes `len` bytes at `data` and truncates the result to
+// MICROPY_QSTR_BYTES_IN_HASH bytes (Q_HASH_MASK).
+mp_uint_t qstr_compute_hash(const byte *data, size_t len) {
+    // djb2 algorithm; see http://www.cse.yorku.ca/~oz/hash.html
+    mp_uint_t hash = 5381;
+    for (const byte *top = data + len; data < top; data++) {
+        hash = ((hash << 5) + hash) ^ (*data); // hash * 33 ^ data
+    }
+    hash &= Q_HASH_MASK;
+    // Make sure that valid hash is never zero, zero means "hash not computed"
+    if (hash == 0) {
+        hash++;
+    }
+    return hash;
+}
+#ifndef CIRCUITPY_PRECOMPUTE_QSTR_ATTR
+#define CIRCUITPY_PRECOMPUTE_QSTR_ATTR (1)
+#endif
+#if CIRCUITPY_PRECOMPUTE_QSTR_ATTR == 1
+const qstr_attr_t mp_qstr_const_attr[MP_QSTRnumber_of] = {
+ #ifndef NO_QSTR
+#define QDEF(id, hash, len, str) { hash, len },
+#define TRANSLATION(id, length, compressed ...)
+ #include "genhdr/qstrdefs.generated.h"
+#undef TRANSLATION
+#undef QDEF
+ #endif
+};
+#else
+qstr_attr_t mp_qstr_const_attr[MP_QSTRnumber_of];
+#endif
+
+const qstr_pool_t mp_qstr_const_pool = {
+ NULL, // no previous pool
+ 0, // no previous pool
+ MICROPY_ALLOC_QSTR_ENTRIES_INIT,
+ MP_QSTRnumber_of, // corresponds to number of strings in array just below
+ (qstr_attr_t *)mp_qstr_const_attr,
+ {
+ #ifndef NO_QSTR
+#define QDEF(id, hash, len, str) str,
+#define TRANSLATION(id, length, compressed ...)
+ #include "genhdr/qstrdefs.generated.h"
+#undef TRANSLATION
+#undef QDEF
+ #endif
+ },
+};
+
+#ifdef MICROPY_QSTR_EXTRA_POOL
+extern const qstr_pool_t MICROPY_QSTR_EXTRA_POOL;
+#define CONST_POOL MICROPY_QSTR_EXTRA_POOL
+#else
+#define CONST_POOL mp_qstr_const_pool
+#endif
+
+// Initialise the per-VM qstr state: point the pool chain at the ROM const
+// pool and (if hashes/lengths were not precomputed at build time) fill in
+// mp_qstr_const_attr at runtime.
+void qstr_init(void) {
+    MP_STATE_VM(last_pool) = (qstr_pool_t *)&CONST_POOL; // we won't modify the const_pool since it has no allocated room left
+    MP_STATE_VM(qstr_last_chunk) = NULL;
+
+    #if CIRCUITPY_PRECOMPUTE_QSTR_ATTR == 0
+    // attr of a known qstr having len == 0 is used as a "not yet initialised"
+    // sentinel so this loop runs only once.
+    if (mp_qstr_const_attr[MP_QSTR_circuitpython].len == 0) {
+        for (size_t i = 0; i < mp_qstr_const_pool.len; i++) {
+            size_t len = strlen(mp_qstr_const_pool.qstrs[i]);
+            mp_qstr_const_attr[i].hash = qstr_compute_hash((const byte *)mp_qstr_const_pool.qstrs[i], len);
+            mp_qstr_const_attr[i].len = len;
+        }
+    }
+    #endif
+
+    #if MICROPY_PY_THREAD && !MICROPY_PY_THREAD_GIL
+    mp_thread_mutex_init(&MP_STATE_VM(qstr_mutex));
+    #endif
+}
+
+// Look up qstr `q`: returns its string data and copies its hash/len into
+// *attr. Walks the pool chain from newest to oldest until the pool whose
+// index range contains q is found.
+STATIC const char *find_qstr(qstr q, qstr_attr_t *attr) {
+    // search pool for this qstr
+    // total_prev_len==0 in the final pool, so the loop will always terminate
+    const qstr_pool_t *pool = MP_STATE_VM(last_pool);
+    while (q < pool->total_prev_len) {
+        pool = pool->prev;
+    }
+    // convert the global qstr index into an index local to this pool
+    q -= pool->total_prev_len;
+    assert(q < pool->len);
+    *attr = pool->attrs[q];
+    return pool->qstrs[q];
+}
+
+// qstr_mutex must be taken while in this function
+// Appends a new qstr (already-interned data at q_ptr, precomputed hash/len)
+// to the newest pool, growing the pool chain if the current pool is full.
+// Returns the new global qstr index.
+STATIC qstr qstr_add(mp_uint_t hash, mp_uint_t len, const char *q_ptr) {
+    DEBUG_printf("QSTR: add hash=%d len=%d data=%.*s\n", hash, len, len, q_ptr);
+
+    // make sure we have room in the pool for a new qstr
+    if (MP_STATE_VM(last_pool)->len >= MP_STATE_VM(last_pool)->alloc) {
+        // new pool is double the previous one, capped at the configured maximum
+        uint32_t new_pool_length = MP_STATE_VM(last_pool)->alloc * 2;
+        if (new_pool_length > MICROPY_QSTR_POOL_MAX_ENTRIES) {
+            new_pool_length = MICROPY_QSTR_POOL_MAX_ENTRIES;
+        }
+        #ifdef MICROPY_QSTR_EXTRA_POOL
+        // Put a lower bound on the allocation size in case the extra qstr pool has few entries
+        if (new_pool_length < MICROPY_ALLOC_QSTR_ENTRIES_INIT) {
+            new_pool_length = MICROPY_ALLOC_QSTR_ENTRIES_INIT;
+        }
+        #endif
+        // one allocation holds the pool header, the qstrs[] pointer array
+        // (flexible array member) and the attrs array placed after it
+        mp_uint_t pool_size = sizeof(qstr_pool_t)
+            + (sizeof(const char *) + sizeof(qstr_attr_t)) * new_pool_length;
+        qstr_pool_t *pool = (qstr_pool_t *)m_malloc_maybe(pool_size, true);
+        if (pool == NULL) {
+            // Keep qstr_last_chunk consistent with qstr_pool_t: qstr_last_chunk is not scanned
+            // at garbage collection since it's reachable from a qstr_pool_t. And the caller of
+            // this function expects q_ptr to be stored in a qstr_pool_t so it can be reached
+            // by the collector. If qstr_pool_t allocation failed, qstr_last_chunk needs to be
+            // NULL'd. Otherwise it may become a dangling pointer at the next garbage collection.
+            MP_STATE_VM(qstr_last_chunk) = NULL;
+            QSTR_EXIT();
+            m_malloc_fail(new_pool_length);
+        }
+        pool->attrs = (qstr_attr_t *)(pool->qstrs + new_pool_length);
+        pool->prev = MP_STATE_VM(last_pool);
+        pool->total_prev_len = MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len;
+        pool->alloc = new_pool_length;
+        pool->len = 0;
+        MP_STATE_VM(last_pool) = pool;
+        DEBUG_printf("QSTR: allocate new pool of size %d\n", MP_STATE_VM(last_pool)->alloc);
+    }
+
+    // add the new qstr
+    mp_uint_t at = MP_STATE_VM(last_pool)->len;
+    MP_STATE_VM(last_pool)->attrs[at].hash = hash;
+    MP_STATE_VM(last_pool)->attrs[at].len = len;
+    MP_STATE_VM(last_pool)->qstrs[at] = q_ptr;
+    MP_STATE_VM(last_pool)->len++;
+
+    // return id for the newly-added qstr
+    return MP_STATE_VM(last_pool)->total_prev_len + at;
+}
+
+// Search all pools for an already-interned copy of str[0..str_len).
+// Returns its qstr index, or 0 (the null qstr) if not interned.
+// Comparison is hash-first, then length, then memcmp.
+qstr qstr_find_strn(const char *str, size_t str_len) {
+    // work out hash of str
+    mp_uint_t str_hash = qstr_compute_hash((const byte *)str, str_len);
+
+    // search pools for the data
+    for (const qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL; pool = pool->prev) {
+        qstr_attr_t *attrs = pool->attrs;
+        for (mp_uint_t at = 0, top = pool->len; at < top; at++) {
+            if (attrs[at].hash == str_hash && attrs[at].len == str_len && memcmp(pool->qstrs[at], str, str_len) == 0) {
+                return pool->total_prev_len + at;
+            }
+        }
+    }
+
+    // not found; return null qstr
+    return 0;
+}
+
+// Intern a NUL-terminated string (convenience wrapper).
+qstr qstr_from_str(const char *str) {
+    return qstr_from_strn(str, strlen(str));
+}
+
+// Intern str[0..len): return the existing qstr if already interned,
+// otherwise copy the data (NUL-terminated) into the current chunk and
+// register it via qstr_add. Raises RuntimeError if len does not fit in
+// MICROPY_QSTR_BYTES_IN_LEN, and may raise via m_malloc_fail on OOM.
+qstr qstr_from_strn(const char *str, size_t len) {
+    QSTR_ENTER();
+    qstr q = qstr_find_strn(str, len);
+    if (q == 0) {
+        // qstr does not exist in interned pool so need to add it
+
+        // check that len is not too big
+        if (len >= (1 << (8 * MICROPY_QSTR_BYTES_IN_LEN))) {
+            QSTR_EXIT();
+            mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("Name too long"));
+        }
+
+        // compute number of bytes needed to intern this string
+        size_t n_bytes = len + 1;
+
+        if (MP_STATE_VM(qstr_last_chunk) != NULL && MP_STATE_VM(qstr_last_used) + n_bytes > MP_STATE_VM(qstr_last_alloc)) {
+            // not enough room at end of previously interned string so try to grow
+            char *new_p = m_renew_maybe(char, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_alloc) + n_bytes, false);
+            if (new_p == NULL) {
+                // could not grow existing memory; shrink it to fit previous
+                (void)m_renew_maybe(char, MP_STATE_VM(qstr_last_chunk), MP_STATE_VM(qstr_last_alloc), MP_STATE_VM(qstr_last_used), false);
+                MP_STATE_VM(qstr_last_chunk) = NULL;
+            } else {
+                // could grow existing memory
+                MP_STATE_VM(qstr_last_alloc) += n_bytes;
+            }
+        }
+
+        if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+            // no existing memory for the interned string so allocate a new chunk
+            size_t al = n_bytes;
+            if (al < MICROPY_ALLOC_QSTR_CHUNK_INIT) {
+                al = MICROPY_ALLOC_QSTR_CHUNK_INIT;
+            }
+            MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(char, al);
+            if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+                // failed to allocate a large chunk so try with exact size
+                MP_STATE_VM(qstr_last_chunk) = m_new_ll_maybe(char, n_bytes);
+                if (MP_STATE_VM(qstr_last_chunk) == NULL) {
+                    QSTR_EXIT();
+                    m_malloc_fail(n_bytes);
+                }
+                al = n_bytes;
+            }
+            MP_STATE_VM(qstr_last_alloc) = al;
+            MP_STATE_VM(qstr_last_used) = 0;
+        }
+
+        // allocate memory from the chunk for this new interned string's data
+        char *q_ptr = MP_STATE_VM(qstr_last_chunk) + MP_STATE_VM(qstr_last_used);
+        MP_STATE_VM(qstr_last_used) += n_bytes;
+
+        // store the interned strings' data
+        mp_uint_t hash = qstr_compute_hash((const byte *)str, len);
+        memcpy(q_ptr, str, len);
+        q_ptr[len] = '\0';
+        q = qstr_add(hash, len, q_ptr);
+    }
+    QSTR_EXIT();
+    return q;
+}
+
+// Accessors below all resolve q through find_qstr and return one field.
+// qstr_hash is placed in ITCM (fast RAM) as it is on hot lookup paths.
+mp_uint_t PLACE_IN_ITCM(qstr_hash)(qstr q) {
+    qstr_attr_t attr;
+    find_qstr(q, &attr);
+    return attr.hash;
+}
+
+// Length in bytes of the qstr's data (excluding the NUL terminator).
+size_t qstr_len(qstr q) {
+    qstr_attr_t attr;
+    find_qstr(q, &attr);
+    return attr.len;
+}
+
+// NUL-terminated string data for q; storage is owned by the qstr pool.
+const char *qstr_str(qstr q) {
+    qstr_attr_t attr;
+    return find_qstr(q, &attr);
+}
+
+// Returns the data pointer and stores the byte length in *len.
+const byte *qstr_data(qstr q, size_t *len) {
+    qstr_attr_t attr;
+    const char *qd = find_qstr(q, &attr);
+    *len = attr.len;
+    return (byte *)qd;
+}
+
+// Report memory statistics over the dynamically-allocated pools only
+// (the walk stops at CONST_POOL): number of pools, number of qstrs,
+// bytes of string data, and total heap bytes used.
+void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes) {
+    QSTR_ENTER();
+    *n_pool = 0;
+    *n_qstr = 0;
+    *n_str_data_bytes = 0;
+    *n_total_bytes = 0;
+    for (const qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &CONST_POOL; pool = pool->prev) {
+        *n_pool += 1;
+        *n_qstr += pool->len;
+        for (const qstr_attr_t *q = pool->attrs, *q_top = pool->attrs + pool->len; q < q_top; q++) {
+            // attr entry plus string bytes plus NUL terminator
+            *n_str_data_bytes += sizeof(*q) + q->len + 1;
+        }
+        #if MICROPY_ENABLE_GC
+        // this counts actual bytes used in heap
+        *n_total_bytes += gc_nbytes(pool) - sizeof(qstr_attr_t) * pool->alloc;
+        #else
+        *n_total_bytes += sizeof(qstr_pool_t) + sizeof(const char *) * pool->alloc;
+        #endif
+    }
+    *n_total_bytes += *n_str_data_bytes;
+    QSTR_EXIT();
+}
+
+#if MICROPY_PY_MICROPYTHON_MEM_INFO
+// Print every dynamically-interned qstr (ROM const pool excluded) in
+// Q(...) form, for micropython.mem_info(1) style diagnostics.
+void qstr_dump_data(void) {
+    QSTR_ENTER();
+    for (const qstr_pool_t *pool = MP_STATE_VM(last_pool); pool != NULL && pool != &CONST_POOL; pool = pool->prev) {
+        for (const char *const *q = pool->qstrs, *const *q_top = pool->qstrs + pool->len; q < q_top; q++) {
+            mp_printf(&mp_plat_print, "Q(%s)\n", *q);
+        }
+    }
+    QSTR_EXIT();
+}
+#endif
diff --git a/circuitpython/py/qstr.h b/circuitpython/py/qstr.h
new file mode 100644
index 0000000..7820de2
--- /dev/null
+++ b/circuitpython/py/qstr.h
@@ -0,0 +1,94 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_QSTR_H
+#define MICROPY_INCLUDED_PY_QSTR_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// See qstrdefs.h for a list of qstr's that are available as constants.
+// Reference them as MP_QSTR_xxxx.
+//
+// Note: it would be possible to define MP_QSTR_xxx as qstr_from_str("xxx")
+// for qstrs that are referenced this way, but you don't want to have them in ROM.
+
+// first entry in enum will be MP_QSTRnull=0, which indicates invalid/no qstr
+enum {
+ #ifndef NO_QSTR
+#define QENUM(id) id,
+ #include "genhdr/qstrdefs.enum.h"
+#undef QENUM
+ #endif
+ MP_QSTRnumber_of, // no underscore so it can't clash with any of the above
+};
+
+// A qstr is an index into the chain of qstr pools (0 is the null qstr).
+typedef size_t qstr;
+
+// Per-qstr metadata; field widths are configured by MICROPY_QSTR_BYTES_IN_HASH
+// and MICROPY_QSTR_BYTES_IN_LEN and must match the generated qstr data.
+typedef struct _qstr_attr_t {
+    #if MICROPY_QSTR_BYTES_IN_HASH == 1
+    uint8_t hash;
+    #elif MICROPY_QSTR_BYTES_IN_HASH == 2
+    uint16_t hash;
+    #else
+    #error unimplemented qstr hash decoding
+    #endif
+    #if MICROPY_QSTR_BYTES_IN_LEN == 1
+    uint8_t len;
+    #elif MICROPY_QSTR_BYTES_IN_LEN == 2
+    uint16_t len;
+    #else
+    #error unimplemented qstr length decoding
+    #endif
+} qstr_attr_t;
+
+// Pools form a singly-linked list from newest to oldest; a qstr index q
+// belongs to the pool where total_prev_len <= q < total_prev_len + len.
+typedef struct _qstr_pool_t {
+    const struct _qstr_pool_t *prev;   // next-older pool, or NULL
+    size_t total_prev_len;             // sum of len over all older pools
+    size_t alloc;                      // capacity of qstrs[]/attrs[]
+    size_t len;                        // number of entries in use
+    qstr_attr_t *attrs;                // hash/len for each entry
+    const char *qstrs[];               // NUL-terminated string data pointers
+} qstr_pool_t;
+
+// Total number of qstrs currently interned across all pools.
+#define QSTR_TOTAL() (MP_STATE_VM(last_pool)->total_prev_len + MP_STATE_VM(last_pool)->len)
+
+void qstr_init(void);
+
+mp_uint_t qstr_compute_hash(const byte *data, size_t len);
+qstr qstr_find_strn(const char *str, size_t str_len); // returns MP_QSTRnull if not found
+
+qstr qstr_from_str(const char *str);
+qstr qstr_from_strn(const char *str, size_t len);
+
+mp_uint_t qstr_hash(qstr q);
+const char *qstr_str(qstr q);
+size_t qstr_len(qstr q);
+const byte *qstr_data(qstr q, size_t *len);
+
+void qstr_pool_info(size_t *n_pool, size_t *n_qstr, size_t *n_str_data_bytes, size_t *n_total_bytes);
+void qstr_dump_data(void);
+
+#endif // MICROPY_INCLUDED_PY_QSTR_H
diff --git a/circuitpython/py/qstrdefs.h b/circuitpython/py/qstrdefs.h
new file mode 100644
index 0000000..02b87f4
--- /dev/null
+++ b/circuitpython/py/qstrdefs.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// *FORMAT-OFF*
+
+#include "py/mpconfig.h"
+
+// All the qstr definitions in this file are available as constants.
+// That is, they are in ROM and you can reference them simply as MP_QSTR_xxxx.
+
+// qstr configuration passed to makeqstrdata.py of the form QCFG(key, value)
+QCFG(BYTES_IN_LEN, MICROPY_QSTR_BYTES_IN_LEN)
+QCFG(BYTES_IN_HASH, MICROPY_QSTR_BYTES_IN_HASH)
+
+Q()
+Q(*)
+Q(_)
+Q(/)
+#if MICROPY_PY_BUILTINS_STR_OP_MODULO
+Q(%#o)
+Q(%#x)
+#else
+Q({:#o})
+Q({:#x})
+#endif
+Q({:#b})
+Q( )
+Q(\n)
+Q(maximum recursion depth exceeded)
+Q(<module>)
+Q(<lambda>)
+Q(<listcomp>)
+Q(<dictcomp>)
+Q(<setcomp>)
+Q(<genexpr>)
+Q(<string>)
+Q(<stdin>)
+Q(utf-8)
+
+#if MICROPY_MODULE_FROZEN
+Q(.frozen)
+#endif
+
+#if MICROPY_ENABLE_PYSTACK
+Q(pystack exhausted)
+#endif
diff --git a/circuitpython/py/reader.c b/circuitpython/py/reader.c
new file mode 100644
index 0000000..ecc8515
--- /dev/null
+++ b/circuitpython/py/reader.c
@@ -0,0 +1,148 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/runtime.h"
+#include "py/mperrno.h"
+#include "py/mpthread.h"
+#include "py/reader.h"
+
+// In-memory byte reader backing an mp_reader_t.
+typedef struct _mp_reader_mem_t {
+    size_t free_len; // if >0 mem is freed on close by: m_free(beg, free_len)
+    const byte *beg;
+    const byte *cur;
+    const byte *end;
+} mp_reader_mem_t;
+
+// Return the next byte, or MP_READER_EOF when the buffer is exhausted;
+// safe to call repeatedly at EOF.
+STATIC mp_uint_t mp_reader_mem_readbyte(void *data) {
+    mp_reader_mem_t *reader = (mp_reader_mem_t *)data;
+    if (reader->cur < reader->end) {
+        return *reader->cur++;
+    } else {
+        return MP_READER_EOF;
+    }
+}
+
+// Free the reader object, and the underlying buffer too if free_len > 0
+// (i.e. the reader was given ownership of it).
+STATIC void mp_reader_mem_close(void *data) {
+    mp_reader_mem_t *reader = (mp_reader_mem_t *)data;
+    if (reader->free_len > 0) {
+        m_del(char, (char *)reader->beg, reader->free_len);
+    }
+    m_del_obj(mp_reader_mem_t, reader);
+}
+
+// Initialise *reader to read buf[0..len). If free_len > 0 the reader takes
+// ownership of buf and frees free_len bytes of it on close.
+void mp_reader_new_mem(mp_reader_t *reader, const byte *buf, size_t len, size_t free_len) {
+    mp_reader_mem_t *rm = m_new_obj(mp_reader_mem_t);
+    rm->free_len = free_len;
+    rm->beg = buf;
+    rm->cur = buf;
+    rm->end = buf + len;
+    reader->data = rm;
+    reader->readbyte = mp_reader_mem_readbyte;
+    reader->close = mp_reader_mem_close;
+}
+
+#if MICROPY_READER_POSIX
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+// POSIX file-descriptor reader with a small internal buffer.
+typedef struct _mp_reader_posix_t {
+    bool close_fd;  // whether close() is called on the fd at reader close
+    int fd;
+    size_t len;     // valid bytes in buf; 0 means EOF reached
+    size_t pos;     // next unread byte in buf
+    byte buf[20];
+} mp_reader_posix_t;
+
+// Return the next byte, refilling buf from the fd as needed (GIL released
+// around the blocking read). Returns MP_READER_EOF at end of file or on
+// read error; repeat calls keep returning MP_READER_EOF since len stays 0.
+STATIC mp_uint_t mp_reader_posix_readbyte(void *data) {
+    mp_reader_posix_t *reader = (mp_reader_posix_t *)data;
+    if (reader->pos >= reader->len) {
+        if (reader->len == 0) {
+            return MP_READER_EOF;
+        } else {
+            MP_THREAD_GIL_EXIT();
+            int n = read(reader->fd, reader->buf, sizeof(reader->buf));
+            MP_THREAD_GIL_ENTER();
+            if (n <= 0) {
+                reader->len = 0;
+                return MP_READER_EOF;
+            }
+            reader->len = n;
+            reader->pos = 0;
+        }
+    }
+    return reader->buf[reader->pos++];
+}
+
+// Close the fd (if owned) and free the reader object.
+STATIC void mp_reader_posix_close(void *data) {
+    mp_reader_posix_t *reader = (mp_reader_posix_t *)data;
+    if (reader->close_fd) {
+        MP_THREAD_GIL_EXIT();
+        close(reader->fd);
+        MP_THREAD_GIL_ENTER();
+    }
+    m_del_obj(mp_reader_posix_t, reader);
+}
+
+// Initialise *reader over an open fd, performing an eager first read to
+// fill the buffer. On read error the fd is closed (if close_fd) and
+// OSError is raised.
+void mp_reader_new_file_from_fd(mp_reader_t *reader, int fd, bool close_fd) {
+    mp_reader_posix_t *rp = m_new_obj(mp_reader_posix_t);
+    rp->close_fd = close_fd;
+    rp->fd = fd;
+    MP_THREAD_GIL_EXIT();
+    int n = read(rp->fd, rp->buf, sizeof(rp->buf));
+    if (n == -1) {
+        if (close_fd) {
+            close(fd);
+        }
+        MP_THREAD_GIL_ENTER();
+        mp_raise_OSError(errno);
+    }
+    MP_THREAD_GIL_ENTER();
+    rp->len = n;
+    rp->pos = 0;
+    reader->data = rp;
+    reader->readbyte = mp_reader_posix_readbyte;
+    reader->close = mp_reader_posix_close;
+}
+
+#if !MICROPY_VFS_POSIX
+// If MICROPY_VFS_POSIX is defined then this function is provided by the VFS layer
+// Open `filename` read-only and wrap it in a reader that owns (and will
+// close) the fd. Raises OSError if the file cannot be opened.
+void mp_reader_new_file(mp_reader_t *reader, const char *filename) {
+    MP_THREAD_GIL_EXIT();
+    int fd = open(filename, O_RDONLY, 0644);
+    MP_THREAD_GIL_ENTER();
+    if (fd < 0) {
+        mp_raise_OSError(errno);
+    }
+    mp_reader_new_file_from_fd(reader, fd, true);
+}
+#endif
+
+#endif
diff --git a/circuitpython/py/reader.h b/circuitpython/py/reader.h
new file mode 100644
index 0000000..6d8565d
--- /dev/null
+++ b/circuitpython/py/reader.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2016 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_READER_H
+#define MICROPY_INCLUDED_PY_READER_H
+
+#include "py/obj.h"
+
+// the readbyte function must return the next byte in the input stream
+// it must return MP_READER_EOF if end of stream
+// it can be called again after returning MP_READER_EOF, and in that case must return MP_READER_EOF
+#define MP_READER_EOF ((mp_uint_t)(-1))
+
+typedef struct _mp_reader_t {
+ void *data;
+ mp_uint_t (*readbyte)(void *data);
+ void (*close)(void *data);
+} mp_reader_t;
+
+void mp_reader_new_mem(mp_reader_t *reader, const byte *buf, size_t len, size_t free_len);
+void mp_reader_new_file(mp_reader_t *reader, const char *filename);
+void mp_reader_new_file_from_fd(mp_reader_t *reader, int fd, bool close_fd);
+
+#endif // MICROPY_INCLUDED_PY_READER_H
diff --git a/circuitpython/py/repl.c b/circuitpython/py/repl.c
new file mode 100644
index 0000000..78367e6
--- /dev/null
+++ b/circuitpython/py/repl.c
@@ -0,0 +1,324 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2015 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include "py/obj.h"
+#include "py/objmodule.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/repl.h"
+
+#if MICROPY_HELPER_REPL
+
+// True if `str` begins with the whole word `head`, i.e. head is a prefix
+// and the following character (if any) is not an identifier character —
+// so "if x" matches "if" but "iffy" does not.
+STATIC bool str_startswith_word(const char *str, const char *head) {
+    size_t i;
+    for (i = 0; str[i] && head[i]; i++) {
+        if (str[i] != head[i]) {
+            return false;
+        }
+    }
+    return head[i] == '\0' && (str[i] == '\0' || !unichar_isident(str[i]));
+}
+
+// Decide whether the REPL should prompt for another line: true when the
+// input so far is syntactically incomplete (open triple-quote, unmatched
+// bracket, trailing backslash, or a compound statement whose body hasn't
+// been terminated by a blank line).
+bool mp_repl_continue_with_input(const char *input) {
+    // check for blank input
+    if (input[0] == '\0') {
+        return false;
+    }
+
+    // check if input starts with a certain keyword
+    bool starts_with_compound_keyword =
+        input[0] == '@'
+        || str_startswith_word(input, "if")
+        || str_startswith_word(input, "while")
+        || str_startswith_word(input, "for")
+        || str_startswith_word(input, "try")
+        || str_startswith_word(input, "with")
+        || str_startswith_word(input, "def")
+        || str_startswith_word(input, "class")
+        #if MICROPY_PY_ASYNC_AWAIT
+        || str_startswith_word(input, "async")
+        #endif
+    ;
+
+    // check for unmatched open bracket, quote or escape quote
+    #define Q_NONE (0)
+    #define Q_1_SINGLE (1)
+    #define Q_1_DOUBLE (2)
+    #define Q_3_SINGLE (3)
+    #define Q_3_DOUBLE (4)
+    int n_paren = 0;
+    int n_brack = 0;
+    int n_brace = 0;
+    int in_quote = Q_NONE;
+    const char *i;
+    for (i = input; *i; i++) {
+        if (*i == '\'') {
+            // Q_NONE is 0, so `Q_x - in_quote` toggles between Q_NONE
+            // and Q_x: entering a quote sets it, the matching close clears it.
+            if ((in_quote == Q_NONE || in_quote == Q_3_SINGLE) && i[1] == '\'' && i[2] == '\'') {
+                i += 2;
+                in_quote = Q_3_SINGLE - in_quote;
+            } else if (in_quote == Q_NONE || in_quote == Q_1_SINGLE) {
+                in_quote = Q_1_SINGLE - in_quote;
+            }
+        } else if (*i == '"') {
+            if ((in_quote == Q_NONE || in_quote == Q_3_DOUBLE) && i[1] == '"' && i[2] == '"') {
+                i += 2;
+                in_quote = Q_3_DOUBLE - in_quote;
+            } else if (in_quote == Q_NONE || in_quote == Q_1_DOUBLE) {
+                in_quote = Q_1_DOUBLE - in_quote;
+            }
+        } else if (*i == '\\' && (i[1] == '\'' || i[1] == '"' || i[1] == '\\')) {
+            // skip the escaped character so it can't close a quote
+            if (in_quote != Q_NONE) {
+                i++;
+            }
+        } else if (in_quote == Q_NONE) {
+            switch (*i) {
+                case '(':
+                    n_paren += 1;
+                    break;
+                case ')':
+                    n_paren -= 1;
+                    break;
+                case '[':
+                    n_brack += 1;
+                    break;
+                case ']':
+                    n_brack -= 1;
+                    break;
+                case '{':
+                    n_brace += 1;
+                    break;
+                case '}':
+                    n_brace -= 1;
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+
+    // continue if unmatched 3-quotes
+    if (in_quote == Q_3_SINGLE || in_quote == Q_3_DOUBLE) {
+        return true;
+    }
+
+    // continue if unmatched brackets, but only if not in a 1-quote
+    if ((n_paren > 0 || n_brack > 0 || n_brace > 0) && in_quote == Q_NONE) {
+        return true;
+    }
+
+    // continue if last character was backslash (for line continuation)
+    if (i[-1] == '\\') {
+        return true;
+    }
+
+    // continue if compound keyword and last line was not empty
+    if (starts_with_compound_keyword && i[-1] != '\n') {
+        return true;
+    }
+
+    // otherwise, don't continue
+    return false;
+}
+
+// True if `name` is a valid completion in the current context: an
+// attribute/method of `obj`, or (when obj is MP_OBJ_NULL, the post-"import"
+// case) the name of a built-in module.
+STATIC bool test_qstr(mp_obj_t obj, qstr name) {
+    if (obj) {
+        // try object member
+        mp_obj_t dest[2];
+        mp_load_method_protected(obj, name, dest, true);
+        return dest[0] != MP_OBJ_NULL;
+    } else {
+        // try builtin module
+        return mp_map_lookup((mp_map_t *)&mp_builtin_module_map,
+            MP_OBJ_NEW_QSTR(name), MP_MAP_LOOKUP) != NULL;
+    }
+}
+
+// Scan all interned qstrs for completions of s_start[0..s_len) that are
+// valid on `obj` (per test_qstr). Outputs: *match_len = length of the
+// longest common prefix of all matches, *q_first/*q_last = first/last
+// matching qstr index (0 if none). Returns the first match's string data,
+// or NULL if nothing matched. Leading-underscore names are skipped unless
+// the user already typed a partial name.
+STATIC const char *find_completions(const char *s_start, size_t s_len,
+    mp_obj_t obj, size_t *match_len, qstr *q_first, qstr *q_last) {
+
+    const char *match_str = NULL;
+    *match_len = 0;
+    *q_first = *q_last = 0;
+    size_t nqstr = QSTR_TOTAL();
+    for (qstr q = MP_QSTR_ + 1; q < nqstr; ++q) {
+        size_t d_len;
+        const char *d_str = (const char *)qstr_data(q, &d_len);
+        // special case; filter out words that begin with underscore
+        // unless there's already a partial match
+        if (s_len == 0 && d_str[0] == '_') {
+            continue;
+        }
+        if (s_len <= d_len && strncmp(s_start, d_str, s_len) == 0) {
+            if (test_qstr(obj, q)) {
+                if (match_str == NULL) {
+                    match_str = d_str;
+                    *match_len = d_len;
+                } else {
+                    // search for longest common prefix of match_str and d_str
+                    // (assumes these strings are null-terminated)
+                    for (size_t j = s_len; j <= *match_len && j <= d_len; ++j) {
+                        if (match_str[j] != d_str[j]) {
+                            *match_len = j;
+                            break;
+                        }
+                    }
+                }
+                if (*q_first == 0) {
+                    *q_first = q;
+                }
+                *q_last = q;
+            }
+        }
+    }
+    return match_str;
+}
+
+// Print every completion of s_start[0..s_len) valid on `obj`, laid out in
+// columns WORD_SLOT_LEN characters wide, wrapping lines at MAX_LINE_LEN.
+// Re-runs the same qstr filter as find_completions over [q_first, q_last].
+STATIC void print_completions(const mp_print_t *print,
+    const char *s_start, size_t s_len,
+    mp_obj_t obj, qstr q_first, qstr q_last) {
+
+    #define WORD_SLOT_LEN (16)
+    #define MAX_LINE_LEN (4 * WORD_SLOT_LEN)
+
+    int line_len = MAX_LINE_LEN; // force a newline for first word
+    for (qstr q = q_first; q <= q_last; ++q) {
+        size_t d_len;
+        const char *d_str = (const char *)qstr_data(q, &d_len);
+        if (s_len <= d_len && strncmp(s_start, d_str, s_len) == 0) {
+            if (test_qstr(obj, q)) {
+                // pad to the next column boundary, with a minimum 2-space gap
+                int gap = (line_len + WORD_SLOT_LEN - 1) / WORD_SLOT_LEN * WORD_SLOT_LEN - line_len;
+                if (gap < 2) {
+                    gap += WORD_SLOT_LEN;
+                }
+                if (line_len + gap + d_len <= MAX_LINE_LEN) {
+                    // TODO optimise printing of gap?
+                    for (int j = 0; j < gap; ++j) {
+                        mp_print_str(print, " ");
+                    }
+                    mp_print_str(print, d_str);
+                    line_len += gap + d_len;
+                } else {
+                    mp_printf(print, "\n%s", d_str);
+                    line_len = d_len;
+                }
+            }
+        }
+    }
+    mp_print_str(print, "\n");
+}
+
+// Tab-completion entry point. Given the current input line str[0..len),
+// resolves the trailing "a.b.c" chain against __main__ (or the builtin
+// module table after "import ") and completes the final partial name.
+// Returns: number of characters to append (with *compl_str pointing at
+// them), 0 if nothing can be completed, or (size_t)-1 after printing a
+// list of multiple matches via `print`.
+size_t mp_repl_autocomplete(const char *str, size_t len, const mp_print_t *print, const char **compl_str) {
+    // scan backwards to find start of "a.b.c" chain
+    const char *org_str = str;
+    const char *top = str + len;
+    for (const char *s = top; --s >= str;) {
+        if (!(unichar_isalpha(*s) || unichar_isdigit(*s) || *s == '_' || *s == '.')) {
+            ++s;
+            str = s;
+            break;
+        }
+    }
+
+    // begin search in outer global dict which is accessed from __main__
+    mp_obj_t obj = MP_OBJ_FROM_PTR(&mp_module___main__);
+    mp_obj_t dest[2];
+
+    const char *s_start;
+    size_t s_len;
+
+    for (;;) {
+        // get next word in string to complete
+        s_start = str;
+        while (str < top && *str != '.') {
+            ++str;
+        }
+        s_len = str - s_start;
+
+        if (str == top) {
+            // end of string, do completion on this partial name
+            break;
+        }
+
+        // a complete word, lookup in current object
+        qstr q = qstr_find_strn(s_start, s_len);
+        if (q == MP_QSTRnull) {
+            // lookup will fail
+            return 0;
+        }
+        mp_load_method_protected(obj, q, dest, true);
+        obj = dest[0]; // attribute, method, or MP_OBJ_NULL if nothing found
+
+        if (obj == MP_OBJ_NULL) {
+            // lookup failed
+            return 0;
+        }
+
+        // skip '.' to move to next word
+        ++str;
+    }
+
+    // after "import", suggest built-in modules
+    static const char import_str[] = "import ";
+    if (len >= 7 && !memcmp(org_str, import_str, 7)) {
+        obj = MP_OBJ_NULL;
+    }
+
+    // look for matches
+    size_t match_len;
+    qstr q_first, q_last;
+    const char *match_str =
+        find_completions(s_start, s_len, obj, &match_len, &q_first, &q_last);
+
+    // nothing found
+    if (q_first == 0) {
+        // If there're no better alternatives, and if it's first word
+        // in the line, try to complete "import".
+        if (s_start == org_str && s_len > 0 && s_len < sizeof(import_str) - 1) {
+            if (memcmp(s_start, import_str, s_len) == 0) {
+                *compl_str = import_str + s_len;
+                return sizeof(import_str) - 1 - s_len;
+            }
+        }
+        // no match at all: on an empty word, insert a 4-space indent
+        if (q_first == 0) {
+            *compl_str = "    ";
+            return s_len ? 0 : 4;
+        }
+    }
+
+    // 1 match found, or multiple matches with a common prefix
+    if (q_first == q_last || match_len > s_len) {
+        *compl_str = match_str + s_len;
+        return match_len - s_len;
+    }
+
+    // multiple matches found, print them out
+    print_completions(print, s_start, s_len, obj, q_first, q_last);
+
+    return (size_t)(-1); // indicate many matches
+}
+
+#endif // MICROPY_HELPER_REPL
diff --git a/circuitpython/py/repl.h b/circuitpython/py/repl.h
new file mode 100644
index 0000000..89b64c4
--- /dev/null
+++ b/circuitpython/py/repl.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_REPL_H
+#define MICROPY_INCLUDED_PY_REPL_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+#include "py/mpprint.h"
+
+#if MICROPY_HELPER_REPL
+bool mp_repl_continue_with_input(const char *input);
+size_t mp_repl_autocomplete(const char *str, size_t len, const mp_print_t *print, const char **compl_str);
+#endif
+
+#endif // MICROPY_INCLUDED_PY_REPL_H
diff --git a/circuitpython/py/ringbuf.c b/circuitpython/py/ringbuf.c
new file mode 100644
index 0000000..fe47b50
--- /dev/null
+++ b/circuitpython/py/ringbuf.c
@@ -0,0 +1,170 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Paul Sokolovsky
+ * Copyright (c) 2019 Jim Mussared
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "ringbuf.h"
+
+// Static initialization with a caller-provided backing buffer.
+// Note: the usable capacity is one byte less than `capacity`, because
+// one slot is sacrificed to distinguish the full state from empty.
+bool ringbuf_init(ringbuf_t *r, uint8_t *buf, size_t capacity) {
+    r->iget = 0;
+    r->iput = 0;
+    r->size = capacity;
+    r->buf = buf;
+    return r->buf != NULL;
+}
+
+// Dynamic initialization. This should be accessible from a root pointer.
+// capacity is the number of bytes the ring buffer can hold. The actual
+// size of the buffer is one greater than that, due to how the buffer
+// handles empty and full statuses.
+bool ringbuf_alloc(ringbuf_t *r, size_t capacity, bool long_lived) {
+    // Over-allocate by one byte so that `capacity` bytes are usable.
+    size_t alloc_size = capacity + 1;
+    r->buf = gc_alloc(alloc_size, false, long_lived);
+    r->size = alloc_size;
+    r->iget = 0;
+    r->iput = 0;
+    return r->buf != NULL;
+}
+
+// Release the buffer. The storage is merely dereferenced so the GC can
+// reclaim it; if the VM has finished already, this will be safe.
+void ringbuf_free(ringbuf_t *r) {
+    r->buf = NULL;
+    r->size = 0;
+    ringbuf_clear(r);
+}
+
+// Usable capacity: one less than the allocated size, since one slot is
+// reserved to tell the full state apart from the empty state.
+size_t ringbuf_capacity(ringbuf_t *r) {
+    return r->size - 1;
+}
+
+// Fetch one byte. Returns -1 if the buffer is empty, else the byte.
+int ringbuf_get(ringbuf_t *r) {
+    if (r->iput == r->iget) {
+        return -1; // empty
+    }
+    int v = r->buf[r->iget];
+    if (++r->iget >= r->size) {
+        r->iget = 0; // wrap around
+    }
+    return v;
+}
+
+// Fetch a big-endian 16-bit value. Returns -1 (consuming nothing)
+// unless two bytes are available.
+int ringbuf_get16(ringbuf_t *r) {
+    int value = ringbuf_peek16(r);
+    if (value < 0) {
+        return -1;
+    }
+    // Advance the read index past both bytes, wrapping if needed.
+    uint32_t iget_new = r->iget + 2;
+    if (iget_new >= r->size) {
+        iget_new -= r->size;
+    }
+    r->iget = iget_new;
+    return value;
+}
+
+// Append one byte. Returns 0 on success, -1 if the buffer is full.
+int ringbuf_put(ringbuf_t *r, uint8_t v) {
+    uint32_t next = r->iput + 1;
+    if (next >= r->size) {
+        next = 0; // wrap around
+    }
+    if (next == r->iget) {
+        return -1; // full: writing would collide with unread data
+    }
+    r->buf[r->iput] = v;
+    r->iput = next;
+    return 0;
+}
+
+// Discard all buffered data by resetting both indices.
+void ringbuf_clear(ringbuf_t *r) {
+    r->iget = 0;
+    r->iput = 0;
+}
+
+// Number of free slots that can be written.
+// (Capacity is size-1; the leading "+ r->size" keeps the unsigned
+// subtraction non-negative before the modulo.)
+size_t ringbuf_num_empty(ringbuf_t *r) {
+    return (r->size + r->iget - r->iput - 1) % r->size;
+}
+
+// Number of bytes available to read.
+// (iput - iget modulo size; "+ r->size" avoids unsigned underflow.)
+size_t ringbuf_num_filled(ringbuf_t *r) {
+    return (r->size + r->iput - r->iget) % r->size;
+}
+
+// Write up to bufsize bytes, stopping early if the ring buffer fills.
+// Returns the number of bytes successfully written.
+size_t ringbuf_put_n(ringbuf_t *r, const uint8_t *buf, size_t bufsize) {
+    size_t n = 0;
+    while (n < bufsize && ringbuf_put(r, buf[n]) >= 0) {
+        n++;
+    }
+    return n;
+}
+
+// Read up to bufsize bytes into buf, stopping early if the ring buffer
+// empties. Returns how many bytes were fetched.
+size_t ringbuf_get_n(ringbuf_t *r, uint8_t *buf, size_t bufsize) {
+    size_t n = 0;
+    while (n < bufsize) {
+        int b = ringbuf_get(r);
+        if (b < 0) {
+            break; // buffer drained
+        }
+        buf[n++] = (uint8_t)b;
+    }
+    return n;
+}
+
+// Peek a big-endian 16-bit value without consuming it.
+// Returns -1 unless at least two bytes are buffered.
+int ringbuf_peek16(ringbuf_t *r) {
+    if (r->iget == r->iput) {
+        return -1; // empty
+    }
+    uint32_t second = r->iget + 1;
+    if (second == r->size) {
+        second = 0; // wrap around
+    }
+    if (second == r->iput) {
+        return -1; // only one byte available
+    }
+    return (r->buf[r->iget] << 8) | (r->buf[second]);
+}
+
+// Append a 16-bit value, big-endian. Writes nothing unless there is
+// room for both bytes; returns 0 on success, -1 otherwise.
+int ringbuf_put16(ringbuf_t *r, uint16_t v) {
+    // Compute the two successor write indices, wrapping each at the end.
+    uint32_t mid = r->iput + 1;
+    if (mid == r->size) {
+        mid = 0;
+    }
+    uint32_t end = mid + 1;
+    if (end == r->size) {
+        end = 0;
+    }
+    // Refuse if either write position would collide with the read index.
+    if (mid == r->iget || end == r->iget) {
+        return -1;
+    }
+    r->buf[r->iput] = (uint8_t)(v >> 8);
+    r->buf[mid] = (uint8_t)(v & 0xff);
+    r->iput = end;
+    return 0;
+}
diff --git a/circuitpython/py/ringbuf.h b/circuitpython/py/ringbuf.h
new file mode 100644
index 0000000..d868eff
--- /dev/null
+++ b/circuitpython/py/ringbuf.h
@@ -0,0 +1,62 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_RINGBUF_H
+#define MICROPY_INCLUDED_PY_RINGBUF_H
+
+#include "py/gc.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct _ringbuf_t {
+    uint8_t *buf;  // backing storage (set to NULL by ringbuf_free)
+    // Allocated size; capacity is one less. Don't reference this directly.
+    uint32_t size;
+    uint32_t iget; // read index
+    uint32_t iput; // write index
+} ringbuf_t;
+
+// Note that the capacity of the buffer is N-1!
+
+// For static initialization use ringbuf_init()
+bool ringbuf_init(ringbuf_t *r, uint8_t *buf, size_t capacity);
+bool ringbuf_alloc(ringbuf_t *r, size_t capacity, bool long_lived);
+void ringbuf_free(ringbuf_t *r);
+size_t ringbuf_capacity(ringbuf_t *r);
+int ringbuf_get(ringbuf_t *r);
+int ringbuf_put(ringbuf_t *r, uint8_t v);
+void ringbuf_clear(ringbuf_t *r);
+size_t ringbuf_num_empty(ringbuf_t *r);
+size_t ringbuf_num_filled(ringbuf_t *r);
+size_t ringbuf_put_n(ringbuf_t *r, const uint8_t *buf, size_t bufsize);
+size_t ringbuf_get_n(ringbuf_t *r, uint8_t *buf, size_t bufsize);
+
+// Note: big-endian. No-op if not enough room available for both bytes.
+int ringbuf_get16(ringbuf_t *r);
+int ringbuf_peek16(ringbuf_t *r);
+int ringbuf_put16(ringbuf_t *r, uint16_t v);
+
+#endif // MICROPY_INCLUDED_PY_RINGBUF_H
diff --git a/circuitpython/py/runtime.c b/circuitpython/py/runtime.c
new file mode 100644
index 0000000..f2ad872
--- /dev/null
+++ b/circuitpython/py/runtime.c
@@ -0,0 +1,1771 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2018 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/parsenum.h"
+#include "py/compile.h"
+#include "py/mperrno.h"
+#include "py/objstr.h"
+#include "py/objtuple.h"
+#include "py/objlist.h"
+#include "py/objtype.h"
+#include "py/objmodule.h"
+#include "py/objgenerator.h"
+#include "py/smallint.h"
+#include "py/runtime.h"
+#include "py/builtin.h"
+#include "py/stackctrl.h"
+#include "py/gc.h"
+
+#include "supervisor/shared/translate.h"
+
+#if MICROPY_DEBUG_VERBOSE // print debugging info
+#define DEBUG_PRINT (1)
+#define DEBUG_printf DEBUG_printf
+#define DEBUG_OP_printf(...) DEBUG_printf(__VA_ARGS__)
+#else // don't print debugging info
+#define DEBUG_printf(...) (void)0
+#define DEBUG_OP_printf(...) (void)0
+#endif
+
+// The __main__ module object: a static module whose globals dict is the
+// VM state's dict_main (initialised in mp_init below).
+const mp_obj_module_t mp_module___main__ = {
+    .base = { &mp_type_module },
+    .globals = (mp_obj_dict_t *)&MP_STATE_VM(dict_main),
+};
+
+// Initialise the MicroPython runtime: qstr pool, VM state, exception
+// singletons, the loaded-modules dict and the __main__ namespace.
+// Must be called once before any other runtime function.
+void mp_init(void) {
+    qstr_init();
+
+    // no pending exceptions to start with
+    MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
+    #if MICROPY_ENABLE_SCHEDULER
+    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+    MP_STATE_VM(sched_idx) = 0;
+    MP_STATE_VM(sched_len) = 0;
+    #endif
+
+    #if MICROPY_ENABLE_EMERGENCY_EXCEPTION_BUF
+    mp_init_emergency_exception_buf();
+    #endif
+
+    #if MICROPY_KBD_EXCEPTION
+    // initialise the exception object for raising KeyboardInterrupt
+    MP_STATE_VM(mp_kbd_exception).base.type = &mp_type_KeyboardInterrupt;
+    MP_STATE_VM(mp_kbd_exception).args = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
+    MP_STATE_VM(mp_kbd_exception).traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+    #endif
+
+    MP_STATE_VM(mp_reload_exception).base.type = &mp_type_ReloadException;
+    MP_STATE_VM(mp_reload_exception).args = (mp_obj_tuple_t *)&mp_const_empty_tuple_obj;
+    MP_STATE_VM(mp_reload_exception).traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+
+    #if MICROPY_ENABLE_COMPILER
+    // optimization disabled by default
+    MP_STATE_VM(mp_optimise_value) = 0;
+    #if MICROPY_EMIT_NATIVE
+    MP_STATE_VM(default_emit_opt) = MP_EMIT_OPT_NONE;
+    #endif
+    #endif
+
+    // init global module dict
+    mp_obj_dict_init(&MP_STATE_VM(mp_loaded_modules_dict), MICROPY_LOADED_MODULES_DICT_SIZE);
+
+    // initialise the __main__ module
+    mp_obj_dict_init(&MP_STATE_VM(dict_main), 1);
+    mp_obj_dict_store(MP_OBJ_FROM_PTR(&MP_STATE_VM(dict_main)), MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR___main__));
+
+    // locals = globals for outer module (see Objects/frameobject.c/PyFrame_New())
+    mp_locals_set(&MP_STATE_VM(dict_main));
+    mp_globals_set(&MP_STATE_VM(dict_main));
+
+    #if MICROPY_CAN_OVERRIDE_BUILTINS
+    // start with no extensions to builtins
+    MP_STATE_VM(mp_module_builtins_override_dict) = NULL;
+    #endif
+
+    #if MICROPY_PERSISTENT_CODE_TRACK_RELOC_CODE
+    MP_STATE_VM(track_reloc_code_list) = MP_OBJ_NULL;
+    #endif
+
+    #ifdef MICROPY_FSUSERMOUNT
+    // zero out the pointers to the user-mounted devices
+    memset(MP_STATE_VM(fs_user_mount) + MICROPY_FATFS_NUM_PERSISTENT, 0,
+        sizeof(MP_STATE_VM(fs_user_mount)) - MICROPY_FATFS_NUM_PERSISTENT);
+    #endif
+
+    #if MICROPY_PY_SYS_PATH_ARGV_DEFAULTS
+    mp_obj_list_init(MP_OBJ_TO_PTR(mp_sys_path), 0);
+    mp_obj_list_append(mp_sys_path, MP_OBJ_NEW_QSTR(MP_QSTR_)); // current dir (or base dir of the script)
+    #if MICROPY_MODULE_FROZEN
+    mp_obj_list_append(mp_sys_path, MP_OBJ_NEW_QSTR(MP_QSTR__dot_frozen));
+    #endif
+    mp_obj_list_init(MP_OBJ_TO_PTR(mp_sys_argv), 0);
+    #endif
+
+    #if MICROPY_PY_SYS_ATEXIT
+    MP_STATE_VM(sys_exitfunc) = mp_const_none;
+    #endif
+
+    #if MICROPY_PY_SYS_SETTRACE
+    MP_STATE_THREAD(prof_trace_callback) = MP_OBJ_NULL;
+    MP_STATE_THREAD(prof_callback_is_executing) = false;
+    MP_STATE_THREAD(current_code_state) = NULL;
+    #endif
+
+    #if MICROPY_PY_THREAD_GIL
+    mp_thread_mutex_init(&MP_STATE_VM(gil_mutex));
+    #endif
+
+    // call port specific initialization if any
+    // (fix: this macro was previously expanded twice in this function,
+    // which would run the port's init hook two times; keep the single,
+    // final invocation so all VM state is set up before the hook runs)
+    #ifdef MICROPY_PORT_INIT_FUNC
+    MICROPY_PORT_INIT_FUNC;
+    #endif
+
+    MP_THREAD_GIL_ENTER();
+}
+
+// Shut down the runtime: release the GIL and invoke the port's optional
+// deinitialization hook.
+void mp_deinit(void) {
+    MP_THREAD_GIL_EXIT();
+
+    // call port specific deinitialization if any
+    #ifdef MICROPY_PORT_DEINIT_FUNC
+    MICROPY_PORT_DEINIT_FUNC;
+    #endif
+}
+
+// Resolve a bare name: locals first (unless at module scope, where
+// locals == globals), then globals/builtins via mp_load_global.
+mp_obj_t MICROPY_WRAP_MP_LOAD_NAME(mp_load_name)(qstr qst) {
+    DEBUG_OP_printf("load name %s\n", qstr_str(qst));
+    // At the outer scope locals == globals, so go straight to the
+    // global lookup path.
+    if (mp_locals_get() == mp_globals_get()) {
+        return mp_load_global(qst);
+    }
+    mp_map_elem_t *elem = mp_map_lookup(&mp_locals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+    return (elem != NULL) ? elem->value : mp_load_global(qst);
+}
+
+// Resolve a name in the global scope: the globals dict first, then the
+// optional builtins-override dict, then the fixed builtins table.
+// Raises NameError if the name is not found anywhere.
+mp_obj_t MICROPY_WRAP_MP_LOAD_GLOBAL(mp_load_global)(qstr qst) {
+    // logic: search globals, builtins
+    DEBUG_OP_printf("load global %s\n", qstr_str(qst));
+    mp_map_elem_t *elem = mp_map_lookup(&mp_globals_get()->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+    if (elem == NULL) {
+        #if MICROPY_CAN_OVERRIDE_BUILTINS
+        if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
+            // lookup in additional dynamic table of builtins first
+            elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+            if (elem != NULL) {
+                return elem->value;
+            }
+        }
+        #endif
+        elem = mp_map_lookup((mp_map_t *)&mp_module_builtins_globals.map, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+        if (elem == NULL) {
+            // not found anywhere: message detail depends on the
+            // configured error-reporting level
+            #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+            mp_raise_msg(&mp_type_NameError, MP_ERROR_TEXT("name not defined"));
+            #else
+            mp_raise_msg_varg(&mp_type_NameError, MP_ERROR_TEXT("name '%q' is not defined"), qst);
+            #endif
+        }
+    }
+    return elem->value;
+}
+
+// Return the __build_class__ builtin, honouring any runtime override
+// installed in mp_module_builtins_override_dict.
+mp_obj_t mp_load_build_class(void) {
+    DEBUG_OP_printf("load_build_class\n");
+    #if MICROPY_CAN_OVERRIDE_BUILTINS
+    if (MP_STATE_VM(mp_module_builtins_override_dict) != NULL) {
+        // lookup in additional dynamic table of builtins first
+        mp_map_elem_t *elem = mp_map_lookup(&MP_STATE_VM(mp_module_builtins_override_dict)->map, MP_OBJ_NEW_QSTR(MP_QSTR___build_class__), MP_MAP_LOOKUP);
+        if (elem != NULL) {
+            return elem->value;
+        }
+    }
+    #endif
+    return MP_OBJ_FROM_PTR(&mp_builtin___build_class___obj);
+}
+
+// Bind obj to name qst in the current locals dict.
+void PLACE_IN_ITCM(mp_store_name)(qstr qst, mp_obj_t obj) {
+    DEBUG_OP_printf("store name %s <- %p\n", qstr_str(qst), obj);
+    mp_obj_dict_store(MP_OBJ_FROM_PTR(mp_locals_get()), MP_OBJ_NEW_QSTR(qst), obj);
+}
+
+// Remove name qst from the current locals dict (Python `del name`).
+void mp_delete_name(qstr qst) {
+    DEBUG_OP_printf("delete name %s\n", qstr_str(qst));
+    // TODO convert KeyError to NameError if qst not found
+    mp_obj_dict_delete(MP_OBJ_FROM_PTR(mp_locals_get()), MP_OBJ_NEW_QSTR(qst));
+}
+
+// Bind obj to name qst in the current globals dict.
+void PLACE_IN_ITCM(mp_store_global)(qstr qst, mp_obj_t obj) {
+    DEBUG_OP_printf("store global %s <- %p\n", qstr_str(qst), obj);
+    mp_obj_dict_store(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(qst), obj);
+}
+
+// Remove name qst from the current globals dict.
+void mp_delete_global(qstr qst) {
+    DEBUG_OP_printf("delete global %s\n", qstr_str(qst));
+    // TODO convert KeyError to NameError if qst not found
+    mp_obj_dict_delete(MP_OBJ_FROM_PTR(mp_globals_get()), MP_OBJ_NEW_QSTR(qst));
+}
+
+// Apply unary operator op to arg.
+// Fast paths: Python `not`, small ints, and str/bytes hashing; other
+// types dispatch to their unary_op slot. Raises TypeError when the
+// operator is unsupported by the type.
+mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg) {
+    DEBUG_OP_printf("unary " UINT_FMT " %q %p\n", op, mp_unary_op_method_name[op], arg);
+
+    if (op == MP_UNARY_OP_NOT) {
+        // "not x" is the negative of whether "x" is true per Python semantics
+        return mp_obj_new_bool(mp_obj_is_true(arg) == 0);
+    } else if (mp_obj_is_small_int(arg)) {
+        mp_int_t val = MP_OBJ_SMALL_INT_VALUE(arg);
+        switch (op) {
+            case MP_UNARY_OP_BOOL:
+                return mp_obj_new_bool(val != 0);
+            case MP_UNARY_OP_HASH:
+                return arg;
+            case MP_UNARY_OP_POSITIVE:
+            case MP_UNARY_OP_INT:
+                return arg;
+            case MP_UNARY_OP_NEGATIVE:
+                // check for overflow
+                if (val == MP_SMALL_INT_MIN) {
+                    return mp_obj_new_int(-val);
+                } else {
+                    return MP_OBJ_NEW_SMALL_INT(-val);
+                }
+            case MP_UNARY_OP_ABS:
+                if (val >= 0) {
+                    return arg;
+                } else if (val == MP_SMALL_INT_MIN) {
+                    // check for overflow
+                    return mp_obj_new_int(-val);
+                } else {
+                    return MP_OBJ_NEW_SMALL_INT(-val);
+                }
+            default:
+                assert(op == MP_UNARY_OP_INVERT);
+                return MP_OBJ_NEW_SMALL_INT(~val);
+        }
+    } else if (op == MP_UNARY_OP_HASH && mp_obj_is_str_or_bytes(arg)) {
+        // fast path for hashing str/bytes
+        GET_STR_HASH(arg, h);
+        if (h == 0) {
+            // hash not yet computed for this string: compute it now
+            GET_STR_DATA_LEN(arg, data, len);
+            h = qstr_compute_hash(data, len);
+        }
+        return MP_OBJ_NEW_SMALL_INT(h);
+    } else {
+        const mp_obj_type_t *type = mp_obj_get_type(arg);
+        mp_unary_op_fun_t unary_op = mp_type_get_unary_op_slot(type);
+        if (unary_op != NULL) {
+            mp_obj_t result = unary_op(op, arg);
+            if (result != MP_OBJ_NULL) {
+                return result;
+            }
+        }
+        if (op == MP_UNARY_OP_BOOL) {
+            // Type doesn't have unary_op (or didn't handle MP_UNARY_OP_BOOL),
+            // so is implicitly True as this code path is impossible to reach
+            // if arg==mp_const_none.
+            return mp_const_true;
+        }
+        // With MP_UNARY_OP_INT, mp_unary_op() becomes a fallback for mp_obj_get_int().
+        // In this case provide a more focused error message to not confuse, e.g. chr(1.0)
+        #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+        if (op == MP_UNARY_OP_INT) {
+            mp_raise_TypeError(MP_ERROR_TEXT("can't convert to int"));
+        } else {
+            mp_raise_TypeError(MP_ERROR_TEXT("unsupported type for operator"));
+        }
+        #else
+        if (op == MP_UNARY_OP_INT) {
+            mp_raise_TypeError_varg(MP_ERROR_TEXT("can't convert %q to int"), mp_obj_get_type_qstr(arg));
+        } else {
+            mp_raise_TypeError_varg(MP_ERROR_TEXT("unsupported type for %q: '%q'"),
+                mp_unary_op_method_name[op], mp_obj_get_type_qstr(arg));
+        }
+        #endif
+    }
+}
+
+// Apply binary operator op to lhs and rhs. Fast paths handle identity,
+// equality, exception matching and small-int arithmetic (with explicit
+// overflow checks per CERT INT32-C); everything else defers to the lhs
+// type's binary_op slot, with reverse-operand and containment fallbacks.
+mp_obj_t MICROPY_WRAP_MP_BINARY_OP(mp_binary_op)(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) {
+    DEBUG_OP_printf("binary " UINT_FMT " %q %p %p\n", op, mp_binary_op_method_name[op], lhs, rhs);
+
+    // TODO correctly distinguish inplace operators for mutable objects
+    // lookup logic that CPython uses for +=:
+    //   check for implemented +=
+    //   then check for implemented +
+    //   then check for implemented seq.inplace_concat
+    //   then check for implemented seq.concat
+    //   then fail
+    // note that list does not implement + or +=, so that inplace_concat is reached first for +=
+
+    // deal with is
+    if (op == MP_BINARY_OP_IS) {
+        return mp_obj_new_bool(lhs == rhs);
+    }
+
+    // deal with == and != for all types
+    if (op == MP_BINARY_OP_EQUAL || op == MP_BINARY_OP_NOT_EQUAL) {
+        // mp_obj_equal_not_equal supports a bunch of shortcuts
+        return mp_obj_equal_not_equal(op, lhs, rhs);
+    }
+
+    // deal with exception_match for all types
+    if (op == MP_BINARY_OP_EXCEPTION_MATCH) {
+        // rhs must be issubclass(rhs, BaseException)
+        if (mp_obj_is_exception_type(rhs)) {
+            if (mp_obj_exception_match(lhs, rhs)) {
+                return mp_const_true;
+            } else {
+                return mp_const_false;
+            }
+        } else if (mp_obj_is_type(rhs, &mp_type_tuple)) {
+            // except clause with a tuple of exception types: match any
+            mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(rhs);
+            for (size_t i = 0; i < tuple->len; i++) {
+                rhs = tuple->items[i];
+                if (!mp_obj_is_exception_type(rhs)) {
+                    goto unsupported_op;
+                }
+                if (mp_obj_exception_match(lhs, rhs)) {
+                    return mp_const_true;
+                }
+            }
+            return mp_const_false;
+        }
+        goto unsupported_op;
+    }
+
+    if (mp_obj_is_small_int(lhs)) {
+        mp_int_t lhs_val = MP_OBJ_SMALL_INT_VALUE(lhs);
+        if (mp_obj_is_small_int(rhs)) {
+            mp_int_t rhs_val = MP_OBJ_SMALL_INT_VALUE(rhs);
+            // This is a binary operation: lhs_val op rhs_val
+            // We need to be careful to handle overflow; see CERT INT32-C
+            // Operations that can overflow:
+            //      + result always fits in mp_int_t, then handled by SMALL_INT check
+            //      - result always fits in mp_int_t, then handled by SMALL_INT check
+            //      * checked explicitly
+            //      / if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
+            //      % if lhs=MIN and rhs=-1; result always fits in mp_int_t, then handled by SMALL_INT check
+            //      << checked explicitly
+            switch (op) {
+                case MP_BINARY_OP_OR:
+                case MP_BINARY_OP_INPLACE_OR:
+                    lhs_val |= rhs_val;
+                    break;
+                case MP_BINARY_OP_XOR:
+                case MP_BINARY_OP_INPLACE_XOR:
+                    lhs_val ^= rhs_val;
+                    break;
+                case MP_BINARY_OP_AND:
+                case MP_BINARY_OP_INPLACE_AND:
+                    lhs_val &= rhs_val;
+                    break;
+                case MP_BINARY_OP_LSHIFT:
+                case MP_BINARY_OP_INPLACE_LSHIFT: {
+                    if (rhs_val < 0) {
+                        // negative shift not allowed
+                        mp_raise_ValueError(MP_ERROR_TEXT("negative shift count"));
+                    } else if (rhs_val >= (mp_int_t)(sizeof(lhs_val) * MP_BITS_PER_BYTE)
+                               || lhs_val > (MP_SMALL_INT_MAX >> rhs_val)
+                               || lhs_val < (MP_SMALL_INT_MIN >> rhs_val)) {
+                        // left-shift will overflow, so use higher precision integer
+                        lhs = mp_obj_new_int_from_ll(lhs_val);
+                        goto generic_binary_op;
+                    } else {
+                        // use standard precision
+                        lhs_val = (mp_uint_t)lhs_val << rhs_val;
+                    }
+                    break;
+                }
+                case MP_BINARY_OP_RSHIFT:
+                case MP_BINARY_OP_INPLACE_RSHIFT:
+                    if (rhs_val < 0) {
+                        // negative shift not allowed
+                        mp_raise_ValueError(MP_ERROR_TEXT("negative shift count"));
+                    } else {
+                        // standard precision is enough for right-shift
+                        if (rhs_val >= (mp_int_t)(sizeof(lhs_val) * MP_BITS_PER_BYTE)) {
+                            // Shifting to big amounts is undefined behavior
+                            // in C and is CPU-dependent; propagate sign bit.
+                            rhs_val = sizeof(lhs_val) * MP_BITS_PER_BYTE - 1;
+                        }
+                        lhs_val >>= rhs_val;
+                    }
+                    break;
+                case MP_BINARY_OP_ADD:
+                case MP_BINARY_OP_INPLACE_ADD:
+                    lhs_val += rhs_val;
+                    break;
+                case MP_BINARY_OP_SUBTRACT:
+                case MP_BINARY_OP_INPLACE_SUBTRACT:
+                    lhs_val -= rhs_val;
+                    break;
+                case MP_BINARY_OP_MULTIPLY:
+                case MP_BINARY_OP_INPLACE_MULTIPLY: {
+
+                    // If long long type exists and is larger than mp_int_t, then
+                    // we can use the following code to perform overflow-checked multiplication.
+                    // Otherwise (eg in x64 case) we must use mp_small_int_mul_overflow.
+                    #if 0
+                    // compute result using long long precision
+                    long long res = (long long)lhs_val * (long long)rhs_val;
+                    if (res > MP_SMALL_INT_MAX || res < MP_SMALL_INT_MIN) {
+                        // result overflowed SMALL_INT, so return higher precision integer
+                        return mp_obj_new_int_from_ll(res);
+                    } else {
+                        // use standard precision
+                        lhs_val = (mp_int_t)res;
+                    }
+                    #endif
+
+                    if (mp_small_int_mul_overflow(lhs_val, rhs_val)) {
+                        // use higher precision
+                        lhs = mp_obj_new_int_from_ll(lhs_val);
+                        goto generic_binary_op;
+                    } else {
+                        // use standard precision
+                        return MP_OBJ_NEW_SMALL_INT(lhs_val * rhs_val);
+                    }
+                }
+                case MP_BINARY_OP_FLOOR_DIVIDE:
+                case MP_BINARY_OP_INPLACE_FLOOR_DIVIDE:
+                    if (rhs_val == 0) {
+                        goto zero_division;
+                    }
+                    lhs_val = mp_small_int_floor_divide(lhs_val, rhs_val);
+                    break;
+
+                #if MICROPY_PY_BUILTINS_FLOAT
+                case MP_BINARY_OP_TRUE_DIVIDE:
+                case MP_BINARY_OP_INPLACE_TRUE_DIVIDE:
+                    if (rhs_val == 0) {
+                        goto zero_division;
+                    }
+                    return mp_obj_new_float((mp_float_t)lhs_val / (mp_float_t)rhs_val);
+                #endif
+
+                case MP_BINARY_OP_MODULO:
+                case MP_BINARY_OP_INPLACE_MODULO: {
+                    if (rhs_val == 0) {
+                        goto zero_division;
+                    }
+                    lhs_val = mp_small_int_modulo(lhs_val, rhs_val);
+                    break;
+                }
+
+                case MP_BINARY_OP_POWER:
+                case MP_BINARY_OP_INPLACE_POWER:
+                    if (rhs_val < 0) {
+                        #if MICROPY_PY_BUILTINS_FLOAT
+                        return mp_obj_float_binary_op(op, (mp_float_t)lhs_val, rhs);
+                        #else
+                        mp_raise_ValueError(MP_ERROR_TEXT("negative power with no float support"));
+                        #endif
+                    } else {
+                        // binary exponentiation by squaring, with overflow checks
+                        mp_int_t ans = 1;
+                        while (rhs_val > 0) {
+                            if (rhs_val & 1) {
+                                if (mp_small_int_mul_overflow(ans, lhs_val)) {
+                                    goto power_overflow;
+                                }
+                                ans *= lhs_val;
+                            }
+                            if (rhs_val == 1) {
+                                break;
+                            }
+                            rhs_val /= 2;
+                            if (mp_small_int_mul_overflow(lhs_val, lhs_val)) {
+                                goto power_overflow;
+                            }
+                            lhs_val *= lhs_val;
+                        }
+                        lhs_val = ans;
+                    }
+                    break;
+
+                power_overflow:
+                    // use higher precision
+                    lhs = mp_obj_new_int_from_ll(MP_OBJ_SMALL_INT_VALUE(lhs));
+                    goto generic_binary_op;
+
+                case MP_BINARY_OP_DIVMOD: {
+                    if (rhs_val == 0) {
+                        goto zero_division;
+                    }
+                    // to reduce stack usage we don't pass a temp array of the 2 items
+                    mp_obj_tuple_t *tuple = MP_OBJ_TO_PTR(mp_obj_new_tuple(2, NULL));
+                    tuple->items[0] = MP_OBJ_NEW_SMALL_INT(mp_small_int_floor_divide(lhs_val, rhs_val));
+                    tuple->items[1] = MP_OBJ_NEW_SMALL_INT(mp_small_int_modulo(lhs_val, rhs_val));
+                    return MP_OBJ_FROM_PTR(tuple);
+                }
+
+                case MP_BINARY_OP_LESS:
+                    return mp_obj_new_bool(lhs_val < rhs_val);
+                case MP_BINARY_OP_MORE:
+                    return mp_obj_new_bool(lhs_val > rhs_val);
+                case MP_BINARY_OP_LESS_EQUAL:
+                    return mp_obj_new_bool(lhs_val <= rhs_val);
+                case MP_BINARY_OP_MORE_EQUAL:
+                    return mp_obj_new_bool(lhs_val >= rhs_val);
+
+                default:
+                    goto unsupported_op;
+            }
+            // This is an inlined version of mp_obj_new_int, for speed
+            if (MP_SMALL_INT_FITS(lhs_val)) {
+                return MP_OBJ_NEW_SMALL_INT(lhs_val);
+            } else {
+                return mp_obj_new_int_from_ll(lhs_val);
+            }
+        #if MICROPY_PY_BUILTINS_FLOAT
+        } else if (mp_obj_is_float(rhs)) {
+            mp_obj_t res = mp_obj_float_binary_op(op, (mp_float_t)lhs_val, rhs);
+            if (res == MP_OBJ_NULL) {
+                goto unsupported_op;
+            } else {
+                return res;
+            }
+        #endif
+        #if MICROPY_PY_BUILTINS_COMPLEX
+        } else if (mp_obj_is_type(rhs, &mp_type_complex)) {
+            mp_obj_t res = mp_obj_complex_binary_op(op, (mp_float_t)lhs_val, 0, rhs);
+            if (res == MP_OBJ_NULL) {
+                goto unsupported_op;
+            } else {
+                return res;
+            }
+        #endif
+        }
+    }
+
+    // Convert MP_BINARY_OP_IN to MP_BINARY_OP_CONTAINS with swapped args.
+    if (op == MP_BINARY_OP_IN) {
+        op = MP_BINARY_OP_CONTAINS;
+        mp_obj_t temp = lhs;
+        lhs = rhs;
+        rhs = temp;
+    }
+
+    // generic binary_op supplied by type
+    const mp_obj_type_t *type;
+generic_binary_op:
+    type = mp_obj_get_type(lhs);
+    mp_binary_op_fun_t binary_op = mp_type_get_binary_op_slot(type);
+    if (binary_op != NULL) {
+        mp_obj_t result = binary_op(op, lhs, rhs);
+        if (result != MP_OBJ_NULL) {
+            return result;
+        }
+    }
+
+    #if MICROPY_PY_REVERSE_SPECIAL_METHODS
+    if (op >= MP_BINARY_OP_OR && op <= MP_BINARY_OP_POWER) {
+        // try the reflected operation with swapped operands
+        mp_obj_t t = rhs;
+        rhs = lhs;
+        lhs = t;
+        op += MP_BINARY_OP_REVERSE_OR - MP_BINARY_OP_OR;
+        goto generic_binary_op;
+    } else if (op >= MP_BINARY_OP_REVERSE_OR) {
+        // Convert __rop__ back to __op__ for error message
+        mp_obj_t t = rhs;
+        rhs = lhs;
+        lhs = t;
+        op -= MP_BINARY_OP_REVERSE_OR - MP_BINARY_OP_OR;
+    }
+    #endif
+
+    if (op == MP_BINARY_OP_CONTAINS) {
+        // If type didn't support containment then explicitly walk the iterator.
+        // mp_getiter will raise the appropriate exception if lhs is not iterable.
+        mp_obj_iter_buf_t iter_buf;
+        mp_obj_t iter = mp_getiter(lhs, &iter_buf);
+        mp_obj_t next;
+        while ((next = mp_iternext(iter)) != MP_OBJ_STOP_ITERATION) {
+            if (mp_obj_equal(next, rhs)) {
+                return mp_const_true;
+            }
+        }
+        return mp_const_false;
+    }
+
+unsupported_op:
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    mp_raise_TypeError(MP_ERROR_TEXT("unsupported type for operator"));
+    #else
+    mp_raise_TypeError_varg(
+        MP_ERROR_TEXT("unsupported types for %q: '%q', '%q'"),
+        mp_binary_op_method_name[op], mp_obj_get_type_qstr(lhs), mp_obj_get_type_qstr(rhs));
+    #endif
+
+zero_division:
+    mp_raise_msg(&mp_type_ZeroDivisionError, MP_ERROR_TEXT("division by zero"));
+}
+
+// Call fun with no arguments.
+mp_obj_t mp_call_function_0(mp_obj_t fun) {
+    return mp_call_function_n_kw(fun, 0, 0, NULL);
+}
+
+// Call fun with a single positional argument.
+mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg) {
+    return mp_call_function_n_kw(fun, 1, 0, &arg);
+}
+
+// Call fun with two positional arguments.
+mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2) {
+    mp_obj_t args[2] = { arg1, arg2 };
+    return mp_call_function_n_kw(fun, 2, 0, args);
+}
+
+// args contains, eg: arg0 arg1 key0 value0 key1 value1
+// Dispatch a call on fun_in via its type's call slot; raises TypeError
+// if the object is not callable.
+mp_obj_t mp_call_function_n_kw(mp_obj_t fun_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    // TODO improve this: fun object can specify its type and we parse here the arguments,
+    // passing to the function arrays of fixed and keyword arguments
+
+    DEBUG_OP_printf("calling function %p(n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", fun_in, n_args, n_kw, args);
+
+    // get the type
+    const mp_obj_type_t *type = mp_obj_get_type(fun_in);
+
+    // do the call
+    mp_call_fun_t call = mp_type_get_call_slot(type);
+    if (call) {
+        return call(fun_in, n_args, n_kw, args);
+    }
+
+    #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+    mp_raise_TypeError(MP_ERROR_TEXT("object not callable"));
+    #else
+    mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' object is not callable"), mp_obj_get_type_qstr(fun_in));
+    #endif
+}
+
+// args contains: fun self/NULL arg(0) ... arg(n_args-2) arg(n_args-1) kw_key(0) kw_val(0) ... kw_key(n_kw-1) kw_val(n_kw-1)
+// if n_args==0 and n_kw==0 then there are only fun and self/NULL
+mp_obj_t mp_call_method_n_kw(size_t n_args, size_t n_kw, const mp_obj_t *args) {
+    DEBUG_OP_printf("call method (fun=%p, self=%p, n_args=" UINT_FMT ", n_kw=" UINT_FMT ", args=%p)\n", args[0], args[1], n_args, n_kw, args);
+    if (args[1] == MP_OBJ_NULL) {
+        // unbound: skip over the empty self slot entirely
+        return mp_call_function_n_kw(args[0], n_args, n_kw, args + 2);
+    }
+    // bound: include self as the leading positional argument
+    return mp_call_function_n_kw(args[0], n_args + 1, n_kw, args + 1);
+}
+
+// This function only needs to be exposed externally when in stackless mode.
+#if !MICROPY_STACKLESS
+STATIC
+#endif
+// Prepare a flat argument array for a call made with *args and/or **kwargs.
+// n_args_n_kw packs two counts: low byte = number of fixed positional args,
+// next byte = number of fixed keyword (key, value) pairs.  args points at:
+// fun, [self if have_self,] fixed positional args, fixed kw pairs, then the
+// *args sequence (or MP_OBJ_NULL) and the **kwargs mapping (or MP_OBJ_NULL).
+// On return out_args holds the callable, a freshly allocated argument array
+// (which the caller must release with mp_nonlocal_free using out_args->n_alloc)
+// and the final positional/keyword counts.
+void PLACE_IN_ITCM(mp_call_prepare_args_n_kw_var)(bool have_self, size_t n_args_n_kw, const mp_obj_t *args, mp_call_args_t *out_args) {
+ mp_obj_t fun = *args++;
+ mp_obj_t self = MP_OBJ_NULL;
+ if (have_self) {
+ self = *args++; // may be MP_OBJ_NULL
+ }
+ // unpack the fixed positional and keyword counts from the packed word
+ uint n_args = n_args_n_kw & 0xff;
+ uint n_kw = (n_args_n_kw >> 8) & 0xff;
+ mp_obj_t pos_seq = args[n_args + 2 * n_kw]; // may be MP_OBJ_NULL
+ mp_obj_t kw_dict = args[n_args + 2 * n_kw + 1]; // may be MP_OBJ_NULL
+
+ DEBUG_OP_printf("call method var (fun=%p, self=%p, n_args=%u, n_kw=%u, args=%p, seq=%p, dict=%p)\n", fun, self, n_args, n_kw, args, pos_seq, kw_dict);
+
+ // We need to create the following array of objects:
+ // args[0 .. n_args] unpacked(pos_seq) args[n_args .. n_args + 2 * n_kw] unpacked(kw_dict)
+ // TODO: optimize one day to avoid constructing new arg array? Will be hard.
+
+ // The new args array
+ mp_obj_t *args2;
+ uint args2_alloc;
+ uint args2_len = 0;
+
+ // Try to get a hint for the size of the kw_dict
+ uint kw_dict_len = 0;
+ if (kw_dict != MP_OBJ_NULL && mp_obj_is_type(kw_dict, &mp_type_dict)) {
+ kw_dict_len = mp_obj_dict_len(kw_dict);
+ }
+
+ // Extract the pos_seq sequence to the new args array.
+ // Note that it can be arbitrary iterator.
+ if (pos_seq == MP_OBJ_NULL) {
+ // no sequence
+
+ // allocate memory for the new array of args
+ args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len);
+ args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));
+
+ // copy the self
+ if (self != MP_OBJ_NULL) {
+ args2[args2_len++] = self;
+ }
+
+ // copy the fixed pos args
+ mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
+ args2_len += n_args;
+
+ } else if (mp_obj_is_type(pos_seq, &mp_type_tuple) || mp_obj_is_type(pos_seq, &mp_type_list)) {
+ // optimise the case of a tuple and list
+
+ // get the items
+ size_t len;
+ mp_obj_t *items;
+ mp_obj_get_array(pos_seq, &len, &items);
+
+ // allocate memory for the new array of args
+ args2_alloc = 1 + n_args + len + 2 * (n_kw + kw_dict_len);
+ args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));
+
+ // copy the self
+ if (self != MP_OBJ_NULL) {
+ args2[args2_len++] = self;
+ }
+
+ // copy the fixed and variable position args
+ mp_seq_cat(args2 + args2_len, args, n_args, items, len, mp_obj_t);
+ args2_len += n_args + len;
+
+ } else {
+ // generic iterator
+
+ // allocate memory for the new array of args
+ // (the +3 is headroom for items the iterator will yield; grown below if insufficient)
+ args2_alloc = 1 + n_args + 2 * (n_kw + kw_dict_len) + 3;
+ args2 = mp_nonlocal_alloc(args2_alloc * sizeof(mp_obj_t));
+
+ // copy the self
+ if (self != MP_OBJ_NULL) {
+ args2[args2_len++] = self;
+ }
+
+ // copy the fixed position args
+ mp_seq_copy(args2 + args2_len, args, n_args, mp_obj_t);
+ args2_len += n_args;
+
+ // extract the variable position args from the iterator
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(pos_seq, &iter_buf);
+ mp_obj_t item;
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ // grow the array geometrically if the initial guess was too small
+ if (args2_len >= args2_alloc) {
+ args2 = mp_nonlocal_realloc(args2, args2_alloc * sizeof(mp_obj_t), args2_alloc * 2 * sizeof(mp_obj_t));
+ args2_alloc *= 2;
+ }
+ args2[args2_len++] = item;
+ }
+ }
+
+ // The size of the args2 array now is the number of positional args.
+ uint pos_args_len = args2_len;
+
+ // Copy the fixed kw args.
+ mp_seq_copy(args2 + args2_len, args + n_args, 2 * n_kw, mp_obj_t);
+ args2_len += 2 * n_kw;
+
+ // Extract (key,value) pairs from kw_dict dictionary and append to args2.
+ // Note that it can be arbitrary iterator.
+ if (kw_dict == MP_OBJ_NULL) {
+ // pass
+ } else if (mp_obj_is_type(kw_dict, &mp_type_dict)) {
+ // dictionary
+ mp_map_t *map = mp_obj_dict_get_map(kw_dict);
+ assert(args2_len + 2 * map->used <= args2_alloc); // should have enough, since kw_dict_len is in this case hinted correctly above
+ for (size_t i = 0; i < map->alloc; i++) {
+ if (mp_map_slot_is_filled(map, i)) {
+ // the key must be a qstr, so intern it if it's a string
+ mp_obj_t key = map->table[i].key;
+ if (!mp_obj_is_qstr(key)) {
+ key = mp_obj_str_intern_checked(key);
+ }
+ args2[args2_len++] = key;
+ args2[args2_len++] = map->table[i].value;
+ }
+ }
+ } else {
+ // generic mapping:
+ // - call keys() to get an iterable of all keys in the mapping
+ // - call __getitem__ for each key to get the corresponding value
+
+ // get the keys iterable
+ mp_obj_t dest[3];
+ mp_load_method(kw_dict, MP_QSTR_keys, dest);
+ mp_obj_t iterable = mp_getiter(mp_call_method_n_kw(0, 0, dest), NULL);
+
+ mp_obj_t key;
+ while ((key = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ // expand size of args array if needed (+1 because a key/value pair is stored)
+ if (args2_len + 1 >= args2_alloc) {
+ uint new_alloc = args2_alloc * 2;
+ if (new_alloc < 4) {
+ new_alloc = 4;
+ }
+ args2 = mp_nonlocal_realloc(args2, args2_alloc * sizeof(mp_obj_t), new_alloc * sizeof(mp_obj_t));
+ args2_alloc = new_alloc;
+ }
+
+ // the key must be a qstr, so intern it if it's a string
+ if (!mp_obj_is_qstr(key)) {
+ key = mp_obj_str_intern_checked(key);
+ }
+
+ // get the value corresponding to the key
+ mp_load_method(kw_dict, MP_QSTR___getitem__, dest);
+ dest[2] = key;
+ mp_obj_t value = mp_call_method_n_kw(1, 0, dest);
+
+ // store the key/value pair in the argument array
+ args2[args2_len++] = key;
+ args2[args2_len++] = value;
+ }
+ }
+
+ // hand ownership of args2 to the caller via out_args
+ out_args->fun = fun;
+ out_args->args = args2;
+ out_args->n_args = pos_args_len;
+ out_args->n_kw = (args2_len - pos_args_len) / 2;
+ out_args->n_alloc = args2_alloc;
+}
+
+// Call a callable using the *args/**kwargs calling convention: build the
+// flat argument array, perform the call, then release the temporary array.
+mp_obj_t mp_call_method_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_obj_t *args) {
+    mp_call_args_t prepared;
+    mp_call_prepare_args_n_kw_var(have_self, n_args_n_kw, args, &prepared);
+
+    // invoke with the flattened arguments, then free the scratch array
+    mp_obj_t result = mp_call_function_n_kw(prepared.fun, prepared.n_args, prepared.n_kw, prepared.args);
+    mp_nonlocal_free(prepared.args, prepared.n_alloc * sizeof(mp_obj_t));
+
+    return result;
+}
+
+// unpacked items are stored in reverse order into the array pointed to by items
+// (reversed presumably so the VM can assign targets left-to-right off its
+// stack — confirm against the bytecode unpack opcodes).
+// Raises ValueError if seq_in yields fewer or more than num items.
+void mp_unpack_sequence(mp_obj_t seq_in, size_t num, mp_obj_t *items) {
+ size_t seq_len;
+ if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) {
+ // fast path: tuples and lists expose their item storage directly
+ mp_obj_t *seq_items;
+ mp_obj_get_array(seq_in, &seq_len, &seq_items);
+ if (seq_len < num) {
+ goto too_short;
+ } else if (seq_len > num) {
+ goto too_long;
+ }
+ for (size_t i = 0; i < num; i++) {
+ items[i] = seq_items[num - 1 - i];
+ }
+ } else {
+ // generic iterable: pull exactly num items, then check it is exhausted
+ mp_obj_iter_buf_t iter_buf;
+ mp_obj_t iterable = mp_getiter(seq_in, &iter_buf);
+
+ for (seq_len = 0; seq_len < num; seq_len++) {
+ mp_obj_t el = mp_iternext(iterable);
+ if (el == MP_OBJ_STOP_ITERATION) {
+ goto too_short;
+ }
+ items[num - 1 - seq_len] = el;
+ }
+ if (mp_iternext(iterable) != MP_OBJ_STOP_ITERATION) {
+ goto too_long;
+ }
+ }
+ return;
+
+too_short:
+ // the raise helpers do not return, so this cannot fall through to too_long
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
+ #else
+ mp_raise_ValueError_varg(MP_ERROR_TEXT("need more than %d values to unpack"),
+ (int)seq_len);
+ #endif
+too_long:
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
+ #else
+ mp_raise_ValueError_varg(MP_ERROR_TEXT("too many values to unpack (expected %d)"),
+ (int)num);
+ #endif
+}
+
+// unpacked items are stored in reverse order into the array pointed to by items
+// num_in packs the counts: low byte = number of targets before the starred
+// target, next byte = number after it.  items receives, in order: the "right"
+// targets (reversed), the list bound to the starred target, then the "left"
+// targets (reversed).  Raises ValueError if too few items are available.
+void mp_unpack_ex(mp_obj_t seq_in, size_t num_in, mp_obj_t *items) {
+ size_t num_left = num_in & 0xff;
+ size_t num_right = (num_in >> 8) & 0xff;
+ DEBUG_OP_printf("unpack ex " UINT_FMT " " UINT_FMT "\n", num_left, num_right);
+ size_t seq_len;
+ if (mp_obj_is_type(seq_in, &mp_type_tuple) || mp_obj_is_type(seq_in, &mp_type_list)) {
+ // Make the seq variable volatile so the compiler keeps a reference to it,
+ // since if it's a tuple then seq_items points to the interior of the GC cell
+ // and mp_obj_new_list may trigger a GC which doesn't trace this and reclaims seq.
+ volatile mp_obj_t seq = seq_in;
+ mp_obj_t *seq_items;
+ mp_obj_get_array(seq, &seq_len, &seq_items);
+ if (seq_len < num_left + num_right) {
+ goto too_short;
+ }
+ for (size_t i = 0; i < num_right; i++) {
+ items[i] = seq_items[seq_len - 1 - i];
+ }
+ // the middle items become the list bound to the starred target
+ items[num_right] = mp_obj_new_list(seq_len - num_left - num_right, seq_items + num_left);
+ for (size_t i = 0; i < num_left; i++) {
+ items[num_right + 1 + i] = seq_items[num_left - 1 - i];
+ }
+ seq = MP_OBJ_NULL;
+ } else {
+ // Generic iterable; this gets a bit messy: we unpack known left length to the
+ // items destination array, then the rest to a dynamically created list. Once the
+ // iterable is exhausted, we take from this list for the right part of the items.
+ // TODO Improve to waste less memory in the dynamically created list.
+ mp_obj_t iterable = mp_getiter(seq_in, NULL);
+ mp_obj_t item;
+ for (seq_len = 0; seq_len < num_left; seq_len++) {
+ item = mp_iternext(iterable);
+ if (item == MP_OBJ_STOP_ITERATION) {
+ goto too_short;
+ }
+ items[num_left + num_right + 1 - 1 - seq_len] = item;
+ }
+ mp_obj_list_t *rest = MP_OBJ_TO_PTR(mp_obj_new_list(0, NULL));
+ while ((item = mp_iternext(iterable)) != MP_OBJ_STOP_ITERATION) {
+ mp_obj_list_append(MP_OBJ_FROM_PTR(rest), item);
+ }
+ if (rest->len < num_right) {
+ goto too_short;
+ }
+ // move the trailing num_right items out of the rest list into items
+ items[num_right] = MP_OBJ_FROM_PTR(rest);
+ for (size_t i = 0; i < num_right; i++) {
+ items[num_right - 1 - i] = rest->items[rest->len - num_right + i];
+ }
+ mp_obj_list_set_len(MP_OBJ_FROM_PTR(rest), rest->len - num_right);
+ }
+ return;
+
+too_short:
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_ValueError(MP_ERROR_TEXT("wrong number of values to unpack"));
+ #else
+ mp_raise_ValueError_varg(MP_ERROR_TEXT("need more than %d values to unpack"),
+ (int)seq_len);
+ #endif
+}
+
+// Load attribute attr from base, raising AttributeError if it is missing.
+// A method lookup result is converted into a bound-method object.
+mp_obj_t mp_load_attr(mp_obj_t base, qstr attr) {
+    DEBUG_OP_printf("load attr %p.%s\n", base, qstr_str(attr));
+    // delegate to load_method, which handles both attributes and methods
+    mp_obj_t member[2];
+    mp_load_method(base, attr, member);
+    if (member[1] != MP_OBJ_NULL) {
+        // a method was found: bind it to the instance
+        return mp_obj_new_bound_meth(member[0], member[1]);
+    }
+    // plain attribute: return it directly
+    return member[0];
+}
+
+#if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+
+// The following "checked fun" type is local to the mp_convert_member_lookup
+// function, and serves to check that the first argument to a builtin function
+// has the correct type.
+
+typedef struct _mp_obj_checked_fun_t {
+ mp_obj_base_t base;
+ const mp_obj_type_t *type; // required type of the first (self) argument
+ mp_obj_t fun; // the wrapped builtin function
+} mp_obj_checked_fun_t;
+
+// Call handler: verify that args[0] has the expected type, then delegate to
+// the wrapped function.  Raises TypeError on a mismatch.
+STATIC mp_obj_t checked_fun_call(mp_obj_t self_in, size_t n_args, size_t n_kw, const mp_obj_t *args) {
+ mp_obj_checked_fun_t *self = MP_OBJ_TO_PTR(self_in);
+ if (n_args > 0) {
+ const mp_obj_type_t *arg0_type = mp_obj_get_type(args[0]);
+ if (arg0_type != self->type) {
+ if (MICROPY_ERROR_REPORTING != MICROPY_ERROR_REPORTING_DETAILED) {
+ mp_raise_TypeError(MP_ERROR_TEXT("argument has wrong type"));
+ } else {
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("argument should be a '%q' not a '%q'"),
+ self->type->name, arg0_type->name);
+ }
+ }
+ }
+ return mp_call_function_n_kw(self->fun, n_args, n_kw, args);
+}
+
+STATIC const mp_obj_type_t mp_type_checked_fun = {
+ { &mp_type_type },
+ .flags = MP_TYPE_FLAG_BINDS_SELF | MP_TYPE_FLAG_EXTENDED,
+ .name = MP_QSTR_function,
+ MP_TYPE_EXTENDED_FIELDS(
+ .call = checked_fun_call,
+ )
+};
+
+// Wrap fun in a checked-fun object that enforces the type of its first argument.
+STATIC mp_obj_t mp_obj_new_checked_fun(const mp_obj_type_t *type, mp_obj_t fun) {
+ mp_obj_checked_fun_t *o = m_new_obj(mp_obj_checked_fun_t);
+ o->base.type = &mp_type_checked_fun;
+ o->type = type;
+ o->fun = fun;
+ return MP_OBJ_FROM_PTR(o);
+}
+
+#endif // MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+
+// Given a member that was extracted from an instance, convert it correctly
+// and put the result in the dest[] array for a possible method call.
+// Conversion means dealing with static/class methods, callables, and values.
+// dest[0] receives the attribute value (or function); dest[1] is additionally
+// set to the self object when the result is a bound method.
+// see http://docs.python.org/3/howto/descriptor.html
+// and also https://mail.python.org/pipermail/python-dev/2015-March/138950.html
+void mp_convert_member_lookup(mp_obj_t self, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest) {
+ if (mp_obj_is_obj(member)) {
+ const mp_obj_type_t *m_type = ((mp_obj_base_t *)MP_OBJ_TO_PTR(member))->type;
+ if (m_type->flags & MP_TYPE_FLAG_BINDS_SELF) {
+ // `member` is a function that binds self as its first argument.
+ if (m_type->flags & MP_TYPE_FLAG_BUILTIN_FUN) {
+ // `member` is a built-in function, which has special behaviour.
+ if (mp_obj_is_instance_type(type)) {
+ // Built-in functions on user types always behave like a staticmethod.
+ dest[0] = member;
+ }
+ #if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+ else if (self == MP_OBJ_NULL && type != &mp_type_object) {
+ // `member` is a built-in method without a first argument, so wrap
+ // it in a type checker that will check self when it's supplied.
+ // Note that object will do its own checking so shouldn't be wrapped.
+ dest[0] = mp_obj_new_checked_fun(type, member);
+ }
+ #endif
+ else {
+ // Return a (built-in) bound method, with self being this object.
+ dest[0] = member;
+ dest[1] = self;
+ }
+ } else {
+ // Return a bound method, with self being this object.
+ dest[0] = member;
+ dest[1] = self;
+ }
+ } else if (m_type == &mp_type_staticmethod) {
+ // `member` is a staticmethod, return the function that it wraps.
+ dest[0] = ((mp_obj_static_class_method_t *)MP_OBJ_TO_PTR(member))->fun;
+ } else if (m_type == &mp_type_classmethod) {
+ // `member` is a classmethod, return a bound method with self being the type of
+ // this object. This type should be the type of the original instance, not the
+ // base type (which is what is passed in the `type` argument to this function).
+ if (self != MP_OBJ_NULL) {
+ type = mp_obj_get_type(self);
+ }
+ dest[0] = ((mp_obj_static_class_method_t *)MP_OBJ_TO_PTR(member))->fun;
+ dest[1] = MP_OBJ_FROM_PTR(type);
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ // If self is MP_OBJ_NULL, we looking at the class itself, not an instance.
+ } else if (mp_obj_is_type(member, &mp_type_property) && mp_obj_is_native_type(type) && self != MP_OBJ_NULL) {
+ // object member is a property; delegate the load to the property
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below
+ // in a __get__ method of the property object, and then it would
+ // be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ size_t n_proxy;
+ const mp_obj_t *proxy = mp_obj_property_get(member, &n_proxy);
+ if (proxy[0] == mp_const_none) {
+ mp_raise_AttributeError(MP_ERROR_TEXT("unreadable attribute"));
+ } else {
+ // call the property's getter with self as the only argument
+ dest[0] = mp_call_function_n_kw(proxy[0], 1, 0, &self);
+ }
+ #endif
+ } else {
+ // `member` is a value, so just return that value.
+ dest[0] = member;
+ }
+ } else {
+ // `member` is a value, so just return that value.
+ dest[0] = member;
+ }
+}
+
+// no attribute found, returns: dest[0] == MP_OBJ_NULL, dest[1] == MP_OBJ_NULL
+// normal attribute found, returns: dest[0] == <attribute>, dest[1] == MP_OBJ_NULL
+// method attribute found, returns: dest[0] == <method>, dest[1] == <self>
+// This function does not itself raise for a missing attribute; callers
+// inspect dest[0] to detect that case.
+void mp_load_method_maybe(mp_obj_t obj, qstr attr, mp_obj_t *dest) {
+ // clear output to indicate no attribute/method found yet
+ dest[0] = MP_OBJ_NULL;
+ dest[1] = MP_OBJ_NULL;
+
+ // Note: the specific case of obj being an instance type is fast-path'ed in the VM
+ // for the MP_BC_LOAD_ATTR opcode. Instance types handle type->attr and look up directly
+ // in their member's map.
+
+ // get the type
+ const mp_obj_type_t *type = mp_obj_get_type(obj);
+
+ // look for built-in names
+ #if MICROPY_CPYTHON_COMPAT
+ if (attr == MP_QSTR___class__) {
+ // a.__class__ is equivalent to type(a)
+ dest[0] = MP_OBJ_FROM_PTR(type);
+ return;
+ }
+ #endif
+
+ if (attr == MP_QSTR___next__ && mp_type_get_iternext_slot(type) != NULL) {
+ // obj has a native iternext slot: expose __next__ as the builtin next()
+ // bound to obj
+ dest[0] = MP_OBJ_FROM_PTR(&mp_builtin_next_obj);
+ dest[1] = obj;
+ return;
+ }
+ mp_attr_fun_t attr_fun = mp_type_get_attr_slot(type);
+ if (attr_fun != NULL) {
+ // this type can do its own load, so call it
+ attr_fun(obj, attr, dest);
+
+ // If type->attr has set dest[1] = MP_OBJ_SENTINEL, we should proceed
+ // with lookups below (i.e. in locals_dict). If not, return right away.
+ if (dest[1] != MP_OBJ_SENTINEL) {
+ return;
+ }
+ // Clear the fail flag set by type->attr so it's like it never ran.
+ dest[1] = MP_OBJ_NULL;
+ }
+ if (type->locals_dict != NULL) {
+ // generic method lookup
+ // this is a lookup in the object (ie not class or type)
+ assert(type->locals_dict->base.type == &mp_type_dict); // MicroPython restriction, for now
+ mp_map_t *locals_map = &type->locals_dict->map;
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ if (elem != NULL) {
+ mp_convert_member_lookup(obj, type, elem->value, dest);
+ }
+ return;
+ }
+}
+
+// As mp_load_method_maybe, but raises AttributeError when the attribute or
+// method does not exist.
+void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest) {
+ DEBUG_OP_printf("load method %p.%s\n", base, qstr_str(attr));
+
+ mp_load_method_maybe(base, attr, dest);
+
+ if (dest[0] == MP_OBJ_NULL) {
+ // no attribute/method called attr
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_AttributeError(MP_ERROR_TEXT("no such attribute"));
+ #else
+ // following CPython, we give a more detailed error message for type objects
+ if (mp_obj_is_type(base, &mp_type_type)) {
+ mp_raise_msg_varg(&mp_type_AttributeError,
+ MP_ERROR_TEXT("type object '%q' has no attribute '%q'"),
+ ((mp_obj_type_t *)MP_OBJ_TO_PTR(base))->name, attr);
+ } else {
+ mp_raise_msg_varg(&mp_type_AttributeError,
+ MP_ERROR_TEXT("'%s' object has no attribute '%q'"),
+ mp_obj_get_type_str(base), attr);
+ }
+ #endif
+ }
+}
+
+// Acts like mp_load_method_maybe but catches AttributeError, and all other exceptions if requested
+// (catch_all_exc = true swallows any exception raised during the lookup).
+void mp_load_method_protected(mp_obj_t obj, qstr attr, mp_obj_t *dest, bool catch_all_exc) {
+ nlr_buf_t nlr;
+ if (nlr_push(&nlr) == 0) {
+ mp_load_method_maybe(obj, attr, dest);
+ nlr_pop();
+ } else {
+ // an exception was raised during the lookup; re-raise unless it's one
+ // we were asked to catch
+ if (!catch_all_exc
+ && !mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type),
+ MP_OBJ_FROM_PTR(&mp_type_AttributeError))) {
+ // Re-raise the exception
+ nlr_raise(MP_OBJ_FROM_PTR(nlr.ret_val));
+ }
+ }
+}
+
+// Store value into base.attr; a value of MP_OBJ_NULL requests deletion of the
+// attribute.  Raises AttributeError if the store/delete cannot be performed.
+void mp_store_attr(mp_obj_t base, qstr attr, mp_obj_t value) {
+ DEBUG_OP_printf("store attr %p.%s <- %p\n", base, qstr_str(attr), value);
+ const mp_obj_type_t *type = mp_obj_get_type(base);
+ mp_attr_fun_t attr_fun = mp_type_get_attr_slot(type);
+ if (attr_fun != NULL) {
+ // dest[0] == MP_OBJ_SENTINEL signals a store (of dest[1]) to the attr handler
+ mp_obj_t dest[2] = {MP_OBJ_SENTINEL, value};
+ attr_fun(base, attr, dest);
+ if (dest[0] == MP_OBJ_NULL) {
+ // success
+ return;
+ }
+ #if MICROPY_PY_BUILTINS_PROPERTY
+ } else if (type->locals_dict != NULL) {
+ // generic method lookup
+ // this is a lookup in the object (ie not class or type)
+ assert(type->locals_dict->base.type == &mp_type_dict); // Micro Python restriction, for now
+ mp_map_t *locals_map = &type->locals_dict->map;
+ mp_map_elem_t *elem = mp_map_lookup(locals_map, MP_OBJ_NEW_QSTR(attr), MP_MAP_LOOKUP);
+ // If base is MP_OBJ_NULL, we looking at the class itself, not an instance.
+ if (elem != NULL && mp_obj_is_type(elem->value, &mp_type_property) && base != MP_OBJ_NULL) {
+ // attribute exists and is a property; delegate the store/delete
+ // Note: This is an optimisation for code size and execution time.
+ // The proper way to do it is have the functionality just below in
+ // a __set__/__delete__ method of the property object, and then it
+ // would be called by the descriptor code down below. But that way
+ // requires overhead for the nested mp_call's and overhead for
+ // the code.
+ size_t n_proxy;
+ const mp_obj_t *proxy = mp_obj_property_get(elem->value, &n_proxy);
+ mp_obj_t dest[2] = {base, value};
+ if (value == MP_OBJ_NULL) {
+ // delete attribute
+ // proxy[2] is the property's deleter, if present and not None
+ if (n_proxy == 3 && proxy[2] != mp_const_none) {
+ mp_call_function_n_kw(proxy[2], 1, 0, dest);
+ return;
+ }
+ } else if (n_proxy > 1 && proxy[1] != mp_const_none) {
+ // proxy[1] is the property's setter: call it as setter(base, value)
+ mp_call_function_n_kw(proxy[1], 2, 0, dest);
+ return;
+ }
+ }
+ #endif
+ }
+ // nothing handled the store: report the failure
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_AttributeError(MP_ERROR_TEXT("no such attribute"));
+ #else
+ mp_raise_msg_varg(&mp_type_AttributeError,
+ MP_ERROR_TEXT("'%s' object has no attribute '%q'"),
+ mp_obj_get_type_str(base), attr);
+ #endif
+}
+
+// Get an iterator for o_in.  iter_buf may be NULL, in which case any needed
+// iterator storage is allocated on the heap.  Raises TypeError if the object
+// is not iterable.
+mp_obj_t mp_getiter(mp_obj_t o_in, mp_obj_iter_buf_t *iter_buf) {
+ assert(o_in);
+ const mp_obj_type_t *type = mp_obj_get_type(o_in);
+ mp_getiter_fun_t getiter = mp_type_get_getiter_slot(type);
+ // Check for native getiter which is the identity. We handle this case explicitly
+ // so we don't unnecessarily allocate any RAM for the iter_buf, which won't be used.
+ if (getiter == mp_identity_getiter) {
+ return o_in;
+ }
+
+ // check for native getiter (corresponds to __iter__)
+ if (getiter != NULL) {
+ if (iter_buf == NULL && getiter != mp_obj_instance_getiter) {
+ // if caller did not provide a buffer then allocate one on the heap
+ // mp_obj_instance_getiter is special, it will allocate only if needed
+ iter_buf = m_new_obj(mp_obj_iter_buf_t);
+ }
+ mp_obj_t iter = getiter(o_in, iter_buf);
+ if (iter != MP_OBJ_NULL) {
+ return iter;
+ }
+ }
+
+ // check for __getitem__
+ mp_obj_t dest[2];
+ mp_load_method_maybe(o_in, MP_QSTR___getitem__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ // __getitem__ exists, create and return an iterator
+ if (iter_buf == NULL) {
+ // if caller did not provide a buffer then allocate one on the heap
+ iter_buf = m_new_obj(mp_obj_iter_buf_t);
+ }
+ return mp_obj_new_getitem_iter(dest, iter_buf);
+ }
+
+ // object not iterable
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("object not iterable"));
+ #else
+ mp_raise_TypeError_varg(
+ MP_ERROR_TEXT("'%q' object is not iterable"), mp_obj_get_type_qstr(o_in));
+ #endif
+}
+
+// may return MP_OBJ_STOP_ITERATION as an optimisation instead of raise StopIteration()
+// may also raise StopIteration()
+// Uses the type's native iternext slot when present, otherwise falls back to
+// calling the object's __next__ method.
+mp_obj_t mp_iternext_allow_raise(mp_obj_t o_in) {
+ const mp_obj_type_t *type = mp_obj_get_type(o_in);
+ mp_fun_1_t iternext = mp_type_get_iternext_slot(type);
+ if (iternext != NULL) {
+ // clear the per-thread StopIteration argument before the native call
+ MP_STATE_THREAD(stop_iteration_arg) = MP_OBJ_NULL;
+ return iternext(o_in);
+ } else {
+ // check for __next__ method
+ mp_obj_t dest[2];
+ mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ // __next__ exists, call it and return its result
+ return mp_call_method_n_kw(0, 0, dest);
+ } else {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("object not an iterator"));
+ #else
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' object is not an iterator"),
+ mp_obj_get_type_qstr(o_in));
+ #endif
+ }
+ }
+}
+
+// will always return MP_OBJ_STOP_ITERATION instead of raising StopIteration() (or any subclass thereof)
+// may raise other exceptions
+// A raised StopIteration from a __next__ method is caught here and converted
+// into the MP_OBJ_STOP_ITERATION sentinel (carrying its value).
+mp_obj_t mp_iternext(mp_obj_t o_in) {
+ MP_STACK_CHECK(); // enumerate, filter, map and zip can recursively call mp_iternext
+ const mp_obj_type_t *type = mp_obj_get_type(o_in);
+ mp_fun_1_t iternext = mp_type_get_iternext_slot(type);
+ if (iternext != NULL) {
+ // clear the per-thread StopIteration argument before the native call
+ MP_STATE_THREAD(stop_iteration_arg) = MP_OBJ_NULL;
+ return iternext(o_in);
+ } else {
+ // check for __next__ method
+ mp_obj_t dest[2];
+ mp_load_method_maybe(o_in, MP_QSTR___next__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ // __next__ exists, call it and return its result
+ nlr_buf_t nlr;
+ if (nlr_push(&nlr) == 0) {
+ mp_obj_t ret = mp_call_method_n_kw(0, 0, dest);
+ nlr_pop();
+ return ret;
+ } else {
+ if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t *)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ // convert the raised StopIteration into the sentinel return value
+ return mp_make_stop_iteration(mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val)));
+ } else {
+ nlr_jump(nlr.ret_val);
+ }
+ }
+ } else {
+ #if MICROPY_ERROR_REPORTING <= MICROPY_ERROR_REPORTING_TERSE
+ mp_raise_TypeError(MP_ERROR_TEXT("object not an iterator"));
+ #else
+ mp_raise_TypeError_varg(MP_ERROR_TEXT("'%q' object is not an iterator"),
+ mp_obj_get_type_qstr(o_in));
+ #endif
+ }
+ }
+}
+
+// Resume a generator or generator-like object.  Exactly one of send_value and
+// throw_value must be non-NULL: send to resume normally, throw to inject an
+// exception.  The result is stored in *ret_val and the VM return kind
+// (yield / normal completion / exception) is returned.
+mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val) {
+ assert((send_value != MP_OBJ_NULL) ^ (throw_value != MP_OBJ_NULL));
+ const mp_obj_type_t *type = mp_obj_get_type(self_in);
+
+ // native generator instances have a dedicated resume implementation
+ if (type == &mp_type_gen_instance) {
+ return mp_obj_gen_resume(self_in, send_value, throw_value, ret_val);
+ }
+
+ // plain iterators can be resumed with send(None) via their iternext slot
+ mp_fun_1_t iternext = mp_type_get_iternext_slot(type);
+ if (iternext != NULL && send_value == mp_const_none) {
+ MP_STATE_THREAD(stop_iteration_arg) = MP_OBJ_NULL;
+ mp_obj_t ret = iternext(self_in);
+ *ret_val = ret;
+ if (ret != MP_OBJ_STOP_ITERATION) {
+ return MP_VM_RETURN_YIELD;
+ } else {
+ // The generator is finished.
+ // This is an optimised "raise StopIteration(*ret_val)".
+ *ret_val = MP_STATE_THREAD(stop_iteration_arg);
+ if (*ret_val == MP_OBJ_NULL) {
+ *ret_val = mp_const_none;
+ }
+ return MP_VM_RETURN_NORMAL;
+ }
+ }
+
+ mp_obj_t dest[3]; // Reserve slot for send() arg
+
+ // Python instance iterator protocol
+ if (send_value == mp_const_none) {
+ mp_load_method_maybe(self_in, MP_QSTR___next__, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ *ret_val = mp_call_method_n_kw(0, 0, dest);
+ return MP_VM_RETURN_YIELD;
+ }
+ }
+
+ // Either python instance generator protocol, or native object
+ // generator protocol.
+ if (send_value != MP_OBJ_NULL) {
+ mp_load_method(self_in, MP_QSTR_send, dest);
+ dest[2] = send_value;
+ *ret_val = mp_call_method_n_kw(1, 0, dest);
+ return MP_VM_RETURN_YIELD;
+ }
+
+ assert(throw_value != MP_OBJ_NULL);
+ {
+ // GeneratorExit is delivered by calling close() rather than throw()
+ if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(mp_obj_get_type(throw_value)), MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) {
+ mp_load_method_maybe(self_in, MP_QSTR_close, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ // TODO: Exceptions raised in close() are not propagated,
+ // printed to sys.stderr
+ *ret_val = mp_call_method_n_kw(0, 0, dest);
+ // We assume one can't "yield" from close()
+ return MP_VM_RETURN_NORMAL;
+ }
+ } else {
+ mp_load_method_maybe(self_in, MP_QSTR_throw, dest);
+ if (dest[0] != MP_OBJ_NULL) {
+ dest[2] = throw_value;
+ *ret_val = mp_call_method_n_kw(1, 0, dest);
+ // If .throw() method returned, we assume it's value to yield
+ // - any exception would be thrown with nlr_raise().
+ return MP_VM_RETURN_YIELD;
+ }
+ }
+ // If there's nowhere to throw exception into, then we assume that object
+ // is just incapable to handle it, so any exception thrown into it
+ // will be propagated up. This behavior is approved by test_pep380.py
+ // test_delegation_of_close_to_non_generator(),
+ // test_delegating_throw_to_non_generator()
+ if (mp_obj_exception_match(throw_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ // PEP479: if StopIteration is raised inside a generator it is replaced with RuntimeError
+ *ret_val = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("generator raised StopIteration"));
+ } else {
+ *ret_val = mp_make_raise_obj(throw_value);
+ }
+ return MP_VM_RETURN_EXCEPTION;
+ }
+}
+
+// Convert an object into an exception instance suitable for raising.
+mp_obj_t mp_make_raise_obj(mp_obj_t o) {
+    DEBUG_printf("raise %p\n", o);
+    if (mp_obj_is_exception_type(o)) {
+        // an exception class was given: instantiate it with no arguments
+        // TODO could have an option to disable traceback, then builtin exceptions (eg TypeError)
+        // could have const instances in ROM which we return here instead
+        o = mp_call_function_n_kw(o, 0, 0, NULL);
+    }
+
+    if (!mp_obj_is_exception_instance(o)) {
+        // o cannot be raised: hand back a TypeError for the caller to raise
+        return mp_obj_new_exception_msg(&mp_type_TypeError, MP_ERROR_TEXT("exceptions must derive from BaseException"));
+    }
+    // already a usable exception instance
+    return o;
+}
+
+// Perform an import of `name`, dispatching to a user-overridden __import__
+// when one is installed, otherwise to the builtin implementation.
+mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level) {
+    DEBUG_printf("import name '%s' level=%d\n", qstr_str(name), MP_OBJ_SMALL_INT_VALUE(level));
+
+    // arguments for __import__: (name, globals, locals, fromlist, level)
+    mp_obj_t args[5] = {
+        MP_OBJ_NEW_QSTR(name),
+        mp_const_none, // TODO should be globals
+        mp_const_none, // TODO should be locals
+        fromlist,
+        level,
+    };
+
+    #if MICROPY_CAN_OVERRIDE_BUILTINS
+    // honour a user-installed override of the builtin __import__
+    mp_obj_dict_t *bo_dict = MP_STATE_VM(mp_module_builtins_override_dict);
+    if (bo_dict != NULL) {
+        mp_map_elem_t *elem = mp_map_lookup(&bo_dict->map, MP_OBJ_NEW_QSTR(MP_QSTR___import__), MP_MAP_LOOKUP);
+        if (elem != NULL) {
+            return mp_call_function_n_kw(elem->value, 5, 0, args);
+        }
+    }
+    #endif
+
+    return mp_builtin___import__(5, args);
+}
+
+// Implements "from module import name": look up name on module, falling back
+// to a filesystem sub-module import when module is a package.  Raises
+// ImportError when the name cannot be resolved.
+mp_obj_t mp_import_from(mp_obj_t module, qstr name) {
+ DEBUG_printf("import from %p %s\n", module, qstr_str(name));
+
+ mp_obj_t dest[2];
+
+ mp_load_method_maybe(module, name, dest);
+
+ if (dest[1] != MP_OBJ_NULL) {
+ // Hopefully we can't import bound method from an object
+ mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("cannot import name %q"), name);
+ }
+
+ if (dest[0] != MP_OBJ_NULL) {
+ return dest[0];
+ }
+
+ #if MICROPY_ENABLE_EXTERNAL_IMPORT
+
+ // See if it's a package, then can try FS import
+ if (!mp_obj_is_package(module)) {
+ mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("cannot import name %q"), name);
+ }
+
+ // build the dotted name "<pkg>.<name>" for the sub-module import
+ mp_load_method_maybe(module, MP_QSTR___name__, dest);
+ size_t pkg_name_len;
+ const char *pkg_name = mp_obj_str_get_data(dest[0], &pkg_name_len);
+
+ const uint dot_name_len = pkg_name_len + 1 + qstr_len(name);
+ char *dot_name = mp_local_alloc(dot_name_len);
+ memcpy(dot_name, pkg_name, pkg_name_len);
+ dot_name[pkg_name_len] = '.';
+ memcpy(dot_name + pkg_name_len + 1, qstr_str(name), qstr_len(name));
+ qstr dot_name_q = qstr_from_strn(dot_name, dot_name_len);
+ mp_local_free(dot_name);
+
+ // For fromlist, pass sentinel "non empty" value to force returning of leaf module
+ return mp_import_name(dot_name_q, mp_const_true, MP_OBJ_NEW_SMALL_INT(0));
+
+ #else
+
+ // Package import not supported with external imports disabled
+ mp_raise_msg_varg(&mp_type_ImportError, MP_ERROR_TEXT("cannot import name %q"), name);
+
+ #endif
+}
+
+// Implements "from module import *": store every public (non-underscore-
+// prefixed) entry of the module's globals into the current scope.
+void mp_import_all(mp_obj_t module) {
+ DEBUG_printf("import all %p\n", module);
+
+ // TODO: Support __all__
+ mp_map_t *map = &mp_obj_module_get_globals(module)->map;
+ for (size_t i = 0; i < map->alloc; i++) {
+ if (mp_map_slot_is_filled(map, i)) {
+ // Entry in module global scope may be generated programmatically
+ // (and thus be not a qstr for longer names). Avoid turning it in
+ // qstr if it has '_' and was used exactly to save memory.
+ const char *name = mp_obj_str_get_str(map->table[i].key);
+ if (*name != '_') {
+ qstr qname = mp_obj_str_get_qstr(map->table[i].key);
+ mp_store_name(qname, map->table[i].value);
+ }
+ }
+ }
+}
+
+#if MICROPY_ENABLE_COMPILER
+
+// Parse and compile the source in lex, then execute it (or, when compiling
+// only — MICROPY_PY_BUILTINS_COMPILE with globals == NULL — return the
+// compiled module function).  globals/locals become the active context for
+// the execution; the previous context is restored on both the success and
+// the exception path.
+mp_obj_t mp_parse_compile_execute(mp_lexer_t *lex, mp_parse_input_kind_t parse_input_kind, mp_obj_dict_t *globals, mp_obj_dict_t *locals) {
+ // save context
+ // (volatile so the saved pointers survive the longjmp taken by nlr on exception)
+ mp_obj_dict_t *volatile old_globals = mp_globals_get();
+ mp_obj_dict_t *volatile old_locals = mp_locals_get();
+
+ // set new context
+ mp_globals_set(globals);
+ mp_locals_set(locals);
+
+ nlr_buf_t nlr;
+ if (nlr_push(&nlr) == 0) {
+ qstr source_name = lex->source_name;
+ mp_parse_tree_t parse_tree = mp_parse(lex, parse_input_kind);
+ mp_obj_t module_fun = mp_compile(&parse_tree, source_name, parse_input_kind == MP_PARSE_SINGLE_INPUT);
+
+ mp_obj_t ret;
+ if (MICROPY_PY_BUILTINS_COMPILE && globals == NULL) {
+ // for compile only, return value is the module function
+ ret = module_fun;
+ } else {
+ // execute module function and get return value
+ ret = mp_call_function_0(module_fun);
+ }
+
+ // finish nlr block, restore context and return value
+ nlr_pop();
+ mp_globals_set(old_globals);
+ mp_locals_set(old_locals);
+ return ret;
+ } else {
+ // exception; restore context and re-raise same exception
+ mp_globals_set(old_globals);
+ mp_locals_set(old_locals);
+ nlr_jump(nlr.ret_val);
+ }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
+
+// Called when a memory allocation of num_bytes cannot be satisfied; always
+// raises MemoryError (with a distinct message if the GC heap is locked).
+NORETURN void m_malloc_fail(size_t num_bytes) {
+ DEBUG_printf("memory allocation failed, allocating %u bytes\n", (uint)num_bytes);
+ #if MICROPY_ENABLE_GC
+ if (gc_is_locked()) {
+ mp_raise_msg(&mp_type_MemoryError, MP_ERROR_TEXT("memory allocation failed, heap is locked"));
+ }
+ #endif
+ mp_raise_msg_varg(&mp_type_MemoryError,
+ MP_ERROR_TEXT("memory allocation failed, allocating %u bytes"), (uint)num_bytes);
+}
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
+
+// Raise an exception of type `exc_type` constructed with no arguments
+// (terse error-reporting build: messages are compiled out).
+NORETURN void mp_raise_type(const mp_obj_type_t *exc_type) {
+    mp_obj_t exc = mp_obj_new_exception(exc_type);
+    nlr_raise(exc);
+}
+
+// Raise a bare ValueError (terse error-reporting build).
+NORETURN void mp_raise_ValueError_no_msg(void) {
+    mp_raise_type(&mp_type_ValueError);
+}
+
+// Raise a bare TypeError (terse error-reporting build).
+NORETURN void mp_raise_TypeError_no_msg(void) {
+    mp_raise_type(&mp_type_TypeError);
+}
+
+// Raise a bare NotImplementedError (terse error-reporting build).
+NORETURN void mp_raise_NotImplementedError_no_msg(void) {
+    mp_raise_type(&mp_type_NotImplementedError);
+}
+
+#else
+
+// Raise an exception of `exc_type`; `msg` may be NULL, in which case the
+// exception is constructed without a message.
+NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, const compressed_string_t *msg) {
+    if (msg != NULL) {
+        nlr_raise(mp_obj_new_exception_msg(exc_type, msg));
+    } else {
+        nlr_raise(mp_obj_new_exception(exc_type));
+    }
+}
+
+// Raise an exception of `exc_type` with a printf-style message formatted
+// from `fmt` and the already-started argument list `argptr`.
+NORETURN void mp_raise_msg_vlist(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, va_list argptr) {
+    nlr_raise(mp_obj_new_exception_msg_vlist(exc_type, fmt, argptr));
+}
+
+// Raise an exception of `exc_type` with a printf-style formatted message.
+NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(exc_type, fmt, args);
+    va_end(args);
+}
+
+// As mp_raise_msg but takes a plain C string; per runtime.h this variant is
+// only for native .mpy code — elsewhere use the compressed-string variant.
+NORETURN void mp_raise_msg_str(const mp_obj_type_t *exc_type, const char *msg) {
+    if (msg != NULL) {
+        nlr_raise(mp_obj_new_exception_msg_str(exc_type, msg));
+    } else {
+        nlr_raise(mp_obj_new_exception(exc_type));
+    }
+}
+
+// Raise AttributeError with the given (possibly NULL) message.
+NORETURN void mp_raise_AttributeError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_AttributeError, msg);
+}
+
+// Raise RuntimeError with the given (possibly NULL) message.
+NORETURN void mp_raise_RuntimeError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_RuntimeError, msg);
+}
+
+// Raise ImportError with the given (possibly NULL) message.
+NORETURN void mp_raise_ImportError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_ImportError, msg);
+}
+
+// Raise IndexError with the given (possibly NULL) message.
+NORETURN void mp_raise_IndexError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_IndexError, msg);
+}
+
+// Raise IndexError with a printf-style formatted message.
+NORETURN void mp_raise_IndexError_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_IndexError, fmt, args);
+    va_end(args);
+}
+
+// Raise ValueError with the given (possibly NULL) message.
+NORETURN void mp_raise_ValueError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_ValueError, msg);
+}
+
+// Raise ValueError with a printf-style formatted message.
+NORETURN void mp_raise_ValueError_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_ValueError, fmt, args);
+    va_end(args);
+}
+
+// Raise TypeError with the given (possibly NULL) message.
+NORETURN void mp_raise_TypeError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_TypeError, msg);
+}
+
+// Raise TypeError with a printf-style formatted message.
+NORETURN void mp_raise_TypeError_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_TypeError, fmt, args);
+    va_end(args);
+}
+
+// Raise OSError with the given (possibly NULL) message.
+NORETURN void mp_raise_OSError_msg(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_OSError, msg);
+}
+
+// Raise OSError constructed with two arguments: the errno value and a
+// descriptive object (typically a string).
+NORETURN void mp_raise_OSError_errno_str(int errno_, mp_obj_t str) {
+    mp_obj_t exc_args[2] = {
+        MP_OBJ_NEW_SMALL_INT(errno_),
+        str,
+    };
+    nlr_raise(mp_obj_new_exception_args(&mp_type_OSError, 2, exc_args));
+}
+
+// Raise OSError with a printf-style formatted message.
+NORETURN void mp_raise_OSError_msg_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_OSError, fmt, args);
+    va_end(args);
+}
+
+// Raise ConnectionError with the given (possibly NULL) message.
+NORETURN void mp_raise_ConnectionError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_ConnectionError, msg);
+}
+
+// Raise BrokenPipeError carrying errno EPIPE as its single argument.
+NORETURN void mp_raise_BrokenPipeError(void) {
+    mp_raise_type_arg(&mp_type_BrokenPipeError, MP_OBJ_NEW_SMALL_INT(MP_EPIPE));
+}
+
+// Raise NotImplementedError with the given (possibly NULL) message.
+NORETURN void mp_raise_NotImplementedError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_NotImplementedError, msg);
+}
+
+// Raise NotImplementedError with a printf-style formatted message.
+NORETURN void mp_raise_NotImplementedError_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_NotImplementedError, fmt, args);
+    va_end(args);
+}
+
+
+// Raise OverflowError with a printf-style formatted message.
+NORETURN void mp_raise_OverflowError_varg(const compressed_string_t *fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    mp_raise_msg_vlist(&mp_type_OverflowError, fmt, args);
+    va_end(args);
+}
+
+// Raise MpyError with the given (possibly NULL) message.
+NORETURN void mp_raise_MpyError(const compressed_string_t *msg) {
+    mp_raise_msg(&mp_type_MpyError, msg);
+}
+
+// Raise an exception of `exc_type` constructed with the single argument `arg`.
+NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg) {
+    mp_obj_t exc = mp_obj_new_exception_arg1(exc_type, arg);
+    nlr_raise(exc);
+}
+
+// Raise StopIteration; `arg` becomes its value unless it is MP_OBJ_NULL,
+// in which case a bare StopIteration is raised.
+NORETURN void mp_raise_StopIteration(mp_obj_t arg) {
+    if (arg != MP_OBJ_NULL) {
+        mp_raise_type_arg(&mp_type_StopIteration, arg);
+    } else {
+        mp_raise_type(&mp_type_StopIteration);
+    }
+}
+
+// Raise OSError carrying the given errno value as its single argument.
+NORETURN void mp_raise_OSError(int errno_) {
+    mp_raise_type_arg(&mp_type_OSError, MP_OBJ_NEW_SMALL_INT(errno_));
+}
+
+#endif
+
+#if MICROPY_STACK_CHECK || MICROPY_ENABLE_PYSTACK
+// Raise RuntimeError for Python-level recursion-limit overflow (stack or
+// pystack exhaustion).
+NORETURN void mp_raise_recursion_depth(void) {
+    mp_raise_RuntimeError(MP_ERROR_TEXT("maximum recursion depth exceeded"));
+}
+#endif
diff --git a/circuitpython/py/runtime.h b/circuitpython/py/runtime.h
new file mode 100644
index 0000000..4a309f3
--- /dev/null
+++ b/circuitpython/py/runtime.h
@@ -0,0 +1,249 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_RUNTIME_H
+#define MICROPY_INCLUDED_PY_RUNTIME_H
+
+#include <stdarg.h>
+
+#include "py/mpstate.h"
+#include "py/pystack.h"
+
+#include "supervisor/linker.h"
+
+typedef enum {
+ MP_VM_RETURN_NORMAL,
+ MP_VM_RETURN_YIELD,
+ MP_VM_RETURN_EXCEPTION,
+} mp_vm_return_kind_t;
+
+typedef enum {
+ MP_ARG_BOOL = 0x001,
+ MP_ARG_INT = 0x002,
+ MP_ARG_OBJ = 0x003,
+ MP_ARG_KIND_MASK = 0x0ff,
+ MP_ARG_REQUIRED = 0x100,
+ MP_ARG_KW_ONLY = 0x200,
+} mp_arg_flag_t;
+
+typedef union _mp_arg_val_t {
+ bool u_bool;
+ mp_int_t u_int;
+ mp_obj_t u_obj;
+ mp_rom_obj_t u_rom_obj;
+} mp_arg_val_t;
+
+typedef struct _mp_arg_t {
+ uint16_t qst;
+ uint16_t flags;
+ mp_arg_val_t defval;
+} mp_arg_t;
+
+// Tables mapping operator enums to qstrs, defined in objtype.c
+extern const byte mp_unary_op_method_name[];
+extern const byte mp_binary_op_method_name[];
+
+void mp_init(void);
+void mp_deinit(void);
+
+void mp_sched_exception(mp_obj_t exc);
+void mp_sched_keyboard_interrupt(void);
+void mp_handle_pending(bool raise_exc);
+void mp_handle_pending_tail(mp_uint_t atomic_state);
+
+#if MICROPY_ENABLE_SCHEDULER
+void mp_sched_lock(void);
+void mp_sched_unlock(void);
+#define mp_sched_num_pending() (MP_STATE_VM(sched_len))
+bool mp_sched_schedule(mp_obj_t function, mp_obj_t arg);
+#endif
+
+// extra printing method specifically for mp_obj_t's which are integral type
+int mp_print_mp_int(const mp_print_t *print, mp_obj_t x, int base, int base_char, int flags, char fill, int width, int prec);
+
+void mp_arg_check_num_sig(size_t n_args, size_t n_kw, uint32_t sig);
+// Check positional/keyword argument counts against [n_args_min, n_args_max]
+// and `takes_kw`; mismatches are reported by mp_arg_check_num_sig.
+static inline void mp_arg_check_num(size_t n_args, size_t n_kw, size_t n_args_min, size_t n_args_max, bool takes_kw) {
+    mp_arg_check_num_sig(n_args, n_kw, MP_OBJ_FUN_MAKE_SIG(n_args_min, n_args_max, takes_kw));
+}
+void mp_arg_parse_all(size_t n_pos, const mp_obj_t *pos, mp_map_t *kws, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
+void mp_arg_parse_all_kw_array(size_t n_pos, size_t n_kw, const mp_obj_t *args, size_t n_allowed, const mp_arg_t *allowed, mp_arg_val_t *out_vals);
+NORETURN void mp_arg_error_terse_mismatch(void);
+NORETURN void mp_arg_error_unimpl_kw(void);
+
+mp_int_t mp_arg_validate_int_min(mp_int_t i, mp_int_t min, qstr arg_name);
+mp_int_t mp_arg_validate_int_max(mp_int_t i, mp_int_t j, qstr arg_name);
+mp_int_t mp_arg_validate_int_range(mp_int_t i, mp_int_t min, mp_int_t max, qstr arg_name);
+#if MICROPY_PY_BUILTINS_FLOAT
+mp_float_t mp_arg_validate_obj_float_non_negative(mp_obj_t float_in, mp_float_t default_for_null, qstr arg_name);
+#endif
+mp_uint_t mp_arg_validate_length_range(mp_uint_t length, mp_uint_t min, mp_uint_t max, qstr arg_name);
+mp_obj_t mp_arg_validate_type(mp_obj_t obj, const mp_obj_type_t *type, qstr arg_name);
+mp_obj_t mp_arg_validate_string(mp_obj_t obj, qstr arg_name);
+
+// Return the currently active locals dict (per-thread state).
+static inline mp_obj_dict_t *PLACE_IN_ITCM(mp_locals_get)(void) {
+    return MP_STATE_THREAD(dict_locals);
+}
+// Install `d` as the currently active locals dict (per-thread state).
+static inline void PLACE_IN_ITCM(mp_locals_set)(mp_obj_dict_t * d) {
+    MP_STATE_THREAD(dict_locals) = d;
+}
+// Return the currently active globals dict (per-thread state).
+static inline mp_obj_dict_t *PLACE_IN_ITCM(mp_globals_get)(void) {
+    return MP_STATE_THREAD(dict_globals);
+}
+// Install `d` as the currently active globals dict (per-thread state).
+static inline void PLACE_IN_ITCM(mp_globals_set)(mp_obj_dict_t * d) {
+    MP_STATE_THREAD(dict_globals) = d;
+}
+
+mp_obj_t mp_load_name(qstr qst);
+mp_obj_t mp_load_global(qstr qst);
+mp_obj_t mp_load_build_class(void);
+void mp_store_name(qstr qst, mp_obj_t obj);
+void mp_store_global(qstr qst, mp_obj_t obj);
+void mp_delete_name(qstr qst);
+void mp_delete_global(qstr qst);
+
+mp_obj_t mp_unary_op(mp_unary_op_t op, mp_obj_t arg);
+mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs);
+
+mp_obj_t mp_call_function_0(mp_obj_t fun);
+mp_obj_t mp_call_function_1(mp_obj_t fun, mp_obj_t arg);
+mp_obj_t mp_call_function_2(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2);
+mp_obj_t mp_call_function_n_kw(mp_obj_t fun, size_t n_args, size_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_call_method_n_kw(size_t n_args, size_t n_kw, const mp_obj_t *args);
+mp_obj_t mp_call_method_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_obj_t *args);
+mp_obj_t mp_call_method_self_n_kw(mp_obj_t meth, mp_obj_t self, size_t n_args, size_t n_kw, const mp_obj_t *args);
+// Call function and catch/dump exception - for Python callbacks from C code
+// (return MP_OBJ_NULL in case of exception).
+mp_obj_t mp_call_function_1_protected(mp_obj_t fun, mp_obj_t arg);
+mp_obj_t mp_call_function_2_protected(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2);
+
+typedef struct _mp_call_args_t {
+ mp_obj_t fun;
+ size_t n_args, n_kw, n_alloc;
+ mp_obj_t *args;
+} mp_call_args_t;
+
+#if MICROPY_STACKLESS
+// Takes arguments which are the most general mix of Python arg types, and
+// prepares argument array suitable for passing to ->call() method of a
+// function object (and mp_call_function_n_kw()).
+// (Only needed in stackless mode.)
+void mp_call_prepare_args_n_kw_var(bool have_self, size_t n_args_n_kw, const mp_obj_t *args, mp_call_args_t *out_args);
+#endif
+
+void mp_unpack_sequence(mp_obj_t seq, size_t num, mp_obj_t *items);
+void mp_unpack_ex(mp_obj_t seq, size_t num, mp_obj_t *items);
+mp_obj_t mp_store_map(mp_obj_t map, mp_obj_t key, mp_obj_t value);
+mp_obj_t mp_load_attr(mp_obj_t base, qstr attr);
+void mp_convert_member_lookup(mp_obj_t obj, const mp_obj_type_t *type, mp_obj_t member, mp_obj_t *dest);
+void mp_load_method(mp_obj_t base, qstr attr, mp_obj_t *dest);
+void mp_load_method_maybe(mp_obj_t base, qstr attr, mp_obj_t *dest);
+void mp_load_method_protected(mp_obj_t obj, qstr attr, mp_obj_t *dest, bool catch_all_exc);
+void mp_load_super_method(qstr attr, mp_obj_t *dest);
+void mp_store_attr(mp_obj_t base, qstr attr, mp_obj_t val);
+
+mp_obj_t mp_getiter(mp_obj_t o, mp_obj_iter_buf_t *iter_buf);
+mp_obj_t mp_iternext_allow_raise(mp_obj_t o); // may return MP_OBJ_STOP_ITERATION instead of raising StopIteration()
+mp_obj_t mp_iternext(mp_obj_t o); // will always return MP_OBJ_STOP_ITERATION instead of raising StopIteration(...)
+mp_vm_return_kind_t mp_resume(mp_obj_t self_in, mp_obj_t send_value, mp_obj_t throw_value, mp_obj_t *ret_val);
+
+// Record `o` as the StopIteration value for this thread and return the
+// sentinel object that signals end-of-iteration to the caller.
+static inline mp_obj_t mp_make_stop_iteration(mp_obj_t o) {
+    MP_STATE_THREAD(stop_iteration_arg) = o;
+    return MP_OBJ_STOP_ITERATION;
+}
+
+mp_obj_t mp_make_raise_obj(mp_obj_t o);
+
+mp_obj_t mp_import_name(qstr name, mp_obj_t fromlist, mp_obj_t level);
+mp_obj_t mp_import_from(mp_obj_t module, qstr name);
+void mp_import_all(mp_obj_t module);
+
+#if MICROPY_ERROR_REPORTING == MICROPY_ERROR_REPORTING_NONE
+NORETURN void mp_raise_type(const mp_obj_type_t *exc_type);
+NORETURN void mp_raise_ValueError_no_msg(void);
+NORETURN void mp_raise_TypeError_no_msg(void);
+NORETURN void mp_raise_NotImplementedError_no_msg(void);
+#define mp_raise_msg(exc_type, msg) mp_raise_type(exc_type)
+#define mp_raise_msg_varg(exc_type, ...) mp_raise_type(exc_type)
+#define mp_raise_ValueError(msg) mp_raise_ValueError_no_msg()
+#define mp_raise_TypeError(msg) mp_raise_TypeError_no_msg()
+#define mp_raise_NotImplementedError(msg) mp_raise_NotImplementedError_no_msg()
+#else
+#define mp_raise_type(exc_type) mp_raise_msg(exc_type, NULL)
+NORETURN void mp_raise_type_arg(const mp_obj_type_t *exc_type, mp_obj_t arg);
+NORETURN void mp_raise_msg(const mp_obj_type_t *exc_type, const compressed_string_t *msg);
+NORETURN void mp_raise_msg_varg(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_msg_vlist(const mp_obj_type_t *exc_type, const compressed_string_t *fmt, va_list argptr);
+// Only use this string version in native mpy files. Otherwise, use the compressed string version.
+NORETURN void mp_raise_msg_str(const mp_obj_type_t *exc_type, const char *msg);
+NORETURN void mp_raise_ValueError(const compressed_string_t *msg);
+NORETURN void mp_raise_ValueError_varg(const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_TypeError(const compressed_string_t *msg);
+NORETURN void mp_raise_TypeError_varg(const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_AttributeError(const compressed_string_t *msg);
+NORETURN void mp_raise_RuntimeError(const compressed_string_t *msg);
+NORETURN void mp_raise_ImportError(const compressed_string_t *msg);
+NORETURN void mp_raise_IndexError(const compressed_string_t *msg);
+NORETURN void mp_raise_IndexError_varg(const compressed_string_t *msg, ...);
+NORETURN void mp_raise_StopIteration(mp_obj_t arg);
+NORETURN void mp_raise_OSError(int errno_);
+NORETURN void mp_raise_OSError_errno_str(int errno_, mp_obj_t str);
+NORETURN void mp_raise_OSError_msg(const compressed_string_t *msg);
+NORETURN void mp_raise_OSError_msg_varg(const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_ConnectionError(const compressed_string_t *msg);
+NORETURN void mp_raise_BrokenPipeError(void);
+NORETURN void mp_raise_NotImplementedError(const compressed_string_t *msg);
+NORETURN void mp_raise_NotImplementedError_varg(const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_OverflowError_varg(const compressed_string_t *fmt, ...);
+NORETURN void mp_raise_MpyError(const compressed_string_t *msg);
+NORETURN void mp_raise_recursion_depth(void);
+#endif
+
+#if MICROPY_BUILTIN_METHOD_CHECK_SELF_ARG
+#undef mp_check_self
+#define mp_check_self(pred)
+#else
+// A port may define to raise TypeError for example
+#ifndef mp_check_self
+#define mp_check_self(pred) assert(pred)
+#endif
+#endif
+
+// helper functions for native/viper code
+int mp_native_type_from_qstr(qstr qst);
+mp_uint_t mp_native_from_obj(mp_obj_t obj, mp_uint_t type);
+mp_obj_t mp_native_to_obj(mp_uint_t val, mp_uint_t type);
+
+#define mp_sys_path (MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_sys_path_obj)))
+#define mp_sys_argv (MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_sys_argv_obj)))
+
+#if MICROPY_WARNINGS
+#ifndef mp_warning
+void mp_warning(const char *category, const char *msg, ...);
+#endif
+#else
+#define mp_warning(...)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_RUNTIME_H
diff --git a/circuitpython/py/runtime0.h b/circuitpython/py/runtime0.h
new file mode 100644
index 0000000..9af108d
--- /dev/null
+++ b/circuitpython/py/runtime0.h
@@ -0,0 +1,161 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_RUNTIME0_H
+#define MICROPY_INCLUDED_PY_RUNTIME0_H
+#include "mpconfig.h"
+
+// The first four must fit in 8 bits, see emitbc.c
+// The remaining must fit in 16 bits, see scope.h
+#define MP_SCOPE_FLAG_ALL_SIG (0x1f)
+#define MP_SCOPE_FLAG_GENERATOR (0x01)
+#define MP_SCOPE_FLAG_VARKEYWORDS (0x02)
+#define MP_SCOPE_FLAG_VARARGS (0x04)
+#define MP_SCOPE_FLAG_DEFKWARGS (0x08)
+#define MP_SCOPE_FLAG_ASYNC (0x10)
+#define MP_SCOPE_FLAG_REFGLOBALS (0x20) // used only if native emitter enabled
+#define MP_SCOPE_FLAG_HASCONSTS (0x40) // used only if native emitter enabled
+#define MP_SCOPE_FLAG_VIPERRET_POS (7) // 3 bits used for viper return type, to pass from compiler to native emitter
+#define MP_SCOPE_FLAG_VIPERRELOC (0x20) // used only when loading viper from .mpy
+#define MP_SCOPE_FLAG_VIPERRODATA (0x40) // used only when loading viper from .mpy
+#define MP_SCOPE_FLAG_VIPERBSS (0x80) // used only when loading viper from .mpy
+
+// types for native (viper) function signature
+#define MP_NATIVE_TYPE_OBJ (0x00)
+#define MP_NATIVE_TYPE_BOOL (0x01)
+#define MP_NATIVE_TYPE_INT (0x02)
+#define MP_NATIVE_TYPE_UINT (0x03)
+#define MP_NATIVE_TYPE_PTR (0x04)
+#define MP_NATIVE_TYPE_PTR8 (0x05)
+#define MP_NATIVE_TYPE_PTR16 (0x06)
+#define MP_NATIVE_TYPE_PTR32 (0x07)
+
+// Bytecode and runtime boundaries for unary ops
+#define MP_UNARY_OP_NUM_BYTECODE (MP_UNARY_OP_NOT + 1)
+#define MP_UNARY_OP_NUM_RUNTIME (MP_UNARY_OP_SIZEOF + 1)
+
+// Bytecode and runtime boundaries for binary ops
+#define MP_BINARY_OP_NUM_BYTECODE (MP_BINARY_OP_POWER + 1)
+#if MICROPY_PY_REVERSE_SPECIAL_METHODS
+#define MP_BINARY_OP_NUM_RUNTIME (MP_BINARY_OP_REVERSE_POWER + 1)
+#else
+#define MP_BINARY_OP_NUM_RUNTIME (MP_BINARY_OP_CONTAINS + 1)
+#endif
+
+typedef enum {
+ // These ops may appear in the bytecode. Changing this group
+ // in any way requires changing the bytecode version.
+ MP_UNARY_OP_POSITIVE,
+ MP_UNARY_OP_NEGATIVE,
+ MP_UNARY_OP_INVERT,
+ MP_UNARY_OP_NOT,
+
+ // Following ops cannot appear in the bytecode
+ MP_UNARY_OP_BOOL, // __bool__
+ MP_UNARY_OP_LEN, // __len__
+ MP_UNARY_OP_HASH, // __hash__; must return a small int
+ MP_UNARY_OP_ABS, // __abs__
+ MP_UNARY_OP_INT, // __int__
+ MP_UNARY_OP_SIZEOF, // for sys.getsizeof()
+} mp_unary_op_t;
+
+typedef enum {
+ // The following 9+13+13 ops are used in bytecode and changing
+ // them requires changing the bytecode version.
+
+ // 9 relational operations, should return a bool; order of first 6 matches corresponding mp_token_kind_t
+ MP_BINARY_OP_LESS,
+ MP_BINARY_OP_MORE,
+ MP_BINARY_OP_EQUAL,
+ MP_BINARY_OP_LESS_EQUAL,
+ MP_BINARY_OP_MORE_EQUAL,
+ MP_BINARY_OP_NOT_EQUAL,
+ MP_BINARY_OP_IN,
+ MP_BINARY_OP_IS,
+ MP_BINARY_OP_EXCEPTION_MATCH,
+
+ // 13 inplace arithmetic operations; order matches corresponding mp_token_kind_t
+ MP_BINARY_OP_INPLACE_OR,
+ MP_BINARY_OP_INPLACE_XOR,
+ MP_BINARY_OP_INPLACE_AND,
+ MP_BINARY_OP_INPLACE_LSHIFT,
+ MP_BINARY_OP_INPLACE_RSHIFT,
+ MP_BINARY_OP_INPLACE_ADD,
+ MP_BINARY_OP_INPLACE_SUBTRACT,
+ MP_BINARY_OP_INPLACE_MULTIPLY,
+ MP_BINARY_OP_INPLACE_MAT_MULTIPLY,
+ MP_BINARY_OP_INPLACE_FLOOR_DIVIDE,
+ MP_BINARY_OP_INPLACE_TRUE_DIVIDE,
+ MP_BINARY_OP_INPLACE_MODULO,
+ MP_BINARY_OP_INPLACE_POWER,
+
+ // 13 normal arithmetic operations; order matches corresponding mp_token_kind_t
+ MP_BINARY_OP_OR,
+ MP_BINARY_OP_XOR,
+ MP_BINARY_OP_AND,
+ MP_BINARY_OP_LSHIFT,
+ MP_BINARY_OP_RSHIFT,
+ MP_BINARY_OP_ADD,
+ MP_BINARY_OP_SUBTRACT,
+ MP_BINARY_OP_MULTIPLY,
+ MP_BINARY_OP_MAT_MULTIPLY,
+ MP_BINARY_OP_FLOOR_DIVIDE,
+ MP_BINARY_OP_TRUE_DIVIDE,
+ MP_BINARY_OP_MODULO,
+ MP_BINARY_OP_POWER,
+
+ // Operations below this line don't appear in bytecode, they
+ // just identify special methods.
+
+ // This is not emitted by the compiler but is supported by the runtime.
+ // It must follow immediately after MP_BINARY_OP_POWER.
+ MP_BINARY_OP_DIVMOD,
+
+ // The runtime will convert MP_BINARY_OP_IN to this operator with swapped args.
+ // A type should implement this containment operator instead of MP_BINARY_OP_IN.
+ MP_BINARY_OP_CONTAINS,
+
+ // 13 MP_BINARY_OP_REVERSE_* operations must be in the same order as MP_BINARY_OP_*,
+ // and be the last ones supported by the runtime.
+ MP_BINARY_OP_REVERSE_OR,
+ MP_BINARY_OP_REVERSE_XOR,
+ MP_BINARY_OP_REVERSE_AND,
+ MP_BINARY_OP_REVERSE_LSHIFT,
+ MP_BINARY_OP_REVERSE_RSHIFT,
+ MP_BINARY_OP_REVERSE_ADD,
+ MP_BINARY_OP_REVERSE_SUBTRACT,
+ MP_BINARY_OP_REVERSE_MULTIPLY,
+ MP_BINARY_OP_REVERSE_MAT_MULTIPLY,
+ MP_BINARY_OP_REVERSE_FLOOR_DIVIDE,
+ MP_BINARY_OP_REVERSE_TRUE_DIVIDE,
+ MP_BINARY_OP_REVERSE_MODULO,
+ MP_BINARY_OP_REVERSE_POWER,
+
+ // These 2 are not supported by the runtime and must be synthesised by the emitter
+ MP_BINARY_OP_NOT_IN,
+ MP_BINARY_OP_IS_NOT,
+} mp_binary_op_t;
+
+#endif // MICROPY_INCLUDED_PY_RUNTIME0_H
diff --git a/circuitpython/py/runtime_utils.c b/circuitpython/py/runtime_utils.c
new file mode 100644
index 0000000..fd3f071
--- /dev/null
+++ b/circuitpython/py/runtime_utils.c
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Josef Gajdusek
+ * Copyright (c) 2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/mpconfig.h"
+#include "py/runtime.h"
+
+// Call fun(arg), catching any raised exception: on success the call's result
+// is returned; on exception it is printed and MP_OBJ_NULL is returned.
+// Intended for Python callbacks invoked from C code.
+// NOTE(review): this variant is MP_NOINLINE while the 2-arg variant below is
+// not — presumably deliberate (code-size/ITCM placement); confirm.
+MP_NOINLINE mp_obj_t mp_call_function_1_protected(mp_obj_t fun, mp_obj_t arg) {
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_obj_t ret = mp_call_function_1(fun, arg);
+        nlr_pop();
+        return ret;
+    } else {
+        // Dump the exception to the platform print stream; do not propagate.
+        mp_obj_print_exception(&mp_plat_print, MP_OBJ_FROM_PTR(nlr.ret_val));
+        return MP_OBJ_NULL;
+    }
+}
+
+// Call fun(arg1, arg2), catching any raised exception: on success the call's
+// result is returned; on exception it is printed and MP_OBJ_NULL is returned.
+mp_obj_t mp_call_function_2_protected(mp_obj_t fun, mp_obj_t arg1, mp_obj_t arg2) {
+    nlr_buf_t nlr;
+    if (nlr_push(&nlr) == 0) {
+        mp_obj_t ret = mp_call_function_2(fun, arg1, arg2);
+        nlr_pop();
+        return ret;
+    } else {
+        // Dump the exception to the platform print stream; do not propagate.
+        mp_obj_print_exception(&mp_plat_print, MP_OBJ_FROM_PTR(nlr.ret_val));
+        return MP_OBJ_NULL;
+    }
+}
diff --git a/circuitpython/py/scheduler.c b/circuitpython/py/scheduler.c
new file mode 100644
index 0000000..1fd5dae
--- /dev/null
+++ b/circuitpython/py/scheduler.c
@@ -0,0 +1,162 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2017 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+
+#include "py/runtime.h"
+
+// Schedules an exception on the main thread (for exceptions "thrown" by async
+// sources such as interrupts and UNIX signal handlers).
+// Store `exc` as the main thread's pending exception; if the scheduler is
+// idle, mark it pending so the VM notices the exception promptly.
+void MICROPY_WRAP_MP_SCHED_EXCEPTION(mp_sched_exception)(mp_obj_t exc) {
+    MP_STATE_MAIN_THREAD(mp_pending_exception) = exc;
+    #if MICROPY_ENABLE_SCHEDULER
+    if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
+        MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+    }
+    #endif
+}
+
+#if MICROPY_KBD_EXCEPTION
+// This function may be called asynchronously at any time so only do the bare minimum.
+// Reset the stored KeyboardInterrupt's traceback, then schedule it as the
+// pending exception.  Async-signal callable: does only the bare minimum.
+void MICROPY_WRAP_MP_SCHED_KEYBOARD_INTERRUPT(mp_sched_keyboard_interrupt)(void) {
+    MP_STATE_VM(mp_kbd_exception).traceback = (mp_obj_traceback_t *)&mp_const_empty_traceback_obj;
+    mp_sched_exception(MP_OBJ_FROM_PTR(&MP_STATE_VM(mp_kbd_exception)));
+}
+#endif
+
+#if MICROPY_ENABLE_SCHEDULER
+
+#define IDX_MASK(i) ((i) & (MICROPY_SCHEDULER_DEPTH - 1))
+
+// This is a macro so it is guaranteed to be inlined in functions like
+// mp_sched_schedule that may be located in a special memory region.
+#define mp_sched_full() (mp_sched_num_pending() == MICROPY_SCHEDULER_DEPTH)
+
+// True when no callbacks are queued.  Also statically validates the
+// configured queue depth, which must fit in 8 bits (uint8_t indices are
+// used in mp_sched_schedule) and be a power of 2 (required by IDX_MASK).
+static inline bool mp_sched_empty(void) {
+    MP_STATIC_ASSERT(MICROPY_SCHEDULER_DEPTH <= 255); // MICROPY_SCHEDULER_DEPTH must fit in 8 bits
+    MP_STATIC_ASSERT((IDX_MASK(MICROPY_SCHEDULER_DEPTH) == 0)); // MICROPY_SCHEDULER_DEPTH must be a power of 2
+
+    return mp_sched_num_pending() == 0;
+}
+
+// A variant of this is inlined in the VM at the pending exception check
+// Handle any pending exception and queued scheduler callbacks.  A pending
+// exception takes priority; it is raised only when `raise_exc` is true.
+void mp_handle_pending(bool raise_exc) {
+    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+        mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+        // Re-check state is still pending now that we're in the atomic section.
+        if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+            mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
+            if (obj != MP_OBJ_NULL) {
+                MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
+                if (!mp_sched_num_pending()) {
+                    // No queued callbacks remain, so the scheduler can go idle.
+                    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                }
+                if (raise_exc) {
+                    // Must leave the atomic section before the non-local jump.
+                    MICROPY_END_ATOMIC_SECTION(atomic_state);
+                    nlr_raise(obj);
+                }
+            }
+            // Runs one queued callback (if any); also ends the atomic section.
+            mp_handle_pending_tail(atomic_state);
+        } else {
+            MICROPY_END_ATOMIC_SECTION(atomic_state);
+        }
+    }
+}
+
+// This function should only be called by mp_handle_pending,
+// or by the VM's inlined version of that function.
+// Pop at most one queued callback and run it outside the atomic section
+// (which this function always ends).  The scheduler is locked while the
+// callback runs; callback errors are printed, not propagated.
+void mp_handle_pending_tail(mp_uint_t atomic_state) {
+    MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
+    if (!mp_sched_empty()) {
+        mp_sched_item_t item = MP_STATE_VM(sched_queue)[MP_STATE_VM(sched_idx)];
+        MP_STATE_VM(sched_idx) = IDX_MASK(MP_STATE_VM(sched_idx) + 1);
+        --MP_STATE_VM(sched_len);
+        MICROPY_END_ATOMIC_SECTION(atomic_state);
+        mp_call_function_1_protected(item.func, item.arg);
+    } else {
+        MICROPY_END_ATOMIC_SECTION(atomic_state);
+    }
+    mp_sched_unlock();
+}
+
+// Lock the scheduler so queued callbacks do not run.  Locks nest:
+// sched_state counts further negative for each additional lock.
+void mp_sched_lock(void) {
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    if (MP_STATE_VM(sched_state) < 0) {
+        // Already locked: deepen the nesting count.
+        --MP_STATE_VM(sched_state);
+    } else {
+        MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
+    }
+    MICROPY_END_ATOMIC_SECTION(atomic_state);
+}
+
+// Undo one mp_sched_lock.  When the outermost lock is released, the state
+// becomes PENDING if an exception or callbacks are outstanding, else IDLE.
+void mp_sched_unlock(void) {
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    assert(MP_STATE_VM(sched_state) < 0);
+    if (++MP_STATE_VM(sched_state) == 0) {
+        // vm became unlocked
+        if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL || mp_sched_num_pending()) {
+            MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+        } else {
+            MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+        }
+    }
+    MICROPY_END_ATOMIC_SECTION(atomic_state);
+}
+
+// Queue `function` to be called later with the single argument `arg`.
+// Returns false if the queue is full.  The body runs inside an atomic
+// section, so it is usable from contexts that interrupt the VM.
+bool MICROPY_WRAP_MP_SCHED_SCHEDULE(mp_sched_schedule)(mp_obj_t function, mp_obj_t arg) {
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    bool ret;
+    if (!mp_sched_full()) {
+        if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
+            MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
+        }
+        // Append at (idx + len) modulo the ring size.
+        uint8_t iput = IDX_MASK(MP_STATE_VM(sched_idx) + MP_STATE_VM(sched_len)++);
+        MP_STATE_VM(sched_queue)[iput].func = function;
+        MP_STATE_VM(sched_queue)[iput].arg = arg;
+        MICROPY_SCHED_HOOK_SCHEDULED;
+        ret = true;
+    } else {
+        // schedule queue is full
+        ret = false;
+    }
+    MICROPY_END_ATOMIC_SECTION(atomic_state);
+    return ret;
+}
+
+#else // MICROPY_ENABLE_SCHEDULER
+
+// A variant of this is inlined in the VM at the pending exception check
+// Scheduler-disabled build: only the pending exception needs handling.
+// Note it is cleared even when `raise_exc` is false.
+void mp_handle_pending(bool raise_exc) {
+    if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
+        mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
+        MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
+        if (raise_exc) {
+            nlr_raise(obj);
+        }
+    }
+}
+
+#endif // MICROPY_ENABLE_SCHEDULER
diff --git a/circuitpython/py/scope.c b/circuitpython/py/scope.c
new file mode 100644
index 0000000..f930826
--- /dev/null
+++ b/circuitpython/py/scope.c
@@ -0,0 +1,153 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <assert.h>
+
+#include "py/scope.h"
+
+#if MICROPY_ENABLE_COMPILER
+
+// These low numbered qstrs should fit in 8 bits. See assertions below.
+STATIC const uint8_t scope_simple_name_table[] = { // scope_kind_t -> display-name qstr; FUNCTION/CLASS take their name from the parse node instead
+ [SCOPE_MODULE] = MP_QSTR__lt_module_gt_,
+ [SCOPE_LAMBDA] = MP_QSTR__lt_lambda_gt_,
+ [SCOPE_LIST_COMP] = MP_QSTR__lt_listcomp_gt_,
+ [SCOPE_DICT_COMP] = MP_QSTR__lt_dictcomp_gt_,
+ [SCOPE_SET_COMP] = MP_QSTR__lt_setcomp_gt_,
+ [SCOPE_GEN_EXPR] = MP_QSTR__lt_genexpr_gt_,
+};
+
+scope_t *scope_new(scope_kind_t kind, mp_parse_node_t pn, qstr source_file, mp_uint_t emit_options) { // allocate and initialise a scope; caller links parent/next
+ // Make sure those qstrs indeed fit in an uint8_t.
+ MP_STATIC_ASSERT(MP_QSTR__lt_module_gt_ <= UINT8_MAX);
+ MP_STATIC_ASSERT(MP_QSTR__lt_lambda_gt_ <= UINT8_MAX);
+ MP_STATIC_ASSERT(MP_QSTR__lt_listcomp_gt_ <= UINT8_MAX);
+ MP_STATIC_ASSERT(MP_QSTR__lt_dictcomp_gt_ <= UINT8_MAX);
+ MP_STATIC_ASSERT(MP_QSTR__lt_setcomp_gt_ <= UINT8_MAX);
+ MP_STATIC_ASSERT(MP_QSTR__lt_genexpr_gt_ <= UINT8_MAX);
+
+ scope_t *scope = m_new0(scope_t, 1); // zero-initialised, so unset fields default to 0/NULL
+ scope->kind = kind;
+ scope->pn = pn;
+ scope->source_file = source_file;
+ if (kind == SCOPE_FUNCTION || kind == SCOPE_CLASS) {
+ assert(MP_PARSE_NODE_IS_STRUCT(pn));
+ scope->simple_name = MP_PARSE_NODE_LEAF_ARG(((mp_parse_node_struct_t *)pn)->nodes[0]); // name comes from the def/class parse node
+ } else {
+ scope->simple_name = scope_simple_name_table[kind]; // anonymous scopes get a fixed <...> name
+ }
+ scope->raw_code = mp_emit_glue_new_raw_code();
+ scope->emit_options = emit_options;
+ scope->id_info_alloc = MICROPY_ALLOC_SCOPE_ID_INIT; // initial capacity of the identifier table
+ scope->id_info = m_new(id_info_t, scope->id_info_alloc);
+
+ return scope;
+}
+
+void scope_free(scope_t *scope) { // release the identifier table, then the scope itself
+ m_del(id_info_t, scope->id_info, scope->id_info_alloc);
+ m_del(scope_t, scope, 1);
+}
+
+id_info_t *scope_find_or_add_id(scope_t *scope, qstr qst, id_info_kind_t kind) { // return existing id, or append a new one with the given kind
+ id_info_t *id_info = scope_find(scope, qst);
+ if (id_info != NULL) {
+ return id_info; // note: existing entry keeps its original kind
+ }
+
+ // make sure we have enough memory
+ if (scope->id_info_len >= scope->id_info_alloc) {
+ scope->id_info = m_renew(id_info_t, scope->id_info, scope->id_info_alloc, scope->id_info_alloc + MICROPY_ALLOC_SCOPE_ID_INC); // grow by a fixed increment
+ scope->id_info_alloc += MICROPY_ALLOC_SCOPE_ID_INC;
+ }
+
+ // add new id to end of array of all ids; this seems to match CPython
+ // important thing is that function arguments are first, but that is
+ // handled by the compiler because it adds arguments before compiling the body
+ id_info = &scope->id_info[scope->id_info_len++];
+
+ id_info->kind = kind;
+ id_info->flags = 0;
+ id_info->local_num = 0;
+ id_info->qst = qst;
+ return id_info;
+}
+
+id_info_t *scope_find(scope_t *scope, qstr qst) { // linear search of this scope's id table; NULL if absent
+ for (mp_uint_t i = 0; i < scope->id_info_len; i++) {
+ if (scope->id_info[i].qst == qst) {
+ return &scope->id_info[i];
+ }
+ }
+ return NULL;
+}
+
+id_info_t *scope_find_global(scope_t *scope, qstr qst) { // look up qst in the outermost (module) scope
+ while (scope->parent != NULL) {
+ scope = scope->parent; // walk up to the root scope
+ }
+ return scope_find(scope, qst);
+}
+
+STATIC void scope_close_over_in_parents(scope_t *scope, qstr qst) { // mark qst FREE in each parent up to (and CELL in) its defining scope
+ assert(scope->parent != NULL); // we should have at least 1 parent
+ for (scope_t *s = scope->parent;; s = s->parent) {
+ assert(s->parent != NULL); // we should not get to the outer scope
+ id_info_t *id = scope_find_or_add_id(s, qst, ID_INFO_KIND_UNDECIDED);
+ if (id->kind == ID_INFO_KIND_UNDECIDED) {
+ // variable not previously declared in this scope, so declare it as free and keep searching parents
+ id->kind = ID_INFO_KIND_FREE;
+ } else {
+ // variable is declared in this scope, so finish
+ if (id->kind == ID_INFO_KIND_LOCAL) {
+ // variable local to this scope, close it over
+ id->kind = ID_INFO_KIND_CELL;
+ } else {
+ // ID_INFO_KIND_FREE: variable already closed over in a parent scope
+ // ID_INFO_KIND_CELL: variable already closed over in this scope
+ assert(id->kind == ID_INFO_KIND_FREE || id->kind == ID_INFO_KIND_CELL);
+ }
+ return;
+ }
+ }
+}
+
+void scope_check_to_close_over(scope_t *scope, id_info_t *id) { // if id is defined in an enclosing function scope, turn it into a closure variable
+ if (scope->parent != NULL) {
+ for (scope_t *s = scope->parent; s->parent != NULL; s = s->parent) { // stop before module scope: globals aren't closed over
+ id_info_t *id2 = scope_find(s, id->qst);
+ if (id2 != NULL) {
+ if (id2->kind == ID_INFO_KIND_LOCAL || id2->kind == ID_INFO_KIND_CELL || id2->kind == ID_INFO_KIND_FREE) {
+ id->kind = ID_INFO_KIND_FREE;
+ scope_close_over_in_parents(scope, id->qst);
+ }
+ break; // nearest enclosing declaration decides; stop searching
+ }
+ }
+ }
+}
+
+#endif // MICROPY_ENABLE_COMPILER
diff --git a/circuitpython/py/scope.h b/circuitpython/py/scope.h
new file mode 100644
index 0000000..8b05421
--- /dev/null
+++ b/circuitpython/py/scope.h
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_SCOPE_H
+#define MICROPY_INCLUDED_PY_SCOPE_H
+
+#include "py/parse.h"
+#include "py/emitglue.h"
+
+typedef enum {
+ ID_INFO_KIND_UNDECIDED,
+ ID_INFO_KIND_GLOBAL_IMPLICIT,
+ ID_INFO_KIND_GLOBAL_EXPLICIT,
+ ID_INFO_KIND_LOCAL, // in a function f, written and only referenced by f
+ ID_INFO_KIND_CELL, // in a function f, read/written by children of f
+ ID_INFO_KIND_FREE, // in a function f, belongs to the parent of f
+} id_info_kind_t;
+
+enum {
+ ID_FLAG_IS_PARAM = 0x01,
+ ID_FLAG_IS_STAR_PARAM = 0x02,
+ ID_FLAG_IS_DBL_STAR_PARAM = 0x04,
+ ID_FLAG_VIPER_TYPE_POS = 4, // bit position in flags — presumably where the viper type is packed; confirm in compiler
+};
+
+typedef struct _id_info_t {
+ uint8_t kind; // an id_info_kind_t value
+ uint8_t flags; // ID_FLAG_* bits
+ // when it's an ID_INFO_KIND_LOCAL this is the unique number of the local
+ // when it's an ID_INFO_KIND_CELL/FREE this is the unique number of the closed over variable
+ uint16_t local_num;
+ qstr qst;
+} id_info_t;
+
+#define SCOPE_IS_FUNC_LIKE(s) ((s) >= SCOPE_LAMBDA)
+#define SCOPE_IS_COMP_LIKE(s) (SCOPE_LIST_COMP <= (s) && (s) <= SCOPE_GEN_EXPR)
+
+// scope is a "block" in Python parlance
+typedef enum { // note: ordering is relied on by the SCOPE_IS_*_LIKE macros above
+ SCOPE_MODULE,
+ SCOPE_CLASS,
+ SCOPE_LAMBDA,
+ SCOPE_LIST_COMP,
+ SCOPE_DICT_COMP,
+ SCOPE_SET_COMP,
+ SCOPE_GEN_EXPR,
+ SCOPE_FUNCTION,
+} scope_kind_t;
+
+typedef struct _scope_t {
+ scope_kind_t kind;
+ struct _scope_t *parent; // lexically enclosing scope; NULL for the module scope
+ struct _scope_t *next; // next scope in the compiler's list of all scopes
+ mp_parse_node_t pn;
+ mp_raw_code_t *raw_code;
+ uint16_t source_file; // a qstr
+ uint16_t simple_name; // a qstr
+ uint16_t scope_flags; // see runtime0.h
+ uint16_t emit_options; // see emitglue.h
+ uint16_t num_pos_args;
+ uint16_t num_kwonly_args;
+ uint16_t num_def_pos_args;
+ uint16_t num_locals;
+ uint16_t stack_size; // maximum size of the locals stack
+ uint16_t exc_stack_size; // maximum size of the exception stack
+ uint16_t id_info_alloc; // allocated capacity of id_info
+ uint16_t id_info_len; // number of id_info entries in use
+ id_info_t *id_info;
+} scope_t;
+
+scope_t *scope_new(scope_kind_t kind, mp_parse_node_t pn, qstr source_file, mp_uint_t emit_options);
+void scope_free(scope_t *scope);
+id_info_t *scope_find_or_add_id(scope_t *scope, qstr qstr, id_info_kind_t kind);
+id_info_t *scope_find(scope_t *scope, qstr qstr);
+id_info_t *scope_find_global(scope_t *scope, qstr qstr);
+void scope_check_to_close_over(scope_t *scope, id_info_t *id);
+
+#endif // MICROPY_INCLUDED_PY_SCOPE_H
diff --git a/circuitpython/py/sequence.c b/circuitpython/py/sequence.c
new file mode 100644
index 0000000..ee400cc
--- /dev/null
+++ b/circuitpython/py/sequence.c
@@ -0,0 +1,233 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "py/runtime.h"
+#include "supervisor/shared/translate.h"
+
+// Helpers for sequence types
+
+#define SWAP(type, var1, var2) { type t = var2; var2 = var1; var1 = t; } // classic temp-variable swap; arguments must be side-effect-free lvalues
+
+#if __GNUC__ < 5
+// n.b. does not actually detect overflow!
+#define __builtin_mul_overflow(a, b, x) (*(x) = (a) * (b), false) // compat shim for old GCC: same signature, never reports overflow
+#endif
+
+// Detect when a multiply causes an overflow.
+size_t mp_seq_multiply_len(size_t item_sz, size_t len) { // returns item_sz*len, raising OverflowError if it wraps
+ size_t new_len;
+ if (__builtin_mul_overflow(item_sz, len, &new_len)) {
+ mp_raise_msg(&mp_type_OverflowError, MP_ERROR_TEXT("small int overflow"));
+ }
+ return new_len; // note: on pre-GCC5 builds the shim above never detects overflow
+}
+
+// Implements backend of sequence * integer operation. Assumes elements are
+// memory-adjacent in sequence.
+void mp_seq_multiply(const void *items, size_t item_sz, size_t len, size_t times, void *dest) { // dest must hold times*len*item_sz bytes
+ for (size_t i = 0; i < times; i++) {
+ size_t copy_sz = item_sz * len;
+ memcpy(dest, items, copy_sz);
+ dest = (char *)dest + copy_sz; // advance past the copy just written
+ }
+}
+
+#if MICROPY_PY_BUILTINS_SLICE
+
+bool mp_seq_get_fast_slice_indexes(mp_uint_t len, mp_obj_t slice, mp_bound_slice_t *indexes) { // normalise slice; returns true iff contiguous (step == 1)
+ mp_obj_slice_indices(slice, len, indexes);
+
+ // If the index is negative then stop points to the last item, not after it
+ if (indexes->step < 0) {
+ indexes->stop++;
+ }
+
+ // CPython returns empty sequence in such case, or point for assignment is at start
+ if (indexes->step > 0 && indexes->start > indexes->stop) {
+ indexes->stop = indexes->start; // empty forward slice
+ } else if (indexes->step < 0 && indexes->start < indexes->stop) {
+ indexes->stop = indexes->start + 1; // empty backward slice
+ }
+
+ return indexes->step == 1;
+}
+
+#endif
+
+mp_obj_t mp_seq_extract_slice(size_t len, const mp_obj_t *seq, mp_bound_slice_t *indexes) { // build a new list from seq[start:stop:step]
+ (void)len; // TODO can we remove len from the arg list?
+
+ mp_int_t start = indexes->start, stop = indexes->stop;
+ mp_int_t step = indexes->step;
+
+ mp_obj_t res = mp_obj_new_list(0, NULL);
+
+ if (step < 0) {
+ while (start >= stop) { // stop is inclusive for negative step (see mp_seq_get_fast_slice_indexes)
+ mp_obj_list_append(res, seq[start]);
+ start += step;
+ }
+ } else {
+ while (start < stop) {
+ mp_obj_list_append(res, seq[start]);
+ start += step;
+ }
+ }
+ return res;
+}
+
+// Special-case comparison function for sequences of bytes
+// Don't pass MP_BINARY_OP_NOT_EQUAL here
+bool mp_seq_cmp_bytes(mp_uint_t op, const byte *data1, size_t len1, const byte *data2, size_t len2) { // lexicographic compare under op
+ if (op == MP_BINARY_OP_EQUAL && len1 != len2) {
+ return false; // different lengths can never be equal
+ }
+
+ // Let's deal only with > & >=
+ if (op == MP_BINARY_OP_LESS || op == MP_BINARY_OP_LESS_EQUAL) {
+ SWAP(const byte *, data1, data2); // a < b  <=>  b > a
+ SWAP(size_t, len1, len2);
+ if (op == MP_BINARY_OP_LESS) {
+ op = MP_BINARY_OP_MORE;
+ } else {
+ op = MP_BINARY_OP_MORE_EQUAL;
+ }
+ }
+ size_t min_len = len1 < len2 ? len1 : len2;
+ int res = memcmp(data1, data2, min_len); // compare the common prefix
+ if (op == MP_BINARY_OP_EQUAL) {
+ // If we are checking for equality, here's the answer
+ return res == 0;
+ }
+ if (res < 0) {
+ return false;
+ }
+ if (res > 0) {
+ return true;
+ }
+
+ // If we had tie in the last element...
+ // ... and we have lists of different lengths...
+ if (len1 != len2) {
+ if (len1 < len2) {
+ // ... then longer list length wins (we deal only with >)
+ return false;
+ }
+ } else if (op == MP_BINARY_OP_MORE) {
+ // Otherwise, if we have strict relation, equality means failure
+ return false;
+ }
+ return true;
+}
+
+// Special-case comparison function for sequences of mp_obj_t
+// Don't pass MP_BINARY_OP_NOT_EQUAL here
+bool mp_seq_cmp_objs(mp_uint_t op, const mp_obj_t *items1, size_t len1, const mp_obj_t *items2, size_t len2) { // element-wise compare under op
+ if (op == MP_BINARY_OP_EQUAL && len1 != len2) {
+ return false; // different lengths can never be equal
+ }
+
+ // Let's deal only with > & >=
+ if (op == MP_BINARY_OP_LESS || op == MP_BINARY_OP_LESS_EQUAL) {
+ SWAP(const mp_obj_t *, items1, items2); // a < b  <=>  b > a
+ SWAP(size_t, len1, len2);
+ if (op == MP_BINARY_OP_LESS) {
+ op = MP_BINARY_OP_MORE;
+ } else {
+ op = MP_BINARY_OP_MORE_EQUAL;
+ }
+ }
+
+ size_t len = len1 < len2 ? len1 : len2;
+ for (size_t i = 0; i < len; i++) {
+ // If current elements equal, can't decide anything - go on
+ if (mp_obj_equal(items1[i], items2[i])) {
+ continue;
+ }
+
+ // Otherwise, if they are not equal, we can have final decision based on them
+ if (op == MP_BINARY_OP_EQUAL) {
+ // In particular, if we are checking for equality, here's the answer
+ return false;
+ }
+
+ // Otherwise, application of relation op gives the answer
+ return mp_binary_op(op, items1[i], items2[i]) == mp_const_true;
+ }
+
+ // If we had tie in the last element...
+ // ... and we have lists of different lengths...
+ if (len1 != len2) {
+ if (len1 < len2) {
+ // ... then longer list length wins (we deal only with >)
+ return false;
+ }
+ } else if (op == MP_BINARY_OP_MORE) {
+ // Otherwise, if we have strict relation, sequence equality means failure
+ return false;
+ }
+
+ return true;
+}
+
+// Special-case of index() which searches for mp_obj_t
+mp_obj_t mp_seq_index_obj(const mp_obj_t *items, size_t len, size_t n_args, const mp_obj_t *args) { // args: (seq, value[, start[, stop]]); raises ValueError if absent
+ const mp_obj_type_t *type = mp_obj_get_type(args[0]);
+ mp_obj_t value = args[1];
+ size_t start = 0;
+ size_t stop = len;
+
+ if (n_args >= 3) {
+ start = mp_get_index(type, len, args[2], true); // true: clamp/normalise negative indices
+ if (n_args >= 4) {
+ stop = mp_get_index(type, len, args[3], true);
+ }
+ }
+
+ for (size_t i = start; i < stop; i++) {
+ if (mp_obj_equal(items[i], value)) {
+ // Common sense says this cannot overflow small int
+ return MP_OBJ_NEW_SMALL_INT(i);
+ }
+ }
+
+ mp_raise_ValueError(MP_ERROR_TEXT("object not in sequence"));
+}
+
+mp_obj_t mp_seq_count_obj(const mp_obj_t *items, size_t len, mp_obj_t value) { // number of items equal to value, as a small int
+ size_t count = 0;
+ for (size_t i = 0; i < len; i++) {
+ if (mp_obj_equal(items[i], value)) {
+ count++;
+ }
+ }
+
+ // Common sense says this cannot overflow small int
+ return MP_OBJ_NEW_SMALL_INT(count);
+}
diff --git a/circuitpython/py/showbc.c b/circuitpython/py/showbc.c
new file mode 100644
index 0000000..1d02b55
--- /dev/null
+++ b/circuitpython/py/showbc.c
@@ -0,0 +1,548 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+#include "py/bc0.h"
+#include "py/bc.h"
+
+#if MICROPY_DEBUG_PRINTERS
+
+#define DECODE_UINT { \
+ unum = 0; \
+ do { \
+ unum = (unum << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0); \
+} // variable-length unsigned int: 7 bits per byte, high bit set means continue
+#define DECODE_ULABEL do { unum = (ip[0] | (ip[1] << 8)); ip += 2; } while (0) // 16-bit little-endian forward offset
+#define DECODE_SLABEL do { unum = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2; } while (0) // 16-bit offset biased by 0x8000 so it can be negative
+
+#if MICROPY_PERSISTENT_CODE // persistent code stores qstrs/ptrs/objs as fixed-width table indices
+
+#define DECODE_QSTR \
+ qst = ip[0] | ip[1] << 8; \
+ ip += 2;
+#define DECODE_PTR \
+ DECODE_UINT; \
+ unum = mp_showbc_const_table[unum]
+#define DECODE_OBJ \
+ DECODE_UINT; \
+ unum = mp_showbc_const_table[unum]
+
+#else // non-persistent code embeds qstrs as varints and ptrs/objs inline, aligned
+
+#define DECODE_QSTR { \
+ qst = 0; \
+ do { \
+ qst = (qst << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0); \
+}
+#define DECODE_PTR do { \
+ ip = (byte *)MP_ALIGN(ip, sizeof(void *)); \
+ unum = (uintptr_t)*(void **)ip; \
+ ip += sizeof(void *); \
+} while (0)
+#define DECODE_OBJ do { \
+ ip = (byte *)MP_ALIGN(ip, sizeof(mp_obj_t)); \
+ unum = (mp_uint_t)*(mp_obj_t *)ip; \
+ ip += sizeof(mp_obj_t); \
+} while (0)
+
+#endif
+
+const byte *mp_showbc_code_start; // start of the bytecode being printed, for offset display
+const mp_uint_t *mp_showbc_const_table; // constant table of the code being printed
+
+void mp_bytecode_print(const mp_print_t *print, const void *descr, const byte *ip, mp_uint_t len, const mp_uint_t *const_table) { // disassemble a whole code block: header, prelude, line map, instructions
+ mp_showbc_code_start = ip;
+
+ // Decode prelude
+ MP_BC_PRELUDE_SIG_DECODE(ip); // declares n_state, n_exc_stack, n_pos_args, n_kwonly_args, etc. — see bc.h
+ MP_BC_PRELUDE_SIZE_DECODE(ip); // declares n_info, n_cell — see bc.h
+ const byte *code_info = ip;
+
+ #if MICROPY_PERSISTENT_CODE
+ qstr block_name = code_info[0] | (code_info[1] << 8); // 16-bit little-endian qstr indices
+ qstr source_file = code_info[2] | (code_info[3] << 8);
+ code_info += 4;
+ #else
+ qstr block_name = mp_decode_uint(&code_info);
+ qstr source_file = mp_decode_uint(&code_info);
+ #endif
+ mp_printf(print, "File %s, code block '%s' (descriptor: %p, bytecode @%p " UINT_FMT " bytes)\n",
+ qstr_str(source_file), qstr_str(block_name), descr, mp_showbc_code_start, len);
+
+ // raw bytecode dump
+ size_t prelude_size = ip - mp_showbc_code_start + n_info + n_cell;
+ mp_printf(print, "Raw bytecode (code_info_size=%u, bytecode_size=%u):\n",
+ (unsigned)prelude_size, (unsigned)(len - prelude_size));
+ for (mp_uint_t i = 0; i < len; i++) {
+ if (i > 0 && i % 16 == 0) { // 16 bytes per dump row
+ mp_printf(print, "\n");
+ }
+ mp_printf(print, " %02x", mp_showbc_code_start[i]);
+ }
+ mp_printf(print, "\n");
+
+ // bytecode prelude: arg names (as qstr objects)
+ mp_printf(print, "arg names:");
+ for (mp_uint_t i = 0; i < n_pos_args + n_kwonly_args; i++) {
+ mp_printf(print, " %s", qstr_str(MP_OBJ_QSTR_VALUE(const_table[i]))); // arg names occupy the first const_table slots
+ }
+ mp_printf(print, "\n");
+
+ mp_printf(print, "(N_STATE %u)\n", (unsigned)n_state);
+ mp_printf(print, "(N_EXC_STACK %u)\n", (unsigned)n_exc_stack);
+
+ // skip over code_info
+ ip += n_info;
+
+ // bytecode prelude: initialise closed over variables
+ for (size_t i = 0; i < n_cell; ++i) {
+ uint local_num = *ip++;
+ mp_printf(print, "(INIT_CELL %u)\n", local_num);
+ }
+
+ // print out line number info
+ {
+ mp_int_t bc = 0;
+ mp_uint_t source_line = 1;
+ mp_printf(print, "  bc=" INT_FMT " line=" UINT_FMT "\n", bc, source_line);
+ for (const byte *ci = code_info; *ci;) {
+ if ((ci[0] & 0x80) == 0) {
+ // 0b0LLBBBBB encoding
+ bc += ci[0] & 0x1f; // 5 bits of bytecode delta
+ source_line += ci[0] >> 5; // 2 bits of line delta
+ ci += 1;
+ } else {
+ // 0b1LLLBBBB 0bLLLLLLLL encoding (l's LSB in second byte)
+ bc += ci[0] & 0xf;
+ source_line += ((ci[0] << 4) & 0x700) | ci[1];
+ ci += 2;
+ }
+ mp_printf(print, "  bc=" INT_FMT " line=" UINT_FMT "\n", bc, source_line);
+ }
+ }
+ mp_bytecode_print2(print, ip, len - prelude_size, const_table); // finally, the instruction listing
+}
+
+const byte *mp_bytecode_print_str(const mp_print_t *print, const byte *ip) { // decode and print one instruction; returns pointer just past it
+ mp_uint_t unum;
+ qstr qst;
+
+ switch (*ip++) {
+ case MP_BC_LOAD_CONST_FALSE:
+ mp_printf(print, "LOAD_CONST_FALSE");
+ break;
+
+ case MP_BC_LOAD_CONST_NONE:
+ mp_printf(print, "LOAD_CONST_NONE");
+ break;
+
+ case MP_BC_LOAD_CONST_TRUE:
+ mp_printf(print, "LOAD_CONST_TRUE");
+ break;
+
+ case MP_BC_LOAD_CONST_SMALL_INT: { // variable-length int: 7 bits per byte, sign taken from bit 6 of the first byte
+ mp_int_t num = 0;
+ if ((ip[0] & 0x40) != 0) {
+ // Number is negative
+ num--;
+ }
+ do {
+ num = ((mp_uint_t)num << 7) | (*ip & 0x7f); // unsigned shift avoids signed-overflow UB
+ } while ((*ip++ & 0x80) != 0);
+ mp_printf(print, "LOAD_CONST_SMALL_INT " INT_FMT, num);
+ break;
+ }
+
+ case MP_BC_LOAD_CONST_STRING:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_CONST_STRING '%s'", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_CONST_OBJ:
+ DECODE_OBJ;
+ mp_printf(print, "LOAD_CONST_OBJ %p=", MP_OBJ_TO_PTR(unum));
+ mp_obj_print_helper(print, (mp_obj_t)unum, PRINT_REPR);
+ break;
+
+ case MP_BC_LOAD_NULL:
+ mp_printf(print, "LOAD_NULL");
+ break;
+
+ case MP_BC_LOAD_FAST_N:
+ DECODE_UINT;
+ mp_printf(print, "LOAD_FAST_N " UINT_FMT, unum);
+ break;
+
+ case MP_BC_LOAD_DEREF:
+ DECODE_UINT;
+ mp_printf(print, "LOAD_DEREF " UINT_FMT, unum);
+ break;
+
+ case MP_BC_LOAD_NAME:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_NAME %s", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_GLOBAL:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_GLOBAL %s", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_ATTR:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_ATTR %s", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_METHOD:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_METHOD %s", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_SUPER_METHOD:
+ DECODE_QSTR;
+ mp_printf(print, "LOAD_SUPER_METHOD %s", qstr_str(qst));
+ break;
+
+ case MP_BC_LOAD_BUILD_CLASS:
+ mp_printf(print, "LOAD_BUILD_CLASS");
+ break;
+
+ case MP_BC_LOAD_SUBSCR:
+ mp_printf(print, "LOAD_SUBSCR");
+ break;
+
+ case MP_BC_STORE_FAST_N:
+ DECODE_UINT;
+ mp_printf(print, "STORE_FAST_N " UINT_FMT, unum);
+ break;
+
+ case MP_BC_STORE_DEREF:
+ DECODE_UINT;
+ mp_printf(print, "STORE_DEREF " UINT_FMT, unum);
+ break;
+
+ case MP_BC_STORE_NAME:
+ DECODE_QSTR;
+ mp_printf(print, "STORE_NAME %s", qstr_str(qst));
+ break;
+
+ case MP_BC_STORE_GLOBAL:
+ DECODE_QSTR;
+ mp_printf(print, "STORE_GLOBAL %s", qstr_str(qst));
+ break;
+
+ case MP_BC_STORE_ATTR:
+ DECODE_QSTR;
+ mp_printf(print, "STORE_ATTR %s", qstr_str(qst));
+ break;
+
+ case MP_BC_STORE_SUBSCR:
+ mp_printf(print, "STORE_SUBSCR");
+ break;
+
+ case MP_BC_DELETE_FAST:
+ DECODE_UINT;
+ mp_printf(print, "DELETE_FAST " UINT_FMT, unum);
+ break;
+
+ case MP_BC_DELETE_DEREF:
+ DECODE_UINT;
+ mp_printf(print, "DELETE_DEREF " UINT_FMT, unum);
+ break;
+
+ case MP_BC_DELETE_NAME:
+ DECODE_QSTR;
+ mp_printf(print, "DELETE_NAME %s", qstr_str(qst));
+ break;
+
+ case MP_BC_DELETE_GLOBAL:
+ DECODE_QSTR;
+ mp_printf(print, "DELETE_GLOBAL %s", qstr_str(qst));
+ break;
+
+ case MP_BC_DUP_TOP:
+ mp_printf(print, "DUP_TOP");
+ break;
+
+ case MP_BC_DUP_TOP_TWO:
+ mp_printf(print, "DUP_TOP_TWO");
+ break;
+
+ case MP_BC_POP_TOP:
+ mp_printf(print, "POP_TOP");
+ break;
+
+ case MP_BC_ROT_TWO:
+ mp_printf(print, "ROT_TWO");
+ break;
+
+ case MP_BC_ROT_THREE:
+ mp_printf(print, "ROT_THREE");
+ break;
+
+ case MP_BC_JUMP: // jump targets are printed as absolute offsets from the start of the bytecode
+ DECODE_SLABEL;
+ mp_printf(print, "JUMP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_POP_JUMP_IF_TRUE:
+ DECODE_SLABEL;
+ mp_printf(print, "POP_JUMP_IF_TRUE " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_POP_JUMP_IF_FALSE:
+ DECODE_SLABEL;
+ mp_printf(print, "POP_JUMP_IF_FALSE " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_JUMP_IF_TRUE_OR_POP:
+ DECODE_SLABEL;
+ mp_printf(print, "JUMP_IF_TRUE_OR_POP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_JUMP_IF_FALSE_OR_POP:
+ DECODE_SLABEL;
+ mp_printf(print, "JUMP_IF_FALSE_OR_POP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_SETUP_WITH:
+ DECODE_ULABEL; // loop-like labels are always forward
+ mp_printf(print, "SETUP_WITH " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_WITH_CLEANUP:
+ mp_printf(print, "WITH_CLEANUP");
+ break;
+
+ case MP_BC_UNWIND_JUMP:
+ DECODE_SLABEL;
+ mp_printf(print, "UNWIND_JUMP " UINT_FMT " %d", (mp_uint_t)(ip + unum - mp_showbc_code_start), *ip); // extra byte: number of exception handlers to unwind
+ ip += 1;
+ break;
+
+ case MP_BC_SETUP_EXCEPT:
+ DECODE_ULABEL; // except labels are always forward
+ mp_printf(print, "SETUP_EXCEPT " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_SETUP_FINALLY:
+ DECODE_ULABEL; // except labels are always forward
+ mp_printf(print, "SETUP_FINALLY " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_END_FINALLY:
+ // if TOS is an exception, reraises the exception (3 values on TOS)
+ // if TOS is an integer, does something else
+ // if TOS is None, just pops it and continues
+ // else error
+ mp_printf(print, "END_FINALLY");
+ break;
+
+ case MP_BC_GET_ITER:
+ mp_printf(print, "GET_ITER");
+ break;
+
+ case MP_BC_GET_ITER_STACK:
+ mp_printf(print, "GET_ITER_STACK");
+ break;
+
+ case MP_BC_FOR_ITER:
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ mp_printf(print, "FOR_ITER " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_POP_EXCEPT_JUMP:
+ DECODE_ULABEL; // these labels are always forward
+ mp_printf(print, "POP_EXCEPT_JUMP " UINT_FMT, (mp_uint_t)(ip + unum - mp_showbc_code_start));
+ break;
+
+ case MP_BC_BUILD_TUPLE:
+ DECODE_UINT;
+ mp_printf(print, "BUILD_TUPLE " UINT_FMT, unum);
+ break;
+
+ case MP_BC_BUILD_LIST:
+ DECODE_UINT;
+ mp_printf(print, "BUILD_LIST " UINT_FMT, unum);
+ break;
+
+ case MP_BC_BUILD_MAP:
+ DECODE_UINT;
+ mp_printf(print, "BUILD_MAP " UINT_FMT, unum);
+ break;
+
+ case MP_BC_STORE_MAP:
+ mp_printf(print, "STORE_MAP");
+ break;
+
+ case MP_BC_BUILD_SET:
+ DECODE_UINT;
+ mp_printf(print, "BUILD_SET " UINT_FMT, unum);
+ break;
+
+ #if MICROPY_PY_BUILTINS_SLICE
+ case MP_BC_BUILD_SLICE:
+ DECODE_UINT;
+ mp_printf(print, "BUILD_SLICE " UINT_FMT, unum);
+ break;
+ #endif
+
+ case MP_BC_STORE_COMP:
+ DECODE_UINT;
+ mp_printf(print, "STORE_COMP " UINT_FMT, unum);
+ break;
+
+ case MP_BC_UNPACK_SEQUENCE:
+ DECODE_UINT;
+ mp_printf(print, "UNPACK_SEQUENCE " UINT_FMT, unum);
+ break;
+
+ case MP_BC_UNPACK_EX:
+ DECODE_UINT;
+ mp_printf(print, "UNPACK_EX " UINT_FMT, unum);
+ break;
+
+ case MP_BC_MAKE_FUNCTION:
+ DECODE_PTR;
+ mp_printf(print, "MAKE_FUNCTION %p", (void *)(uintptr_t)unum);
+ break;
+
+ case MP_BC_MAKE_FUNCTION_DEFARGS:
+ DECODE_PTR;
+ mp_printf(print, "MAKE_FUNCTION_DEFARGS %p", (void *)(uintptr_t)unum);
+ break;
+
+ case MP_BC_MAKE_CLOSURE: {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++; // extra byte: number of closed-over variables
+ mp_printf(print, "MAKE_CLOSURE %p " UINT_FMT, (void *)(uintptr_t)unum, n_closed_over);
+ break;
+ }
+
+ case MP_BC_MAKE_CLOSURE_DEFARGS: {
+ DECODE_PTR;
+ mp_uint_t n_closed_over = *ip++;
+ mp_printf(print, "MAKE_CLOSURE_DEFARGS %p " UINT_FMT, (void *)(uintptr_t)unum, n_closed_over);
+ break;
+ }
+
+ case MP_BC_CALL_FUNCTION: // low byte: positional args, next byte: keyword args
+ DECODE_UINT;
+ mp_printf(print, "CALL_FUNCTION n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_FUNCTION_VAR_KW:
+ DECODE_UINT;
+ mp_printf(print, "CALL_FUNCTION_VAR_KW n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_METHOD:
+ DECODE_UINT;
+ mp_printf(print, "CALL_METHOD n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_CALL_METHOD_VAR_KW:
+ DECODE_UINT;
+ mp_printf(print, "CALL_METHOD_VAR_KW n=" UINT_FMT " nkw=" UINT_FMT, unum & 0xff, (unum >> 8) & 0xff);
+ break;
+
+ case MP_BC_RETURN_VALUE:
+ mp_printf(print, "RETURN_VALUE");
+ break;
+
+ case MP_BC_RAISE_LAST:
+ mp_printf(print, "RAISE_LAST");
+ break;
+
+ case MP_BC_RAISE_OBJ:
+ mp_printf(print, "RAISE_OBJ");
+ break;
+
+ case MP_BC_RAISE_FROM:
+ mp_printf(print, "RAISE_FROM");
+ break;
+
+ case MP_BC_YIELD_VALUE:
+ mp_printf(print, "YIELD_VALUE");
+ break;
+
+ case MP_BC_YIELD_FROM:
+ mp_printf(print, "YIELD_FROM");
+ break;
+
+ case MP_BC_IMPORT_NAME:
+ DECODE_QSTR;
+ mp_printf(print, "IMPORT_NAME '%s'", qstr_str(qst));
+ break;
+
+ case MP_BC_IMPORT_FROM:
+ DECODE_QSTR;
+ mp_printf(print, "IMPORT_FROM '%s'", qstr_str(qst));
+ break;
+
+ case MP_BC_IMPORT_STAR:
+ mp_printf(print, "IMPORT_STAR");
+ break;
+
+ default: // remaining opcodes are single-byte "multi" encodings: operand packed into the opcode itself
+ if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + 64) {
+ mp_printf(print, "LOAD_CONST_SMALL_INT " INT_FMT, (mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - 16); // range is biased by 16 to allow small negatives
+ } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + 16) {
+ mp_printf(print, "LOAD_FAST " UINT_FMT, (mp_uint_t)ip[-1] - MP_BC_LOAD_FAST_MULTI);
+ } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + 16) {
+ mp_printf(print, "STORE_FAST " UINT_FMT, (mp_uint_t)ip[-1] - MP_BC_STORE_FAST_MULTI);
+ } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_UNARY_OP_NUM_BYTECODE) {
+ mp_uint_t op = ip[-1] - MP_BC_UNARY_OP_MULTI;
+ mp_printf(print, "UNARY_OP " UINT_FMT " %s", op, qstr_str(mp_unary_op_method_name[op]));
+ } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BINARY_OP_NUM_BYTECODE) {
+ mp_uint_t op = ip[-1] - MP_BC_BINARY_OP_MULTI;
+ mp_printf(print, "BINARY_OP " UINT_FMT " %s", op, qstr_str(mp_binary_op_method_name[op]));
+ } else {
+ mp_printf(print, "code %p, byte code 0x%02x not implemented\n", ip - 1, ip[-1]);
+ assert(0); // unknown opcode: debug builds abort, release builds return what we have
+ return ip;
+ }
+ break;
+ }
+
+ return ip;
+}
+
+void mp_bytecode_print2(const mp_print_t *print, const byte *ip, size_t len, const mp_uint_t *const_table) { // print len bytes of instructions, one per line with its offset
+ mp_showbc_code_start = ip;
+ mp_showbc_const_table = const_table;
+ while (ip < len + mp_showbc_code_start) {
+ mp_printf(print, "%02u ", (uint)(ip - mp_showbc_code_start)); // offset of this instruction
+ ip = mp_bytecode_print_str(print, ip);
+ mp_printf(print, "\n");
+ }
+}
+
+#endif // MICROPY_DEBUG_PRINTERS
diff --git a/circuitpython/py/smallint.c b/circuitpython/py/smallint.c
new file mode 100644
index 0000000..9124b76
--- /dev/null
+++ b/circuitpython/py/smallint.c
@@ -0,0 +1,75 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/smallint.h"
+
+// Return true if x * y would overflow the small-int range
+// [MP_SMALL_INT_MIN, MP_SMALL_INT_MAX].  The test divides instead of
+// multiplying, so the check itself can never overflow.
+bool mp_small_int_mul_overflow(mp_int_t x, mp_int_t y) {
+    // Check for multiply overflow; see CERT INT32-C
+    if (x > 0) { // x is positive
+        if (y > 0) { // x and y are positive
+            if (x > (MP_SMALL_INT_MAX / y)) {
+                return true;
+            }
+        } else { // x positive, y nonpositive
+            if (y < (MP_SMALL_INT_MIN / x)) {
+                return true;
+            }
+        } // x positive, y nonpositive
+    } else { // x is nonpositive
+        if (y > 0) { // x is nonpositive, y is positive
+            if (x < (MP_SMALL_INT_MIN / y)) {
+                return true;
+            }
+        } else { // x and y are nonpositive
+            if (x != 0 && y < (MP_SMALL_INT_MAX / x)) {
+                return true;
+            }
+        } // End if x and y are nonpositive
+    } // End if x is nonpositive
+    return false;
+}
+
+// Python-semantics modulo: C's % gives a remainder with the sign of the
+// dividend, so when the signs of remainder and divisor differ, shift the
+// result by one divisor to flip its sign.
+// NOTE(review): divisor must be nonzero (division by zero is UB) -- callers
+// are presumably expected to check; confirm at call sites.
+mp_int_t mp_small_int_modulo(mp_int_t dividend, mp_int_t divisor) {
+    // Python specs require that mod has same sign as second operand
+    dividend %= divisor;
+    if ((dividend < 0 && divisor > 0) || (dividend > 0 && divisor < 0)) {
+        dividend += divisor;
+    }
+    return dividend;
+}
+
+// Floor division (round toward negative infinity).  C's / truncates toward
+// zero, which differs from floor only when the operand signs differ; biasing
+// the numerator by |denom|-1 in that case makes plain / yield the floor.
+// NOTE(review): assumes operands are in small-int range so the bias cannot
+// overflow mp_int_t, and denom != 0 -- confirm at call sites.
+mp_int_t mp_small_int_floor_divide(mp_int_t num, mp_int_t denom) {
+    if (num >= 0) {
+        if (denom < 0) {
+            num += -denom - 1;
+        }
+    } else {
+        if (denom >= 0) {
+            num += -denom + 1;
+        }
+    }
+    return num / denom;
+}
diff --git a/circuitpython/py/smallint.h b/circuitpython/py/smallint.h
new file mode 100644
index 0000000..2fcd3fb
--- /dev/null
+++ b/circuitpython/py/smallint.h
@@ -0,0 +1,68 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_SMALLINT_H
+#define MICROPY_INCLUDED_PY_SMALLINT_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+// Functions for small integer arithmetic
+
+#ifndef MP_SMALL_INT_MIN
+
+// In SMALL_INT, the next-to-highest bit is used as sign, so both top bits
+// must match for a value to be in range.
+#if MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_A || MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_C
+
+#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)MP_OBJ_WORD_MSBIT_HIGH) >> 1))
+#define MP_SMALL_INT_FITS(n) ((((n) ^ ((mp_uint_t)(n) << 1)) & MP_OBJ_WORD_MSBIT_HIGH) == 0)
+// Mask to truncate mp_int_t to positive value
+#define MP_SMALL_INT_POSITIVE_MASK ~(MP_OBJ_WORD_MSBIT_HIGH | (MP_OBJ_WORD_MSBIT_HIGH >> 1))
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_B
+
+#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)MP_OBJ_WORD_MSBIT_HIGH) >> 2))
+#define MP_SMALL_INT_FITS(n) ((((n) & MP_SMALL_INT_MIN) == 0) || (((n) & MP_SMALL_INT_MIN) == MP_SMALL_INT_MIN))
+// Mask to truncate mp_int_t to positive value
+#define MP_SMALL_INT_POSITIVE_MASK ~(MP_OBJ_WORD_MSBIT_HIGH | (MP_OBJ_WORD_MSBIT_HIGH >> 1) | (MP_OBJ_WORD_MSBIT_HIGH >> 2))
+
+#elif MICROPY_OBJ_REPR == MICROPY_OBJ_REPR_D
+
+#define MP_SMALL_INT_MIN ((mp_int_t)(((mp_int_t)0xffff800000000000) >> 1))
+#define MP_SMALL_INT_FITS(n) ((((n) ^ ((n) << 1)) & 0xffff800000000000) == 0)
+// Mask to truncate mp_int_t to positive value
+#define MP_SMALL_INT_POSITIVE_MASK ~(0xffff800000000000 | (0xffff800000000000 >> 1))
+
+#endif
+
+#endif
+
+#define MP_SMALL_INT_MAX ((mp_int_t)(~(MP_SMALL_INT_MIN)))
+
+// True if x * y would overflow the small-int range (overflow-safe check).
+bool mp_small_int_mul_overflow(mp_int_t x, mp_int_t y);
+// Python-semantics modulo: result has the sign of `divisor`.
+mp_int_t mp_small_int_modulo(mp_int_t dividend, mp_int_t divisor);
+// Floor division (rounds toward negative infinity, unlike C's /).
+mp_int_t mp_small_int_floor_divide(mp_int_t num, mp_int_t denom);
+
+#endif // MICROPY_INCLUDED_PY_SMALLINT_H
diff --git a/circuitpython/py/stackctrl.c b/circuitpython/py/stackctrl.c
new file mode 100644
index 0000000..d699d6d
--- /dev/null
+++ b/circuitpython/py/stackctrl.c
@@ -0,0 +1,99 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "py/runtime.h"
+#include "py/stackctrl.h"
+
+// Record the current stack position as the top of stack for this thread,
+// using the address of a local variable as the reference point.
+void mp_stack_ctrl_init(void) {
+    // Force routine to not be inlined. Better guarantee than MP_NOINLINE for -flto.
+    __asm volatile ("");
+    volatile int stack_dummy;
+    MP_STATE_THREAD(stack_top) = (char *)&stack_dummy;
+}
+
+// Explicitly set the recorded top-of-stack for this thread (used when the
+// caller knows the real stack base rather than deriving it from a local).
+void mp_stack_set_top(void *top) {
+    MP_STATE_THREAD(stack_top) = top;
+}
+
+// Return the number of bytes of stack currently in use, measured from the
+// recorded stack_top down to the address of a local in this frame.
+mp_uint_t mp_stack_usage(void) {
+    // Assumes descending stack
+    // Force routine to not be inlined. Better guarantee than MP_NOINLINE for -flto.
+    __asm volatile ("");
+    volatile int stack_dummy;
+    return MP_STATE_THREAD(stack_top) - (char *)&stack_dummy;
+}
+
+#if MICROPY_STACK_CHECK
+
+// Set the per-thread stack usage limit (bytes) checked by mp_stack_check().
+void mp_stack_set_limit(mp_uint_t limit) {
+    MP_STATE_THREAD(stack_limit) = limit;
+}
+
+// Raise RecursionError if current stack usage has reached the limit.
+void mp_stack_check(void) {
+    if (mp_stack_usage() >= MP_STATE_THREAD(stack_limit)) {
+        mp_raise_recursion_depth();
+    }
+}
+
+#endif // MICROPY_STACK_CHECK
+
+#if MICROPY_MAX_STACK_USAGE
+
+// Fill stack space with this unusual value.
+const char MP_MAX_STACK_USAGE_SENTINEL_BYTE = 0xEE;
+
+// Record absolute bottom (logical limit) of stack.
+void mp_stack_set_bottom(void *stack_bottom) {
+    MP_STATE_THREAD(stack_bottom) = stack_bottom;
+}
+
+// Return the current frame pointer. This can be used as an
+// approximation for the stack pointer of the _calling_ function.
+// This routine must not be inlined. This method is
+// architecture-independent, as opposed to using asm("sp") or similar.
+//
+// The stack_dummy approach used elsewhere in this file is not safe in
+// all cases. That value may be below the actual top of the stack.
+static void *approx_stack_pointer(void) {
+    __asm volatile ("");
+    return __builtin_frame_address(0);
+}
+
+// Fill stack space down toward the stack limit with a known unusual value,
+// so that peak stack usage can later be measured by scanning for the first
+// overwritten sentinel byte.
+void mp_stack_fill_with_sentinel(void) {
+    // Force routine to not be inlined. Better guarantee than MP_NOINLINE for -flto.
+    __asm volatile ("");
+    // Start filling stack just below the current stack frame.
+    // Continue until we've hit the bottom of the stack (lowest address,
+    // logical "ceiling" of stack).
+    char *p = (char *)approx_stack_pointer() - 1;
+
+    while (p >= MP_STATE_THREAD(stack_bottom)) {
+        *p-- = MP_MAX_STACK_USAGE_SENTINEL_BYTE;
+    }
+}
+
+#endif // MICROPY_MAX_STACK_USAGE
diff --git a/circuitpython/py/stackctrl.h b/circuitpython/py/stackctrl.h
new file mode 100644
index 0000000..a3dfadb
--- /dev/null
+++ b/circuitpython/py/stackctrl.h
@@ -0,0 +1,56 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_STACKCTRL_H
+#define MICROPY_INCLUDED_PY_STACKCTRL_H
+
+#include "py/mpconfig.h"
+
+// Record/override the top of stack and query current usage (descending
+// stack assumed); implementations are in stackctrl.c.
+void mp_stack_ctrl_init(void);
+void mp_stack_set_top(void *top);
+mp_uint_t mp_stack_usage(void);
+
+#if MICROPY_STACK_CHECK
+
+void mp_stack_set_limit(mp_uint_t limit);
+void mp_stack_check(void);
+#define MP_STACK_CHECK() mp_stack_check()
+
+#else
+
+#define mp_stack_set_limit(limit) (void)(limit)
+#define MP_STACK_CHECK()
+
+#endif
+
+#if MICROPY_MAX_STACK_USAGE
+
+// Declared extern: the definition lives in stackctrl.c.  Without extern this
+// is a tentative definition with external linkage, duplicated in every
+// translation unit that includes this header (a link error under
+// -fno-common, the default since GCC 10).
+extern const char MP_MAX_STACK_USAGE_SENTINEL_BYTE;
+void mp_stack_set_bottom(void *stack_bottom);
+void mp_stack_fill_with_sentinel(void);
+
+#endif
+
+#endif // MICROPY_INCLUDED_PY_STACKCTRL_H
diff --git a/circuitpython/py/stream.c b/circuitpython/py/stream.c
new file mode 100644
index 0000000..b12d693
--- /dev/null
+++ b/circuitpython/py/stream.c
@@ -0,0 +1,583 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <string.h>
+#include <unistd.h>
+
+#include "py/objstr.h"
+#include "py/stream.h"
+#include "py/runtime.h"
+#include "supervisor/shared/translate.h"
+
+// This file defines generic Python stream read/write methods which
+// dispatch to the underlying stream interface of an object.
+
+// TODO: should be in mpconfig.h
+#define DEFAULT_BUFFER_SIZE 256
+
+STATIC mp_obj_t stream_readall(mp_obj_t self_in);
+
+#define STREAM_CONTENT_TYPE(stream) (((stream)->is_text) ? &mp_type_str : &mp_type_bytes)
+
+// Returns error condition in *errcode, if non-zero, return value is number of bytes written
+// before error condition occurred. If *errcode == 0, returns total bytes written (which will
+// be equal to input size).
+// `flags` selects direction (MP_STREAM_RW_WRITE vs read) and whether to stop
+// after a single underlying call (MP_STREAM_RW_ONCE) instead of looping until
+// `size` bytes are transferred.
+mp_uint_t mp_stream_rw(mp_obj_t stream, void *buf_, mp_uint_t size, int *errcode, byte flags) {
+    byte *buf = buf_;
+    typedef mp_uint_t (*io_func_t)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode);
+    io_func_t io_func;
+    const mp_stream_p_t *stream_p = mp_get_stream(stream);
+    if (flags & MP_STREAM_RW_WRITE) {
+        // write takes a const buffer, hence the cast to the common signature
+        io_func = (io_func_t)stream_p->write;
+    } else {
+        io_func = stream_p->read;
+    }
+
+    *errcode = 0;
+    mp_uint_t done = 0;
+    while (size > 0) {
+        mp_uint_t out_sz = io_func(stream, buf, size, errcode);
+        // For read, out_sz == 0 means EOF. For write, it's unspecified
+        // what it means, but we don't make any progress, so returning
+        // is still the best option.
+        if (out_sz == 0) {
+            return done;
+        }
+        if (out_sz == MP_STREAM_ERROR) {
+            // If we read something before getting EAGAIN, don't leak it
+            if (mp_is_nonblocking_error(*errcode) && done != 0) {
+                *errcode = 0;
+            }
+            return done;
+        }
+        if (flags & MP_STREAM_RW_ONCE) {
+            return out_sz;
+        }
+
+        buf += out_sz;
+        size -= out_sz;
+        done += out_sz;
+    }
+    return done;
+}
+
+// Look up the stream protocol struct of `self`.
+// NOTE(review): mp_proto_get presumably returns NULL when the object does not
+// implement the stream protocol (the NULL check in mp_get_stream_raise relies
+// on this) -- confirm against py/proto.c.
+const mp_stream_p_t *mp_get_stream(mp_const_obj_t self) {
+    return mp_proto_get(MP_QSTR_protocol_stream, self);
+}
+
+// Like mp_get_stream, but raise OSError if the object has no stream protocol
+// or lacks any of the operations requested in `flags`
+// (MP_STREAM_OP_READ / MP_STREAM_OP_WRITE / MP_STREAM_OP_IOCTL).
+const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags) {
+    const mp_stream_p_t *stream_p = mp_proto_get(MP_QSTR_protocol_stream, self_in);
+    if (stream_p == NULL
+        || ((flags & MP_STREAM_OP_READ) && stream_p->read == NULL)
+        || ((flags & MP_STREAM_OP_WRITE) && stream_p->write == NULL)
+        || ((flags & MP_STREAM_OP_IOCTL) && stream_p->ioctl == NULL)) {
+        // CPython: io.UnsupportedOperation, OSError subclass
+        mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("stream operation not supported"));
+    }
+    return stream_p;
+}
+
+// Common implementation behind read() and read1(): read up to `sz` items
+// (chars for text streams, bytes for binary) from args[0].  With no size
+// argument (or None / negative, outside pyserial compatibility mode) this
+// falls through to readall.  Returns str/bytes, or None on EAGAIN with no
+// data read.
+STATIC mp_obj_t stream_read_generic(size_t n_args, const mp_obj_t *args, byte flags) {
+    // What to do if sz < -1? Python docs don't specify this case.
+    // CPython does a readall, let's do the same.
+    mp_int_t sz;
+    const mp_stream_p_t *stream_p = mp_get_stream(args[0]);
+    if (stream_p->pyserial_read_compatibility) {
+        // Pyserial defaults to sz=1 if not specified.
+        if (n_args == 1) {
+            sz = 1;
+        } else {
+            // Pyserial treats negative size as 0.
+            sz = MAX(0, mp_obj_get_int(args[1]));
+        }
+    } else {
+        if (n_args == 1 || args[1] == mp_const_none || (sz = mp_obj_get_int(args[1])) <= -1) {
+            return stream_readall(args[0]);
+        }
+    }
+
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    if (stream_p->is_text) {
+        // We need to read sz number of unicode characters. Because we don't have any
+        // buffering, and because the stream API can only read bytes, we must read here
+        // in units of bytes and must never over read. If we want sz chars, then reading
+        // sz bytes will never over-read, so we follow this approach, in a loop to keep
+        // reading until we have exactly enough chars. This will be 1 read for text
+        // with ASCII-only chars, and about 2 reads for text with a couple of non-ASCII
+        // chars. For text with lots of non-ASCII chars, it'll be pretty inefficient
+        // in time and memory.
+
+        vstr_t vstr;
+        vstr_init(&vstr, sz);
+        mp_uint_t more_bytes = sz;
+        mp_uint_t last_buf_offset = 0;
+        while (more_bytes > 0) {
+            char *p = vstr_add_len(&vstr, more_bytes);
+            int error;
+            mp_uint_t out_sz = mp_stream_read_exactly(args[0], p, more_bytes, &error);
+            if (error != 0) {
+                vstr_cut_tail_bytes(&vstr, more_bytes);
+                if (mp_is_nonblocking_error(error)) {
+                    // With non-blocking streams, we read as much as we can.
+                    // If we read nothing, return None, just like read().
+                    // Otherwise, return data read so far.
+                    // TODO what if we have read only half a non-ASCII char?
+                    if (vstr.len == 0) {
+                        vstr_clear(&vstr);
+                        return mp_const_none;
+                    }
+                    break;
+                }
+                mp_raise_OSError(error);
+            }
+
+            if (out_sz < more_bytes) {
+                // Finish reading.
+                // TODO what if we have read only half a non-ASCII char?
+                vstr_cut_tail_bytes(&vstr, more_bytes - out_sz);
+                if (out_sz == 0) {
+                    break;
+                }
+            }
+
+            // count chars from bytes just read, classifying each UTF-8 lead byte
+            for (mp_uint_t off = last_buf_offset;;) {
+                byte b = vstr.buf[off];
+                int n;
+                if (!UTF8_IS_NONASCII(b)) {
+                    // 1-byte ASCII char
+                    n = 1;
+                } else if ((b & 0xe0) == 0xc0) {
+                    // 2-byte char
+                    n = 2;
+                } else if ((b & 0xf0) == 0xe0) {
+                    // 3-byte char
+                    n = 3;
+                } else if ((b & 0xf8) == 0xf0) {
+                    // 4-byte char
+                    n = 4;
+                } else {
+                    // TODO: invalid lead byte; 5 keeps the scan moving but is not valid UTF-8
+                    n = 5;
+                }
+                if (off + n <= vstr.len) {
+                    // got a whole char in n bytes
+                    off += n;
+                    sz -= 1;
+                    last_buf_offset = off;
+                    if (off >= vstr.len) {
+                        more_bytes = sz;
+                        break;
+                    }
+                } else {
+                    // didn't get a whole char, so work out how many extra bytes are needed for
+                    // this partial char, plus bytes for additional chars that we want
+                    more_bytes = (off + n - vstr.len) + (sz - 1);
+                    break;
+                }
+            }
+        }
+
+        return mp_obj_new_str_from_vstr(&mp_type_str, &vstr);
+    }
+    #endif
+
+    // Binary path: one sized read into a vstr, trimmed to what was received.
+    vstr_t vstr;
+    vstr_init_len(&vstr, sz);
+    int error;
+    mp_uint_t out_sz = mp_stream_rw(args[0], vstr.buf, sz, &error, flags);
+    if (error != 0) {
+        vstr_clear(&vstr);
+        if (mp_is_nonblocking_error(error)) {
+            // https://docs.python.org/3.4/library/io.html#io.RawIOBase.read
+            // "If the object is in non-blocking mode and no bytes are available,
+            // None is returned."
+            // This is actually very weird, as naive truth check will treat
+            // this as EOF.
+            return mp_const_none;
+        }
+        mp_raise_OSError(error);
+    } else {
+        vstr.len = out_sz;
+        return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
+    }
+}
+
+// read([size]): loop until `size` bytes/chars are read (or EOF).
+STATIC mp_obj_t stream_read(size_t n_args, const mp_obj_t *args) {
+    return stream_read_generic(n_args, args, MP_STREAM_RW_READ);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read_obj, 1, 2, stream_read);
+
+// read1([size]): at most one underlying read call (MP_STREAM_RW_ONCE).
+STATIC mp_obj_t stream_read1(size_t n_args, const mp_obj_t *args) {
+    return stream_read_generic(n_args, args, MP_STREAM_RW_READ | MP_STREAM_RW_ONCE);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read1_obj, 1, 2, stream_read1);
+
+// Write `len` bytes from `buf` to the stream, returning the byte count as a
+// small int, or None on EAGAIN when nothing could be written.  Raises OSError
+// on other errors.
+mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len, byte flags) {
+    int error;
+    mp_uint_t out_sz = mp_stream_rw(self_in, (void *)buf, len, &error, flags);
+    if (error != 0) {
+        if (mp_is_nonblocking_error(error)) {
+            // http://docs.python.org/3/library/io.html#io.RawIOBase.write
+            // "None is returned if the raw stream is set not to block and
+            // no single byte could be readily written to it."
+            return mp_const_none;
+        }
+        mp_raise_OSError(error);
+    } else {
+        return MP_OBJ_NEW_SMALL_INT(out_sz);
+    }
+}
+
+// This is used to adapt a stream object to an mp_print_t interface
+// (the return value and any EAGAIN indication are deliberately discarded).
+void mp_stream_write_adaptor(void *self, const char *buf, size_t len) {
+    mp_stream_write(MP_OBJ_FROM_PTR(self), buf, len, MP_STREAM_RW_WRITE);
+}
+
+// write(buf[, max_len] or write(buf, off, max_len)): write a buffer (or a
+// sub-range of it) to the stream.  Rejects str for binary streams.
+STATIC mp_obj_t stream_write_method(size_t n_args, const mp_obj_t *args) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_READ);
+    if (!mp_get_stream(args[0])->is_text && mp_obj_is_str(args[1])) {
+        mp_raise_ValueError(MP_ERROR_TEXT("string not supported; use bytes or bytearray"));
+    }
+    size_t max_len = (size_t)-1;
+    size_t off = 0;
+    if (n_args == 3) {
+        max_len = mp_obj_get_int_truncated(args[2]);
+    } else if (n_args == 4) {
+        off = mp_obj_get_int_truncated(args[2]);
+        max_len = mp_obj_get_int_truncated(args[3]);
+        // clamp offset so the remaining length below cannot underflow
+        if (off > bufinfo.len) {
+            off = bufinfo.len;
+        }
+    }
+    bufinfo.len -= off;
+    return mp_stream_write(args[0], (byte *)bufinfo.buf + off, MIN(bufinfo.len, max_len), MP_STREAM_RW_WRITE);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_write_obj, 2, 4, stream_write_method);
+
+// write1(buf): write with at most one underlying call (MP_STREAM_RW_ONCE).
+STATIC mp_obj_t stream_write1_method(mp_obj_t self_in, mp_obj_t arg) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(arg, &bufinfo, MP_BUFFER_READ);
+    return mp_stream_write(self_in, bufinfo.buf, bufinfo.len, MP_STREAM_RW_WRITE | MP_STREAM_RW_ONCE);
+}
+MP_DEFINE_CONST_FUN_OBJ_2(mp_stream_write1_obj, stream_write1_method);
+
+// readinto(buf[, len]): read into a writable buffer, returning the number of
+// bytes read (0 or None on EAGAIN, depending on pyserial compatibility).
+STATIC mp_obj_t stream_readinto(size_t n_args, const mp_obj_t *args) {
+    mp_buffer_info_t bufinfo;
+    mp_get_buffer_raise(args[1], &bufinfo, MP_BUFFER_WRITE);
+
+    // CPython extension: if 2nd arg is provided, that's max len to read,
+    // instead of full buffer. Similar to
+    // https://docs.python.org/3/library/socket.html#socket.socket.recv_into
+    mp_uint_t len = bufinfo.len;
+    if (n_args > 2) {
+        if (mp_get_stream(args[0])->pyserial_readinto_compatibility) {
+            mp_raise_ValueError(MP_ERROR_TEXT("length argument not allowed for this type"));
+        }
+        len = mp_obj_get_int(args[2]);
+        if (len > bufinfo.len) {
+            len = bufinfo.len;
+        }
+    }
+
+    int error;
+    mp_uint_t out_sz = mp_stream_read_exactly(args[0], bufinfo.buf, len, &error);
+    if (error != 0) {
+        if (mp_is_nonblocking_error(error)) {
+            // pyserial readinto never returns None, just 0.
+            return mp_get_stream(args[0])->pyserial_dont_return_none_compatibility
+                   ? MP_OBJ_NEW_SMALL_INT(0)
+                   : mp_const_none;
+        }
+        mp_raise_OSError(error);
+    } else {
+        return MP_OBJ_NEW_SMALL_INT(out_sz);
+    }
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj, 2, 3, stream_readinto);
+
+// Read until EOF (or EAGAIN), growing the buffer in DEFAULT_BUFFER_SIZE
+// increments.  Returns str/bytes; on EAGAIN with no data, returns None
+// (or b'' in pyserial compatibility mode).
+STATIC mp_obj_t stream_readall(mp_obj_t self_in) {
+    const mp_stream_p_t *stream_p = mp_get_stream(self_in);
+
+    mp_uint_t total_size = 0;
+    vstr_t vstr;
+    vstr_init(&vstr, DEFAULT_BUFFER_SIZE);
+    char *p = vstr.buf;
+    mp_uint_t current_read = DEFAULT_BUFFER_SIZE;
+    while (true) {
+        int error;
+        mp_uint_t out_sz = stream_p->read(self_in, p, current_read, &error);
+        if (out_sz == MP_STREAM_ERROR) {
+            if (mp_is_nonblocking_error(error)) {
+                // With non-blocking streams, we read as much as we can.
+                // If we read nothing, return None, just like read().
+                // Otherwise, return data read so far.
+                if (total_size == 0) {
+                    // pyserial read() never returns None, just b''.
+                    return stream_p->pyserial_dont_return_none_compatibility
+                           ? mp_const_empty_bytes
+                           : mp_const_none;
+                }
+                break;
+            }
+            mp_raise_OSError(error);
+        }
+        if (out_sz == 0) {
+            // EOF
+            break;
+        }
+        total_size += out_sz;
+        if (out_sz < current_read) {
+            // room left in the current chunk; keep filling it
+            current_read -= out_sz;
+            p += out_sz;
+        } else {
+            // chunk exhausted; grow the buffer by another chunk
+            p = vstr_extend(&vstr, DEFAULT_BUFFER_SIZE);
+            current_read = DEFAULT_BUFFER_SIZE;
+        }
+        #ifdef RUN_BACKGROUND_TASKS
+        RUN_BACKGROUND_TASKS;
+        #endif
+    }
+
+    vstr.len = total_size;
+    return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
+}
+
+// Unbuffered, inefficient implementation of readline() for raw I/O files.
+// Reads one byte at a time up to and including '\n' (or max_size bytes, or
+// EOF).  Returns None on EAGAIN with nothing read.
+STATIC mp_obj_t stream_unbuffered_readline(size_t n_args, const mp_obj_t *args) {
+    const mp_stream_p_t *stream_p = mp_get_stream(args[0]);
+
+    mp_int_t max_size = -1;
+    if (n_args > 1) {
+        max_size = MP_OBJ_SMALL_INT_VALUE(args[1]);
+    }
+
+    vstr_t vstr;
+    if (max_size != -1) {
+        vstr_init(&vstr, max_size);
+    } else {
+        vstr_init(&vstr, 16);
+    }
+
+    while (max_size == -1 || max_size-- != 0) {
+        char *p = vstr_add_len(&vstr, 1);
+        int error;
+        mp_uint_t out_sz = stream_p->read(args[0], p, 1, &error);
+        if (out_sz == MP_STREAM_ERROR) {
+            if (mp_is_nonblocking_error(error)) {
+                if (vstr.len == 1) {
+                    // We just incremented it, but otherwise we read nothing
+                    // and immediately got EAGAIN. This case is not well
+                    // specified in
+                    // https://docs.python.org/3/library/io.html#io.IOBase.readline
+                    // unlike similar case for read(). But we follow the latter's
+                    // behavior - return None.
+                    vstr_clear(&vstr);
+                    return mp_const_none;
+                } else {
+                    goto done;
+                }
+            }
+            mp_raise_OSError(error);
+        }
+        if (out_sz == 0) {
+        done:
+            // Back out previously added byte
+            // Consider, what's better - read a char and get OutOfMemory (so read
+            // char is lost), or allocate first as we do.
+            vstr_cut_tail_bytes(&vstr, 1);
+            break;
+        }
+        if (*p == '\n') {
+            break;
+        }
+    }
+
+    return mp_obj_new_str_from_vstr(STREAM_CONTENT_TYPE(stream_p), &vstr);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_unbuffered_readline_obj, 1, 2, stream_unbuffered_readline);
+
+// readlines(): collect lines via stream_unbuffered_readline until a falsy
+// (empty/None) result signals EOF or EAGAIN.
+// TODO take an optional extra argument (what does it do exactly?)
+STATIC mp_obj_t stream_unbuffered_readlines(mp_obj_t self) {
+    mp_obj_t lines = mp_obj_new_list(0, NULL);
+    for (;;) {
+        mp_obj_t line = stream_unbuffered_readline(1, &self);
+        if (!mp_obj_is_true(line)) {
+            break;
+        }
+        mp_obj_list_append(lines, line);
+    }
+    return lines;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_unbuffered_readlines_obj, stream_unbuffered_readlines);
+
+// Iterator step for `for line in stream`: yield the next line, or stop
+// iteration when readline returns a falsy (empty/None) value.
+mp_obj_t mp_stream_unbuffered_iter(mp_obj_t self) {
+    mp_obj_t l_in = stream_unbuffered_readline(1, &self);
+    if (mp_obj_is_true(l_in)) {
+        return l_in;
+    }
+    return MP_OBJ_STOP_ITERATION;
+}
+
+// close(): issue MP_STREAM_CLOSE via the stream's ioctl; raises OSError on
+// failure, returns None on success.
+mp_obj_t mp_stream_close(mp_obj_t stream) {
+    const mp_stream_p_t *stream_p = mp_get_stream(stream);
+    int error;
+    mp_uint_t res = stream_p->ioctl(stream, MP_STREAM_CLOSE, 0, &error);
+    if (res == MP_STREAM_ERROR) {
+        mp_raise_OSError(error);
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_close_obj, mp_stream_close);
+
+// seek(offset[, whence]): dispatch MP_STREAM_SEEK through ioctl and return
+// the resulting absolute offset.
+STATIC mp_obj_t stream_seek(size_t n_args, const mp_obj_t *args) {
+    struct mp_stream_seek_t seek_s;
+    // TODO: Could be uint64
+    seek_s.offset = mp_obj_get_int(args[1]);
+    seek_s.whence = SEEK_SET;
+    if (n_args == 3) {
+        seek_s.whence = mp_obj_get_int(args[2]);
+    }
+
+    // In POSIX, it's an error to seek before the start of the stream; we enforce it here.
+    if (seek_s.whence == SEEK_SET && seek_s.offset < 0) {
+        mp_raise_OSError(MP_EINVAL);
+    }
+
+    const mp_stream_p_t *stream_p = mp_get_stream(args[0]);
+    int error;
+    mp_uint_t res = stream_p->ioctl(args[0], MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &error);
+    if (res == MP_STREAM_ERROR) {
+        mp_raise_OSError(error);
+    }
+
+    // TODO: Could be uint64
+    return mp_obj_new_int_from_uint(seek_s.offset);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_seek_obj, 2, 3, stream_seek);
+
+// tell(): implemented as seek(0, SEEK_CUR).
+STATIC mp_obj_t stream_tell(mp_obj_t self) {
+    mp_obj_t offset = MP_OBJ_NEW_SMALL_INT(0);
+    mp_obj_t whence = MP_OBJ_NEW_SMALL_INT(SEEK_CUR);
+    const mp_obj_t args[3] = {self, offset, whence};
+    return stream_seek(3, args);
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_tell_obj, stream_tell);
+
+// flush(): issue MP_STREAM_FLUSH via ioctl.  Raises OSError(EINVAL) if the
+// stream has no ioctl at all; returns None on success.
+mp_obj_t mp_stream_flush(mp_obj_t self) {
+    const mp_stream_p_t *stream_p = mp_get_stream(self);
+    int error;
+    if (stream_p->ioctl == NULL) {
+        mp_raise_OSError(MP_EINVAL);
+    }
+    mp_uint_t res = stream_p->ioctl(self, MP_STREAM_FLUSH, 0, &error);
+    if (res == MP_STREAM_ERROR) {
+        mp_raise_OSError(error);
+    }
+    return mp_const_none;
+}
+MP_DEFINE_CONST_FUN_OBJ_1(mp_stream_flush_obj, mp_stream_flush);
+
+// ioctl(request[, arg]): pass a raw ioctl through to the stream.  A buffer
+// argument is passed by pointer; any other argument as a truncated integer.
+STATIC mp_obj_t stream_ioctl(size_t n_args, const mp_obj_t *args) {
+    mp_buffer_info_t bufinfo;
+    uintptr_t val = 0;
+    if (n_args > 2) {
+        if (mp_get_buffer(args[2], &bufinfo, MP_BUFFER_WRITE)) {
+            val = (uintptr_t)bufinfo.buf;
+        } else {
+            val = mp_obj_get_int_truncated(args[2]);
+        }
+    }
+
+    const mp_stream_p_t *stream_p = mp_get_stream(args[0]);
+    int error;
+    mp_uint_t res = stream_p->ioctl(args[0], mp_obj_get_int(args[1]), val, &error);
+    if (res == MP_STREAM_ERROR) {
+        mp_raise_OSError(error);
+    }
+
+    return mp_obj_new_int(res);
+}
+MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_ioctl_obj, 2, 3, stream_ioctl);
+
+#if MICROPY_STREAMS_POSIX_API
+/*
+ * POSIX-like functions
+ *
+ * These functions have POSIX-compatible signature (except for "void *stream"
+ * first argument instead of "int fd"). They are useful to port existing
+ * POSIX-compatible software to work with MicroPython streams.
+ *
+ * Errors are reported POSIX-style: return -1 and set errno (the stream
+ * protocol's errcode out-parameter is pointed directly at errno).
+ */
+
+#include <errno.h>
+
+// POSIX-style write: returns bytes written, or -1 with errno set.
+ssize_t mp_stream_posix_write(mp_obj_t stream, const void *buf, size_t len) {
+    mp_obj_base_t *o = (mp_obj_base_t *)MP_OBJ_TO_PTR(stream);
+    const mp_stream_p_t *stream_p = mp_get_stream(o);
+    mp_uint_t out_sz = stream_p->write(stream, buf, len, &errno);
+    if (out_sz == MP_STREAM_ERROR) {
+        return -1;
+    } else {
+        return out_sz;
+    }
+}
+
+// POSIX-style read: returns bytes read (0 at EOF), or -1 with errno set.
+ssize_t mp_stream_posix_read(mp_obj_t stream, void *buf, size_t len) {
+    mp_obj_base_t *o = (mp_obj_base_t *)MP_OBJ_TO_PTR(stream);
+    const mp_stream_p_t *stream_p = mp_get_stream(o);
+    mp_uint_t out_sz = stream_p->read(stream, buf, len, &errno);
+    if (out_sz == MP_STREAM_ERROR) {
+        return -1;
+    } else {
+        return out_sz;
+    }
+}
+
+// POSIX-style lseek: returns the new absolute offset, or -1 with errno set.
+off_t mp_stream_posix_lseek(mp_obj_t stream, off_t offset, int whence) {
+    const mp_obj_base_t *o = (mp_obj_base_t *)MP_OBJ_TO_PTR(stream);
+    const mp_stream_p_t *stream_p = mp_get_stream(o);
+    struct mp_stream_seek_t seek_s;
+    seek_s.offset = offset;
+    seek_s.whence = whence;
+    mp_uint_t res = stream_p->ioctl(MP_OBJ_FROM_PTR(stream), MP_STREAM_SEEK, (mp_uint_t)(uintptr_t)&seek_s, &errno);
+    if (res == MP_STREAM_ERROR) {
+        return -1;
+    }
+    return seek_s.offset;
+}
+
+// POSIX-style fsync: flush via ioctl; returns ioctl result, or -1 with errno set.
+int mp_stream_posix_fsync(mp_obj_t stream) {
+    mp_obj_base_t *o = (mp_obj_base_t *)MP_OBJ_TO_PTR(stream);
+    const mp_stream_p_t *stream_p = mp_get_stream(o);
+    mp_uint_t res = stream_p->ioctl(stream, MP_STREAM_FLUSH, 0, &errno);
+    if (res == MP_STREAM_ERROR) {
+        return -1;
+    }
+    return res;
+}
+
+#endif
diff --git a/circuitpython/py/stream.h b/circuitpython/py/stream.h
new file mode 100644
index 0000000..4283e68
--- /dev/null
+++ b/circuitpython/py/stream.h
@@ -0,0 +1,139 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2016 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_STREAM_H
+#define MICROPY_INCLUDED_PY_STREAM_H
+
+#include "py/obj.h"
+#include "py/proto.h"
+#include "py/mperrno.h"
+
+#define MP_STREAM_ERROR ((mp_uint_t)-1)
+
+// Stream ioctl request codes
+#define MP_STREAM_FLUSH (1)
+#define MP_STREAM_SEEK (2)
+#define MP_STREAM_POLL (3)
+#define MP_STREAM_CLOSE (4)
+#define MP_STREAM_TIMEOUT (5) // Get/set timeout (single op)
+#define MP_STREAM_GET_OPTS (6) // Get stream options
+#define MP_STREAM_SET_OPTS (7) // Set stream options
+#define MP_STREAM_GET_DATA_OPTS (8) // Get data/message options
+#define MP_STREAM_SET_DATA_OPTS (9) // Set data/message options
+#define MP_STREAM_GET_FILENO (10) // Get fileno of underlying file
+
+// These poll ioctl values are compatible with Linux
+#define MP_STREAM_POLL_RD (0x0001)
+#define MP_STREAM_POLL_WR (0x0004)
+#define MP_STREAM_POLL_ERR (0x0008)
+#define MP_STREAM_POLL_HUP (0x0010)
+#define MP_STREAM_POLL_NVAL (0x0020)
+
+// Argument structure for MP_STREAM_SEEK
+struct mp_stream_seek_t {
+ // If whence == MP_SEEK_SET, offset should be treated as unsigned.
+ // This allows dealing with full-width stream sizes (16, 32, 64,
+ // etc. bits). For other seek types, should be treated as signed.
+ mp_off_t offset;
+ int whence;
+};
+
+// seek ioctl "whence" values
+#define MP_SEEK_SET (0)
+#define MP_SEEK_CUR (1)
+#define MP_SEEK_END (2)
+
+// Stream protocol
+typedef struct _mp_stream_p_t {
+ MP_PROTOCOL_HEAD
+ // On error, functions should return MP_STREAM_ERROR and fill in *errcode (values
+ // are implementation-dependent, but will be exposed to user, e.g. via exception).
+ mp_uint_t (*read)(mp_obj_t obj, void *buf, mp_uint_t size, int *errcode);
+ mp_uint_t (*write)(mp_obj_t obj, const void *buf, mp_uint_t size, int *errcode);
+ mp_uint_t (*ioctl)(mp_obj_t obj, mp_uint_t request, uintptr_t arg, int *errcode);
+ mp_uint_t is_text : 1; // default is bytes, set this for text stream
+ bool pyserial_readinto_compatibility : 1; // Disallow size parameter in readinto()
+ bool pyserial_read_compatibility : 1; // Disallow omitting read(size) size parameter
+ bool pyserial_dont_return_none_compatibility : 1; // Don't return None for read() or readinto()
+} mp_stream_p_t;
+
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_read1_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_readinto_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_unbuffered_readline_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_unbuffered_readlines_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_write_obj);
+MP_DECLARE_CONST_FUN_OBJ_2(mp_stream_write1_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_close_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_seek_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_tell_obj);
+MP_DECLARE_CONST_FUN_OBJ_1(mp_stream_flush_obj);
+MP_DECLARE_CONST_FUN_OBJ_VAR_BETWEEN(mp_stream_ioctl_obj);
+
+// these are for mp_get_stream_raise and can be or'd together
+#define MP_STREAM_OP_READ (1)
+#define MP_STREAM_OP_WRITE (2)
+#define MP_STREAM_OP_IOCTL (4)
+
+// Object is assumed to have a non-NULL stream protocol with valid r/w/ioctl methods
+const mp_stream_p_t *mp_get_stream(mp_const_obj_t self);
+
+const mp_stream_p_t *mp_get_stream_raise(mp_obj_t self_in, int flags);
+mp_obj_t mp_stream_close(mp_obj_t stream);
+
+// Iterator which uses mp_stream_unbuffered_readline_obj
+mp_obj_t mp_stream_unbuffered_iter(mp_obj_t self);
+
+mp_obj_t mp_stream_write(mp_obj_t self_in, const void *buf, size_t len, byte flags);
+
+// C-level helper functions
+#define MP_STREAM_RW_READ 0
+#define MP_STREAM_RW_WRITE 2
+#define MP_STREAM_RW_ONCE 1
+mp_uint_t mp_stream_rw(mp_obj_t stream, void *buf, mp_uint_t size, int *errcode, byte flags);
+#define mp_stream_write_exactly(stream, buf, size, err) mp_stream_rw(stream, (byte *)buf, size, err, MP_STREAM_RW_WRITE)
+#define mp_stream_read_exactly(stream, buf, size, err) mp_stream_rw(stream, buf, size, err, MP_STREAM_RW_READ)
+
+void mp_stream_write_adaptor(void *self, const char *buf, size_t len);
+mp_obj_t mp_stream_flush(mp_obj_t self);
+
+#if MICROPY_STREAMS_POSIX_API
+#include <sys/types.h>
+// Functions with POSIX-compatible signatures
+// "stream" is assumed to be a pointer to a concrete object with the stream protocol
+ssize_t mp_stream_posix_write(void *stream, const void *buf, size_t len);
+ssize_t mp_stream_posix_read(void *stream, void *buf, size_t len);
+off_t mp_stream_posix_lseek(void *stream, off_t offset, int whence);
+int mp_stream_posix_fsync(void *stream);
+#endif
+
+#if MICROPY_STREAMS_NON_BLOCK
+#define mp_is_nonblocking_error(errno) ((errno) == MP_EAGAIN || (errno) == MP_EWOULDBLOCK)
+#else
+#define mp_is_nonblocking_error(errno) (0)
+#endif
+
+#endif // MICROPY_INCLUDED_PY_STREAM_H
diff --git a/circuitpython/py/unicode.c b/circuitpython/py/unicode.c
new file mode 100644
index 0000000..4028af5
--- /dev/null
+++ b/circuitpython/py/unicode.c
@@ -0,0 +1,211 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include "py/unicode.h"
+
+// attribute flags
+#define FL_PRINT (0x01)
+#define FL_SPACE (0x02)
+#define FL_DIGIT (0x04)
+#define FL_ALPHA (0x08)
+#define FL_UPPER (0x10)
+#define FL_LOWER (0x20)
+#define FL_XDIGIT (0x40)
+
+// shorthand character attributes
+#define AT_PR (FL_PRINT)
+#define AT_SP (FL_SPACE | FL_PRINT)
+#define AT_DI (FL_DIGIT | FL_PRINT | FL_XDIGIT)
+#define AT_AL (FL_ALPHA | FL_PRINT)
+#define AT_UP (FL_UPPER | FL_ALPHA | FL_PRINT)
+#define AT_LO (FL_LOWER | FL_ALPHA | FL_PRINT)
+#define AT_UX (FL_UPPER | FL_ALPHA | FL_PRINT | FL_XDIGIT)
+#define AT_LX (FL_LOWER | FL_ALPHA | FL_PRINT | FL_XDIGIT)
+
+// table of attributes for ascii characters
+STATIC const uint8_t attr[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, AT_SP, AT_SP, AT_SP, AT_SP, AT_SP, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ AT_SP, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
+ AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
+ AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI, AT_DI,
+ AT_DI, AT_DI, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
+ AT_PR, AT_UX, AT_UX, AT_UX, AT_UX, AT_UX, AT_UX, AT_UP,
+ AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP,
+ AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP, AT_UP,
+ AT_UP, AT_UP, AT_UP, AT_PR, AT_PR, AT_PR, AT_PR, AT_PR,
+ AT_PR, AT_LX, AT_LX, AT_LX, AT_LX, AT_LX, AT_LX, AT_LO,
+ AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO,
+ AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO, AT_LO,
+ AT_LO, AT_LO, AT_LO, AT_PR, AT_PR, AT_PR, AT_PR, 0
+};
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+
+unichar utf8_get_char(const byte *s) {
+ unichar ord = *s++;
+ if (!UTF8_IS_NONASCII(ord)) {
+ return ord;
+ }
+ ord &= 0x7F;
+ for (unichar mask = 0x40; ord & mask; mask >>= 1) {
+ ord &= ~mask;
+ }
+ while (UTF8_IS_CONT(*s)) {
+ ord = (ord << 6) | (*s++ & 0x3F);
+ }
+ return ord;
+}
+
+const byte *utf8_next_char(const byte *s) {
+ ++s;
+ while (UTF8_IS_CONT(*s)) {
+ ++s;
+ }
+ return s;
+}
+
+mp_uint_t utf8_ptr_to_index(const byte *s, const byte *ptr) {
+ mp_uint_t i = 0;
+ while (ptr > s) {
+ if (!UTF8_IS_CONT(*--ptr)) {
+ i++;
+ }
+ }
+
+ return i;
+}
+
+size_t utf8_charlen(const byte *str, size_t len) {
+ size_t charlen = 0;
+ for (const byte *top = str + len; str < top; ++str) {
+ if (!UTF8_IS_CONT(*str)) {
+ ++charlen;
+ }
+ }
+ return charlen;
+}
+
+#endif
+
+// Be aware: These unichar_is* functions are actually ASCII-only!
+bool unichar_isspace(unichar c) {
+ return c < 128 && (attr[c] & FL_SPACE) != 0;
+}
+
+bool unichar_isalpha(unichar c) {
+ return c < 128 && (attr[c] & FL_ALPHA) != 0;
+}
+
+/* unused
+bool unichar_isprint(unichar c) {
+ return c < 128 && (attr[c] & FL_PRINT) != 0;
+}
+*/
+
+bool unichar_isdigit(unichar c) {
+ return c < 128 && (attr[c] & FL_DIGIT) != 0;
+}
+
+bool unichar_isxdigit(unichar c) {
+ return c < 128 && (attr[c] & FL_XDIGIT) != 0;
+}
+
+bool unichar_isident(unichar c) {
+ return c < 128 && ((attr[c] & (FL_ALPHA | FL_DIGIT)) != 0 || c == '_');
+}
+
+bool unichar_isalnum(unichar c) {
+ return c < 128 && ((attr[c] & (FL_ALPHA | FL_DIGIT)) != 0);
+}
+
+bool unichar_isupper(unichar c) {
+ return c < 128 && (attr[c] & FL_UPPER) != 0;
+}
+
+bool unichar_islower(unichar c) {
+ return c < 128 && (attr[c] & FL_LOWER) != 0;
+}
+
+unichar unichar_tolower(unichar c) {
+ if (unichar_isupper(c)) {
+ return c + 0x20;
+ }
+ return c;
+}
+
+unichar unichar_toupper(unichar c) {
+ if (unichar_islower(c)) {
+ return c - 0x20;
+ }
+ return c;
+}
+
+mp_uint_t unichar_xdigit_value(unichar c) {
+ // c is assumed to be hex digit
+ mp_uint_t n = c - '0';
+ if (n > 9) {
+ n &= ~('a' - 'A');
+ n -= ('A' - ('9' + 1));
+ }
+ return n;
+}
+
+#if MICROPY_PY_BUILTINS_STR_UNICODE
+
+bool utf8_check(const byte *p, size_t len) {
+ uint8_t need = 0;
+ const byte *end = p + len;
+ for (; p < end; p++) {
+ byte c = *p;
+ if (need) {
+ if (UTF8_IS_CONT(c)) {
+ need--;
+ } else {
+ // mismatch
+ return 0;
+ }
+ } else {
+ if (c >= 0xc0) {
+ if (c >= 0xf8) {
+ // mismatch
+ return 0;
+ }
+ need = (0xe5 >> ((c >> 3) & 0x6)) & 3;
+ } else if (c >= 0x80) {
+ // mismatch
+ return 0;
+ }
+ }
+ }
+ return need == 0; // no pending fragments allowed
+}
+
+#endif
diff --git a/circuitpython/py/unicode.h b/circuitpython/py/unicode.h
new file mode 100644
index 0000000..78e7a7a
--- /dev/null
+++ b/circuitpython/py/unicode.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef MICROPY_INCLUDED_PY_UNICODE_H
+#define MICROPY_INCLUDED_PY_UNICODE_H
+
+#include "py/mpconfig.h"
+#include "py/misc.h"
+
+mp_uint_t utf8_ptr_to_index(const byte *s, const byte *ptr);
+bool utf8_check(const byte *p, size_t len);
+
+#endif // MICROPY_INCLUDED_PY_UNICODE_H
diff --git a/circuitpython/py/usermod.cmake b/circuitpython/py/usermod.cmake
new file mode 100644
index 0000000..8532762
--- /dev/null
+++ b/circuitpython/py/usermod.cmake
@@ -0,0 +1,52 @@
+# Create a target for all user modules to link against.
+add_library(usermod INTERFACE)
+
+function(usermod_gather_sources SOURCES_VARNAME INCLUDE_DIRECTORIES_VARNAME INCLUDED_VARNAME LIB)
+ if (NOT ${LIB} IN_LIST ${INCLUDED_VARNAME})
+ list(APPEND ${INCLUDED_VARNAME} ${LIB})
+
+ # Gather library sources
+ get_target_property(lib_sources ${LIB} INTERFACE_SOURCES)
+ if (lib_sources)
+ list(APPEND ${SOURCES_VARNAME} ${lib_sources})
+ endif()
+
+ # Gather library includes
+ get_target_property(lib_include_directories ${LIB} INTERFACE_INCLUDE_DIRECTORIES)
+ if (lib_include_directories)
+ list(APPEND ${INCLUDE_DIRECTORIES_VARNAME} ${lib_include_directories})
+ endif()
+
+ # Recurse linked libraries
+ get_target_property(trans_depend ${LIB} INTERFACE_LINK_LIBRARIES)
+ if (trans_depend)
+ foreach(SUB_LIB ${trans_depend})
+ usermod_gather_sources(
+ ${SOURCES_VARNAME}
+ ${INCLUDE_DIRECTORIES_VARNAME}
+ ${INCLUDED_VARNAME}
+ ${SUB_LIB})
+ endforeach()
+ endif()
+
+ set(${SOURCES_VARNAME} ${${SOURCES_VARNAME}} PARENT_SCOPE)
+ set(${INCLUDE_DIRECTORIES_VARNAME} ${${INCLUDE_DIRECTORIES_VARNAME}} PARENT_SCOPE)
+ set(${INCLUDED_VARNAME} ${${INCLUDED_VARNAME}} PARENT_SCOPE)
+ endif()
+endfunction()
+
+# Include CMake files for user modules.
+if (USER_C_MODULES)
+ foreach(USER_C_MODULE_PATH ${USER_C_MODULES})
+ message("Including User C Module(s) from ${USER_C_MODULE_PATH}")
+ include(${USER_C_MODULE_PATH})
+ endforeach()
+endif()
+
+# Recursively gather sources for QSTR scanning - doesn't support generator expressions.
+usermod_gather_sources(MICROPY_SOURCE_USERMOD MICROPY_INC_USERMOD found_modules usermod)
+
+# Report found modules.
+list(REMOVE_ITEM found_modules "usermod")
+list(JOIN found_modules ", " found_modules)
+message("Found User C Module(s): ${found_modules}")
diff --git a/circuitpython/py/vm.c b/circuitpython/py/vm.c
new file mode 100644
index 0000000..8cb01d3
--- /dev/null
+++ b/circuitpython/py/vm.c
@@ -0,0 +1,1471 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013-2019 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2015 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/emitglue.h"
+#include "py/objtype.h"
+#include "py/runtime.h"
+#include "py/bc0.h"
+#include "py/bc.h"
+#include "py/profile.h"
+
+#include "supervisor/linker.h"
+
+// *FORMAT-OFF*
+
+#if 0
+#if MICROPY_PY_THREAD
+#define TRACE_PREFIX mp_printf(&mp_plat_print, "ts=%p sp=%d ", mp_thread_get_state(), (int)(sp - &code_state->state[0] + 1))
+#else
+#define TRACE_PREFIX mp_printf(&mp_plat_print, "sp=%d ", (int)(sp - &code_state->state[0] + 1))
+#endif
+#define TRACE(ip) TRACE_PREFIX; mp_bytecode_print2(&mp_plat_print, ip, 1, code_state->fun_bc->const_table);
+#else
+#define TRACE(ip)
+#endif
+
+// Value stack grows up (this makes it incompatible with native C stack, but
+// makes sure that arguments to functions are in natural order arg1..argN
+// (Python semantics mandates left-to-right evaluation order, including for
+// function arguments). Stack pointer is pre-incremented and points at the
+// top element.
+// Exception stack also grows up, top element is also pointed at.
+
+#define DECODE_UINT \
+ mp_uint_t unum = 0; \
+ do { \
+ unum = (unum << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0)
+#define DECODE_ULABEL size_t ulab = (ip[0] | (ip[1] << 8)); ip += 2
+#define DECODE_SLABEL size_t slab = (ip[0] | (ip[1] << 8)) - 0x8000; ip += 2
+
+#if MICROPY_PERSISTENT_CODE
+
+#define DECODE_QSTR \
+ qstr qst = ip[0] | ip[1] << 8; \
+ ip += 2;
+#define DECODE_PTR \
+ DECODE_UINT; \
+ void *ptr = (void*)(uintptr_t)code_state->fun_bc->const_table[unum]
+#define DECODE_OBJ \
+ DECODE_UINT; \
+ mp_obj_t obj = (mp_obj_t)code_state->fun_bc->const_table[unum]
+
+#else
+
+#define DECODE_QSTR qstr qst = 0; \
+ do { \
+ qst = (qst << 7) + (*ip & 0x7f); \
+ } while ((*ip++ & 0x80) != 0)
+#define DECODE_PTR \
+ ip = (byte*)MP_ALIGN(ip, sizeof(void*)); \
+ void *ptr = *(void**)ip; \
+ ip += sizeof(void*)
+#define DECODE_OBJ \
+ ip = (byte*)MP_ALIGN(ip, sizeof(mp_obj_t)); \
+ mp_obj_t obj = *(mp_obj_t*)ip; \
+ ip += sizeof(mp_obj_t)
+
+#endif
+
+#define PUSH(val) *++sp = (val)
+#define POP() (*sp--)
+#define TOP() (*sp)
+#define SET_TOP(val) *sp = (val)
+
+#if MICROPY_PY_SYS_EXC_INFO
+#define CLEAR_SYS_EXC_INFO() MP_STATE_VM(cur_exception) = NULL;
+#else
+#define CLEAR_SYS_EXC_INFO()
+#endif
+
+#define PUSH_EXC_BLOCK(with_or_finally) do { \
+ DECODE_ULABEL; /* except labels are always forward */ \
+ ++exc_sp; \
+ exc_sp->handler = ip + ulab; \
+ exc_sp->val_sp = MP_TAGPTR_MAKE(sp, ((with_or_finally) << 1)); \
+ exc_sp->prev_exc = NULL; \
+} while (0)
+
+#define POP_EXC_BLOCK() \
+ exc_sp--; /* pop back to previous exception handler */ \
+ CLEAR_SYS_EXC_INFO() /* just clear sys.exc_info(), not compliant, but it shouldn't be used in 1st place */
+
+#define CANCEL_ACTIVE_FINALLY(sp) do { \
+ if (mp_obj_is_small_int(sp[-1])) { \
+ /* Stack: (..., prev_dest_ip, prev_cause, dest_ip) */ \
+ /* Cancel the unwind through the previous finally, replace with current one */ \
+ sp[-2] = sp[0]; \
+ sp -= 2; \
+ } else { \
+ assert(sp[-1] == mp_const_none || mp_obj_is_exception_instance(sp[-1])); \
+ /* Stack: (..., None/exception, dest_ip) */ \
+ /* Silence the finally's exception value (may be None or an exception) */ \
+ sp[-1] = sp[0]; \
+ --sp; \
+ } \
+} while (0)
+
+#if MICROPY_PY_SYS_SETTRACE
+
+#define FRAME_SETUP() do { \
+ assert(code_state != code_state->prev_state); \
+ MP_STATE_THREAD(current_code_state) = code_state; \
+ assert(code_state != code_state->prev_state); \
+} while(0)
+
+#define FRAME_ENTER() do { \
+ assert(code_state != code_state->prev_state); \
+ code_state->prev_state = MP_STATE_THREAD(current_code_state); \
+ assert(code_state != code_state->prev_state); \
+ if (!mp_prof_is_executing) { \
+ mp_prof_frame_enter(code_state); \
+ } \
+} while(0)
+
+#define FRAME_LEAVE() do { \
+ assert(code_state != code_state->prev_state); \
+ MP_STATE_THREAD(current_code_state) = code_state->prev_state; \
+ assert(code_state != code_state->prev_state); \
+} while(0)
+
+#define FRAME_UPDATE() do { \
+ assert(MP_STATE_THREAD(current_code_state) == code_state); \
+ if (!mp_prof_is_executing) { \
+ code_state->frame = MP_OBJ_TO_PTR(mp_prof_frame_update(code_state)); \
+ } \
+} while(0)
+
+#define TRACE_TICK(current_ip, current_sp, is_exception) do { \
+ assert(code_state != code_state->prev_state); \
+ assert(MP_STATE_THREAD(current_code_state) == code_state); \
+ if (!mp_prof_is_executing && code_state->frame && MP_STATE_THREAD(prof_trace_callback)) { \
+ MP_PROF_INSTR_DEBUG_PRINT(code_state->ip); \
+ } \
+ if (!mp_prof_is_executing && code_state->frame && code_state->frame->callback) { \
+ mp_prof_instr_tick(code_state, is_exception); \
+ } \
+} while(0)
+
+#else // MICROPY_PY_SYS_SETTRACE
+#define FRAME_SETUP()
+#define FRAME_ENTER()
+#define FRAME_LEAVE()
+#define FRAME_UPDATE()
+#define TRACE_TICK(current_ip, current_sp, is_exception)
+#endif // MICROPY_PY_SYS_SETTRACE
+
+// fastn has items in reverse order (fastn[0] is local[0], fastn[-1] is local[1], etc)
+// sp points to bottom of stack which grows up
+// returns:
+// MP_VM_RETURN_NORMAL, sp valid, return value in *sp
+// MP_VM_RETURN_YIELD, ip, sp valid, yielded value in *sp
+// MP_VM_RETURN_EXCEPTION, exception in state[0]
+mp_vm_return_kind_t MICROPY_WRAP_MP_EXECUTE_BYTECODE(mp_execute_bytecode)(mp_code_state_t *code_state, volatile mp_obj_t inject_exc) {
+#define SELECTIVE_EXC_IP (0)
+#if SELECTIVE_EXC_IP
+#define MARK_EXC_IP_SELECTIVE() { code_state->ip = ip; } /* stores ip 1 byte past last opcode */
+#define MARK_EXC_IP_GLOBAL()
+#else
+#define MARK_EXC_IP_SELECTIVE()
+#define MARK_EXC_IP_GLOBAL() { code_state->ip = ip; } /* stores ip pointing to last opcode */
+#endif
+#if MICROPY_OPT_COMPUTED_GOTO
+ #include "py/vmentrytable.h"
+ #if MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE
+ #define ONE_TRUE_DISPATCH() one_true_dispatch : do { \
+ TRACE(ip); \
+ MARK_EXC_IP_GLOBAL(); \
+ goto *(void *)((char *) && entry_MP_BC_LOAD_CONST_FALSE + entry_table[*ip++]); \
+} while (0)
+ #define DISPATCH() do { goto one_true_dispatch; } while (0)
+ #else
+ #define ONE_TRUE_DISPATCH() DISPATCH()
+ #define DISPATCH() do { \
+ TRACE(ip); \
+ MARK_EXC_IP_GLOBAL(); \
+ TRACE_TICK(ip, sp, false); \
+ goto *entry_table[*ip++]; \
+} while (0)
+ #endif
+ #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
+ #define ENTRY(op) entry_##op
+ #define ENTRY_DEFAULT entry_default
+#else
+ #define DISPATCH() goto dispatch_loop
+ #define DISPATCH_WITH_PEND_EXC_CHECK() goto pending_exception_check
+ #define ENTRY(op) case op
+ #define ENTRY_DEFAULT default
+#endif
+
+ // nlr_raise needs to be implemented as a goto, so that the C compiler's flow analyser
+ // sees that it's possible for us to jump from the dispatch loop to the exception
+ // handler. Without this, the code may have a different stack layout in the dispatch
+ // loop and the exception handler, leading to very obscure bugs.
+ #define RAISE(o) do { nlr_pop(); nlr.ret_val = MP_OBJ_TO_PTR(o); goto exception_handler; } while (0)
+
+#if MICROPY_STACKLESS
+run_code_state: ;
+#endif
+FRAME_ENTER();
+
+#if MICROPY_STACKLESS
+run_code_state_from_return: ;
+#endif
+FRAME_SETUP();
+
+ // Pointers which are constant for particular invocation of mp_execute_bytecode()
+ mp_obj_t * /*const*/ fastn;
+ mp_exc_stack_t * /*const*/ exc_stack;
+ {
+ size_t n_state = code_state->n_state;
+ fastn = &code_state->state[n_state - 1];
+ exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
+ }
+
+ // variables that are visible to the exception handler (declared volatile)
+ mp_exc_stack_t *volatile exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack
+
+ #if MICROPY_PY_THREAD_GIL && MICROPY_PY_THREAD_GIL_VM_DIVISOR
+ // This needs to be volatile and outside the VM loop so it persists across handling
+ // of any exceptions. Otherwise it's possible that the VM never gives up the GIL.
+ volatile int gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
+ #endif
+
+ // outer exception handling loop
+ for (;;) {
+ nlr_buf_t nlr;
+outer_dispatch_loop:
+ if (nlr_push(&nlr) == 0) {
+ // local variables that are not visible to the exception handler
+ const byte *ip = code_state->ip;
+ mp_obj_t *sp = code_state->sp;
+ mp_obj_t obj_shared;
+ MICROPY_VM_HOOK_INIT
+
+ // If we have exception to inject, now that we finish setting up
+ // execution context, raise it. This works as if MP_BC_RAISE_OBJ
+ // bytecode was executed.
+ // Injecting exc into yield from generator is a special case,
+ // handled by MP_BC_YIELD_FROM itself
+ if (inject_exc != MP_OBJ_NULL && *ip != MP_BC_YIELD_FROM) {
+ mp_obj_t exc = inject_exc;
+ inject_exc = MP_OBJ_NULL;
+ exc = mp_make_raise_obj(exc);
+ RAISE(exc);
+ }
+
+ // loop to execute byte code
+ for (;;) {
+dispatch_loop:
+#if MICROPY_OPT_COMPUTED_GOTO
+ ONE_TRUE_DISPATCH();
+#else
+ TRACE(ip);
+ MARK_EXC_IP_GLOBAL();
+ TRACE_TICK(ip, sp, false);
+ switch (*ip++) {
+#endif
+
+ ENTRY(MP_BC_LOAD_CONST_FALSE):
+ PUSH(mp_const_false);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_NONE):
+ PUSH(mp_const_none);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_TRUE):
+ PUSH(mp_const_true);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_CONST_SMALL_INT): {
+ mp_uint_t num = 0;
+ if ((ip[0] & 0x40) != 0) {
+ // Number is negative
+ num--;
+ }
+ do {
+ num = (num << 7) | (*ip & 0x7f);
+ } while ((*ip++ & 0x80) != 0);
+ PUSH(MP_OBJ_NEW_SMALL_INT(num));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_CONST_STRING): {
+ DECODE_QSTR;
+ PUSH(MP_OBJ_NEW_QSTR(qst));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_CONST_OBJ): {
+ DECODE_OBJ;
+ PUSH(obj);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_NULL):
+ PUSH(MP_OBJ_NULL);
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_FAST_N): {
+ DECODE_UINT;
+ obj_shared = fastn[-unum];
+ load_check:
+ if (obj_shared == MP_OBJ_NULL) {
+ local_name_error: {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NameError, MP_ERROR_TEXT("local variable referenced before assignment"));
+ RAISE(obj);
+ }
+ }
+ PUSH(obj_shared);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_DEREF): {
+ DECODE_UINT;
+ obj_shared = mp_obj_cell_get(fastn[-unum]);
+ goto load_check;
+ }
+
+ ENTRY(MP_BC_LOAD_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ PUSH(mp_load_name(qst));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ PUSH(mp_load_global(qst));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_ATTR): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t top = TOP();
+ mp_obj_t obj;
+ #if MICROPY_OPT_LOAD_ATTR_FAST_PATH
+ // For the specific case of an instance type, it implements .attr
+ // and forwards to its members map. Attribute lookups on instance
+ // types are extremely common, so avoid all the other checks and
+ // calls that normally happen first.
+ mp_map_elem_t *elem = NULL;
+ if (mp_obj_is_instance_type(mp_obj_get_type(top))) {
+ mp_obj_instance_t *self = MP_OBJ_TO_PTR(top);
+ elem = mp_map_lookup(&self->members, MP_OBJ_NEW_QSTR(qst), MP_MAP_LOOKUP);
+ }
+ if (elem) {
+ obj = elem->value;
+ } else
+ #endif
+ {
+ obj = mp_load_attr(top, qst);
+ }
+ SET_TOP(obj);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_METHOD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_load_method(*sp, qst, sp);
+ sp += 1;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_SUPER_METHOD): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ sp -= 1;
+ mp_load_super_method(qst, sp - 1);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_LOAD_BUILD_CLASS):
+ MARK_EXC_IP_SELECTIVE();
+ PUSH(mp_load_build_class());
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_SUBSCR): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t index = POP();
+ SET_TOP(mp_obj_subscr(TOP(), index, MP_OBJ_SENTINEL));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_FAST_N): {
+ DECODE_UINT;
+ fastn[-unum] = POP();
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_DEREF): {
+ DECODE_UINT;
+ mp_obj_cell_set(fastn[-unum], POP());
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_name(qst, POP());
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_global(qst, POP());
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_ATTR): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_store_attr(sp[0], qst, sp[-1]);
+ sp -= 2;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_SUBSCR):
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_subscr(sp[-1], sp[0], sp[-2]);
+ sp -= 3;
+ DISPATCH();
+
+ ENTRY(MP_BC_DELETE_FAST): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ if (fastn[-unum] == MP_OBJ_NULL) {
+ goto local_name_error;
+ }
+ fastn[-unum] = MP_OBJ_NULL;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_DEREF): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ if (mp_obj_cell_get(fastn[-unum]) == MP_OBJ_NULL) {
+ goto local_name_error;
+ }
+ mp_obj_cell_set(fastn[-unum], MP_OBJ_NULL);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_NAME): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_delete_name(qst);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DELETE_GLOBAL): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_delete_global(qst);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DUP_TOP): {
+ mp_obj_t top = TOP();
+ PUSH(top);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_DUP_TOP_TWO):
+ sp += 2;
+ sp[0] = sp[-2];
+ sp[-1] = sp[-3];
+ DISPATCH();
+
+ ENTRY(MP_BC_POP_TOP):
+ sp -= 1;
+ DISPATCH();
+
+ ENTRY(MP_BC_ROT_TWO): {
+ mp_obj_t top = sp[0];
+ sp[0] = sp[-1];
+ sp[-1] = top;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_ROT_THREE): {
+ mp_obj_t top = sp[0];
+ sp[0] = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = top;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_JUMP): {
+ DECODE_SLABEL;
+ ip += slab;
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_POP_JUMP_IF_TRUE): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(POP())) {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_POP_JUMP_IF_FALSE): {
+ DECODE_SLABEL;
+ if (!mp_obj_is_true(POP())) {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_JUMP_IF_TRUE_OR_POP): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(TOP())) {
+ ip += slab;
+ } else {
+ sp--;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_JUMP_IF_FALSE_OR_POP): {
+ DECODE_SLABEL;
+ if (mp_obj_is_true(TOP())) {
+ sp--;
+ } else {
+ ip += slab;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_SETUP_WITH): {
+ MARK_EXC_IP_SELECTIVE();
+ // stack: (..., ctx_mgr)
+ mp_obj_t obj = TOP();
+ mp_load_method(obj, MP_QSTR___exit__, sp);
+ mp_load_method(obj, MP_QSTR___enter__, sp + 2);
+ mp_obj_t ret = mp_call_method_n_kw(0, 0, sp + 2);
+ sp += 1;
+ PUSH_EXC_BLOCK(1);
+ PUSH(ret);
+ // stack: (..., __exit__, ctx_mgr, as_value)
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_WITH_CLEANUP): {
+ MARK_EXC_IP_SELECTIVE();
+ // Arriving here, there's "exception control block" on top of stack,
+ // and __exit__ method (with self) underneath it. Bytecode calls __exit__,
+ // and "deletes" it off stack, shifting "exception control block"
+ // to its place.
+ // The bytecode emitter ensures that there is enough space on the Python
+ // value stack to hold the __exit__ method plus an additional 4 entries.
+ if (TOP() == mp_const_none) {
+ // stack: (..., __exit__, ctx_mgr, None)
+ sp[1] = mp_const_none;
+ sp[2] = mp_const_none;
+ sp -= 2;
+ mp_call_method_n_kw(3, 0, sp);
+ SET_TOP(mp_const_none);
+ } else if (mp_obj_is_small_int(TOP())) {
+ // Getting here there are two distinct cases:
+ // - unwind return, stack: (..., __exit__, ctx_mgr, ret_val, SMALL_INT(-1))
+ // - unwind jump, stack: (..., __exit__, ctx_mgr, dest_ip, SMALL_INT(num_exc))
+ // For both cases we do exactly the same thing.
+ mp_obj_t data = sp[-1];
+ mp_obj_t cause = sp[0];
+ sp[-1] = mp_const_none;
+ sp[0] = mp_const_none;
+ sp[1] = mp_const_none;
+ mp_call_method_n_kw(3, 0, sp - 3);
+ sp[-3] = data;
+ sp[-2] = cause;
+ sp -= 2; // we removed (__exit__, ctx_mgr)
+ } else {
+ assert(mp_obj_is_exception_instance(TOP()));
+ // stack: (..., __exit__, ctx_mgr, exc_instance)
+ // Need to pass (exc_type, exc_instance, None) as arguments to __exit__.
+ sp[1] = sp[0];
+ sp[0] = MP_OBJ_FROM_PTR(mp_obj_get_type(sp[0]));
+ sp[2] = mp_const_none;
+ sp -= 2;
+ mp_obj_t ret_value = mp_call_method_n_kw(3, 0, sp);
+ if (mp_obj_is_true(ret_value)) {
+ // We need to silence/swallow the exception. This is done
+ // by popping the exception and the __exit__ handler and
+ // replacing it with None, which signals END_FINALLY to just
+ // execute the finally handler normally.
+ SET_TOP(mp_const_none);
+ } else {
+ // We need to re-raise the exception. We pop __exit__ handler
+ // by copying the exception instance down to the new top-of-stack.
+ sp[0] = sp[3];
+ }
+ }
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_UNWIND_JUMP): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_SLABEL;
+ PUSH((mp_obj_t)(mp_uint_t)(uintptr_t)(ip + slab)); // push destination ip for jump
+ PUSH((mp_obj_t)(mp_uint_t)(*ip)); // push number of exception handlers to unwind (0x80 bit set if we also need to pop stack)
+unwind_jump:;
+ mp_uint_t unum = (mp_uint_t)POP(); // get number of exception handlers to unwind
+ while ((unum & 0x7f) > 0) {
+ unum -= 1;
+ assert(exc_sp >= exc_stack);
+
+ if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
+ if (exc_sp->handler > ip) {
+ // Found a finally handler that isn't active; run it.
+ // Getting here the stack looks like:
+ // (..., X, dest_ip)
+ // where X is pointed to by exc_sp->val_sp and in the case
+ // of a "with" block contains the context manager info.
+ assert(&sp[-1] == MP_TAGPTR_PTR(exc_sp->val_sp));
+ // We're going to run "finally" code as a coroutine
+ // (not calling it recursively). Set up a sentinel
+ // on the stack so it can return back to us when it is
+ // done (when WITH_CLEANUP or END_FINALLY reached).
+ // The sentinel is the number of exception handlers left to
+ // unwind, which is a non-negative integer.
+ PUSH(MP_OBJ_NEW_SMALL_INT(unum));
+ ip = exc_sp->handler;
+ goto dispatch_loop;
+ } else {
+ // Found a finally handler that is already active; cancel it.
+ CANCEL_ACTIVE_FINALLY(sp);
+ }
+ }
+ POP_EXC_BLOCK();
+ }
+ ip = (const byte*)MP_OBJ_TO_PTR(POP()); // pop destination ip for jump
+ if (unum != 0) {
+ // pop the exhausted iterator
+ sp -= MP_OBJ_ITER_BUF_NSLOTS;
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_SETUP_EXCEPT):
+ ENTRY(MP_BC_SETUP_FINALLY): {
+ MARK_EXC_IP_SELECTIVE();
+ #if SELECTIVE_EXC_IP
+ PUSH_EXC_BLOCK((code_state->ip[-1] == MP_BC_SETUP_FINALLY) ? 1 : 0);
+ #else
+ PUSH_EXC_BLOCK((code_state->ip[0] == MP_BC_SETUP_FINALLY) ? 1 : 0);
+ #endif
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_END_FINALLY):
+ MARK_EXC_IP_SELECTIVE();
+ // if TOS is None, just pops it and continues
+ // if TOS is an integer, finishes coroutine and returns control to caller
+ // if TOS is an exception, reraises the exception
+ assert(exc_sp >= exc_stack);
+ POP_EXC_BLOCK();
+ if (TOP() == mp_const_none) {
+ sp--;
+ } else if (mp_obj_is_small_int(TOP())) {
+ // We finished "finally" coroutine and now dispatch back
+ // to our caller, based on TOS value
+ mp_int_t cause = MP_OBJ_SMALL_INT_VALUE(POP());
+ if (cause < 0) {
+ // A negative cause indicates unwind return
+ goto unwind_return;
+ } else {
+ // Otherwise it's an unwind jump and we must push as a raw
+ // number the number of exception handlers to unwind
+ PUSH((mp_obj_t)cause);
+ goto unwind_jump;
+ }
+ } else {
+ assert(mp_obj_is_exception_instance(TOP()));
+ RAISE(TOP());
+ }
+ DISPATCH();
+
+ ENTRY(MP_BC_GET_ITER):
+ MARK_EXC_IP_SELECTIVE();
+ SET_TOP(mp_getiter(TOP(), NULL));
+ DISPATCH();
+
+ // An iterator for a for-loop takes MP_OBJ_ITER_BUF_NSLOTS slots on
+ // the Python value stack. These slots are either used to store the
+ // iterator object itself, or the first slot is MP_OBJ_NULL and
+ // the second slot holds a reference to the iterator object.
+ ENTRY(MP_BC_GET_ITER_STACK): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = TOP();
+ mp_obj_iter_buf_t *iter_buf = (mp_obj_iter_buf_t*)sp;
+ sp += MP_OBJ_ITER_BUF_NSLOTS - 1;
+ obj = mp_getiter(obj, iter_buf);
+ if (obj != MP_OBJ_FROM_PTR(iter_buf)) {
+ // Iterator didn't use the stack so indicate that with MP_OBJ_NULL.
+ *(sp - MP_OBJ_ITER_BUF_NSLOTS + 1) = MP_OBJ_NULL;
+ *(sp - MP_OBJ_ITER_BUF_NSLOTS + 2) = obj;
+ }
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_FOR_ITER): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ code_state->sp = sp;
+ mp_obj_t obj;
+ if (*(sp - MP_OBJ_ITER_BUF_NSLOTS + 1) == MP_OBJ_NULL) {
+ obj = *(sp - MP_OBJ_ITER_BUF_NSLOTS + 2);
+ } else {
+ obj = MP_OBJ_FROM_PTR(&sp[-MP_OBJ_ITER_BUF_NSLOTS + 1]);
+ }
+ mp_obj_t value = mp_iternext_allow_raise(obj);
+ if (value == MP_OBJ_STOP_ITERATION) {
+ sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
+ ip += ulab; // jump to after for-block
+ } else {
+ PUSH(value); // push the next iteration value
+ #if MICROPY_PY_SYS_SETTRACE
+ // LINE event should trigger for every iteration so invalidate last trigger
+ if (code_state->frame) {
+ code_state->frame->lineno = 0;
+ }
+ #endif
+ }
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_POP_EXCEPT_JUMP): {
+ assert(exc_sp >= exc_stack);
+ POP_EXC_BLOCK();
+ DECODE_ULABEL;
+ ip += ulab;
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_BUILD_TUPLE): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_tuple(unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_BUILD_LIST): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_list(unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_BUILD_MAP): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ PUSH(mp_obj_new_dict(unum));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_STORE_MAP):
+ MARK_EXC_IP_SELECTIVE();
+ sp -= 2;
+ mp_obj_dict_store(sp[0], sp[2], sp[1]);
+ DISPATCH();
+
+#if MICROPY_PY_BUILTINS_SET
+ ENTRY(MP_BC_BUILD_SET): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ sp -= unum - 1;
+ SET_TOP(mp_obj_new_set(unum, sp));
+ DISPATCH();
+ }
+#endif
+
+#if MICROPY_PY_BUILTINS_SLICE
+ ENTRY(MP_BC_BUILD_SLICE): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t step = mp_const_none;
+ if (*ip++ == 3) {
+ // 3-argument slice includes step
+ step = POP();
+ }
+ mp_obj_t stop = POP();
+ mp_obj_t start = TOP();
+ SET_TOP(mp_obj_new_slice(start, stop, step));
+ DISPATCH();
+ }
+#endif
+
+ ENTRY(MP_BC_STORE_COMP): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ mp_obj_t obj = sp[-(unum >> 2)];
+ if ((unum & 3) == 0) {
+ mp_obj_list_append(obj, sp[0]);
+ sp--;
+ } else if (!MICROPY_PY_BUILTINS_SET || (unum & 3) == 1) {
+ mp_obj_dict_store(obj, sp[0], sp[-1]);
+ sp -= 2;
+ #if MICROPY_PY_BUILTINS_SET
+ } else {
+ mp_obj_set_store(obj, sp[0]);
+ sp--;
+ #endif
+ }
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_UNPACK_SEQUENCE): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ mp_unpack_sequence(sp[0], unum, sp);
+ sp += unum - 1;
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_UNPACK_EX): {
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ mp_unpack_ex(sp[0], unum, sp);
+ sp += (unum & 0xff) + ((unum >> 8) & 0xff);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_FUNCTION): {
+ DECODE_PTR;
+ PUSH(mp_make_function_from_raw_code(ptr, MP_OBJ_NULL, MP_OBJ_NULL));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_FUNCTION_DEFARGS): {
+ DECODE_PTR;
+ // Stack layout: def_tuple def_dict <- TOS
+ mp_obj_t def_dict = POP();
+ SET_TOP(mp_make_function_from_raw_code(ptr, TOP(), def_dict));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_CLOSURE): {
+ DECODE_PTR;
+ size_t n_closed_over = *ip++;
+ // Stack layout: closed_overs <- TOS
+ sp -= n_closed_over - 1;
+ SET_TOP(mp_make_closure_from_raw_code(ptr, n_closed_over, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_MAKE_CLOSURE_DEFARGS): {
+ DECODE_PTR;
+ size_t n_closed_over = *ip++;
+ // Stack layout: def_tuple def_dict closed_overs <- TOS
+ sp -= 2 + n_closed_over - 1;
+ SET_TOP(mp_make_closure_from_raw_code(ptr, 0x100 | n_closed_over, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_FUNCTION): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe);
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
+ mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1);
+ #if !MICROPY_ENABLE_PYSTACK
+ if (new_state == NULL) {
+ // Couldn't allocate codestate on heap: in the strict case raise
+ // an exception, otherwise just fall through to stack allocation.
+ #if MICROPY_STACKLESS_STRICT
+ deep_recursion_error:
+ mp_raise_recursion_depth();
+ #endif
+ } else
+ #endif
+ {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ }
+ #endif
+ SET_TOP(mp_call_function_n_kw(*sp, unum & 0xff, (unum >> 8) & 0xff, sp + 1));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_FUNCTION_VAR_KW): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ // We have following stack layout here:
+ // fun arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 2;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
+
+ mp_call_args_t out_args;
+ mp_call_prepare_args_n_kw_var(false, unum, sp, &out_args);
+
+ mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
+ out_args.n_args, out_args.n_kw, out_args.args);
+ #if !MICROPY_ENABLE_PYSTACK
+ // Freeing args at this point does not follow a LIFO order so only do it if
+ // pystack is not enabled. For pystack, they are freed when code_state is.
+ mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
+ #endif
+ #if !MICROPY_ENABLE_PYSTACK
+ if (new_state == NULL) {
+ // Couldn't allocate codestate on heap: in the strict case raise
+ // an exception, otherwise just fall through to stack allocation.
+ #if MICROPY_STACKLESS_STRICT
+ goto deep_recursion_error;
+ #endif
+ } else
+ #endif
+ {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw_var(false, unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_CALL_METHOD): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 1;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
+
+ size_t n_args = unum & 0xff;
+ size_t n_kw = (unum >> 8) & 0xff;
+ int adjust = (sp[1] == MP_OBJ_NULL) ? 0 : 1;
+
+ mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(*sp, n_args + adjust, n_kw, sp + 2 - adjust);
+ #if !MICROPY_ENABLE_PYSTACK
+ if (new_state == NULL) {
+ // Couldn't allocate codestate on heap: in the strict case raise
+ // an exception, otherwise just fall through to stack allocation.
+ #if MICROPY_STACKLESS_STRICT
+ goto deep_recursion_error;
+ #endif
+ } else
+ #endif
+ {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw(unum & 0xff, (unum >> 8) & 0xff, sp));
+ DISPATCH_WITH_PEND_EXC_CHECK();
+ }
+
+ ENTRY(MP_BC_CALL_METHOD_VAR_KW): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_UINT;
+ // unum & 0xff == n_positional
+ // (unum >> 8) & 0xff == n_keyword
+ // We have following stack layout here:
+ // fun self arg0 arg1 ... kw0 val0 kw1 val1 ... seq dict <- TOS
+ sp -= (unum & 0xff) + ((unum >> 7) & 0x1fe) + 3;
+ #if MICROPY_STACKLESS
+ if (mp_obj_get_type(*sp) == &mp_type_fun_bc) {
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
+
+ mp_call_args_t out_args;
+ mp_call_prepare_args_n_kw_var(true, unum, sp, &out_args);
+
+ mp_code_state_t *new_state = mp_obj_fun_bc_prepare_codestate(out_args.fun,
+ out_args.n_args, out_args.n_kw, out_args.args);
+ #if !MICROPY_ENABLE_PYSTACK
+ // Freeing args at this point does not follow a LIFO order so only do it if
+ // pystack is not enabled. For pystack, they are freed when code_state is.
+ mp_nonlocal_free(out_args.args, out_args.n_alloc * sizeof(mp_obj_t));
+ #endif
+ #if !MICROPY_ENABLE_PYSTACK
+ if (new_state == NULL) {
+ // Couldn't allocate codestate on heap: in the strict case raise
+ // an exception, otherwise just fall through to stack allocation.
+ #if MICROPY_STACKLESS_STRICT
+ goto deep_recursion_error;
+ #endif
+ } else
+ #endif
+ {
+ new_state->prev = code_state;
+ code_state = new_state;
+ nlr_pop();
+ goto run_code_state;
+ }
+ }
+ #endif
+ SET_TOP(mp_call_method_n_kw_var(true, unum, sp));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_RETURN_VALUE):
+ MARK_EXC_IP_SELECTIVE();
+unwind_return:
+ // Search for and execute finally handlers that aren't already active
+ while (exc_sp >= exc_stack) {
+ if (MP_TAGPTR_TAG1(exc_sp->val_sp)) {
+ if (exc_sp->handler > ip) {
+ // Found a finally handler that isn't active; run it.
+ // Getting here the stack looks like:
+ // (..., X, [iter0, iter1, ...,] ret_val)
+ // where X is pointed to by exc_sp->val_sp and in the case
+ // of a "with" block contains the context manager info.
+ // There may be 0 or more for-iterators between X and the
+ // return value, and these must be removed before control can
+ // pass to the finally code. We simply copy the ret_value down
+ // over these iterators, if they exist. If they don't then the
+ // following is a null operation.
+ mp_obj_t *finally_sp = MP_TAGPTR_PTR(exc_sp->val_sp);
+ finally_sp[1] = sp[0];
+ sp = &finally_sp[1];
+ // We're going to run "finally" code as a coroutine
+ // (not calling it recursively). Set up a sentinel
+ // on a stack so it can return back to us when it is
+ // done (when WITH_CLEANUP or END_FINALLY reached).
+ PUSH(MP_OBJ_NEW_SMALL_INT(-1));
+ ip = exc_sp->handler;
+ goto dispatch_loop;
+ } else {
+ // Found a finally handler that is already active; cancel it.
+ CANCEL_ACTIVE_FINALLY(sp);
+ }
+ }
+ POP_EXC_BLOCK();
+ }
+ nlr_pop();
+ code_state->sp = sp;
+ assert(exc_sp == exc_stack - 1);
+ MICROPY_VM_HOOK_RETURN
+ #if MICROPY_STACKLESS
+ if (code_state->prev != NULL) {
+ mp_obj_t res = *sp;
+ mp_globals_set(code_state->old_globals);
+ mp_code_state_t *new_code_state = code_state->prev;
+ #if MICROPY_ENABLE_PYSTACK
+ // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
+ // (The latter is implicitly freed when using pystack due to its LIFO nature.)
+ // The sizeof in the following statement does not include the size of the variable
+ // part of the struct. This arg is anyway not used if pystack is enabled.
+ mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
+ #endif
+ code_state = new_code_state;
+ *code_state->sp = res;
+ goto run_code_state_from_return;
+ }
+ #endif
+ FRAME_LEAVE();
+ return MP_VM_RETURN_NORMAL;
+
+ ENTRY(MP_BC_RAISE_LAST): {
+ MARK_EXC_IP_SELECTIVE();
+ // search for the inner-most previous exception, to reraise it
+ mp_obj_t obj = MP_OBJ_NULL;
+ for (mp_exc_stack_t *e = exc_sp; e >= exc_stack; --e) {
+ if (e->prev_exc != NULL) {
+ obj = MP_OBJ_FROM_PTR(e->prev_exc);
+ break;
+ }
+ }
+ if (obj == MP_OBJ_NULL) {
+ obj = mp_obj_new_exception_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("no active exception to reraise"));
+ }
+ RAISE(obj);
+ }
+
+ ENTRY(MP_BC_RAISE_OBJ): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = mp_make_raise_obj(TOP());
+ RAISE(obj);
+ }
+
+ ENTRY(MP_BC_RAISE_FROM): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_warning(NULL, "exception chaining not supported");
+ sp--; // ignore (pop) "from" argument
+ mp_obj_t obj = mp_make_raise_obj(TOP());
+ RAISE(obj);
+ }
+
+ ENTRY(MP_BC_YIELD_VALUE):
+yield:
+ nlr_pop();
+ code_state->ip = ip;
+ code_state->sp = sp;
+ code_state->exc_sp_idx = MP_CODE_STATE_EXC_SP_IDX_FROM_PTR(exc_stack, exc_sp);
+ FRAME_LEAVE();
+ return MP_VM_RETURN_YIELD;
+
+ ENTRY(MP_BC_YIELD_FROM): {
+ MARK_EXC_IP_SELECTIVE();
+//#define EXC_MATCH(exc, type) mp_obj_is_type(exc, type)
+#define EXC_MATCH(exc, type) mp_obj_exception_match(exc, type)
+#define GENERATOR_EXIT_IF_NEEDED(t) if (t != MP_OBJ_NULL && EXC_MATCH(t, MP_OBJ_FROM_PTR(&mp_type_GeneratorExit))) { mp_obj_t raise_t = mp_make_raise_obj(t); RAISE(raise_t); }
+ mp_vm_return_kind_t ret_kind;
+ mp_obj_t send_value = POP();
+ mp_obj_t t_exc = MP_OBJ_NULL;
+ mp_obj_t ret_value;
+ code_state->sp = sp; // Save sp because it's needed if mp_resume raises StopIteration
+ if (inject_exc != MP_OBJ_NULL) {
+ t_exc = inject_exc;
+ inject_exc = MP_OBJ_NULL;
+ ret_kind = mp_resume(TOP(), MP_OBJ_NULL, t_exc, &ret_value);
+ } else {
+ ret_kind = mp_resume(TOP(), send_value, MP_OBJ_NULL, &ret_value);
+ }
+
+ if (ret_kind == MP_VM_RETURN_YIELD) {
+ ip--;
+ PUSH(ret_value);
+ goto yield;
+ } else if (ret_kind == MP_VM_RETURN_NORMAL) {
+ // The generator has finished, and returned a value via StopIteration
+ // Replace exhausted generator with the returned value
+ SET_TOP(ret_value);
+ // If we injected GeneratorExit downstream, then even
+ // if it was swallowed, we re-raise GeneratorExit
+ GENERATOR_EXIT_IF_NEEDED(t_exc);
+ DISPATCH();
+ } else {
+ assert(ret_kind == MP_VM_RETURN_EXCEPTION);
+ assert(!EXC_MATCH(ret_value, MP_OBJ_FROM_PTR(&mp_type_StopIteration)));
+ // Pop exhausted gen
+ sp--;
+ RAISE(ret_value);
+ }
+ }
+
+ ENTRY(MP_BC_IMPORT_NAME): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t obj = POP();
+ SET_TOP(mp_import_name(qst, obj, TOP()));
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_IMPORT_FROM): {
+ FRAME_UPDATE();
+ MARK_EXC_IP_SELECTIVE();
+ DECODE_QSTR;
+ mp_obj_t obj = mp_import_from(TOP(), qst);
+ PUSH(obj);
+ DISPATCH();
+ }
+
+ ENTRY(MP_BC_IMPORT_STAR):
+ MARK_EXC_IP_SELECTIVE();
+ mp_import_all(POP());
+ DISPATCH();
+
+#if MICROPY_OPT_COMPUTED_GOTO
+ ENTRY(MP_BC_LOAD_CONST_SMALL_INT_MULTI):
+ PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
+ DISPATCH();
+
+ ENTRY(MP_BC_LOAD_FAST_MULTI):
+ obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
+ goto load_check;
+
+ ENTRY(MP_BC_STORE_FAST_MULTI):
+ fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
+ DISPATCH();
+
+ ENTRY(MP_BC_UNARY_OP_MULTI):
+ MARK_EXC_IP_SELECTIVE();
+ SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
+ DISPATCH();
+
+ ENTRY(MP_BC_BINARY_OP_MULTI): {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t rhs = POP();
+ mp_obj_t lhs = TOP();
+ SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
+ DISPATCH();
+ }
+
+ ENTRY_DEFAULT:
+ MARK_EXC_IP_SELECTIVE();
+#else
+ ENTRY_DEFAULT:
+ if (ip[-1] < MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM) {
+ PUSH(MP_OBJ_NEW_SMALL_INT((mp_int_t)ip[-1] - MP_BC_LOAD_CONST_SMALL_INT_MULTI - MP_BC_LOAD_CONST_SMALL_INT_MULTI_EXCESS));
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_LOAD_FAST_MULTI + MP_BC_LOAD_FAST_MULTI_NUM) {
+ obj_shared = fastn[MP_BC_LOAD_FAST_MULTI - (mp_int_t)ip[-1]];
+ goto load_check;
+ } else if (ip[-1] < MP_BC_STORE_FAST_MULTI + MP_BC_STORE_FAST_MULTI_NUM) {
+ fastn[MP_BC_STORE_FAST_MULTI - (mp_int_t)ip[-1]] = POP();
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_UNARY_OP_MULTI + MP_BC_UNARY_OP_MULTI_NUM) {
+ SET_TOP(mp_unary_op(ip[-1] - MP_BC_UNARY_OP_MULTI, TOP()));
+ DISPATCH();
+ } else if (ip[-1] < MP_BC_BINARY_OP_MULTI + MP_BC_BINARY_OP_MULTI_NUM) {
+ mp_obj_t rhs = POP();
+ mp_obj_t lhs = TOP();
+ SET_TOP(mp_binary_op(ip[-1] - MP_BC_BINARY_OP_MULTI, lhs, rhs));
+ DISPATCH();
+ } else
+#endif
+ {
+
+ mp_obj_t obj = mp_obj_new_exception_msg(&mp_type_NotImplementedError, MP_ERROR_TEXT("opcode"));
+ nlr_pop();
+ code_state->state[0] = obj;
+ FRAME_LEAVE();
+ return MP_VM_RETURN_EXCEPTION;
+ }
+
+#if !MICROPY_OPT_COMPUTED_GOTO
+ } // switch
+#endif
+
+pending_exception_check:
+ MICROPY_VM_HOOK_LOOP
+
+ #if MICROPY_ENABLE_SCHEDULER
+ // This is an inlined variant of mp_handle_pending
+ if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+ mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+ // Re-check state is still pending now that we're in the atomic section.
+ if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
+ if (obj != MP_OBJ_NULL) {
+ MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
+ if (!mp_sched_num_pending()) {
+ MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+ }
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ RAISE(obj);
+ }
+ mp_handle_pending_tail(atomic_state);
+ } else {
+ MICROPY_END_ATOMIC_SECTION(atomic_state);
+ }
+ }
+ #else
+ // This is an inlined variant of mp_handle_pending
+ if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
+ MARK_EXC_IP_SELECTIVE();
+ mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
+ MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
+ RAISE(obj);
+ }
+ #endif
+
+ #if MICROPY_PY_THREAD_GIL
+ #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
+ if (--gil_divisor == 0)
+ #endif
+ {
+ #if MICROPY_PY_THREAD_GIL_VM_DIVISOR
+ gil_divisor = MICROPY_PY_THREAD_GIL_VM_DIVISOR;
+ #endif
+ #if MICROPY_ENABLE_SCHEDULER
+ // can only switch threads if the scheduler is unlocked
+ if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE)
+ #endif
+ {
+ MP_THREAD_GIL_EXIT();
+ MP_THREAD_GIL_ENTER();
+ }
+ }
+ #endif
+
+ } // for loop
+
+ } else {
+exception_handler:
+ // exception occurred
+
+ #if MICROPY_PY_SYS_EXC_INFO
+ MP_STATE_VM(cur_exception) = nlr.ret_val;
+ #endif
+
+ #if SELECTIVE_EXC_IP
+ // with selective ip, we store the ip 1 byte past the opcode, so move ptr back
+ code_state->ip -= 1;
+ #endif
+
+ if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_StopIteration))) {
+ if (code_state->ip) {
+ // check if it's a StopIteration within a for block
+ if (*code_state->ip == MP_BC_FOR_ITER) {
+ const byte *ip = code_state->ip + 1;
+ DECODE_ULABEL; // the jump offset if iteration finishes; for labels are always forward
+ code_state->ip = ip + ulab; // jump to after for-block
+ code_state->sp -= MP_OBJ_ITER_BUF_NSLOTS; // pop the exhausted iterator
+ goto outer_dispatch_loop; // continue with dispatch loop
+ } else if (*code_state->ip == MP_BC_YIELD_FROM) {
+ // StopIteration inside yield from call means return a value of
+ // yield from, so inject exception's value as yield from's result
+ // (Instead of stack pop then push we just replace exhausted gen with value)
+ *code_state->sp = mp_obj_exception_get_value(MP_OBJ_FROM_PTR(nlr.ret_val));
+ code_state->ip++; // yield from is over, move to next instruction
+ goto outer_dispatch_loop; // continue with dispatch loop
+ }
+ }
+ }
+
+ #if MICROPY_PY_SYS_SETTRACE
+ // Exceptions are traced here
+ if (mp_obj_is_subclass_fast(MP_OBJ_FROM_PTR(((mp_obj_base_t*)nlr.ret_val)->type), MP_OBJ_FROM_PTR(&mp_type_Exception))) {
+ TRACE_TICK(code_state->ip, code_state->sp, true /* yes, it's an exception */);
+ }
+ #endif
+
+#if MICROPY_STACKLESS
+unwind_loop:
+#endif
+ // Set traceback info (file and line number) where the exception occurred, but not for:
+ // - constant GeneratorExit object, because it's const
+ // - exceptions re-raised by END_FINALLY
+ // - exceptions re-raised explicitly by "raise"
+ if (nlr.ret_val != &mp_const_GeneratorExit_obj
+ && *code_state->ip != MP_BC_END_FINALLY
+ && *code_state->ip != MP_BC_RAISE_LAST) {
+ const byte *ip = code_state->fun_bc->bytecode;
+ MP_BC_PRELUDE_SIG_DECODE(ip);
+ MP_BC_PRELUDE_SIZE_DECODE(ip);
+ const byte *bytecode_start = ip + n_info + n_cell;
+ #if !MICROPY_PERSISTENT_CODE
+ // so bytecode is aligned
+ bytecode_start = MP_ALIGN(bytecode_start, sizeof(mp_uint_t));
+ #endif
+ size_t bc = code_state->ip - bytecode_start;
+ #if MICROPY_PERSISTENT_CODE
+ qstr block_name = ip[0] | (ip[1] << 8);
+ qstr source_file = ip[2] | (ip[3] << 8);
+ ip += 4;
+ #else
+ qstr block_name = mp_decode_uint_value(ip);
+ ip = mp_decode_uint_skip(ip);
+ qstr source_file = mp_decode_uint_value(ip);
+ ip = mp_decode_uint_skip(ip);
+ #endif
+ size_t source_line = mp_bytecode_get_source_line(ip, bc);
+ mp_obj_exception_add_traceback(MP_OBJ_FROM_PTR(nlr.ret_val), source_file, source_line, block_name);
+ }
+
+ while (exc_sp >= exc_stack && exc_sp->handler <= code_state->ip) {
+
+ // nested exception
+
+ assert(exc_sp >= exc_stack);
+
+ // TODO make a proper message for nested exception
+ // at the moment we are just raising the very last exception (the one that caused the nested exception)
+
+ // move up to previous exception handler
+ POP_EXC_BLOCK();
+ }
+
+ if (exc_sp >= exc_stack) {
+ // catch exception and pass to byte code
+ code_state->ip = exc_sp->handler;
+ mp_obj_t *sp = MP_TAGPTR_PTR(exc_sp->val_sp);
+ // save this exception in the stack so it can be used in a reraise, if needed
+ exc_sp->prev_exc = nlr.ret_val;
+ // push exception object so it can be handled by bytecode
+ PUSH(MP_OBJ_FROM_PTR(nlr.ret_val));
+ code_state->sp = sp;
+
+ #if MICROPY_STACKLESS
+ } else if (code_state->prev != NULL) {
+ mp_globals_set(code_state->old_globals);
+ mp_code_state_t *new_code_state = code_state->prev;
+ #if MICROPY_ENABLE_PYSTACK
+ // Free code_state, and args allocated by mp_call_prepare_args_n_kw_var
+ // (The latter is implicitly freed when using pystack due to its LIFO nature.)
+ // The sizeof in the following statement does not include the size of the variable
+ // part of the struct. This arg is anyway not used if pystack is enabled.
+ mp_nonlocal_free(code_state, sizeof(mp_code_state_t));
+ #endif
+ code_state = new_code_state;
+ size_t n_state = code_state->n_state;
+ fastn = &code_state->state[n_state - 1];
+ exc_stack = (mp_exc_stack_t*)(code_state->state + n_state);
+ // variables that are visible to the exception handler (declared volatile)
+ exc_sp = MP_CODE_STATE_EXC_SP_IDX_TO_PTR(exc_stack, code_state->exc_sp_idx); // stack grows up, exc_sp points to top of stack
+ goto unwind_loop;
+
+ #endif
+ } else {
+ // propagate exception to higher level
+ // Note: ip and sp don't have usable values at this point
+ code_state->state[0] = MP_OBJ_FROM_PTR(nlr.ret_val); // put exception here because sp is invalid
+ FRAME_LEAVE();
+ return MP_VM_RETURN_EXCEPTION;
+ }
+ }
+ }
+}
diff --git a/circuitpython/py/vmentrytable.h b/circuitpython/py/vmentrytable.h
new file mode 100644
index 0000000..b270dc9
--- /dev/null
+++ b/circuitpython/py/vmentrytable.h
@@ -0,0 +1,138 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+// *FORMAT-OFF*
+
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winitializer-overrides"
+#endif // __clang__
+#if __GNUC__ >= 5
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Woverride-init"
+#endif // __GNUC__ >= 5
+
+#include "supervisor/linker.h"
+
+#if MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE
+#define COMPUTE_ENTRY(x) ((char *)(x) - (char *) && entry_MP_BC_LOAD_CONST_FALSE)
+typedef int16_t entry_table_type;
+#else
+#define COMPUTE_ENTRY(x) (x)
+typedef void *entry_table_type;
+#endif
+
+// Dispatch table for the computed-goto VM: maps every opcode byte (0-255) to
+// the address of its handler label inside the VM loop (or, when
+// MICROPY_OPT_COMPUTED_GOTO_SAVE_SPACE is set, a 16-bit offset from the
+// entry_MP_BC_LOAD_CONST_FALSE label).  Uses GCC range designators; the
+// specific entries below deliberately override the [0 ... 255] catch-all,
+// which is why -Woverride-init is suppressed around this file.
+static entry_table_type const PLACE_IN_DTCM_DATA(entry_table[256]) = {
+    [0 ... 255] = COMPUTE_ENTRY(&& entry_default),
+    [MP_BC_LOAD_CONST_FALSE] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_FALSE),
+    [MP_BC_LOAD_CONST_NONE] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_NONE),
+    [MP_BC_LOAD_CONST_TRUE] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_TRUE),
+    [MP_BC_LOAD_CONST_SMALL_INT] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_SMALL_INT),
+    [MP_BC_LOAD_CONST_STRING] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_STRING),
+    [MP_BC_LOAD_CONST_OBJ] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_OBJ),
+    [MP_BC_LOAD_NULL] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_NULL),
+    [MP_BC_LOAD_FAST_N] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_FAST_N),
+    [MP_BC_LOAD_DEREF] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_DEREF),
+    [MP_BC_LOAD_NAME] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_NAME),
+    [MP_BC_LOAD_GLOBAL] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_GLOBAL),
+    [MP_BC_LOAD_ATTR] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_ATTR),
+    [MP_BC_LOAD_METHOD] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_METHOD),
+    [MP_BC_LOAD_SUPER_METHOD] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_SUPER_METHOD),
+    [MP_BC_LOAD_BUILD_CLASS] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_BUILD_CLASS),
+    [MP_BC_LOAD_SUBSCR] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_SUBSCR),
+    [MP_BC_STORE_FAST_N] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_FAST_N),
+    [MP_BC_STORE_DEREF] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_DEREF),
+    [MP_BC_STORE_NAME] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_NAME),
+    [MP_BC_STORE_GLOBAL] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_GLOBAL),
+    [MP_BC_STORE_ATTR] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_ATTR),
+    [MP_BC_STORE_SUBSCR] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_SUBSCR),
+    [MP_BC_DELETE_FAST] = COMPUTE_ENTRY(&& entry_MP_BC_DELETE_FAST),
+    [MP_BC_DELETE_DEREF] = COMPUTE_ENTRY(&& entry_MP_BC_DELETE_DEREF),
+    [MP_BC_DELETE_NAME] = COMPUTE_ENTRY(&& entry_MP_BC_DELETE_NAME),
+    [MP_BC_DELETE_GLOBAL] = COMPUTE_ENTRY(&& entry_MP_BC_DELETE_GLOBAL),
+    [MP_BC_DUP_TOP] = COMPUTE_ENTRY(&& entry_MP_BC_DUP_TOP),
+    [MP_BC_DUP_TOP_TWO] = COMPUTE_ENTRY(&& entry_MP_BC_DUP_TOP_TWO),
+    [MP_BC_POP_TOP] = COMPUTE_ENTRY(&& entry_MP_BC_POP_TOP),
+    [MP_BC_ROT_TWO] = COMPUTE_ENTRY(&& entry_MP_BC_ROT_TWO),
+    [MP_BC_ROT_THREE] = COMPUTE_ENTRY(&& entry_MP_BC_ROT_THREE),
+    [MP_BC_JUMP] = COMPUTE_ENTRY(&& entry_MP_BC_JUMP),
+    [MP_BC_POP_JUMP_IF_TRUE] = COMPUTE_ENTRY(&& entry_MP_BC_POP_JUMP_IF_TRUE),
+    [MP_BC_POP_JUMP_IF_FALSE] = COMPUTE_ENTRY(&& entry_MP_BC_POP_JUMP_IF_FALSE),
+    [MP_BC_JUMP_IF_TRUE_OR_POP] = COMPUTE_ENTRY(&& entry_MP_BC_JUMP_IF_TRUE_OR_POP),
+    [MP_BC_JUMP_IF_FALSE_OR_POP] = COMPUTE_ENTRY(&& entry_MP_BC_JUMP_IF_FALSE_OR_POP),
+    [MP_BC_SETUP_WITH] = COMPUTE_ENTRY(&& entry_MP_BC_SETUP_WITH),
+    [MP_BC_WITH_CLEANUP] = COMPUTE_ENTRY(&& entry_MP_BC_WITH_CLEANUP),
+    [MP_BC_UNWIND_JUMP] = COMPUTE_ENTRY(&& entry_MP_BC_UNWIND_JUMP),
+    [MP_BC_SETUP_EXCEPT] = COMPUTE_ENTRY(&& entry_MP_BC_SETUP_EXCEPT),
+    [MP_BC_SETUP_FINALLY] = COMPUTE_ENTRY(&& entry_MP_BC_SETUP_FINALLY),
+    [MP_BC_END_FINALLY] = COMPUTE_ENTRY(&& entry_MP_BC_END_FINALLY),
+    [MP_BC_GET_ITER] = COMPUTE_ENTRY(&& entry_MP_BC_GET_ITER),
+    [MP_BC_GET_ITER_STACK] = COMPUTE_ENTRY(&& entry_MP_BC_GET_ITER_STACK),
+    [MP_BC_FOR_ITER] = COMPUTE_ENTRY(&& entry_MP_BC_FOR_ITER),
+    [MP_BC_POP_EXCEPT_JUMP] = COMPUTE_ENTRY(&& entry_MP_BC_POP_EXCEPT_JUMP),
+    [MP_BC_BUILD_TUPLE] = COMPUTE_ENTRY(&& entry_MP_BC_BUILD_TUPLE),
+    [MP_BC_BUILD_LIST] = COMPUTE_ENTRY(&& entry_MP_BC_BUILD_LIST),
+    [MP_BC_BUILD_MAP] = COMPUTE_ENTRY(&& entry_MP_BC_BUILD_MAP),
+    [MP_BC_STORE_MAP] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_MAP),
+    #if MICROPY_PY_BUILTINS_SET
+    [MP_BC_BUILD_SET] = COMPUTE_ENTRY(&& entry_MP_BC_BUILD_SET),
+    #endif
+    #if MICROPY_PY_BUILTINS_SLICE
+    [MP_BC_BUILD_SLICE] = COMPUTE_ENTRY(&& entry_MP_BC_BUILD_SLICE),
+    #endif
+    [MP_BC_STORE_COMP] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_COMP),
+    [MP_BC_UNPACK_SEQUENCE] = COMPUTE_ENTRY(&& entry_MP_BC_UNPACK_SEQUENCE),
+    [MP_BC_UNPACK_EX] = COMPUTE_ENTRY(&& entry_MP_BC_UNPACK_EX),
+    [MP_BC_MAKE_FUNCTION] = COMPUTE_ENTRY(&& entry_MP_BC_MAKE_FUNCTION),
+    [MP_BC_MAKE_FUNCTION_DEFARGS] = COMPUTE_ENTRY(&& entry_MP_BC_MAKE_FUNCTION_DEFARGS),
+    [MP_BC_MAKE_CLOSURE] = COMPUTE_ENTRY(&& entry_MP_BC_MAKE_CLOSURE),
+    [MP_BC_MAKE_CLOSURE_DEFARGS] = COMPUTE_ENTRY(&& entry_MP_BC_MAKE_CLOSURE_DEFARGS),
+    [MP_BC_CALL_FUNCTION] = COMPUTE_ENTRY(&& entry_MP_BC_CALL_FUNCTION),
+    [MP_BC_CALL_FUNCTION_VAR_KW] = COMPUTE_ENTRY(&& entry_MP_BC_CALL_FUNCTION_VAR_KW),
+    [MP_BC_CALL_METHOD] = COMPUTE_ENTRY(&& entry_MP_BC_CALL_METHOD),
+    [MP_BC_CALL_METHOD_VAR_KW] = COMPUTE_ENTRY(&& entry_MP_BC_CALL_METHOD_VAR_KW),
+    [MP_BC_RETURN_VALUE] = COMPUTE_ENTRY(&& entry_MP_BC_RETURN_VALUE),
+    [MP_BC_RAISE_LAST] = COMPUTE_ENTRY(&& entry_MP_BC_RAISE_LAST),
+    [MP_BC_RAISE_OBJ] = COMPUTE_ENTRY(&& entry_MP_BC_RAISE_OBJ),
+    [MP_BC_RAISE_FROM] = COMPUTE_ENTRY(&& entry_MP_BC_RAISE_FROM),
+    [MP_BC_YIELD_VALUE] = COMPUTE_ENTRY(&& entry_MP_BC_YIELD_VALUE),
+    [MP_BC_YIELD_FROM] = COMPUTE_ENTRY(&& entry_MP_BC_YIELD_FROM),
+    [MP_BC_IMPORT_NAME] = COMPUTE_ENTRY(&& entry_MP_BC_IMPORT_NAME),
+    [MP_BC_IMPORT_FROM] = COMPUTE_ENTRY(&& entry_MP_BC_IMPORT_FROM),
+    [MP_BC_IMPORT_STAR] = COMPUTE_ENTRY(&& entry_MP_BC_IMPORT_STAR),
+    [MP_BC_LOAD_CONST_SMALL_INT_MULTI ... MP_BC_LOAD_CONST_SMALL_INT_MULTI + MP_BC_LOAD_CONST_SMALL_INT_MULTI_NUM - 1] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_CONST_SMALL_INT_MULTI),
+    [MP_BC_LOAD_FAST_MULTI ... MP_BC_LOAD_FAST_MULTI + MP_BC_LOAD_FAST_MULTI_NUM - 1] = COMPUTE_ENTRY(&& entry_MP_BC_LOAD_FAST_MULTI),
+    // Fix: size the STORE_FAST range with its own count, MP_BC_STORE_FAST_MULTI_NUM,
+    // not MP_BC_LOAD_FAST_MULTI_NUM.  Using the LOAD count only works while the two
+    // constants happen to be equal; if they ever diverge, part of the STORE_FAST
+    // opcode range would silently fall through to entry_default.
+    [MP_BC_STORE_FAST_MULTI ... MP_BC_STORE_FAST_MULTI + MP_BC_STORE_FAST_MULTI_NUM - 1] = COMPUTE_ENTRY(&& entry_MP_BC_STORE_FAST_MULTI),
+    [MP_BC_UNARY_OP_MULTI ... MP_BC_UNARY_OP_MULTI + MP_BC_UNARY_OP_MULTI_NUM - 1] = COMPUTE_ENTRY(&& entry_MP_BC_UNARY_OP_MULTI),
+    [MP_BC_BINARY_OP_MULTI ... MP_BC_BINARY_OP_MULTI + MP_BC_BINARY_OP_MULTI_NUM - 1] = COMPUTE_ENTRY(&& entry_MP_BC_BINARY_OP_MULTI),
+};
+
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif // __clang__
+#if __GNUC__ >= 5
+#pragma GCC diagnostic pop
+#endif // __GNUC__ >= 5
diff --git a/circuitpython/py/vstr.c b/circuitpython/py/vstr.c
new file mode 100644
index 0000000..acc957e
--- /dev/null
+++ b/circuitpython/py/vstr.c
@@ -0,0 +1,249 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2013, 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+
+#include "py/mpconfig.h"
+#include "py/runtime.h"
+#include "py/mpprint.h"
+
+// returned value is always at least 1 greater than argument
+#define ROUND_ALLOC(a) (((a) & ((~0U) - 7)) + 8)
+
+// Init the vstr so it allocs exactly given number of bytes. Set length to zero.
+// A minimum of 1 byte is allocated so vstr->buf is never NULL.
+void vstr_init(vstr_t *vstr, size_t alloc) {
+    if (alloc < 1) {
+        alloc = 1;
+    }
+    vstr->alloc = alloc;
+    vstr->len = 0;
+    vstr->buf = m_new(char, vstr->alloc);
+    // heap-owned buffer: vstr_clear/vstr_free will release it
+    vstr->fixed_buf = false;
+}
+
+// Init the vstr so it allocs exactly enough ram to hold a null-terminated
+// string of the given length, and set the length.
+void vstr_init_len(vstr_t *vstr, size_t len) {
+    // len + 1 below would wrap to 0 for SIZE_MAX; treat as allocation failure
+    if (len == SIZE_MAX) {
+        m_malloc_fail(len);
+    }
+    vstr_init(vstr, len + 1);
+    vstr->len = len;
+}
+
+// Init the vstr to use a caller-provided, fixed-size buffer of `alloc` bytes.
+// The buffer is never reallocated or freed by vstr functions; operations that
+// would need to grow it raise instead (see vstr_extend/vstr_ensure_extra).
+void vstr_init_fixed_buf(vstr_t *vstr, size_t alloc, char *buf) {
+    vstr->alloc = alloc;
+    vstr->len = 0;
+    vstr->buf = buf;
+    vstr->fixed_buf = true;
+}
+
+// Init the vstr and set up `print` so that mp_print_* output is appended to it.
+void vstr_init_print(vstr_t *vstr, size_t alloc, mp_print_t *print) {
+    vstr_init(vstr, alloc);
+    print->data = vstr;
+    // vstr_add_strn has a compatible (data, str, len) shape for mp_print_strn_t
+    print->print_strn = (mp_print_strn_t)vstr_add_strn;
+}
+
+// Free the vstr's heap buffer (if it owns one) and null out the pointer.
+// The vstr_t struct itself is not freed; contrast with vstr_free.
+void vstr_clear(vstr_t *vstr) {
+    if (!vstr->fixed_buf) {
+        m_del(char, vstr->buf, vstr->alloc);
+    }
+    vstr->buf = NULL;
+}
+
+// Allocate a new vstr_t on the heap and init it with the given allocation size.
+// Caller owns the result and should release it with vstr_free.
+vstr_t *vstr_new(size_t alloc) {
+    vstr_t *vstr = m_new_obj(vstr_t);
+    vstr_init(vstr, alloc);
+    return vstr;
+}
+
+// Free both the buffer (unless caller-owned fixed_buf) and the vstr_t struct.
+// Safe to call with NULL.
+void vstr_free(vstr_t *vstr) {
+    if (vstr != NULL) {
+        if (!vstr->fixed_buf) {
+            m_del(char, vstr->buf, vstr->alloc);
+        }
+        m_del_obj(vstr_t, vstr);
+    }
+}
+
+// Extend vstr strictly by requested size, return pointer to newly added chunk.
+// Grows the allocation only; len is unchanged. Raises RuntimeError on a
+// fixed buffer, which cannot grow.
+char *vstr_extend(vstr_t *vstr, size_t size) {
+    if (vstr->fixed_buf) {
+        // We can't reallocate, and the caller is expecting the space to
+        // be there, so the only safe option is to raise an exception.
+        mp_raise_msg(&mp_type_RuntimeError, NULL);
+    }
+    char *new_buf = m_renew(char, vstr->buf, vstr->alloc, vstr->alloc + size);
+    // compute the chunk pointer before updating alloc (it marks the old end)
+    char *p = new_buf + vstr->alloc;
+    vstr->alloc += size;
+    vstr->buf = new_buf;
+    return p;
+}
+
+// Ensure there is room for at least `size` more bytes beyond vstr->len,
+// reallocating with some slack (via ROUND_ALLOC) to amortize future growth.
+// Raises RuntimeError if the vstr uses a fixed buffer that is too small.
+STATIC void vstr_ensure_extra(vstr_t *vstr, size_t size) {
+    if (vstr->len + size > vstr->alloc) {
+        if (vstr->fixed_buf) {
+            // We can't reallocate, and the caller is expecting the space to
+            // be there, so the only safe option is to raise an exception.
+            mp_raise_msg(&mp_type_RuntimeError, NULL);
+        }
+        // over-allocate: +16 slack, then rounded up by ROUND_ALLOC
+        size_t new_alloc = ROUND_ALLOC((vstr->len + size) + 16);
+        char *new_buf = m_renew(char, vstr->buf, vstr->alloc, new_alloc);
+        vstr->alloc = new_alloc;
+        vstr->buf = new_buf;
+    }
+}
+
+// Pre-grow the allocation so `size` more bytes can be added without
+// further reallocation; len is unchanged.
+void vstr_hint_size(vstr_t *vstr, size_t size) {
+    vstr_ensure_extra(vstr, size);
+}
+
+// Increase len by `len` bytes and return a pointer to the start of the
+// newly added (uninitialized) region, for the caller to fill in.
+char *vstr_add_len(vstr_t *vstr, size_t len) {
+    vstr_ensure_extra(vstr, len);
+    char *buf = vstr->buf + vstr->len;
+    vstr->len += len;
+    return buf;
+}
+
+// Doesn't increase len, just makes sure there is a null byte at the end
+// Returns the buffer, now usable as a C string.
+char *vstr_null_terminated_str(vstr_t *vstr) {
+    // If there's no more room, add single byte
+    if (vstr->alloc == vstr->len) {
+        vstr_extend(vstr, 1);
+    }
+    vstr->buf[vstr->len] = '\0';
+    return vstr->buf;
+}
+
+// Append a single raw byte to the vstr.
+void vstr_add_byte(vstr_t *vstr, byte b) {
+    byte *buf = (byte *)vstr_add_len(vstr, 1);
+    buf[0] = b;
+}
+
+// Append a character to the vstr.  With unicode enabled the code point is
+// UTF-8 encoded (1-4 bytes); otherwise it is appended as a single byte.
+void vstr_add_char(vstr_t *vstr, unichar c) {
+    #if MICROPY_PY_BUILTINS_STR_UNICODE
+    // TODO: Can this be simplified and deduplicated?
+    // Is it worth just calling vstr_add_len(vstr, 4)?
+    if (c < 0x80) {
+        // ASCII: single byte as-is
+        byte *buf = (byte *)vstr_add_len(vstr, 1);
+        *buf = (byte)c;
+    } else if (c < 0x800) {
+        // 2-byte sequence: 110xxxxx 10xxxxxx
+        byte *buf = (byte *)vstr_add_len(vstr, 2);
+        buf[0] = (c >> 6) | 0xC0;
+        buf[1] = (c & 0x3F) | 0x80;
+    } else if (c < 0x10000) {
+        // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
+        byte *buf = (byte *)vstr_add_len(vstr, 3);
+        buf[0] = (c >> 12) | 0xE0;
+        buf[1] = ((c >> 6) & 0x3F) | 0x80;
+        buf[2] = (c & 0x3F) | 0x80;
+    } else {
+        // 4-byte sequence; code points above U+10FFFF are invalid
+        assert(c < 0x110000);
+        byte *buf = (byte *)vstr_add_len(vstr, 4);
+        buf[0] = (c >> 18) | 0xF0;
+        buf[1] = ((c >> 12) & 0x3F) | 0x80;
+        buf[2] = ((c >> 6) & 0x3F) | 0x80;
+        buf[3] = (c & 0x3F) | 0x80;
+    }
+    #else
+    vstr_add_byte(vstr, c);
+    #endif
+}
+
+// Append a null-terminated C string (terminator not included) to the vstr.
+void vstr_add_str(vstr_t *vstr, const char *str) {
+    vstr_add_strn(vstr, str, strlen(str));
+}
+
+// Append `len` bytes from `str` to the vstr.
+void vstr_add_strn(vstr_t *vstr, const char *str, size_t len) {
+    vstr_ensure_extra(vstr, len);
+    // memmove (not memcpy): str may point into vstr->buf itself
+    memmove(vstr->buf + vstr->len, str, len);
+    vstr->len += len;
+}
+
+// Open up a gap of `byte_len` uninitialized bytes at `byte_pos` (clamped to
+// the current length) and return a pointer to the gap for the caller to fill.
+STATIC char *vstr_ins_blank_bytes(vstr_t *vstr, size_t byte_pos, size_t byte_len) {
+    size_t l = vstr->len;
+    if (byte_pos > l) {
+        // clamp: inserting past the end means appending
+        byte_pos = l;
+    }
+    if (byte_len > 0) {
+        // ensure room for the new bytes
+        vstr_ensure_extra(vstr, byte_len);
+        // copy up the string to make room for the new bytes
+        memmove(vstr->buf + byte_pos + byte_len, vstr->buf + byte_pos, l - byte_pos);
+        // increase the length
+        vstr->len += byte_len;
+    }
+    return vstr->buf + byte_pos;
+}
+
+// Insert a single byte at the given byte position (clamped to the end).
+void vstr_ins_byte(vstr_t *vstr, size_t byte_pos, byte b) {
+    char *s = vstr_ins_blank_bytes(vstr, byte_pos, 1);
+    *s = b;
+}
+
+// Insert a character at the given position.  Currently writes a single byte
+// only (the stored TODO: no multi-byte UTF-8 insertion yet).
+void vstr_ins_char(vstr_t *vstr, size_t char_pos, unichar chr) {
+    // TODO UNICODE
+    char *s = vstr_ins_blank_bytes(vstr, char_pos, 1);
+    *s = chr;
+}
+
+// Remove `bytes_to_cut` bytes from the start of the vstr.
+void vstr_cut_head_bytes(vstr_t *vstr, size_t bytes_to_cut) {
+    vstr_cut_out_bytes(vstr, 0, bytes_to_cut);
+}
+
+// Remove `len` bytes from the end of the vstr, clamping at empty.
+void vstr_cut_tail_bytes(vstr_t *vstr, size_t len) {
+    if (len > vstr->len) {
+        vstr->len = 0;
+    } else {
+        vstr->len -= len;
+    }
+}
+
+// Remove `bytes_to_cut` bytes starting at `byte_pos`, shifting the tail down.
+// A start position at or past the end is a no-op; a cut extending past the
+// end simply truncates at byte_pos.
+void vstr_cut_out_bytes(vstr_t *vstr, size_t byte_pos, size_t bytes_to_cut) {
+    if (byte_pos >= vstr->len) {
+        return;
+    } else if (byte_pos + bytes_to_cut >= vstr->len) {
+        vstr->len = byte_pos;
+    } else {
+        // overlapping regions, so memmove is required
+        memmove(vstr->buf + byte_pos, vstr->buf + byte_pos + bytes_to_cut, vstr->len - byte_pos - bytes_to_cut);
+        vstr->len -= bytes_to_cut;
+    }
+}
+
+// printf-style formatted append to the vstr (variadic front end to
+// vstr_vprintf).
+void vstr_printf(vstr_t *vstr, const char *fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    vstr_vprintf(vstr, fmt, ap);
+    va_end(ap);
+}
+
+// va_list variant of vstr_printf: route mp_vprintf output into the vstr
+// via a temporary mp_print_t backed by vstr_add_strn.
+void vstr_vprintf(vstr_t *vstr, const char *fmt, va_list ap) {
+    mp_print_t print = {vstr, (mp_print_strn_t)vstr_add_strn};
+    mp_vprintf(&print, fmt, ap);
+}
diff --git a/circuitpython/py/warning.c b/circuitpython/py/warning.c
new file mode 100644
index 0000000..71a3ac5
--- /dev/null
+++ b/circuitpython/py/warning.c
@@ -0,0 +1,56 @@
+/*
+ * This file is part of the MicroPython project, http://micropython.org/
+ *
+ * The MIT License (MIT)
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2014 Damien P. George
+ * SPDX-FileCopyrightText: Copyright (c) 2015-2018 Paul Sokolovsky
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "py/emit.h"
+#include "py/runtime.h"
+
+#if MICROPY_WARNINGS
+
+// Print a warning of the form "<category>: <formatted msg>\n" to the error
+// printer.  A NULL category defaults to the generic "Warning" prefix.
+void mp_warning(const char *category, const char *msg, ...) {
+    if (category == NULL) {
+        category = "Warning";
+    }
+    mp_print_str(MICROPY_ERROR_PRINTER, category);
+    mp_print_str(MICROPY_ERROR_PRINTER, ": ");
+
+    va_list args;
+    va_start(args, msg);
+    mp_vprintf(MICROPY_ERROR_PRINTER, msg, args);
+    mp_print_str(MICROPY_ERROR_PRINTER, "\n");
+    va_end(args);
+}
+
+// Emit a compiler warning, but only once per compile: the check against
+// MP_PASS_CODE_SIZE filters out the repeat invocations from other passes.
+void mp_emitter_warning(pass_kind_t pass, const char *msg) {
+    if (pass == MP_PASS_CODE_SIZE) {
+        mp_warning(NULL, msg);
+    }
+}
+
+#endif // MICROPY_WARNINGS