py/obj: Change sizeof to offsetof in mp_obj_malloc_var macro.
Following b6a9778484, to properly calculate
the size of the variable-length allocation.
Signed-off-by: Damien George <damien@micropython.org>
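For context, a minimal stand-alone sketch of why offsetof gives the proper size (this is illustrative C, not MicroPython code; the struct, its field names and the 64-bit ABI figures are assumptions made for the example): sizeof() of a struct with a variable-length tail includes any padding the compiler inserts after the last fixed member, while offsetof() of the tail field is exactly where the variable data begins, so offsetof() plus the tail size is the correct allocation size.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Illustrative struct only: the fixed part ends in a 1-byte member, so
// sizeof() is padded up to the struct's 8-byte alignment (assumed ABI),
// but the flexible tail actually begins right after 'len'.
typedef struct _example_t {
    void *stream;   // 8-byte pointer (assumed)
    uint8_t len;
    uint8_t buf[];  // variable-length tail
} example_t;

int main(void) {
    size_t n = 4;
    // sizeof-based size also counts the padding after 'len': 16 + 4 = 20 (assumed ABI).
    size_t old_size = sizeof(example_t) + sizeof(uint8_t) * n;
    // offsetof-based size starts the tail where 'buf' really lives: 9 + 4 = 13 (assumed ABI).
    size_t new_size = offsetof(example_t, buf) + sizeof(uint8_t) * n;
    printf("sizeof-based: %zu, offsetof-based: %zu\n", old_size, new_size);
    return 0;
}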
@@ -119,7 +119,7 @@ typedef struct _mp_obj_bufwriter_t {
 STATIC mp_obj_t bufwriter_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
     mp_arg_check_num(n_args, n_kw, 2, 2, false);
     size_t alloc = mp_obj_get_int(args[1]);
-    mp_obj_bufwriter_t *o = mp_obj_malloc_var(mp_obj_bufwriter_t, byte, alloc, type);
+    mp_obj_bufwriter_t *o = mp_obj_malloc_var(mp_obj_bufwriter_t, buf, byte, alloc, type);
     o->stream = args[0];
     o->alloc = alloc;
     o->len = 0;
py/obj.h
@@ -913,7 +913,7 @@ extern const struct _mp_obj_exception_t mp_const_GeneratorExit_obj;
 // Helper versions of m_new_obj when you need to immediately set base.type.
 // Implementing this as a call rather than inline saves 8 bytes per usage.
 #define mp_obj_malloc(struct_type, obj_type) ((struct_type *)mp_obj_malloc_helper(sizeof(struct_type), obj_type))
-#define mp_obj_malloc_var(struct_type, var_type, var_num, obj_type) ((struct_type *)mp_obj_malloc_helper(sizeof(struct_type) + sizeof(var_type) * (var_num), obj_type))
+#define mp_obj_malloc_var(struct_type, var_field, var_type, var_num, obj_type) ((struct_type *)mp_obj_malloc_helper(offsetof(struct_type, var_field) + sizeof(var_type) * (var_num), obj_type))
 void *mp_obj_malloc_helper(size_t num_bytes, const mp_obj_type_t *type);

 // These macros are derived from more primitive ones and are used to
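To illustrate the macro change above, here is a hand-written expansion for one of the call sites changed below (the mp_obj_new_tuple one); this is only a sketch of what the preprocessor produces, written out for clarity:

// Call site (see the mp_obj_new_tuple hunk further down):
//   mp_obj_tuple_t *o = mp_obj_malloc_var(mp_obj_tuple_t, items, mp_obj_t, n, &mp_type_tuple);
//
// With the new macro this expands to:
//   mp_obj_tuple_t *o = ((mp_obj_tuple_t *)mp_obj_malloc_helper(
//       offsetof(mp_obj_tuple_t, items) + sizeof(mp_obj_t) * (n), &mp_type_tuple));
//
// The old sizeof-based macro instead produced
//   sizeof(mp_obj_tuple_t) + sizeof(mp_obj_t) * (n)
// as the first argument, and did not take the field name at all.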
@@ -71,7 +71,7 @@ STATIC void mp_obj_attrtuple_attr(mp_obj_t self_in, qstr attr, mp_obj_t *dest) {
 }

 mp_obj_t mp_obj_new_attrtuple(const qstr *fields, size_t n, const mp_obj_t *items) {
-    mp_obj_tuple_t *o = mp_obj_malloc_var(mp_obj_tuple_t, mp_obj_t, n + 1, &mp_type_attrtuple);
+    mp_obj_tuple_t *o = mp_obj_malloc_var(mp_obj_tuple_t, items, mp_obj_t, n + 1, &mp_type_attrtuple);
     o->len = n;
     for (size_t i = 0; i < n; i++) {
         o->items[i] = items[i];
@@ -105,7 +105,7 @@ MP_DEFINE_CONST_OBJ_TYPE(
     );

 mp_obj_t mp_obj_new_closure(mp_obj_t fun, size_t n_closed_over, const mp_obj_t *closed) {
-    mp_obj_closure_t *o = mp_obj_malloc_var(mp_obj_closure_t, mp_obj_t, n_closed_over, &mp_type_closure);
+    mp_obj_closure_t *o = mp_obj_malloc_var(mp_obj_closure_t, closed, mp_obj_t, n_closed_over, &mp_type_closure);
     o->fun = fun;
     o->n_closed = n_closed_over;
     memcpy(o->closed, closed, n_closed_over * sizeof(mp_obj_t));
@@ -384,7 +384,7 @@ mp_obj_t mp_obj_new_fun_bc(const mp_obj_t *def_args, const byte *code, const mp_
         def_kw_args = def_args[1];
         n_extra_args += 1;
     }
-    mp_obj_fun_bc_t *o = mp_obj_malloc_var(mp_obj_fun_bc_t, mp_obj_t, n_extra_args, &mp_type_fun_bc);
+    mp_obj_fun_bc_t *o = mp_obj_malloc_var(mp_obj_fun_bc_t, extra_args, mp_obj_t, n_extra_args, &mp_type_fun_bc);
     o->bytecode = code;
     o->context = context;
     o->child_table = child_table;
@@ -59,7 +59,7 @@ STATIC mp_obj_t gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_kw, cons
     MP_BC_PRELUDE_SIG_DECODE(ip);

     // allocate the generator object, with room for local stack and exception stack
-    mp_obj_gen_instance_t *o = mp_obj_malloc_var(mp_obj_gen_instance_t, byte,
+    mp_obj_gen_instance_t *o = mp_obj_malloc_var(mp_obj_gen_instance_t, code_state.state, byte,
         n_state * sizeof(mp_obj_t) + n_exc_stack * sizeof(mp_exc_stack_t),
         &mp_type_gen_instance);
@@ -114,7 +114,7 @@ STATIC mp_obj_t native_gen_wrap_call(mp_obj_t self_in, size_t n_args, size_t n_k
     MP_BC_PRELUDE_SIG_DECODE(ip);

     // Allocate the generator object, with room for local stack (exception stack not needed).
-    mp_obj_gen_instance_native_t *o = mp_obj_malloc_var(mp_obj_gen_instance_native_t, byte, n_state * sizeof(mp_obj_t), &mp_type_gen_instance);
+    mp_obj_gen_instance_native_t *o = mp_obj_malloc_var(mp_obj_gen_instance_native_t, code_state.state, byte, n_state * sizeof(mp_obj_t), &mp_type_gen_instance);

     // Parse the input arguments and set up the code state
     o->pend_exc = mp_const_none;
@@ -38,7 +38,7 @@ typedef struct _mp_obj_map_t {

 STATIC mp_obj_t map_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
     mp_arg_check_num(n_args, n_kw, 2, MP_OBJ_FUN_ARGS_MAX, false);
-    mp_obj_map_t *o = mp_obj_malloc_var(mp_obj_map_t, mp_obj_t, n_args - 1, type);
+    mp_obj_map_t *o = mp_obj_malloc_var(mp_obj_map_t, iters, mp_obj_t, n_args - 1, type);
     o->n_iters = n_args - 1;
     o->fun = args[0];
     for (size_t i = 0; i < n_args - 1; i++) {
@@ -110,7 +110,7 @@ STATIC mp_obj_t namedtuple_make_new(const mp_obj_type_t *type_in, size_t n_args,

     // Create a namedtuple with explicit malloc. Calling mp_obj_new_tuple
     // with num_fields=0 returns a read-only object.
-    mp_obj_tuple_t *tuple = mp_obj_malloc_var(mp_obj_tuple_t, mp_obj_t, num_fields, type_in);
+    mp_obj_tuple_t *tuple = mp_obj_malloc_var(mp_obj_tuple_t, items, mp_obj_t, num_fields, type_in);
     tuple->len = num_fields;

     // Copy the positional args into the first slots of the namedtuple
@@ -244,7 +244,7 @@ mp_obj_t mp_obj_new_tuple(size_t n, const mp_obj_t *items) {
     if (n == 0) {
         return mp_const_empty_tuple;
     }
-    mp_obj_tuple_t *o = mp_obj_malloc_var(mp_obj_tuple_t, mp_obj_t, n, &mp_type_tuple);
+    mp_obj_tuple_t *o = mp_obj_malloc_var(mp_obj_tuple_t, items, mp_obj_t, n, &mp_type_tuple);
     o->len = n;
     if (items) {
         for (size_t i = 0; i < n; i++) {
@@ -99,7 +99,7 @@ STATIC
 mp_obj_instance_t *mp_obj_new_instance(const mp_obj_type_t *class, const mp_obj_type_t **native_base) {
     size_t num_native_bases = instance_count_native_bases(class, native_base);
     assert(num_native_bases < 2);
-    mp_obj_instance_t *o = mp_obj_malloc_var(mp_obj_instance_t, mp_obj_t, num_native_bases, class);
+    mp_obj_instance_t *o = mp_obj_malloc_var(mp_obj_instance_t, subobj, mp_obj_t, num_native_bases, class);
     mp_map_init(&o->members, 0);
     // Initialise the native base-class slot (should be 1 at most) with a valid
     // object. It doesn't matter which object, so long as it can be uniquely
@@ -39,7 +39,7 @@ typedef struct _mp_obj_zip_t {
 STATIC mp_obj_t zip_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *args) {
     mp_arg_check_num(n_args, n_kw, 0, MP_OBJ_FUN_ARGS_MAX, false);

-    mp_obj_zip_t *o = mp_obj_malloc_var(mp_obj_zip_t, mp_obj_t, n_args, type);
+    mp_obj_zip_t *o = mp_obj_malloc_var(mp_obj_zip_t, iters, mp_obj_t, n_args, type);
     o->n_iters = n_args;
     for (size_t i = 0; i < n_args; i++) {
         o->iters[i] = mp_getiter(args[i], NULL);