py/nlr: Factor out common NLR code to generic functions.

Each NLR implementation (Thumb, x86, x64, xtensa, setjmp) duplicates a lot
of the NLR code, specifically that dealing with pushing and popping the NLR
pointer to maintain the linked-list of NLR buffers.  This patch factors all
of that code out of the specific implementations into generic functions in
nlr.c.  This eliminates duplicated code.
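
For reference, the generic helpers that now live in nlr.c look roughly
like the code removed from the Thumb port below (a sketch only; the
exact form in nlr.c, e.g. its preprocessor guards, may differ):

    // Sketch of the generic push/pop helpers factored into py/nlr.c,
    // based on the code removed from nlrthumb.c below.
    unsigned int nlr_push_tail(nlr_buf_t *nlr) {
        nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
        nlr->prev = *top;         // link the new buffer into the list
        MP_NLR_SAVE_PYSTACK(nlr);
        *top = nlr;               // the new buffer becomes the list head
        return 0;                 // normal (non-exceptional) return
    }

    void nlr_pop(void) {
        nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
        *top = (*top)->prev;      // unlink the top buffer
    }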

The factoring also allows the machine-specific NLR code to be pure
assembler code, which lets nlrthumb.c use the naked function attribute
in the correct way (naked functions can only contain basic inline
assembler code).
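
As an illustration of that constraint (a generic sketch, not code from
this commit): once the C bookkeeping is moved out, a naked function can
contain nothing but a basic asm statement that works directly on the
argument registers.

    // Illustrative sketch only (example_restore is a made-up name).
    // A naked function gets no compiler-generated prologue/epilogue, so
    // it may only contain basic inline assembler: no operands and no C
    // statements.  Under the ARM calling convention the first argument
    // is already in r0, so no "mov r0, %0" with an operand is needed.
    __attribute__((naked)) void example_restore(nlr_buf_t *top) {
        __asm volatile (
        "ldr    r4, [r0, #12]   \n" // restore a callee-saved register
        "bx     lr              \n" // plain return
        );
    }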

There is a small overhead introduced (typically 1 machine instruction)
because the generic nlr_jump() must now call nlr_jump_tail() rather
than the two being one combined function.
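
A sketch of the resulting shape of the generic nlr_jump() (based on the
C prologue removed from the Thumb port below; the exact code in nlr.c
may differ):

    // Sketch: the generic part of the jump, now in py/nlr.c, does the
    // bookkeeping in C and then calls the machine-specific assembler
    // routine; the extra call is the small overhead mentioned above.
    NORETURN void nlr_jump(void *val) {
        nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top);
        nlr_buf_t *top = *top_ptr;
        if (top == NULL) {
            nlr_jump_fail(val);   // no active NLR buffer: fatal error
        }
        top->ret_val = val;       // value handed back at the nlr_push site
        MP_NLR_RESTORE_PYSTACK(top);
        *top_ptr = top->prev;     // pop the buffer before jumping
        nlr_jump_tail(top);       // asm routine restores regs and returns
    }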
Author: Damien George
Date:   2017-12-18 18:57:15 +11:00
Commit: 6a3a742a6c (parent d8d633f156)
8 changed files with 157 additions and 204 deletions

py/nlrthumb.c

@@ -26,7 +26,7 @@
 #include "py/mpstate.h"
 
-#if (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__))
+#if MICROPY_NLR_THUMB
 
 #undef nlr_push
@@ -37,7 +37,6 @@
 //      r4-r11, r13=sp
 
 __attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
-
     __asm volatile (
     "str    r4, [r0, #12]       \n" // store r4 into nlr_buf
     "str    r5, [r0, #16]       \n" // store r5 into nlr_buf
@@ -75,36 +74,10 @@ __attribute__((naked)) unsigned int nlr_push(nlr_buf_t *nlr) {
     "b      nlr_push_tail       \n" // do the rest in C
 #endif
     );
 
     return 0; // needed to silence compiler warning
 }
 
-__attribute__((used)) unsigned int nlr_push_tail(nlr_buf_t *nlr) {
-    nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
-    nlr->prev = *top;
-    MP_NLR_SAVE_PYSTACK(nlr);
-    *top = nlr;
-    return 0; // normal return
-}
-
-void nlr_pop(void) {
-    nlr_buf_t **top = &MP_STATE_THREAD(nlr_top);
-    *top = (*top)->prev;
-}
-
-NORETURN __attribute__((naked)) void nlr_jump(void *val) {
-    nlr_buf_t **top_ptr = &MP_STATE_THREAD(nlr_top);
-    nlr_buf_t *top = *top_ptr;
-    if (top == NULL) {
-        nlr_jump_fail(val);
-    }
-
-    top->ret_val = val;
-    MP_NLR_RESTORE_PYSTACK(top);
-    *top_ptr = top->prev;
-
+NORETURN __attribute__((naked)) void nlr_jump_tail(nlr_buf_t *top) {
     __asm volatile (
-    "mov    r0, %0              \n" // r0 points to nlr_buf
     "ldr    r4, [r0, #12]       \n" // load r4 from nlr_buf
     "ldr    r5, [r0, #16]       \n" // load r5 from nlr_buf
     "ldr    r6, [r0, #20]       \n" // load r6 from nlr_buf
@@ -133,12 +106,7 @@ NORETURN __attribute__((naked)) void nlr_jump(void *val) {
 #endif
     "movs   r0, #1              \n" // return 1, non-local return
     "bx     lr                  \n" // return
-    :                   // output operands
-    : "r"(top)          // input operands
-    :                   // clobbered registers
     );
-
-    for (;;); // needed to silence compiler warning
 }
 
-#endif // (!defined(MICROPY_NLR_SETJMP) || !MICROPY_NLR_SETJMP) && (defined(__thumb2__) || defined(__thumb__) || defined(__arm__))
+#endif // MICROPY_NLR_THUMB