path: root/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT
author    chai <chaifix@163.com>    2018-11-16 00:24:51 +0800
committer    chai <chaifix@163.com>    2018-11-16 00:24:51 +0800
commit    831e814ce9bdb84e86c06c4a52008f6bdaaa00d6 (patch)
tree    f91fccc7d2628d6e0a39886134b2bb174f5eede4    /build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT
parent    6dc75930fe5fe02f1af5489917752d315cf9e48f (diff)
*Merge master into the minimal branch
Diffstat (limited to 'build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT')
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lauxlib.h | 167
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_alloc.h | 17
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_arch.h | 452
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm.h | 17
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_arm.h | 2360
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_mips.h | 1977
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_ppc.h | 2168
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_x86.h | 2900
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bc.h | 261
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bcdump.h | 66
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_carith.h | 28
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccall.h | 171
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccallback.h | 25
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cconv.h | 70
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cdata.h | 75
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_char.h | 42
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_clib.h | 29
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cparse.h | 65
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_crecord.h | 31
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ctype.h | 461
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_debug.h | 61
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_def.h | 353
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_dispatch.h | 131
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_arm.h | 356
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_mips.h | 211
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_ppc.h | 238
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_x86.h | 468
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_err.h | 41
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_errmsg.h | 193
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ff.h | 18
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ffrecord.h | 24
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_frame.h | 187
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_func.h | 24
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gc.h | 134
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gdbjit.h | 22
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ir.h | 551
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ircall.h | 271
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_iropt.h | 161
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_jit.h | 423
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lex.h | 85
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lib.h | 112
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_mcode.h | 30
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_meta.h | 37
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_obj.h | 864
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_parse.h | 18
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_record.h | 44
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_snap.h | 34
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_state.h | 35
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_str.h | 50
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_strscan.h | 39
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_tab.h | 70
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target.h | 162
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_arm.h | 274
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_mips.h | 258
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_ppc.h | 280
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_x86.h | 342
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_trace.h | 53
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_traceerr.h | 61
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_udata.h | 14
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vm.h | 116
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vmevent.h | 59
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.h | 393
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.hpp | 9
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luaconf.h | 156
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luajit.h | 70
-rw-r--r--  build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lualib.h | 43
66 files changed, 0 insertions, 18957 deletions
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lauxlib.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lauxlib.h
deleted file mode 100644
index fed1491..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lauxlib.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
-** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $
-** Auxiliary functions for building Lua libraries
-** See Copyright Notice in lua.h
-*/
-
-
-#ifndef lauxlib_h
-#define lauxlib_h
-
-
-#include <stddef.h>
-#include <stdio.h>
-
-#include "lua.h"
-
-
-#define luaL_getn(L,i) ((int)lua_objlen(L, i))
-#define luaL_setn(L,i,j) ((void)0) /* no op! */
-
-/* extra error code for `luaL_load' */
-#define LUA_ERRFILE (LUA_ERRERR+1)
-
-typedef struct luaL_Reg {
- const char *name;
- lua_CFunction func;
-} luaL_Reg;
-
-LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname,
- const luaL_Reg *l, int nup);
-LUALIB_API void (luaL_register) (lua_State *L, const char *libname,
- const luaL_Reg *l);
-LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e);
-LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e);
-LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname);
-LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg);
-LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg,
- size_t *l);
-LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg,
- const char *def, size_t *l);
-LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg);
-LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def);
-
-LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg);
-LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg,
- lua_Integer def);
-
-LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg);
-LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t);
-LUALIB_API void (luaL_checkany) (lua_State *L, int narg);
-
-LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname);
-LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname);
-
-LUALIB_API void (luaL_where) (lua_State *L, int lvl);
-LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...);
-
-LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def,
- const char *const lst[]);
-
-LUALIB_API int (luaL_ref) (lua_State *L, int t);
-LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref);
-
-LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename);
-LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz,
- const char *name);
-LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s);
-
-LUALIB_API lua_State *(luaL_newstate) (void);
-
-
-LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p,
- const char *r);
-
-LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx,
- const char *fname, int szhint);
-
-/* From Lua 5.2. */
-LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname);
-LUALIB_API int luaL_execresult(lua_State *L, int stat);
-LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename,
- const char *mode);
-LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz,
- const char *name, const char *mode);
-LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg,
- int level);
-
-
-/*
-** ===============================================================
-** some useful macros
-** ===============================================================
-*/
-
-#define luaL_argcheck(L, cond,numarg,extramsg) \
- ((void)((cond) || luaL_argerror(L, (numarg), (extramsg))))
-#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL))
-#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL))
-#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n)))
-#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d)))
-#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n)))
-#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d)))
-
-#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i)))
-
-#define luaL_dofile(L, fn) \
- (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0))
-
-#define luaL_dostring(L, s) \
- (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0))
-
-#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n)))
-
-#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n)))
-
-/*
-** {======================================================
-** Generic Buffer manipulation
-** =======================================================
-*/
-
-
-
-typedef struct luaL_Buffer {
- char *p; /* current position in buffer */
- int lvl; /* number of strings in the stack (level) */
- lua_State *L;
- char buffer[LUAL_BUFFERSIZE];
-} luaL_Buffer;
-
-#define luaL_addchar(B,c) \
- ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \
- (*(B)->p++ = (char)(c)))
-
-/* compatibility only */
-#define luaL_putchar(B,c) luaL_addchar(B,c)
-
-#define luaL_addsize(B,n) ((B)->p += (n))
-
-LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B);
-LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B);
-LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l);
-LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s);
-LUALIB_API void (luaL_addvalue) (luaL_Buffer *B);
-LUALIB_API void (luaL_pushresult) (luaL_Buffer *B);
-
-
-/* }====================================================== */
-
-
-/* compatibility with ref system */
-
-/* pre-defined references */
-#define LUA_NOREF (-2)
-#define LUA_REFNIL (-1)
-
-#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \
- (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0))
-
-#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref))
-
-#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref))
-
-
-#define luaL_reg luaL_Reg
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_alloc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_alloc.h
deleted file mode 100644
index f87a7cf..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_alloc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
-** Bundled memory allocator.
-** Donated to the public domain.
-*/
-
-#ifndef _LJ_ALLOC_H
-#define _LJ_ALLOC_H
-
-#include "lj_def.h"
-
-#ifndef LUAJIT_USE_SYSMALLOC
-LJ_FUNC void *lj_alloc_create(void);
-LJ_FUNC void lj_alloc_destroy(void *msp);
-LJ_FUNC void *lj_alloc_f(void *msp, void *ptr, size_t osize, size_t nsize);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_arch.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_arch.h
deleted file mode 100644
index 5f7e445..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_arch.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/*
-** Target architecture selection.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_ARCH_H
-#define _LJ_ARCH_H
-
-#include "lua.h"
-
-/* Target endianess. */
-#define LUAJIT_LE 0
-#define LUAJIT_BE 1
-
-/* Target architectures. */
-#define LUAJIT_ARCH_X86 1
-#define LUAJIT_ARCH_x86 1
-#define LUAJIT_ARCH_X64 2
-#define LUAJIT_ARCH_x64 2
-#define LUAJIT_ARCH_ARM 3
-#define LUAJIT_ARCH_arm 3
-#define LUAJIT_ARCH_PPC 4
-#define LUAJIT_ARCH_ppc 4
-#define LUAJIT_ARCH_PPCSPE 5
-#define LUAJIT_ARCH_ppcspe 5
-#define LUAJIT_ARCH_MIPS 6
-#define LUAJIT_ARCH_mips 6
-
-/* Target OS. */
-#define LUAJIT_OS_OTHER 0
-#define LUAJIT_OS_WINDOWS 1
-#define LUAJIT_OS_LINUX 2
-#define LUAJIT_OS_OSX 3
-#define LUAJIT_OS_BSD 4
-#define LUAJIT_OS_POSIX 5
-
-/* Select native target if no target defined. */
-#ifndef LUAJIT_TARGET
-
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
-#define LUAJIT_TARGET LUAJIT_ARCH_X86
-#elif defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64)
-#define LUAJIT_TARGET LUAJIT_ARCH_X64
-#elif defined(__arm__) || defined(__arm) || defined(__ARM__) || defined(__ARM)
-#define LUAJIT_TARGET LUAJIT_ARCH_ARM
-#elif defined(__ppc__) || defined(__ppc) || defined(__PPC__) || defined(__PPC) || defined(__powerpc__) || defined(__powerpc) || defined(__POWERPC__) || defined(__POWERPC) || defined(_M_PPC)
-#ifdef __NO_FPRS__
-#define LUAJIT_TARGET LUAJIT_ARCH_PPCSPE
-#else
-#define LUAJIT_TARGET LUAJIT_ARCH_PPC
-#endif
-#elif defined(__mips__) || defined(__mips) || defined(__MIPS__) || defined(__MIPS)
-#define LUAJIT_TARGET LUAJIT_ARCH_MIPS
-#else
-#error "No support for this architecture (yet)"
-#endif
-
-#endif
-
-/* Select native OS if no target OS defined. */
-#ifndef LUAJIT_OS
-
-#if defined(_WIN32) && !defined(_XBOX_VER)
-#define LUAJIT_OS LUAJIT_OS_WINDOWS
-#elif defined(__linux__)
-#define LUAJIT_OS LUAJIT_OS_LINUX
-#elif defined(__MACH__) && defined(__APPLE__)
-#define LUAJIT_OS LUAJIT_OS_OSX
-#elif (defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
- defined(__NetBSD__) || defined(__OpenBSD__) || \
- defined(__DragonFly__)) && !defined(__ORBIS__)
-#define LUAJIT_OS LUAJIT_OS_BSD
-#elif (defined(__sun__) && defined(__svr4__))
-#define LUAJIT_OS LUAJIT_OS_POSIX
-#elif defined(__CYGWIN__)
-#define LJ_TARGET_CYGWIN 1
-#define LUAJIT_OS LUAJIT_OS_POSIX
-#else
-#define LUAJIT_OS LUAJIT_OS_OTHER
-#endif
-
-#endif
-
-/* Set target OS properties. */
-#if LUAJIT_OS == LUAJIT_OS_WINDOWS
-#define LJ_OS_NAME "Windows"
-#elif LUAJIT_OS == LUAJIT_OS_LINUX
-#define LJ_OS_NAME "Linux"
-#elif LUAJIT_OS == LUAJIT_OS_OSX
-#define LJ_OS_NAME "OSX"
-#elif LUAJIT_OS == LUAJIT_OS_BSD
-#define LJ_OS_NAME "BSD"
-#elif LUAJIT_OS == LUAJIT_OS_POSIX
-#define LJ_OS_NAME "POSIX"
-#else
-#define LJ_OS_NAME "Other"
-#endif
-
-#define LJ_TARGET_WINDOWS (LUAJIT_OS == LUAJIT_OS_WINDOWS)
-#define LJ_TARGET_LINUX (LUAJIT_OS == LUAJIT_OS_LINUX)
-#define LJ_TARGET_OSX (LUAJIT_OS == LUAJIT_OS_OSX)
-#define LJ_TARGET_IOS (LJ_TARGET_OSX && LUAJIT_TARGET == LUAJIT_ARCH_ARM)
-#define LJ_TARGET_POSIX (LUAJIT_OS > LUAJIT_OS_WINDOWS)
-#define LJ_TARGET_DLOPEN LJ_TARGET_POSIX
-
-#ifdef __CELLOS_LV2__
-#define LJ_TARGET_PS3 1
-#define LJ_TARGET_CONSOLE 1
-#endif
-
-#ifdef __ORBIS__
-#define LJ_TARGET_PS4 1
-#define LJ_TARGET_CONSOLE 1
-#undef NULL
-#define NULL ((void*)0)
-#endif
-
-#ifdef __psp2__
-#define LJ_TARGET_PSVITA 1
-#define LJ_TARGET_CONSOLE 1
-#endif
-
-#if _XBOX_VER >= 200
-#define LJ_TARGET_XBOX360 1
-#define LJ_TARGET_CONSOLE 1
-#endif
-
-#define LJ_NUMMODE_SINGLE 0 /* Single-number mode only. */
-#define LJ_NUMMODE_SINGLE_DUAL 1 /* Default to single-number mode. */
-#define LJ_NUMMODE_DUAL 2 /* Dual-number mode only. */
-#define LJ_NUMMODE_DUAL_SINGLE 3 /* Default to dual-number mode. */
-
-/* Set target architecture properties. */
-#if LUAJIT_TARGET == LUAJIT_ARCH_X86
-
-#define LJ_ARCH_NAME "x86"
-#define LJ_ARCH_BITS 32
-#define LJ_ARCH_ENDIAN LUAJIT_LE
-#if LJ_TARGET_WINDOWS || LJ_TARGET_CYGWIN
-#define LJ_ABI_WIN 1
-#else
-#define LJ_ABI_WIN 0
-#endif
-#define LJ_TARGET_X86 1
-#define LJ_TARGET_X86ORX64 1
-#define LJ_TARGET_EHRETREG 0
-#define LJ_TARGET_MASKSHIFT 1
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNALIGNED 1
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
-
-#elif LUAJIT_TARGET == LUAJIT_ARCH_X64
-
-#define LJ_ARCH_NAME "x64"
-#define LJ_ARCH_BITS 64
-#define LJ_ARCH_ENDIAN LUAJIT_LE
-#if LJ_TARGET_WINDOWS || LJ_TARGET_CYGWIN
-#define LJ_ABI_WIN 1
-#else
-#define LJ_ABI_WIN 0
-#endif
-#define LJ_TARGET_X64 1
-#define LJ_TARGET_X86ORX64 1
-#define LJ_TARGET_EHRETREG 0
-#define LJ_TARGET_JUMPRANGE 31 /* +-2^31 = +-2GB */
-#define LJ_TARGET_MASKSHIFT 1
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNALIGNED 1
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE_DUAL
-
-#elif LUAJIT_TARGET == LUAJIT_ARCH_ARM
-
-#define LJ_ARCH_NAME "arm"
-#define LJ_ARCH_BITS 32
-#define LJ_ARCH_ENDIAN LUAJIT_LE
-#if !defined(LJ_ARCH_HASFPU) && __SOFTFP__
-#define LJ_ARCH_HASFPU 0
-#endif
-#if !defined(LJ_ABI_SOFTFP) && !__ARM_PCS_VFP
-#define LJ_ABI_SOFTFP 1
-#endif
-#define LJ_ABI_EABI 1
-#define LJ_TARGET_ARM 1
-#define LJ_TARGET_EHRETREG 0
-#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
-#define LJ_TARGET_MASKSHIFT 0
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL
-
-#if __ARM_ARCH_8__ || __ARM_ARCH_8A__
-#define LJ_ARCH_VERSION 80
-#elif __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH_7S__ || __ARM_ARCH_7VE__
-#define LJ_ARCH_VERSION 70
-#elif __ARM_ARCH_6T2__
-#define LJ_ARCH_VERSION 61
-#elif __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6K__ || __ARM_ARCH_6Z__ || __ARM_ARCH_6ZK__
-#define LJ_ARCH_VERSION 60
-#else
-#define LJ_ARCH_VERSION 50
-#endif
-
-#elif LUAJIT_TARGET == LUAJIT_ARCH_PPC
-
-#define LJ_ARCH_NAME "ppc"
-#if _LP64
-#define LJ_ARCH_BITS 64
-#else
-#define LJ_ARCH_BITS 32
-#endif
-#define LJ_ARCH_ENDIAN LUAJIT_BE
-#define LJ_TARGET_PPC 1
-#define LJ_TARGET_EHRETREG 3
-#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
-#define LJ_TARGET_MASKSHIFT 0
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_DUAL_SINGLE
-
-#if _ARCH_PWR7
-#define LJ_ARCH_VERSION 70
-#elif _ARCH_PWR6
-#define LJ_ARCH_VERSION 60
-#elif _ARCH_PWR5X
-#define LJ_ARCH_VERSION 51
-#elif _ARCH_PWR5
-#define LJ_ARCH_VERSION 50
-#elif _ARCH_PWR4
-#define LJ_ARCH_VERSION 40
-#else
-#define LJ_ARCH_VERSION 0
-#endif
-#if __PPC64__ || __powerpc64__ || LJ_TARGET_CONSOLE
-#define LJ_ARCH_PPC64 1
-#define LJ_ARCH_NOFFI 1
-#endif
-#if _ARCH_PPCSQ
-#define LJ_ARCH_SQRT 1
-#endif
-#if _ARCH_PWR5X
-#define LJ_ARCH_ROUND 1
-#endif
-#if __PPU__
-#define LJ_ARCH_CELL 1
-#endif
-#if LJ_TARGET_XBOX360
-#define LJ_ARCH_XENON 1
-#endif
-
-#elif LUAJIT_TARGET == LUAJIT_ARCH_PPCSPE
-
-#define LJ_ARCH_NAME "ppcspe"
-#define LJ_ARCH_BITS 32
-#define LJ_ARCH_ENDIAN LUAJIT_BE
-#ifndef LJ_ABI_SOFTFP
-#define LJ_ABI_SOFTFP 1
-#endif
-#define LJ_ABI_EABI 1
-#define LJ_TARGET_PPCSPE 1
-#define LJ_TARGET_EHRETREG 3
-#define LJ_TARGET_JUMPRANGE 25 /* +-2^25 = +-32MB */
-#define LJ_TARGET_MASKSHIFT 0
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNIFYROT 1 /* Want only IR_BROL. */
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE
-#define LJ_ARCH_NOFFI 1 /* NYI: comparisons, calls. */
-#define LJ_ARCH_NOJIT 1
-
-#elif LUAJIT_TARGET == LUAJIT_ARCH_MIPS
-
-#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL)
-#define LJ_ARCH_NAME "mipsel"
-#define LJ_ARCH_ENDIAN LUAJIT_LE
-#else
-#define LJ_ARCH_NAME "mips"
-#define LJ_ARCH_ENDIAN LUAJIT_BE
-#endif
-#define LJ_ARCH_BITS 32
-#define LJ_TARGET_MIPS 1
-#define LJ_TARGET_EHRETREG 4
-#define LJ_TARGET_JUMPRANGE 27 /* 2*2^27 = 256MB-aligned region */
-#define LJ_TARGET_MASKSHIFT 1
-#define LJ_TARGET_MASKROT 1
-#define LJ_TARGET_UNIFYROT 2 /* Want only IR_BROR. */
-#define LJ_ARCH_NUMMODE LJ_NUMMODE_SINGLE
-
-#if _MIPS_ARCH_MIPS32R2
-#define LJ_ARCH_VERSION 20
-#else
-#define LJ_ARCH_VERSION 10
-#endif
-
-#else
-#error "No target architecture defined"
-#endif
-
-#ifndef LJ_PAGESIZE
-#define LJ_PAGESIZE 4096
-#endif
-
-/* Check for minimum required compiler versions. */
-#if defined(__GNUC__)
-#if LJ_TARGET_X86
-#if (__GNUC__ < 3) || ((__GNUC__ == 3) && __GNUC_MINOR__ < 4)
-#error "Need at least GCC 3.4 or newer"
-#endif
-#elif LJ_TARGET_X64
-#if __GNUC__ < 4
-#error "Need at least GCC 4.0 or newer"
-#endif
-#elif LJ_TARGET_ARM
-#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 2)
-#error "Need at least GCC 4.2 or newer"
-#endif
-#elif !LJ_TARGET_PS3
-#if (__GNUC__ < 4) || ((__GNUC__ == 4) && __GNUC_MINOR__ < 3)
-#error "Need at least GCC 4.3 or newer"
-#endif
-#endif
-#endif
-
-/* Check target-specific constraints. */
-#ifndef _BUILDVM_H
-#if LJ_TARGET_X64
-#if __USING_SJLJ_EXCEPTIONS__
-#error "Need a C compiler with native exception handling on x64"
-#endif
-#elif LJ_TARGET_ARM
-#if defined(__ARMEB__)
-#error "No support for big-endian ARM"
-#endif
-#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__
-#error "No support for Cortex-M CPUs"
-#endif
-#if !(__ARM_EABI__ || LJ_TARGET_IOS)
-#error "Only ARM EABI or iOS 3.0+ ABI is supported"
-#endif
-#elif LJ_TARGET_PPC || LJ_TARGET_PPCSPE
-#if defined(_SOFT_FLOAT) || defined(_SOFT_DOUBLE)
-#error "No support for PowerPC CPUs without double-precision FPU"
-#endif
-#if defined(_LITTLE_ENDIAN) && (!defined(_BYTE_ORDER) || (_BYTE_ORDER == _LITTLE_ENDIAN))
-#error "No support for little-endian PowerPC"
-#endif
-#if defined(_LP64)
-#error "No support for PowerPC 64 bit mode"
-#endif
-#elif LJ_TARGET_MIPS
-#if defined(__mips_soft_float)
-#error "No support for MIPS CPUs without FPU"
-#endif
-#if defined(_LP64)
-#error "No support for MIPS64"
-#endif
-#endif
-#endif
-
-/* Enable or disable the dual-number mode for the VM. */
-#if (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE && LUAJIT_NUMMODE == 2) || \
- (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL && LUAJIT_NUMMODE == 1)
-#error "No support for this number mode on this architecture"
-#endif
-#if LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL || \
- (LJ_ARCH_NUMMODE == LJ_NUMMODE_DUAL_SINGLE && LUAJIT_NUMMODE != 1) || \
- (LJ_ARCH_NUMMODE == LJ_NUMMODE_SINGLE_DUAL && LUAJIT_NUMMODE == 2)
-#define LJ_DUALNUM 1
-#else
-#define LJ_DUALNUM 0
-#endif
-
-#if LJ_TARGET_IOS || LJ_TARGET_CONSOLE
-/* Runtime code generation is restricted on iOS. Complain to Apple, not me. */
-/* Ditto for the consoles. Complain to Sony or MS, not me. */
-#ifndef LUAJIT_ENABLE_JIT
-#define LJ_OS_NOJIT 1
-#endif
-#endif
-
-/* Disable or enable the JIT compiler. */
-#if defined(LUAJIT_DISABLE_JIT) || defined(LJ_ARCH_NOJIT) || defined(LJ_OS_NOJIT)
-#define LJ_HASJIT 0
-#else
-#define LJ_HASJIT 1
-#endif
-
-/* Disable or enable the FFI extension. */
-#if defined(LUAJIT_DISABLE_FFI) || defined(LJ_ARCH_NOFFI)
-#define LJ_HASFFI 0
-#else
-#define LJ_HASFFI 1
-#endif
-
-#ifndef LJ_ARCH_HASFPU
-#define LJ_ARCH_HASFPU 1
-#endif
-#ifndef LJ_ABI_SOFTFP
-#define LJ_ABI_SOFTFP 0
-#endif
-#define LJ_SOFTFP (!LJ_ARCH_HASFPU)
-
-#if LJ_ARCH_ENDIAN == LUAJIT_BE
-#define LJ_LE 0
-#define LJ_BE 1
-#define LJ_ENDIAN_SELECT(le, be) be
-#define LJ_ENDIAN_LOHI(lo, hi) hi lo
-#else
-#define LJ_LE 1
-#define LJ_BE 0
-#define LJ_ENDIAN_SELECT(le, be) le
-#define LJ_ENDIAN_LOHI(lo, hi) lo hi
-#endif
-
-#if LJ_ARCH_BITS == 32
-#define LJ_32 1
-#define LJ_64 0
-#else
-#define LJ_32 0
-#define LJ_64 1
-#endif
-
-#ifndef LJ_TARGET_UNALIGNED
-#define LJ_TARGET_UNALIGNED 0
-#endif
-
-/* Various workarounds for embedded operating systems. */
-#if (defined(__ANDROID__) && !defined(LJ_TARGET_X86ORX64)) || defined(__symbian__) || LJ_TARGET_XBOX360
-#define LUAJIT_NO_LOG2
-#endif
-#if defined(__symbian__)
-#define LUAJIT_NO_EXP2
-#endif
-#if LJ_TARGET_CONSOLE || (LJ_TARGET_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_8_0)
-#define LJ_NO_SYSTEM 1
-#endif
-
-#if !defined(LUAJIT_NO_UNWIND) && __GNU_COMPACT_EH__
-/* NYI: no support for compact unwind specification, yet. */
-#define LUAJIT_NO_UNWIND 1
-#endif
-
-#if defined(LUAJIT_NO_UNWIND) || defined(__symbian__) || LJ_TARGET_IOS || LJ_TARGET_PS3 || LJ_TARGET_PS4
-#define LJ_NO_UNWIND 1
-#endif
-
-/* Compatibility with Lua 5.1 vs. 5.2. */
-#ifdef LUAJIT_ENABLE_LUA52COMPAT
-#define LJ_52 1
-#else
-#define LJ_52 0
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm.h
deleted file mode 100644
index 2819481..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
-** IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_ASM_H
-#define _LJ_ASM_H
-
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-LJ_FUNC void lj_asm_trace(jit_State *J, GCtrace *T);
-LJ_FUNC void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno,
- MCode *target);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_arm.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_arm.h
deleted file mode 100644
index 961f7e3..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_arm.h
+++ /dev/null
@@ -1,2360 +0,0 @@
-/*
-** ARM IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate a scratch register pair. */
-static Reg ra_scratchpair(ASMState *as, RegSet allow)
-{
- RegSet pick1 = as->freeset & allow;
- RegSet pick2 = pick1 & (pick1 >> 1) & RSET_GPREVEN;
- Reg r;
- if (pick2) {
- r = rset_picktop(pick2);
- } else {
- RegSet pick = pick1 & (allow >> 1) & RSET_GPREVEN;
- if (pick) {
- r = rset_picktop(pick);
- ra_restore(as, regcost_ref(as->cost[r+1]));
- } else {
- pick = pick1 & (allow << 1) & RSET_GPRODD;
- if (pick) {
- r = ra_restore(as, regcost_ref(as->cost[rset_picktop(pick)-1]));
- } else {
- r = ra_evict(as, allow & (allow >> 1) & RSET_GPREVEN);
- ra_restore(as, regcost_ref(as->cost[r+1]));
- }
- }
- }
- lua_assert(rset_test(RSET_GPREVEN, r));
- ra_modified(as, r);
- ra_modified(as, r+1);
- RA_DBGX((as, "scratchpair $r $r", r, r+1));
- return r;
-}
-
-#if !LJ_SOFTFP
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_allocref(as, ir->op2, allow);
- left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_allocref(as, ir->op1, allow);
- right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-#endif
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Generate an exit stub group at the bottom of the reserved MCode memory. */
-static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
-{
- MCode *mxp = as->mcbot;
- int i;
- if (mxp + 4*4+4*EXITSTUBS_PER_GROUP >= as->mctop)
- asm_mclimit(as);
- /* str lr, [sp]; bl ->vm_exit_handler; .long DISPATCH_address, group. */
- *mxp++ = ARMI_STR|ARMI_LS_P|ARMI_LS_U|ARMF_D(RID_LR)|ARMF_N(RID_SP);
- *mxp = ARMI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)-2)&0x00ffffffu);
- mxp++;
- *mxp++ = (MCode)i32ptr(J2GG(as->J)->dispatch); /* DISPATCH address */
- *mxp++ = group*EXITSTUBS_PER_GROUP;
- for (i = 0; i < EXITSTUBS_PER_GROUP; i++)
- *mxp++ = ARMI_B|((-6-i)&0x00ffffffu);
- lj_mcode_sync(as->mcbot, mxp);
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- return mxp - EXITSTUBS_PER_GROUP;
-}
-
-/* Setup all needed exit stubs. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
- lj_trace_err(as->J, LJ_TRERR_SNAPOV);
- for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
- if (as->J->exitstubgroup[i] == NULL)
- as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
-}
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guardcc(ASMState *as, ARMCC cc)
-{
- MCode *target = exitstub_addr(as->J, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *p = ARMI_BL | ((target-p-2) & 0x00ffffffu);
- emit_branch(as, ARMF_CC(ARMI_B, cc^1), p+1);
- return;
- }
- emit_branch(as, ARMF_CC(ARMI_BL, cc), target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
- int lim)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (ofs > -lim && ofs < lim) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (ofs < lim) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- *ofsp = (ofs & 255); /* Mask out less bits to allow LDRD. */
- return ra_allock(as, (ofs & ~255), allow);
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse m operand into arithmetic/logic instructions. */
-static uint32_t asm_fuseopm(ASMState *as, ARMIns ai, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_hasreg(ir->r)) {
- ra_noweak(as, ir->r);
- return ARMF_M(ir->r);
- } else if (irref_isk(ref)) {
- uint32_t k = emit_isk12(ai, ir->i);
- if (k)
- return k;
- } else if (mayfuse(as, ref)) {
- if (ir->o >= IR_BSHL && ir->o <= IR_BROR) {
- Reg m = ra_alloc1(as, ir->op1, allow);
- ARMShift sh = ir->o == IR_BSHL ? ARMSH_LSL :
- ir->o == IR_BSHR ? ARMSH_LSR :
- ir->o == IR_BSAR ? ARMSH_ASR : ARMSH_ROR;
- if (irref_isk(ir->op2)) {
- return m | ARMF_SH(sh, (IR(ir->op2)->i & 31));
- } else {
- Reg s = ra_alloc1(as, ir->op2, rset_exclude(allow, m));
- return m | ARMF_RSH(sh, s);
- }
- } else if (ir->o == IR_ADD && ir->op1 == ir->op2) {
- Reg m = ra_alloc1(as, ir->op1, allow);
- return m | ARMF_SH(ARMSH_LSL, 1);
- }
- }
- return ra_allocref(as, ref, allow);
-}
-
-/* Fuse shifts into loads/stores. Only bother with BSHL 2 => lsl #2. */
-static IRRef asm_fuselsl2(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r) && mayfuse(as, ref) && ir->o == IR_BSHL &&
- irref_isk(ir->op2) && IR(ir->op2)->i == 2)
- return ir->op1;
- return 0; /* No fusion. */
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, ARMIns ai, Reg rd, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- int32_t lim = (!LJ_SOFTFP && (ai & 0x08000000)) ? 1024 :
- (ai & 0x04000000) ? 4096 : 256;
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) &&
- (ofs2 = ofs + IR(ir->op2)->i) > -lim && ofs2 < lim &&
- (!(!LJ_SOFTFP && (ai & 0x08000000)) || !(ofs2 & 3))) {
- ofs = ofs2;
- ref = ir->op1;
- } else if (ofs == 0 && !(!LJ_SOFTFP && (ai & 0x08000000))) {
- IRRef lref = ir->op1, rref = ir->op2;
- Reg rn, rm;
- if ((ai & 0x04000000)) {
- IRRef sref = asm_fuselsl2(as, rref);
- if (sref) {
- rref = sref;
- ai |= ARMF_SH(ARMSH_LSL, 2);
- } else if ((sref = asm_fuselsl2(as, lref)) != 0) {
- lref = rref;
- rref = sref;
- ai |= ARMF_SH(ARMSH_LSL, 2);
- }
- }
- rn = ra_alloc1(as, lref, allow);
- rm = ra_alloc1(as, rref, rset_exclude(allow, rn));
- if ((ai & 0x04000000)) ai |= ARMI_LS_R;
- emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
- return;
- }
- } else if (ir->o == IR_STRREF && !(!LJ_SOFTFP && (ai & 0x08000000))) {
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs += IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs += IR(ir->op1)->i;
- ref = ir->op2;
- } else {
- /* NYI: Fuse ADD with constant. */
- Reg rn = ra_alloc1(as, ir->op1, allow);
- uint32_t m = asm_fuseopm(as, 0, ir->op2, rset_exclude(allow, rn));
- if ((ai & 0x04000000))
- emit_lso(as, ai, rd, rd, ofs);
- else
- emit_lsox(as, ai, rd, rd, ofs);
- emit_dn(as, ARMI_ADD^m, rd, rn);
- return;
- }
- if (ofs <= -lim || ofs >= lim) {
- Reg rn = ra_alloc1(as, ref, allow);
- Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
- if ((ai & 0x04000000)) ai |= ARMI_LS_R;
- emit_dnm(as, ai|ARMI_LS_P|ARMI_LS_U, rd, rn, rm);
- return;
- }
- }
- }
- base = ra_alloc1(as, ref, allow);
-#if !LJ_SOFTFP
- if ((ai & 0x08000000))
- emit_vlso(as, ai, rd, base, ofs);
- else
-#endif
- if ((ai & 0x04000000))
- emit_lso(as, ai, rd, base, ofs);
- else
- emit_lsox(as, ai, rd, base, ofs);
-}
-
-#if !LJ_SOFTFP
-/* Fuse to multiply-add/sub instruction. */
-static int asm_fusemadd(ASMState *as, IRIns *ir, ARMIns ai, ARMIns air)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- IRIns *irm;
- if (lref != rref &&
- ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
- ra_noreg(irm->r)) ||
- (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
- (rref = lref, ai = air, ra_noreg(irm->r))))) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
- Reg right, left = ra_alloc2(as, irm,
- rset_exclude(rset_exclude(RSET_FPR, dest), add));
- right = (left >> 8); left &= 255;
- emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
- if (dest != add) emit_dm(as, ARMI_VMOV_D, (dest & 15), (add & 15));
- return 1;
- }
- return 0;
-}
-#endif
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 0;
-#if LJ_SOFTFP
- Reg gpr = REGARG_FIRSTGPR;
-#else
- Reg gpr, fpr = REGARG_FIRSTFPR, fprodd = 0;
-#endif
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
-#if !LJ_SOFTFP
- for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
- as->cost[gpr] = REGCOST(~0u, ASMREF_L);
- gpr = REGARG_FIRSTGPR;
-#endif
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- IRIns *ir = IR(ref);
-#if !LJ_SOFTFP
- if (ref && irt_isfp(ir->t)) {
- RegSet of = as->freeset;
- Reg src;
- if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
- if (irt_isnum(ir->t)) {
- if (fpr <= REGARG_LASTFPR) {
- ra_leftov(as, fpr, ref);
- fpr++;
- continue;
- }
- } else if (fprodd) { /* Ick. */
- src = ra_alloc1(as, ref, RSET_FPR);
- emit_dm(as, ARMI_VMOV_S, (fprodd & 15), (src & 15) | 0x00400000);
- fprodd = 0;
- continue;
- } else if (fpr <= REGARG_LASTFPR) {
- ra_leftov(as, fpr, ref);
- fprodd = fpr++;
- continue;
- }
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- fprodd = 0;
- goto stackfp;
- }
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- src = ra_alloc1(as, ref, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1u;
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
- if (irt_isnum(ir->t)) {
- lua_assert(rset_test(as->freeset, gpr+1)); /* Ditto. */
- emit_dnm(as, ARMI_VMOV_RR_D, gpr, gpr+1, (src & 15));
- gpr += 2;
- } else {
- emit_dn(as, ARMI_VMOV_R_S, gpr, (src & 15));
- gpr++;
- }
- } else {
- stackfp:
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, src, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- } else
-#endif
- {
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Must have been evicted. */
- if (ref) ra_leftov(as, gpr, ref);
- gpr++;
- } else {
- if (ref) {
- Reg r = ra_alloc1(as, ref, RSET_GPR);
- emit_spstore(as, ir, r, ofs);
- }
- ofs += 4;
- }
- }
- }
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (!LJ_SOFTFP && irt_isfp(ir->t)) {
- if (LJ_ABI_SOFTFP || (ci->flags & (CCI_CASTU64|CCI_VARARG))) {
- Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
- if (irt_isnum(ir->t))
- emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, dest);
- else
- emit_dn(as, ARMI_VMOV_S_R, RID_RET, dest);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
- UNUSED(ci);
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need a non-argument register for indirect calls. */
- Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_R4, RID_R12+1));
- emit_m(as, ARMI_BLXr, freg);
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- /* Need to force a spill on REF_BASE now to update the stack slot. */
- emit_lso(as, ARMI_STR, base, RID_SP, ra_spill(as, IR(REF_BASE)));
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_nm(as, ARMI_CMP, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_lso(as, ARMI_LDR, RID_TMP, base, -4);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-#if !LJ_SOFTFP
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guardcc(as, CC_NE);
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ARMI_VCMP_D, (tmp & 15), (left & 15));
- emit_dm(as, ARMI_VCVT_F64_S32, (tmp & 15), (tmp & 15));
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (left & 15));
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dnm(as, ARMI_VADD_D, (tmp & 15), (left & 15), (right & 15));
-}
-#endif
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
-#if !LJ_SOFTFP
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
-#endif
- IRRef lref = ir->op1;
- /* 64 bit integer conversions are handled by SPLIT. */
- lua_assert(!irt_isint64(ir->t) && !(st == IRT_I64 || st == IRT_U64));
-#if LJ_SOFTFP
- /* FP conversions are handled by SPLIT. */
- lua_assert(!irt_isfp(ir->t) && !(st == IRT_NUM || st == IRT_FLOAT));
- /* Can't check for same types: SPLIT uses CONV int.int + BXOR for sfp NEG. */
-#else
- lua_assert(irt_type(ir->t) != st);
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- emit_dm(as, st == IRT_NUM ? ARMI_VCVT_F32_F64 : ARMI_VCVT_F64_F32,
- (dest & 15), (ra_alloc1(as, lref, RSET_FPR) & 15));
- } else { /* Integer to FP conversion. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- ARMIns ai = irt_isfloat(ir->t) ?
- (st == IRT_INT ? ARMI_VCVT_F32_S32 : ARMI_VCVT_F32_U32) :
- (st == IRT_INT ? ARMI_VCVT_F64_S32 : ARMI_VCVT_F64_U32);
- emit_dm(as, ai, (dest & 15), (dest & 15));
- emit_dn(as, ARMI_VMOV_S_R, left, (dest & 15));
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- ARMIns ai;
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- ai = irt_isint(ir->t) ?
- (st == IRT_NUM ? ARMI_VCVT_S32_F64 : ARMI_VCVT_S32_F32) :
- (st == IRT_NUM ? ARMI_VCVT_U32_F64 : ARMI_VCVT_U32_F32);
- emit_dm(as, ai, (tmp & 15), (left & 15));
- }
- } else
-#endif
- {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((as->flags & JIT_F_ARMV6)) {
- ARMIns ai = st == IRT_I8 ? ARMI_SXTB :
- st == IRT_U8 ? ARMI_UXTB :
- st == IRT_I16 ? ARMI_SXTH : ARMI_UXTH;
- emit_dm(as, ai, dest, left);
- } else if (st == IRT_U8) {
- emit_dn(as, ARMI_AND|ARMI_K12|255, dest, left);
- } else {
- uint32_t shift = st == IRT_I8 ? 24 : 16;
- ARMShift sh = st == IRT_U16 ? ARMSH_LSR : ARMSH_ASR;
- emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, RID_TMP);
- emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_LSL, shift), RID_TMP, left);
- }
- } else { /* Handle 32/32 bit no-op (cast). */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
-
-#if !LJ_SOFTFP && LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- CCallInfo ci;
- IRRef args[2];
- args[0] = (ir-1)->op1;
- args[1] = ir->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = lj_ir_callinfo[id];
-#if !LJ_ABI_SOFTFP
- ci.flags |= CCI_VARARG; /* These calls don't use the hard-float ABI! */
-#endif
- asm_setupresult(as, ir, &ci);
- asm_gencall(as, &ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- Reg rlo = 0, rhi = 0, tmp;
- int destused = ra_used(ir);
- int32_t ofs = 0;
- ra_evictset(as, RSET_SCRATCH);
-#if LJ_SOFTFP
- if (destused) {
- if (ra_hasspill(ir->s) && ra_hasspill((ir+1)->s) &&
- (ir->s & 1) == 0 && ir->s + 1 == (ir+1)->s) {
- int i;
- for (i = 0; i < 2; i++) {
- Reg r = (ir+i)->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- ra_modified(as, r);
- emit_spload(as, ir+i, r, sps_scale((ir+i)->s));
- }
- }
- ofs = sps_scale(ir->s);
- destused = 0;
- } else {
- rhi = ra_dest(as, ir+1, RSET_GPR);
- rlo = ra_dest(as, ir, rset_exclude(RSET_GPR, rhi));
- }
- }
- asm_guardcc(as, CC_EQ);
- if (destused) {
- emit_lso(as, ARMI_LDR, rhi, RID_SP, 4);
- emit_lso(as, ARMI_LDR, rlo, RID_SP, 0);
- }
-#else
- UNUSED(rhi);
- if (destused) {
- if (ra_hasspill(ir->s)) {
- ofs = sps_scale(ir->s);
- destused = 0;
- if (ra_hasreg(ir->r)) {
- ra_free(as, ir->r);
- ra_modified(as, ir->r);
- emit_spload(as, ir, ir->r, ofs);
- }
- } else {
- rlo = ra_dest(as, ir, RSET_FPR);
- }
- }
- asm_guardcc(as, CC_EQ);
- if (destused)
- emit_vlso(as, ARMI_VLDR_D, rlo, RID_SP, 0);
-#endif
- emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- if (ofs == 0)
- emit_dm(as, ARMI_MOV, tmp, RID_SP);
- else
- emit_opk(as, ARMI_ADD, tmp, RID_SP, ofs, RSET_GPR);
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) {
- /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- } else {
-#if LJ_SOFTFP
- lua_assert(0);
-#else
- /* Otherwise force a spill and use the spill slot. */
- emit_opk(as, ARMI_ADD, dest, RID_SP, ra_spill(as, ir), RSET_GPR);
-#endif
- }
- } else {
- /* Otherwise use [sp] and [sp+4] to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_dm(as, ARMI_MOV, dest, RID_SP);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_lso(as, ARMI_STR, src, RID_SP, 0);
- }
- if ((ir+1)->o == IR_HIOP)
- type = ra_alloc1(as, ref+1, allow);
- else
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_lso(as, ARMI_STR, type, RID_SP, 4);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- uint32_t k = emit_isk12(ARMI_ADD, ofs + 8*IR(ir->op2)->i);
- if (k) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_dn(as, ARMI_ADD^k, dest, base);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, base, idx);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir, IROp merge)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = 0, keyhi = 0, keynumhi = RID_NONE, tmp = RID_TMP;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- int32_t k = 0, khi = emit_isk12(ARMI_CMP, irt_toitype(kt));
- uint32_t khash;
- MCLabel l_end, l_loop;
- rset_clear(allow, tab);
- if (!irref_isk(refkey) || irt_isstr(kt)) {
-#if LJ_SOFTFP
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- if (irkey[1].o == IR_HIOP) {
- if (ra_hasreg((irkey+1)->r)) {
- keynumhi = (irkey+1)->r;
- keyhi = RID_TMP;
- ra_noweak(as, keynumhi);
- } else {
- keyhi = keynumhi = ra_allocref(as, refkey+1, allow);
- }
- rset_clear(allow, keynumhi);
- khi = 0;
- }
-#else
- if (irt_isnum(kt)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- keyhi = keynumhi = ra_scratch(as, allow);
- rset_clear(allow, keyhi);
- khi = 0;
- } else {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
-#endif
- } else if (irt_isnum(kt)) {
- int32_t val = (int32_t)ir_knum(irkey)->u32.lo;
- k = emit_isk12(ARMI_CMP, val);
- if (!k) {
- key = ra_allock(as, val, allow);
- rset_clear(allow, key);
- }
- val = (int32_t)ir_knum(irkey)->u32.hi;
- khi = emit_isk12(ARMI_CMP, val);
- if (!khi) {
- keyhi = ra_allock(as, val, allow);
- rset_clear(allow, keyhi);
- }
- } else if (!irt_ispri(kt)) {
- k = emit_isk12(ARMI_CMP, irkey->i);
- if (!k) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
- }
- if (!irt_ispri(kt))
- tmp = ra_scratchpair(as, allow);
-
- /* Key not found in chain: jump to exit (if merged) or load niltv. */
- l_end = emit_label(as);
- as->invmcp = NULL;
- if (merge == IR_NE)
- asm_guardcc(as, CC_AL);
- else if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
-
- /* Follow hash chain until the end. */
- l_loop = --as->mcp;
- emit_n(as, ARMI_CMP|ARMI_K12|0, dest);
- emit_lso(as, ARMI_LDR, dest, dest, (int32_t)offsetof(Node, next));
-
- /* Type and value comparison. */
- if (merge == IR_EQ)
- asm_guardcc(as, CC_EQ);
- else
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- if (!irt_ispri(kt)) {
- emit_nm(as, ARMF_CC(ARMI_CMP, CC_EQ)^k, tmp, key);
- emit_nm(as, ARMI_CMP^khi, tmp+1, keyhi);
- emit_lsox(as, ARMI_LDRD, tmp, dest, (int32_t)offsetof(Node, key));
- } else {
- emit_n(as, ARMI_CMP^khi, tmp);
- emit_lso(as, ARMI_LDR, tmp, dest, (int32_t)offsetof(Node, key.it));
- }
- *l_loop = ARMF_CC(ARMI_B, CC_NE) | ((as->mcp-l_loop-2) & 0x00ffffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 3), dest, dest, tmp);
- emit_dnm(as, ARMI_ADD|ARMF_SH(ARMSH_LSL, 1), tmp, tmp, tmp);
- if (irt_isstr(kt)) { /* Fetch of str->hash is cheaper than ra_allock. */
- emit_dnm(as, ARMI_AND, tmp, tmp+1, RID_TMP);
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_lso(as, ARMI_LDR, tmp+1, key, (int32_t)offsetof(GCstr, hash));
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- } else if (irref_isk(refkey)) {
- emit_opk(as, ARMI_AND, tmp, RID_TMP, (int32_t)khash,
- rset_exclude(rset_exclude(RSET_GPR, tab), dest));
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- } else { /* Must match with hash*() in lj_tab.c. */
- if (ra_hasreg(keynumhi)) { /* Canonicalize +-0.0 to 0.0. */
- if (keyhi == RID_TMP)
- emit_dm(as, ARMF_CC(ARMI_MOV, CC_NE), keyhi, keynumhi);
- emit_d(as, ARMF_CC(ARMI_MOV, CC_EQ)|ARMI_K12|0, keyhi);
- }
- emit_dnm(as, ARMI_AND, tmp, tmp, RID_TMP);
- emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT3), tmp, tmp, tmp+1);
- emit_lso(as, ARMI_LDR, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 32-((HASH_ROT2+HASH_ROT1)&31)),
- tmp, tmp+1, tmp);
- emit_lso(as, ARMI_LDR, RID_TMP, tab, (int32_t)offsetof(GCtab, hmask));
- emit_dnm(as, ARMI_SUB|ARMF_SH(ARMSH_ROR, 32-HASH_ROT1), tmp+1, tmp+1, tmp);
- if (ra_hasreg(keynumhi)) {
- emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
- emit_dnm(as, ARMI_ORR|ARMI_S, RID_TMP, tmp, key); /* Test for +-0.0. */
- emit_dnm(as, ARMI_ADD, tmp, keynumhi, keynumhi);
-#if !LJ_SOFTFP
- emit_dnm(as, ARMI_VMOV_RR_D, key, keynumhi,
- (ra_alloc1(as, refkey, RSET_FPR) & 15));
-#endif
- } else {
- emit_dnm(as, ARMI_EOR, tmp+1, tmp, key);
- emit_opk(as, ARMI_ADD, tmp, key, (int32_t)HASH_BIAS,
- rset_exclude(rset_exclude(RSET_GPR, tab), key));
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir) || ofs > 4095) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 4095) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_opk(as, ARMI_ADD, dest, node, ofs, allow);
- }
- asm_guardcc(as, CC_NE);
- if (!irt_ispri(irkey->t)) {
- RegSet even = (as->freeset & allow);
- even = even & (even >> 1) & RSET_GPREVEN;
- if (even) {
- key = ra_scratch(as, even);
- if (rset_test(as->freeset, key+1)) {
- type = key+1;
- ra_modified(as, type);
- }
- } else {
- key = ra_scratch(as, allow);
- }
- rset_clear(allow, key);
- }
- rset_clear(allow, type);
- if (irt_isnum(irkey->t)) {
- emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, type,
- (int32_t)ir_knum(irkey)->u32.hi, allow);
- emit_opk(as, ARMI_CMP, 0, key,
- (int32_t)ir_knum(irkey)->u32.lo, allow);
- } else {
- if (ra_hasreg(key))
- emit_opk(as, ARMF_CC(ARMI_CMP, CC_EQ), 0, key, irkey->i, allow);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype(irkey->t), type);
- }
- emit_lso(as, ARMI_LDR, type, idx, kofs+4);
- if (ra_hasreg(key)) emit_lso(as, ARMI_LDR, key, idx, kofs);
- if (ofs > 4095)
- emit_opk(as, ARMI_ADD, dest, node, ofs, RSET_GPR);
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, ARMI_LDR, dest, v);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guardcc(as, CC_NE);
- emit_n(as, ARMI_CMP|ARMI_K12|1, RID_TMP);
- emit_opk(as, ARMI_ADD, dest, uv,
- (int32_t)offsetof(GCupval, tv), RSET_GPR);
- emit_lso(as, ARMI_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_lso(as, ARMI_LDR, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_lso(as, ARMI_LDR, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- uint32_t k, m = ARMI_K12|sizeof(GCstr);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- (k = emit_isk12(ARMI_ADD,
- (int32_t)sizeof(GCstr) + IR(irr->op2)->i))) {
- m = k;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_dn(as, ARMI_ADD^m, dest, dest);
- emit_dnm(as, ARMI_ADD, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- emit_opk(as, ARMI_ADD, dest, r,
- sizeof(GCstr) + IR(refk)->i, rset_exclude(RSET_GPR, r));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static ARMIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return ARMI_LDRSB;
- case IRT_U8: return ARMI_LDRB;
- case IRT_I16: return ARMI_LDRSH;
- case IRT_U16: return ARMI_LDRH;
- case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VLDR_D;
- case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VLDR_S;
- default: return ARMI_LDR;
- }
-}
-
-static ARMIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return ARMI_STRB;
- case IRT_I16: case IRT_U16: return ARMI_STRH;
- case IRT_NUM: lua_assert(!LJ_SOFTFP); return ARMI_VSTR_D;
- case IRT_FLOAT: if (!LJ_SOFTFP) return ARMI_VSTR_S;
- default: return ARMI_STR;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- ARMIns ai = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_dn(as, ARMI_ADD|ARMI_K12|ofs, dest, idx);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- if ((ai & 0x04000000))
- emit_lso(as, ai, dest, idx, ofs);
- else
- emit_lsox(as, ai, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- ARMIns ai = asm_fxstoreins(ir);
- if ((ai & 0x04000000))
- emit_lso(as, ai, src, idx, ofs);
- else
- emit_lsox(as, ai, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir,
- (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2,
- (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
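-/* ALOAD/HLOAD/ULOAD/VLOAD: load a TValue and guard its type tag. */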
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- IRType t = hiop ? IRT_NUM : irt_type(ir->t);
- Reg dest = RID_NONE, type = RID_NONE, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = 0;
- if (hiop && ra_used(ir+1)) {
- type = ra_dest(as, ir+1, allow);
- rset_clear(allow, type);
- }
- if (ra_used(ir)) {
- lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
- irt_isint(ir->t) || irt_isaddr(ir->t));
- dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow,
- (!LJ_SOFTFP && t == IRT_NUM) ? 1024 : 4096);
- if (!hiop || type == RID_NONE) {
- rset_clear(allow, idx);
- if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
- rset_test((as->freeset & allow), dest+1)) {
- type = dest+1;
- ra_modified(as, type);
- } else {
- type = RID_TMP;
- }
- }
- asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
- if (ra_hasreg(dest)) {
-#if !LJ_SOFTFP
- if (t == IRT_NUM)
- emit_vlso(as, ARMI_VLDR_D, dest, idx, ofs);
- else
-#endif
- emit_lso(as, ARMI_LDR, dest, idx, ofs);
- }
- emit_lso(as, ARMI_LDR, type, idx, ofs+4);
-}
-
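-/* ASTORE/HSTORE/USTORE: store value and type tag into a TValue slot. */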
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = 0;
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow, 1024);
- emit_vlso(as, ARMI_VSTR_D, src, idx, ofs);
- } else
-#endif
- {
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- if (hiop)
- type = ra_alloc1(as, (ir+1)->op2, allow);
- else
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type), 4096);
- if (ra_hasreg(src)) emit_lso(as, ARMI_STR, src, idx, ofs);
- emit_lso(as, ARMI_STR, type, idx, ofs+4);
- }
- }
-}
-
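-/* SLOAD: load a Lua stack slot, with optional type check and number/int conversion. */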
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- int hiop = (LJ_SOFTFP && (ir+1)->o == IR_HIOP);
- IRType t = hiop ? IRT_NUM : irt_type(ir->t);
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(ir->t) || !(ir->op2 & IRSLOAD_TYPECHECK));
-#if LJ_SOFTFP
- lua_assert(!(ir->op2 & IRSLOAD_CONVERT)); /* Handled by LJ_SOFTFP SPLIT. */
- if (hiop && ra_used(ir+1)) {
- type = ra_dest(as, ir+1, allow);
- rset_clear(allow, type);
- }
-#else
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(ir->t) && t == IRT_INT) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t = IRT_NUM; /* Continue with a regular number type check. */
- } else
-#endif
- if (ra_used(ir)) {
- Reg tmp = RID_NONE;
- if ((ir->op2 & IRSLOAD_CONVERT))
- tmp = ra_scratch(as, t == IRT_INT ? RSET_FPR : RSET_GPR);
- lua_assert((LJ_SOFTFP ? 0 : irt_isnum(ir->t)) ||
- irt_isint(ir->t) || irt_isaddr(ir->t));
- dest = ra_dest(as, ir, (!LJ_SOFTFP && t == IRT_NUM) ? RSET_FPR : allow);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (t == IRT_INT) {
- emit_dn(as, ARMI_VMOV_R_S, dest, (tmp & 15));
- emit_dm(as, ARMI_VCVT_S32_F64, (tmp & 15), (tmp & 15));
- t = IRT_NUM; /* Check for original type. */
- } else {
- emit_dm(as, ARMI_VCVT_F64_S32, (dest & 15), (dest & 15));
- emit_dn(as, ARMI_VMOV_S_R, tmp, (dest & 15));
- t = IRT_INT; /* Check for original type. */
- }
- dest = tmp;
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
-dotypecheck:
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- if (ra_noreg(type)) {
- if (ofs < 256 && ra_hasreg(dest) && (dest & 1) == 0 &&
- rset_test((as->freeset & allow), dest+1)) {
- type = dest+1;
- ra_modified(as, type);
- } else {
- type = RID_TMP;
- }
- }
- asm_guardcc(as, t == IRT_NUM ? CC_HS : CC_NE);
- emit_n(as, ARMI_CMN|ARMI_K12|-irt_toitype_(t), type);
- }
- if (ra_hasreg(dest)) {
-#if !LJ_SOFTFP
- if (t == IRT_NUM) {
- if (ofs < 1024) {
- emit_vlso(as, ARMI_VLDR_D, dest, base, ofs);
- } else {
- if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
- emit_vlso(as, ARMI_VLDR_D, dest, RID_TMP, 0);
- emit_opk(as, ARMI_ADD, RID_TMP, base, ofs, allow);
- return;
- }
- } else
-#endif
- emit_lso(as, ARMI_LDR, dest, base, ofs);
- }
- if (ra_hasreg(type)) emit_lso(as, ARMI_LDR, type, base, ofs+4);
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4; ir++;
- lua_assert(ir->o == IR_HIOP);
- }
- for (;;) {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_lso(as, ARMI_STR, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir--;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- {
- uint32_t k = emit_isk12(ARMI_MOV, ctypeid);
- Reg r = k ? RID_R1 : ra_allock(as, ctypeid, allow);
- emit_lso(as, ARMI_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
- emit_lsox(as, ARMI_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
- emit_d(as, ARMI_MOV|ARMI_K12|~LJ_TCDATA, RID_TMP);
- if (k) emit_d(as, ARMI_MOV^k, RID_R1);
- }
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
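-/* TBAR: move a black table back to gray by linking it onto g->gc.grayagain. */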
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg gr = ra_allock(as, i32ptr(J2G(as->J)),
- rset_exclude(rset_exclude(RSET_GPR, tab), link));
- Reg mark = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_lso(as, ARMI_STR, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_lso(as, ARMI_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_lso(as, ARMI_STR, tab, gr,
- (int32_t)offsetof(global_State, gc.grayagain));
- emit_dn(as, ARMI_BIC|ARMI_K12|LJ_GC_BLACK, mark, mark);
- emit_lso(as, ARMI_LDR, link, gr,
- (int32_t)offsetof(global_State, gc.grayagain));
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_BLACK, mark);
- emit_lso(as, ARMI_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
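-/* OBAR: call lj_gc_barrieruv() when the upvalue is black and the stored value is white. */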
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- if ((l_end[-1] >> 28) == CC_AL)
- l_end[-1] = ARMF_CC(l_end[-1], CC_NE);
- else
- emit_branch(as, ARMF_CC(ARMI_B, CC_EQ), l_end);
- ra_allockreg(as, i32ptr(J2G(as->J)), ra_releasetmp(as, ASMREF_TMP1));
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_n(as, ARMF_CC(ARMI_TST, CC_NE)|ARMI_K12|LJ_GC_BLACK, tmp);
- emit_n(as, ARMI_TST|ARMI_K12|LJ_GC_WHITES, RID_TMP);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_lso(as, ARMI_LDRB, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_lso(as, ARMI_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-#if !LJ_SOFTFP
-static void asm_fparith(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- emit_dnm(as, ai, (dest & 15), (left & 15), (right & 15));
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_dm(as, ai, (dest & 15), (left & 15));
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
-#endif
-
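-/* Check whether swapping the operands pays off: constants and fusable operands go right. */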
-static int asm_swapops(ASMState *as, IRRef lref, IRRef rref)
-{
- IRIns *ir;
- if (irref_isk(rref))
- return 0; /* Don't swap constants to the left. */
- if (irref_isk(lref))
- return 1; /* But swap constants to the right. */
- ir = IR(rref);
- if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
- (ir->o == IR_ADD && ir->op1 == ir->op2))
- return 0; /* Don't swap fusable operands to the left. */
- ir = IR(lref);
- if ((ir->o >= IR_BSHL && ir->o <= IR_BROR) ||
- (ir->o == IR_ADD && ir->op1 == ir->op2))
- return 1; /* But swap fusable operands to the right. */
- return 0; /* Otherwise don't swap. */
-}
-
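-/* Generic two-operand integer op with operand swapping, fusion and overflow guard. */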
-static void asm_intop(ASMState *as, IRIns *ir, ARMIns ai)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- Reg left, dest = ra_dest(as, ir, RSET_GPR);
- uint32_t m;
- if (asm_swapops(as, lref, rref)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- if ((ai & ~ARMI_S) == ARMI_SUB || (ai & ~ARMI_S) == ARMI_SBC)
- ai ^= (ARMI_SUB^ARMI_RSB);
- }
- left = ra_hintalloc(as, lref, dest, RSET_GPR);
- m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
- if (irt_isguard(ir->t)) { /* For IR_ADDOV etc. */
- asm_guardcc(as, CC_VS);
- ai |= ARMI_S;
- }
- emit_dn(as, ai^m, dest, left);
-}
-
-static void asm_intop_s(ASMState *as, IRIns *ir, ARMIns ai)
-{
- if (as->flagmcp == as->mcp) { /* Drop cmp r, #0. */
- as->flagmcp = NULL;
- as->mcp++;
- ai |= ARMI_S;
- }
- asm_intop(as, ir, ai);
-}
-
-static void asm_bitop(ASMState *as, IRIns *ir, ARMIns ai)
-{
- if (as->flagmcp == as->mcp) { /* Try to drop cmp r, #0. */
- uint32_t cc = (as->mcp[1] >> 28);
- as->flagmcp = NULL;
- if (cc <= CC_NE) {
- as->mcp++;
- ai |= ARMI_S;
- } else if (cc == CC_GE) {
- *++as->mcp ^= ((CC_GE^CC_PL) << 28);
- ai |= ARMI_S;
- } else if (cc == CC_LT) {
- *++as->mcp ^= ((CC_LT^CC_MI) << 28);
- ai |= ARMI_S;
- } /* else: other conds don't work with bit ops. */
- }
- if (ir->op2 == 0) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
- emit_d(as, ai^m, dest);
- } else {
- /* NYI: Turn BAND !k12 into uxtb, uxth or bfc or shl+shr. */
- asm_intop(as, ir, ai);
- }
-}
-
-static void asm_intneg(ASMState *as, IRIns *ir, ARMIns ai)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_dn(as, ai|ARMI_K12|0, dest, left);
-}
-
-/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
-static void asm_intmul(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- Reg tmp = RID_NONE;
- /* ARMv5 restriction: dest != left and dest_hi != left. */
- if (dest == left && left != right) { left = right; right = dest; }
- if (irt_isguard(ir->t)) { /* IR_MULOV */
- if (!(as->flags & JIT_F_ARMV6) && dest == left)
- tmp = left = ra_scratch(as, rset_exclude(RSET_GPR, left));
- asm_guardcc(as, CC_NE);
- emit_nm(as, ARMI_TEQ|ARMF_SH(ARMSH_ASR, 31), RID_TMP, dest);
- emit_dnm(as, ARMI_SMULL|ARMF_S(right), dest, RID_TMP, left);
- } else {
- if (!(as->flags & JIT_F_ARMV6) && dest == left) tmp = left = RID_TMP;
- emit_nm(as, ARMI_MUL|ARMF_S(right), dest, left);
- }
- /* Only need this for the dest == left == right case. */
- if (ra_hasreg(tmp)) emit_dm(as, ARMI_MOV, tmp, right);
-}
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, ARMI_VMLA_D, ARMI_VMLA_D))
- asm_fparith(as, ir, ARMI_VADD_D);
- return;
- }
-#endif
- asm_intop_s(as, ir, ARMI_ADD);
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, ARMI_VNMLS_D, ARMI_VMLS_D))
- asm_fparith(as, ir, ARMI_VSUB_D);
- return;
- }
-#endif
- asm_intop_s(as, ir, ARMI_SUB);
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, ARMI_VMUL_D);
- return;
- }
-#endif
- asm_intmul(as, ir);
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, ARMI_VNEG_D);
- return;
- }
-#endif
- asm_intneg(as, ir, ARMI_RSB);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-#if !LJ_SOFTFP
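-/* FPMATH floor/ceil/trunc: call the lj_vm_*_sf helpers, which pass and return the double in r0/r1. */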
-static void asm_callround(ASMState *as, IRIns *ir, int id)
-{
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RID2RSET(RID_R0)|RID2RSET(RID_R1)|RID2RSET(RID_R2)|
- RID2RSET(RID_R3)|RID2RSET(RID_R12);
- RegSet of;
- Reg dest, src;
- ra_evictset(as, drop);
- dest = ra_dest(as, ir, RSET_FPR);
- emit_dnm(as, ARMI_VMOV_D_RR, RID_RETLO, RID_RETHI, (dest & 15));
- emit_call(as, id == IRFPM_FLOOR ? (void *)lj_vm_floor_sf :
- id == IRFPM_CEIL ? (void *)lj_vm_ceil_sf :
- (void *)lj_vm_trunc_sf);
- /* Workaround to protect argument GPRs from being used for remat. */
- of = as->freeset;
- as->freeset &= ~RSET_RANGE(RID_R0, RID_R1+1);
- as->cost[RID_R0] = as->cost[RID_R1] = REGCOST(~0u, ASMREF_L);
- src = ra_alloc1(as, ir->op1, RSET_FPR); /* May alloc GPR to remat FPR. */
- as->freeset |= (of & RSET_RANGE(RID_R0, RID_R1+1));
- emit_dnm(as, ARMI_VMOV_RR_D, RID_R0, RID_R1, (src & 15));
-}
-#endif
-
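-/* BSWAP: use REV on ARMv6+, otherwise synthesize the byte swap with EOR/BIC/ROR. */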
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- if ((as->flags & JIT_F_ARMV6)) {
- emit_dm(as, ARMI_REV, dest, left);
- } else {
- Reg tmp2 = dest;
- if (tmp2 == left)
- tmp2 = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, dest), left));
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_LSR, 8), dest, tmp2, RID_TMP);
- emit_dm(as, ARMI_MOV|ARMF_SH(ARMSH_ROR, 8), tmp2, left);
- emit_dn(as, ARMI_BIC|ARMI_K12|256*8|255, RID_TMP, RID_TMP);
- emit_dnm(as, ARMI_EOR|ARMF_SH(ARMSH_ROR, 16), RID_TMP, left, left);
- }
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, ARMShift sh)
-{
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- /* NYI: Turn SHL+SHR or BAND+SHR into uxtb, uxth or ubfx. */
- /* NYI: Turn SHL+ASR into sxtb, sxth or sbfx. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- int32_t shift = (IR(ir->op2)->i & 31);
- emit_dm(as, ARMI_MOV|ARMF_SH(sh, shift), dest, left);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dm(as, ARMI_MOV|ARMF_RSH(sh, right), dest, left);
- }
-}
-
-static void asm_intmin_max(ASMState *as, IRIns *ir, int cc)
-{
- uint32_t kcmp = 0, kmov = 0;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- Reg right = 0;
- if (irref_isk(ir->op2)) {
- kcmp = emit_isk12(ARMI_CMP, IR(ir->op2)->i);
- if (kcmp) kmov = emit_isk12(ARMI_MOV, IR(ir->op2)->i);
- }
- if (!kmov) {
- kcmp = 0;
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- if (kmov || dest != right) {
- emit_dm(as, ARMF_CC(ARMI_MOV, cc)^kmov, dest, right);
- cc ^= 1; /* Must use opposite conditions for paired moves. */
- } else {
- cc ^= (CC_LT^CC_GT); /* Otherwise may swap CC_LT <-> CC_GT. */
- }
- if (dest != left) emit_dm(as, ARMF_CC(ARMI_MOV, cc), dest, left);
- emit_nm(as, ARMI_CMP^kcmp, left, right);
-}
-
-#if LJ_SOFTFP
-static void asm_sfpmin_max(ASMState *as, IRIns *ir, int cc)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
- RegSet drop = RSET_SCRATCH;
- Reg r;
- IRRef args[4];
- args[0] = ir->op1; args[1] = (ir+1)->op1;
- args[2] = ir->op2; args[3] = (ir+1)->op2;
- /* __aeabi_cdcmple preserves r0-r3. */
- if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
- if (ra_hasreg((ir+1)->r)) rset_clear(drop, (ir+1)->r);
- if (!rset_test(as->freeset, RID_R2) &&
- regcost_ref(as->cost[RID_R2]) == args[2]) rset_clear(drop, RID_R2);
- if (!rset_test(as->freeset, RID_R3) &&
- regcost_ref(as->cost[RID_R3]) == args[3]) rset_clear(drop, RID_R3);
- ra_evictset(as, drop);
- ra_destpair(as, ir);
- emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETHI, RID_R3);
- emit_dm(as, ARMF_CC(ARMI_MOV, cc), RID_RETLO, RID_R2);
- emit_call(as, (void *)ci->func);
- for (r = RID_R0; r <= RID_R3; r++)
- ra_leftov(as, r, args[r-RID_R0]);
-}
-#else
-static void asm_fpmin_max(ASMState *as, IRIns *ir, int cc)
-{
- Reg dest = (ra_dest(as, ir, RSET_FPR) & 15);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = ((left >> 8) & 15); left &= 15;
- if (dest != left) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc^1), dest, left);
- if (dest != right) emit_dm(as, ARMF_CC(ARMI_VMOV_D, cc), dest, right);
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ARMI_VCMP_D, left, right);
-}
-#endif
-
-static void asm_min_max(ASMState *as, IRIns *ir, int cc, int fcc)
-{
-#if LJ_SOFTFP
- UNUSED(fcc);
-#else
- if (irt_isnum(ir->t))
- asm_fpmin_max(as, ir, fcc);
- else
-#endif
- asm_intmin_max(as, ir, cc);
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-/* Map of comparisons to flags. ORDER IR. */
-static const uint8_t asm_compmap[IR_ABC+1] = {
- /* op FP swp int cc FP cc */
- /* LT */ CC_GE + (CC_HS << 4),
- /* GE x */ CC_LT + (CC_HI << 4),
- /* LE */ CC_GT + (CC_HI << 4),
- /* GT x */ CC_LE + (CC_HS << 4),
- /* ULT x */ CC_HS + (CC_LS << 4),
- /* UGE */ CC_LO + (CC_LO << 4),
- /* ULE x */ CC_HI + (CC_LO << 4),
- /* UGT */ CC_LS + (CC_LS << 4),
- /* EQ */ CC_NE + (CC_NE << 4),
- /* NE */ CC_EQ + (CC_EQ << 4),
- /* ABC */ CC_LS + (CC_LS << 4) /* Same as UGT. */
-};
-
-#if LJ_SOFTFP
-/* FP comparisons. */
-static void asm_sfpcomp(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_softfp_cmp];
- RegSet drop = RSET_SCRATCH;
- Reg r;
- IRRef args[4];
- int swp = (((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1) << 1);
- args[swp^0] = ir->op1; args[swp^1] = (ir+1)->op1;
- args[swp^2] = ir->op2; args[swp^3] = (ir+1)->op2;
- /* __aeabi_cdcmple preserves r0-r3. This helps to reduce spills. */
- for (r = RID_R0; r <= RID_R3; r++)
- if (!rset_test(as->freeset, r) &&
- regcost_ref(as->cost[r]) == args[r-RID_R0]) rset_clear(drop, r);
- ra_evictset(as, drop);
- asm_guardcc(as, (asm_compmap[ir->o] >> 4));
- emit_call(as, (void *)ci->func);
- for (r = RID_R0; r <= RID_R3; r++)
- ra_leftov(as, r, args[r-RID_R0]);
-}
-#else
-/* FP comparisons. */
-static void asm_fpcomp(ASMState *as, IRIns *ir)
-{
- Reg left, right;
- ARMIns ai;
- int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
- if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
- left = (ra_alloc1(as, ir->op1, RSET_FPR) & 15);
- right = 0;
- ai = ARMI_VCMPZ_D;
- } else {
- left = ra_alloc2(as, ir, RSET_FPR);
- if (swp) {
- right = (left & 15); left = ((left >> 8) & 15);
- } else {
- right = ((left >> 8) & 15); left &= 15;
- }
- ai = ARMI_VCMP_D;
- }
- asm_guardcc(as, (asm_compmap[ir->o] >> 4));
- emit_d(as, ARMI_VMRS, 0);
- emit_dm(as, ai, left, right);
-}
-#endif
-
-/* Integer comparisons. */
-static void asm_intcomp(ASMState *as, IRIns *ir)
-{
- ARMCC cc = (asm_compmap[ir->o] & 15);
- IRRef lref = ir->op1, rref = ir->op2;
- Reg left;
- uint32_t m;
- int cmpprev0 = 0;
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- if (asm_swapops(as, lref, rref)) {
- Reg tmp = lref; lref = rref; rref = tmp;
- if (cc >= CC_GE) cc ^= 7; /* LT <-> GT, LE <-> GE */
- else if (cc > CC_NE) cc ^= 11; /* LO <-> HI, LS <-> HS */
- }
- if (irref_isk(rref) && IR(rref)->i == 0) {
- IRIns *irl = IR(lref);
- cmpprev0 = (irl+1 == ir);
- /* Combine comp(BAND(left, right), 0) into tst left, right. */
- if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
- IRRef blref = irl->op1, brref = irl->op2;
- uint32_t m2 = 0;
- Reg bleft;
- if (asm_swapops(as, blref, brref)) {
- Reg tmp = blref; blref = brref; brref = tmp;
- }
- if (irref_isk(brref)) {
- m2 = emit_isk12(ARMI_AND, IR(brref)->i);
- if ((m2 & (ARMI_AND^ARMI_BIC)))
- goto notst; /* Not beneficial if we miss a constant operand. */
- }
- if (cc == CC_GE) cc = CC_PL;
- else if (cc == CC_LT) cc = CC_MI;
- else if (cc > CC_NE) goto notst; /* Other conds don't work with tst. */
- bleft = ra_alloc1(as, blref, RSET_GPR);
- if (!m2) m2 = asm_fuseopm(as, 0, brref, rset_exclude(RSET_GPR, bleft));
- asm_guardcc(as, cc);
- emit_n(as, ARMI_TST^m2, bleft);
- return;
- }
- }
-notst:
- left = ra_alloc1(as, lref, RSET_GPR);
- m = asm_fuseopm(as, ARMI_CMP, rref, rset_exclude(RSET_GPR, left));
- asm_guardcc(as, cc);
- emit_n(as, ARMI_CMP^m, left);
- /* Signed comparison with zero and referencing previous ins? */
- if (cmpprev0 && (cc <= CC_NE || cc >= CC_GE))
- as->flagmcp = as->mcp; /* Allow elimination of the compare. */
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_int64comp(ASMState *as, IRIns *ir)
-{
- int signedcomp = (ir->o <= IR_GT);
- ARMCC cclo, cchi;
- Reg leftlo, lefthi;
- uint32_t mlo, mhi;
- RegSet allow = RSET_GPR, oldfree;
-
- /* Always use unsigned comparison for loword. */
- cclo = asm_compmap[ir->o + (signedcomp ? 4 : 0)] & 15;
- leftlo = ra_alloc1(as, ir->op1, allow);
- oldfree = as->freeset;
- mlo = asm_fuseopm(as, ARMI_CMP, ir->op2, rset_clear(allow, leftlo));
- allow &= ~(oldfree & ~as->freeset); /* Update for allocs of asm_fuseopm. */
-
- /* Use signed or unsigned comparison for hiword. */
- cchi = asm_compmap[ir->o] & 15;
- lefthi = ra_alloc1(as, (ir+1)->op1, allow);
- mhi = asm_fuseopm(as, ARMI_CMP, (ir+1)->op2, rset_clear(allow, lefthi));
-
- /* All register allocations must be performed _before_ this point. */
- if (signedcomp) {
- MCLabel l_around = emit_label(as);
- asm_guardcc(as, cclo);
- emit_n(as, ARMI_CMP^mlo, leftlo);
- emit_branch(as, ARMF_CC(ARMI_B, CC_NE), l_around);
- if (cchi == CC_GE || cchi == CC_LE) cchi ^= 6; /* GE -> GT, LE -> LT */
- asm_guardcc(as, cchi);
- } else {
- asm_guardcc(as, cclo);
- emit_n(as, ARMF_CC(ARMI_CMP, CC_EQ)^mlo, leftlo);
- }
- emit_n(as, ARMI_CMP^mhi, lefthi);
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI || LJ_SOFTFP
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o <= IR_NE) { /* 64 bit integer or FP comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
-#if LJ_SOFTFP
- if (!irt_isint(ir->t)) {
- asm_sfpcomp(as, ir-1);
- return;
- }
-#endif
-#if LJ_HASFFI
- asm_int64comp(as, ir-1);
-#endif
- return;
-#if LJ_SOFTFP
- } else if ((ir-1)->o == IR_MIN || (ir-1)->o == IR_MAX) {
- as->curins--; /* Always skip the loword min/max. */
- if (uselo || usehi)
- asm_sfpmin_max(as, ir-1, (ir-1)->o == IR_MIN ? CC_HI : CC_LO);
- return;
-#elif LJ_HASFFI
- } else if ((ir-1)->o == IR_CONV) {
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
-#endif
- } else if ((ir-1)->o == IR_XSTORE) {
- if ((ir-1)->r != RID_SINK)
- asm_xstore(as, ir, 4);
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
-#if LJ_HASFFI
- case IR_ADD:
- as->curins--;
- asm_intop(as, ir, ARMI_ADC);
- asm_intop(as, ir-1, ARMI_ADD|ARMI_S);
- break;
- case IR_SUB:
- as->curins--;
- asm_intop(as, ir, ARMI_SBC);
- asm_intop(as, ir-1, ARMI_SUB|ARMI_S);
- break;
- case IR_NEG:
- as->curins--;
- asm_intneg(as, ir, ARMI_RSC);
- asm_intneg(as, ir-1, ARMI_RSB|ARMI_S);
- break;
-#endif
-#if LJ_SOFTFP
- case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- case IR_STRTO:
- if (!uselo)
- ra_allocref(as, ir->op1, RSET_GPR); /* Mark lo op as used. */
- break;
-#endif
- case IR_CALLN:
- case IR_CALLS:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
-#if LJ_SOFTFP
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: case IR_TOSTR:
-#endif
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0);
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- Reg pbase;
- uint32_t k;
- if (irp) {
- if (!ra_hasspill(irp->s)) {
- pbase = irp->r;
- lua_assert(ra_hasreg(pbase));
- } else if (allow) {
- pbase = rset_pickbot(allow);
- } else {
- pbase = RID_RET;
- emit_lso(as, ARMI_LDR, RID_RET, RID_SP, 0); /* Restore temp. register. */
- }
- } else {
- pbase = RID_BASE;
- }
- emit_branch(as, ARMF_CC(ARMI_BL, CC_LS), exitstub_addr(as->J, exitno));
- k = emit_isk12(0, (int32_t)(8*topslot));
- lua_assert(k);
- emit_n(as, ARMI_CMP^k, RID_TMP);
- emit_dnm(as, ARMI_SUB, RID_TMP, RID_TMP, pbase);
- emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP,
- (int32_t)offsetof(lua_State, maxstack));
- if (irp) { /* Must not spill arbitrary registers in head of side trace. */
- int32_t i = i32ptr(&J2G(as->J)->jit_L);
- if (ra_hasspill(irp->s))
- emit_lso(as, ARMI_LDR, pbase, RID_SP, sps_scale(irp->s));
- emit_lso(as, ARMI_LDR, RID_TMP, RID_TMP, (i & 4095));
- if (ra_hasspill(irp->s) && !allow)
- emit_lso(as, ARMI_STR, RID_RET, RID_SP, 0); /* Save temp. register. */
- emit_loadi(as, RID_TMP, (i & ~4095));
- } else {
- emit_getgl(as, RID_TMP, jit_L);
- }
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
-#if LJ_SOFTFP
- RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
- Reg tmp;
- lua_assert(irref_isk(ref)); /* LJ_SOFTFP: must be a number constant. */
- tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.lo,
- rset_exclude(RSET_GPREVEN, RID_BASE));
- emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs);
- if (rset_test(as->freeset, tmp+1)) odd = RID2RSET(tmp+1);
- tmp = ra_allock(as, (int32_t)ir_knum(ir)->u32.hi, odd);
- emit_lso(as, ARMI_STR, tmp, RID_BASE, ofs+4);
-#else
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_vlso(as, ARMI_VSTR_D, src, RID_BASE, ofs);
-#endif
- } else {
- RegSet odd = rset_exclude(RSET_GPRODD, RID_BASE);
- Reg type;
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPREVEN, RID_BASE));
- emit_lso(as, ARMI_STR, src, RID_BASE, ofs);
- if (rset_test(as->freeset, src+1)) odd = RID2RSET(src+1);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), odd);
-#if LJ_SOFTFP
- } else if ((sn & SNAP_SOFTFPNUM)) {
- type = ra_alloc1(as, ref+1, rset_exclude(RSET_GPRODD, RID_BASE));
-#endif
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), odd);
- }
- emit_lso(as, ARMI_STR, type, RID_BASE, ofs+4);
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp1, tmp2;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_n(as, ARMI_CMP|ARMI_K12|0, RID_RET);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- tmp1 = ra_releasetmp(as, ASMREF_TMP1);
- tmp2 = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp2, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_branch(as, ARMF_CC(ARMI_B, CC_LS), l_end);
- emit_nm(as, ARMI_CMP, RID_TMP, tmp2);
- emit_lso(as, ARMI_LDR, tmp2, tmp1,
- (int32_t)offsetof(global_State, gc.threshold));
- emit_lso(as, ARMI_LDR, RID_TMP, tmp1,
- (int32_t)offsetof(global_State, gc.total));
- ra_allockreg(as, i32ptr(J2G(as->J)), tmp1);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the bcc and patched the final bl. */
- p[-2] |= ((uint32_t)(target-p) & 0x00ffffffu);
- } else {
- p[-1] = ARMI_B | ((uint32_t)((target-p)-1) & 0x00ffffffu);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Reload L register from g->jit_L. */
-static void asm_head_lreg(ASMState *as)
-{
- IRIns *ir = IR(ASMREF_L);
- if (ra_used(ir)) {
- Reg r = ra_dest(as, ir, RSET_GPR);
- emit_getgl(as, r, jit_L);
- ra_evictk(as);
- }
-}
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir;
- asm_head_lreg(as);
- ir = IR(REF_BASE);
- if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
- ra_spill(as, ir);
- ra_destreg(as, ir, RID_BASE);
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir;
- asm_head_lreg(as);
- ir = IR(REF_BASE);
- if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
- ra_spill(as, ir);
- if (ra_hasspill(irp->s)) {
- rset_clear(allow, ra_dest(as, ir, allow));
- } else {
- Reg r = irp->r;
- lua_assert(ra_hasreg(r));
- rset_clear(allow, r);
- if (r != ir->r && !rset_test(as->freeset, r))
- ra_restore(as, regcost_ref(as->cost[r]));
- ra_destreg(as, ir, r);
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *p = as->mctop;
- MCode *target;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- as->mctop = --p;
- } else {
- /* Patch stack adjustment. */
- uint32_t k = emit_isk12(ARMI_ADD, spadj);
- lua_assert(k);
- p[-2] = (ARMI_ADD^k) | ARMF_D(RID_SP) | ARMF_N(RID_SP);
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- p[-1] = ARMI_B|(((target-p)-1)&0x00ffffffu);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop - 1; /* Leave room for exit branch. */
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- as->mcp = p-1; /* Leave room for stack pointer adjustment. */
- as->invmcp = NULL;
- }
- *p = 0; /* Prevent load/store merging. */
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE:
- if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
- as->curins--;
- asm_href(as, ir-1, (IROp)ir->o);
- break;
- }
- /* fallthrough */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
-#if !LJ_SOFTFP
- if (irt_isnum(ir->t)) { asm_fpcomp(as, ir); break; }
-#endif
- asm_intcomp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitop(as, ir, ARMI_MVN); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitop(as, ir, ARMI_AND); break;
- case IR_BOR: asm_bitop(as, ir, ARMI_ORR); break;
- case IR_BXOR: asm_bitop(as, ir, ARMI_EOR); break;
-
- case IR_BSHL: asm_bitshift(as, ir, ARMSH_LSL); break;
- case IR_BSHR: asm_bitshift(as, ir, ARMSH_LSR); break;
- case IR_BSAR: asm_bitshift(as, ir, ARMSH_ASR); break;
- case IR_BROR: asm_bitshift(as, ir, ARMSH_ROR); break;
- case IR_BROL: lua_assert(0); break;
-
- /* Arithmetic ops. */
- case IR_ADD: case IR_ADDOV: asm_add(as, ir); break;
- case IR_SUB: case IR_SUBOV: asm_sub(as, ir); break;
- case IR_MUL: case IR_MULOV: asm_mul(as, ir); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
-#if LJ_SOFTFP
- case IR_DIV: case IR_POW: case IR_ABS:
- case IR_ATAN2: case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
- lua_assert(0); /* Unused for LJ_SOFTFP. */
- break;
-#else
- case IR_DIV: asm_fparith(as, ir, ARMI_VDIV_D); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_ABS: asm_fpunary(as, ir, ARMI_VABS_D); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 <= IRFPM_TRUNC)
- asm_callround(as, ir, ir->op2);
- else if (ir->op2 == IRFPM_SQRT)
- asm_fpunary(as, ir, ARMI_VSQRT_D);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
- case IR_TOBIT: asm_tobit(as, ir); break;
-#endif
-
- case IR_MIN: asm_min_max(as, ir, CC_GT, CC_HI); break;
- case IR_MAX: asm_min_max(as, ir, CC_LT, CC_LO); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir, 0); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR, fprodd = 0;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++) {
- if (!LJ_SOFTFP && args[i] && irt_isfp(IR(args[i])->t)) {
- if (!LJ_ABI_SOFTFP && !(ci->flags & CCI_VARARG)) {
- if (irt_isnum(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--;
- else fprodd = 0, nslots = (nslots + 3) & ~1;
- } else {
- if (fprodd) fprodd--;
- else if (nfpr > 0) fprodd = 1, nfpr--;
- else nslots++;
- }
- } else if (irt_isnum(IR(args[i])->t)) {
- ngpr &= ~1;
- if (ngpr > 0) ngpr -= 2; else nslots += 2;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- /* May need extra exit for asm_stack_check on side traces. */
- asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *cstart = NULL, *cend = p;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MCode *px = exitstub_addr(J, exitno) - 2;
- for (; p < pe; p++) {
- /* Look for bl_cc exitstub, replace with b_cc target. */
- uint32_t ins = *p;
- if ((ins & 0x0f000000u) == 0x0b000000u && ins < 0xf0000000u &&
- ((ins ^ (px-p)) & 0x00ffffffu) == 0) {
- *p = (ins & 0xfe000000u) | (((target-p)-2) & 0x00ffffffu);
- cend = p+1;
- if (!cstart) cstart = p;
- }
- }
- lua_assert(cstart != NULL);
- lj_mcode_sync(cstart, cend);
- lj_mcode_patch(J, mcarea, 1);
-}
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_mips.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_mips.h
deleted file mode 100644
index 7631190..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_mips.h
+++ /dev/null
@@ -1,1977 +0,0 @@
-/*
-** MIPS IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate a register or RID_ZERO. */
-static Reg ra_alloc1z(ASMState *as, IRRef ref, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!(allow & RSET_FPR) && irref_isk(ref) && IR(ref)->i == 0)
- return RID_ZERO;
- r = ra_allocref(as, ref, allow);
- } else {
- ra_noweak(as, r);
- }
- return r;
-}
-
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_alloc1z(as, ir->op2, allow);
- left = ra_alloc1z(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_alloc1z(as, ir->op1, allow);
- right = ra_alloc1z(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Need some spare long-range jump slots, for out-of-range branches. */
-#define MIPS_SPAREJUMP 4
-
-/* Setup spare long-range jump slots per mcarea. */
-static void asm_sparejump_setup(ASMState *as)
-{
- MCode *mxp = as->mcbot;
- if (((uintptr_t)mxp & (LJ_PAGESIZE-1)) == sizeof(MCLink)) {
- lua_assert(MIPSI_NOP == 0);
- memset(mxp, 0, MIPS_SPAREJUMP*2*sizeof(MCode));
- mxp += MIPS_SPAREJUMP*2;
- lua_assert(mxp < as->mctop);
- lj_mcode_sync(as->mcbot, mxp);
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- }
-}
-
-/* Setup exit stub after the end of each trace. */
-static void asm_exitstub_setup(ASMState *as)
-{
- MCode *mxp = as->mctop;
- /* sw TMP, 0(sp); j ->vm_exit_handler; li TMP, traceno */
- *--mxp = MIPSI_LI|MIPSF_T(RID_TMP)|as->T->traceno;
- *--mxp = MIPSI_J|((((uintptr_t)(void *)lj_vm_exit_handler)>>2)&0x03ffffffu);
- lua_assert(((uintptr_t)mxp ^ (uintptr_t)(void *)lj_vm_exit_handler)>>28 == 0);
- *--mxp = MIPSI_SW|MIPSF_T(RID_TMP)|MIPSF_S(RID_SP)|0;
- as->mctop = mxp;
-}
-
-/* Keep this in-sync with exitstub_trace_addr(). */
-#define asm_exitstub_addr(as) ((as)->mctop)
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guard(ASMState *as, MIPSIns mi, Reg rs, Reg rt)
-{
- MCode *target = asm_exitstub_addr(as);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->invmcp = NULL;
- as->loopinv = 1;
- as->mcp = p+1;
- mi = mi ^ ((mi>>28) == 1 ? 0x04000000u : 0x00010000u); /* Invert cond. */
- target = p; /* Patch target later in asm_loop_fixup. */
- }
- emit_ti(as, MIPSI_LI, RID_TMP, as->snapno);
- emit_branch(as, mi, rs, rt, target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- int32_t jgl = (intptr_t)J2G(as->J);
- if ((uint32_t)(ofs-jgl) < 65536) {
- *ofsp = ofs-jgl-32768;
- return RID_JGL;
- } else {
- *ofsp = (int16_t)ofs;
- return ra_allock(as, ofs-(int16_t)ofs, allow);
- }
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, MIPSIns mi, Reg rt, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
- ref = ir->op1;
- ofs = ofs2;
- }
- } else if (ir->o == IR_STRREF) {
- int32_t ofs2 = 65536;
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs2 = ofs + IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs2 = ofs + IR(ir->op1)->i;
- ref = ir->op2;
- }
- if (!checki16(ofs2)) {
- /* NYI: Fuse ADD with constant. */
- Reg right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- emit_hsi(as, mi, rt, RID_TMP, ofs);
- emit_dst(as, MIPSI_ADDU, RID_TMP, left, right);
- return;
- }
- ofs = ofs2;
- }
- }
- base = ra_alloc1(as, ref, allow);
- emit_hsi(as, mi, rt, base, ofs);
-}
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 16;
- Reg gpr, fpr = REGARG_FIRSTFPR;
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
- for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
- as->cost[gpr] = REGCOST(~0u, ASMREF_L);
- gpr = REGARG_FIRSTGPR;
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- if (ref) {
- IRIns *ir = IR(ref);
- if (irt_isfp(ir->t) && fpr <= REGARG_LASTFPR &&
- !(ci->flags & CCI_VARARG)) {
- lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
- ra_leftov(as, fpr, ref);
- fpr += 2;
- gpr += irt_isnum(ir->t) ? 2 : 1;
- } else {
- fpr = REGARG_LASTFPR+1;
- if (irt_isnum(ir->t)) gpr = (gpr+1) & ~1;
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
- if (irt_isfp(ir->t)) {
- RegSet of = as->freeset;
- Reg r;
- /* Workaround to protect argument GPRs from being used for remat. */
- as->freeset &= ~RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1);
- r = ra_alloc1(as, ref, RSET_FPR);
- as->freeset |= (of & RSET_RANGE(REGARG_FIRSTGPR, REGARG_LASTGPR+1));
- if (irt_isnum(ir->t)) {
- emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?0:1), r+1);
- emit_tg(as, MIPSI_MFC1, gpr+(LJ_BE?1:0), r);
- lua_assert(rset_test(as->freeset, gpr+1)); /* Already evicted. */
- gpr += 2;
- } else if (irt_isfloat(ir->t)) {
- emit_tg(as, MIPSI_MFC1, gpr, r);
- gpr++;
- }
- } else {
- ra_leftov(as, gpr, ref);
- gpr++;
- }
- } else {
- Reg r = ra_alloc1z(as, ref, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, r, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- }
- } else {
- fpr = REGARG_LASTFPR+1;
- if (gpr <= REGARG_LASTGPR)
- gpr++;
- else
- ofs += 4;
- }
- checkmclim(as);
- }
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (irt_isfp(ir->t)) {
- if ((ci->flags & CCI_CASTU64)) {
- int32_t ofs = sps_scale(ir->s);
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_tg(as, MIPSI_MTC1, RID_RETHI, dest+1);
- emit_tg(as, MIPSI_MTC1, RID_RETLO, dest);
- }
- if (ofs) {
- emit_tsi(as, MIPSI_SW, RID_RETLO, RID_SP, ofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_SW, RID_RETHI, RID_SP, ofs+(LJ_BE?0:4));
- }
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
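-/* CALLXS: indirect call; a non-constant target is moved to RID_CFUNCADDR and called via JALR. */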
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need specific register for indirect calls. */
- Reg r = ra_alloc1(as, func, RID2RSET(RID_CFUNCADDR));
- MCode *p = as->mcp;
- if (r == RID_CFUNCADDR)
- *--p = MIPSI_NOP;
- else
- *--p = MIPSI_MOVE | MIPSF_D(RID_CFUNCADDR) | MIPSF_S(r);
- *--p = MIPSI_JALR | MIPSF_S(r);
- as->mcp = p;
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callround(ASMState *as, IRIns *ir, IRCallID id)
-{
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RID2RSET(RID_R1)|RID2RSET(RID_R12)|RID2RSET(RID_FPRET)|
- RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(REGARG_FIRSTFPR);
- if (ra_hasreg(ir->r)) rset_clear(drop, ir->r);
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_FPRET);
- emit_call(as, (void *)lj_ir_callinfo[id].func);
- ra_leftov(as, REGARG_FIRSTFPR, ir->op1);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guard(as, MIPSI_BNE, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_tsi(as, MIPSI_LW, RID_TMP, base, -8);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
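-/* Convert a number to an integer and guard that the conversion is exact. */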
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guard(as, MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_EQ_D, 0, tmp, left);
- emit_fg(as, MIPSI_CVT_D_W, tmp, tmp);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, MIPSI_CVT_W_D, tmp, left);
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fgh(as, MIPSI_ADD_D, tmp, left, right);
-}
-
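-/* CONV: FP<->FP, integer<->FP and integer-extension conversions; 64 bit is handled by SPLIT. */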
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(irt_isint64(ir->t) ||
- (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- emit_fg(as, st == IRT_NUM ? MIPSI_CVT_S_D : MIPSI_CVT_D_S,
- dest, ra_alloc1(as, lref, RSET_FPR));
- } else if (st == IRT_U32) { /* U32 to FP conversion. */
-      /* y = (x ^ 0x80000000) + 2147483648.0 */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- if (irt_isfloat(ir->t))
- emit_fg(as, MIPSI_CVT_S_D, dest, dest);
- /* Must perform arithmetic with doubles to keep the precision. */
- emit_fgh(as, MIPSI_ADD_D, dest, dest, tmp);
- emit_fg(as, MIPSI_CVT_D_W, dest, dest);
- emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
- RSET_GPR);
- emit_tg(as, MIPSI_MTC1, RID_TMP, dest);
- emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, left);
- emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
- } else { /* Integer to FP conversion. */
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- emit_fg(as, irt_isfloat(ir->t) ? MIPSI_CVT_S_W : MIPSI_CVT_D_W,
- dest, dest);
- emit_tg(as, MIPSI_MTC1, left, dest);
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- if (irt_isu32(ir->t)) {
- /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
- emit_dst(as, MIPSI_XOR, dest, dest, RID_TMP);
- emit_ti(as, MIPSI_LUI, RID_TMP, 0x8000);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, st == IRT_FLOAT ? MIPSI_FLOOR_W_S : MIPSI_FLOOR_W_D,
- tmp, tmp);
- emit_fgh(as, st == IRT_FLOAT ? MIPSI_SUB_S : MIPSI_SUB_D,
- tmp, left, tmp);
- if (st == IRT_FLOAT)
- emit_lsptr(as, MIPSI_LWC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(4f000000,4f000000)),
- RSET_GPR);
- else
- emit_lsptr(as, MIPSI_LDC1, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(41e00000,00000000)),
- RSET_GPR);
- } else {
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, st == IRT_FLOAT ? MIPSI_TRUNC_W_S : MIPSI_TRUNC_W_D,
- tmp, left);
- }
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((ir->op2 & IRCONV_SEXT)) {
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dst(as, st == IRT_I8 ? MIPSI_SEB : MIPSI_SEH, dest, 0, left);
- } else {
- uint32_t shift = st == IRT_I8 ? 24 : 16;
- emit_dta(as, MIPSI_SRA, dest, dest, shift);
- emit_dta(as, MIPSI_SLL, dest, left, shift);
- }
- } else {
- emit_tsi(as, MIPSI_ANDI, dest, left,
- (int32_t)(st == IRT_U8 ? 0xff : 0xffff));
- }
- } else { /* 32/64 bit integer conversions. */
- /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
-
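
Editor's note: the two commented formulas in asm_conv above work around MIPS having only signed word<->double converts. A rough standalone C model of both directions, with the constants taken from the code (the function names are made up for illustration):

#include <stdint.h>
#include <math.h>

static double u32_to_num(uint32_t x)   /* y = (x ^ 0x80000000) + 2147483648.0 */
{
  return (double)(int32_t)(x ^ 0x80000000u) + 2147483648.0;
}

static uint32_t num_to_u32(double x)   /* y = (int)floor(x - 2147483648.0) ^ 0x80000000 */
{
  return (uint32_t)(int32_t)floor(x - 2147483648.0) ^ 0x80000000u;
}

Flipping the sign bit re-biases the value into signed range, and the 2^31 addition is done in double precision so all 32 bits stay exact.
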
-#if LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- const CCallInfo *ci;
- IRRef args[2];
- args[LJ_BE?0:1] = ir->op1;
- args[LJ_BE?1:0] = (ir-1)->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = &lj_ir_callinfo[id];
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- RegSet drop = RSET_SCRATCH;
- if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
- ra_evictset(as, drop);
- asm_guard(as, MIPSI_BEQ, RID_RET, RID_ZERO); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1),
- RID_SP, sps_scale(ir->s));
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- else /* Otherwise force a spill and use the spill slot. */
- emit_tsi(as, MIPSI_ADDIU, dest, RID_SP, ra_spill(as, ir));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_tsi(as, MIPSI_ADDIU, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_setgl(as, src, tmptv.gcr);
- }
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_setgl(as, type, tmptv.it);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_tsi(as, MIPSI_ADDIU, dest, base, ofs);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_dst(as, MIPSI_ADDU, dest, RID_TMP, base);
- emit_dta(as, MIPSI_SLL, RID_TMP, idx, 3);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, type = RID_NONE, tmpnum = RID_NONE, tmp1 = RID_TMP, tmp2;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- rset_clear(allow, tab);
- if (irt_isnum(kt)) {
- key = ra_alloc1(as, refkey, RSET_FPR);
- tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
- } else if (!irt_ispri(kt)) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- type = ra_allock(as, irt_toitype(irkey->t), allow);
- rset_clear(allow, type);
- }
- tmp2 = ra_scratch(as, allow);
- rset_clear(allow, tmp2);
-
- /* Key not found in chain: load niltv. */
- l_end = emit_label(as);
- if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
- else
- *--as->mcp = MIPSI_NOP;
- /* Follow hash chain until the end. */
- emit_move(as, dest, tmp1);
- l_loop = --as->mcp;
- emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (irt_isnum(kt)) {
- emit_branch(as, MIPSI_BC1T, 0, 0, l_end);
- emit_fgh(as, MIPSI_C_EQ_D, 0, tmpnum, key);
- emit_tg(as, MIPSI_MFC1, tmp1, key+1);
- emit_branch(as, MIPSI_BEQ, tmp1, RID_ZERO, l_next);
- emit_tsi(as, MIPSI_SLTIU, tmp1, tmp1, (int32_t)LJ_TISNUM);
- emit_hsi(as, MIPSI_LDC1, tmpnum, dest, (int32_t)offsetof(Node, key.n));
- } else {
- if (irt_ispri(kt)) {
- emit_branch(as, MIPSI_BEQ, tmp1, type, l_end);
- } else {
- emit_branch(as, MIPSI_BEQ, tmp2, key, l_end);
- emit_tsi(as, MIPSI_LW, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
- emit_branch(as, MIPSI_BNE, tmp1, type, l_next);
- }
- }
- emit_tsi(as, MIPSI_LW, tmp1, dest, (int32_t)offsetof(Node, key.it));
- *l_loop = MIPSI_BNE | MIPSF_S(tmp1) | ((as->mcp-l_loop-1) & 0xffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- Reg tmphash = tmp1;
- if (irref_isk(refkey))
- tmphash = ra_allock(as, khash, allow);
- emit_dst(as, MIPSI_ADDU, dest, dest, tmp1);
- lua_assert(sizeof(Node) == 24);
- emit_dst(as, MIPSI_SUBU, tmp1, tmp2, tmp1);
- emit_dta(as, MIPSI_SLL, tmp1, tmp1, 3);
- emit_dta(as, MIPSI_SLL, tmp2, tmp1, 5);
- emit_dst(as, MIPSI_AND, tmp1, tmp2, tmphash);
- emit_tsi(as, MIPSI_LW, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_tsi(as, MIPSI_LW, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
- if (irref_isk(refkey)) {
- /* Nothing to do. */
- } else if (irt_isstr(kt)) {
- emit_tsi(as, MIPSI_LW, tmp1, key, (int32_t)offsetof(GCstr, hash));
- } else { /* Must match with hash*() in lj_tab.c. */
- emit_dst(as, MIPSI_SUBU, tmp1, tmp1, tmp2);
- emit_rotr(as, tmp2, tmp2, dest, (-HASH_ROT3)&31);
- emit_dst(as, MIPSI_XOR, tmp1, tmp1, tmp2);
- emit_rotr(as, tmp1, tmp1, dest, (-HASH_ROT2-HASH_ROT1)&31);
- emit_dst(as, MIPSI_SUBU, tmp2, tmp2, dest);
- if (irt_isnum(kt)) {
- emit_dst(as, MIPSI_XOR, tmp2, tmp2, tmp1);
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, tmp1, (-HASH_ROT1)&31);
- } else {
- emit_dst(as, MIPSI_OR, dest, dest, tmp1);
- emit_dta(as, MIPSI_SLL, tmp1, tmp1, HASH_ROT1);
- emit_dta(as, MIPSI_SRL, dest, tmp1, (-HASH_ROT1)&31);
- }
- emit_dst(as, MIPSI_ADDU, tmp1, tmp1, tmp1);
- emit_tg(as, MIPSI_MFC1, tmp2, key);
- emit_tg(as, MIPSI_MFC1, tmp1, key+1);
- } else {
- emit_dst(as, MIPSI_XOR, tmp2, key, tmp1);
- emit_rotr(as, dest, tmp1, tmp2, (-HASH_ROT1)&31);
- emit_dst(as, MIPSI_ADDU, tmp1, key, ra_allock(as, HASH_BIAS, allow));
- }
- }
- }
-}
-
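
Editor's note: the main-position computation at the bottom of asm_href above folds the multiply by sizeof(Node) == 24 into two shifts and a subtract. A simplified sketch of the addressing it performs (nodebase and hmask stand in for t->node and t->hmask; Node is left incomplete on purpose):

#include <stdint.h>

typedef struct Node Node;  /* sizeof(Node) == 24, as asserted above */

static Node *mainpos_model(char *nodebase, uint32_t hmask, uint32_t hash)
{
  uint32_t idx = hash & hmask;
  return (Node *)(nodebase + ((idx << 5) - (idx << 3)));  /* idx*32 - idx*8 == idx*24 */
}
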
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- int32_t lo, hi;
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 32736) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_tsi(as, MIPSI_ADDIU, dest, node, ofs);
- }
- if (!irt_ispri(irkey->t)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- }
- if (irt_isnum(irkey->t)) {
- lo = (int32_t)ir_knum(irkey)->u32.lo;
- hi = (int32_t)ir_knum(irkey)->u32.hi;
- } else {
- lo = irkey->i;
- hi = irt_toitype(irkey->t);
- if (!ra_hasreg(key))
- goto nolo;
- }
- asm_guard(as, MIPSI_BNE, key, lo ? ra_allock(as, lo, allow) : RID_ZERO);
-nolo:
- asm_guard(as, MIPSI_BNE, type, hi ? ra_allock(as, hi, allow) : RID_ZERO);
- if (ra_hasreg(key)) emit_tsi(as, MIPSI_LW, key, idx, kofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_LW, type, idx, kofs+(LJ_BE?0:4));
- if (ofs > 32736)
- emit_tsi(as, MIPSI_ADDU, dest, node, ra_allock(as, ofs, allow));
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
- }
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, MIPSI_LW, dest, v, RSET_GPR);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, MIPSI_ADDIU, dest, uv, (int32_t)offsetof(GCupval, tv));
- emit_tsi(as, MIPSI_LBU, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_tsi(as, MIPSI_LW, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_tsi(as, MIPSI_LW, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- int32_t ofs = (int32_t)sizeof(GCstr);
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- checki16(ofs + IR(irr->op2)->i)) {
- ofs += IR(irr->op2)->i;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tsi(as, MIPSI_ADDIU, dest, dest, ofs);
- emit_dst(as, MIPSI_ADDU, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- ofs += IR(refk)->i;
- if (checki16(ofs))
- emit_tsi(as, MIPSI_ADDIU, dest, r, ofs);
- else
- emit_dst(as, MIPSI_ADDU, dest, r,
- ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static MIPSIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return MIPSI_LB;
- case IRT_U8: return MIPSI_LBU;
- case IRT_I16: return MIPSI_LH;
- case IRT_U16: return MIPSI_LHU;
- case IRT_NUM: return MIPSI_LDC1;
- case IRT_FLOAT: return MIPSI_LWC1;
- default: return MIPSI_LW;
- }
-}
-
-static MIPSIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return MIPSI_SB;
- case IRT_I16: case IRT_U16: return MIPSI_SH;
- case IRT_NUM: return MIPSI_SDC1;
- case IRT_FLOAT: return MIPSI_SWC1;
- default: return MIPSI_SW;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- MIPSIns mi = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_tsi(as, MIPSI_ADDIU, dest, idx, ofs);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- lua_assert(!irt_isfp(ir->t));
- emit_tsi(as, mi, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1z(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- MIPSIns mi = asm_fxstoreins(ir);
- lua_assert(!irt_isfp(ir->t));
- emit_tsi(as, mi, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1z(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_TMP, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = 0;
- if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- rset_clear(allow, idx);
- if (irt_isnum(t)) {
- asm_guard(as, MIPSI_BEQ, type, RID_ZERO);
- emit_tsi(as, MIPSI_SLTIU, type, type, (int32_t)LJ_TISNUM);
- if (ra_hasreg(dest))
- emit_hsi(as, MIPSI_LDC1, dest, idx, ofs);
- } else {
- asm_guard(as, MIPSI_BNE, type, ra_allock(as, irt_toitype(t), allow));
- if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, idx, ofs+(LJ_BE?4:0));
- }
- emit_tsi(as, MIPSI_LW, type, idx, ofs+(LJ_BE?0:4));
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = 0;
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- } else {
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- rset_clear(allow, type);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(ir->t)) {
- emit_hsi(as, MIPSI_SDC1, src, idx, ofs);
- } else {
- if (ra_hasreg(src))
- emit_tsi(as, MIPSI_SW, src, idx, ofs+(LJ_BE?4:0));
- emit_tsi(as, MIPSI_SW, type, idx, ofs+(LJ_BE?0:4));
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(!irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
- } else if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (irt_isint(t)) {
- Reg tmp = ra_scratch(as, RSET_FPR);
- emit_tg(as, MIPSI_MFC1, dest, tmp);
- emit_fg(as, MIPSI_CVT_W_D, tmp, tmp);
- dest = tmp;
- t.irt = IRT_NUM; /* Check for original type. */
- } else {
- Reg tmp = ra_scratch(as, RSET_GPR);
- emit_fg(as, MIPSI_CVT_D_W, dest, dest);
- emit_tg(as, MIPSI_MTC1, tmp, dest);
- dest = tmp;
- t.irt = IRT_INT; /* Check for original type. */
- }
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
-dotypecheck:
- if (irt_isnum(t)) {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- asm_guard(as, MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)LJ_TISNUM);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_hsi(as, MIPSI_LDC1, dest, base, ofs);
- } else {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- Reg ktype = ra_allock(as, irt_toitype(t), allow);
- asm_guard(as, MIPSI_BNE, RID_TMP, ktype);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_tsi(as, MIPSI_LW, dest, base, ofs ^ (LJ_BE?4:0));
- }
- if (ra_hasreg(type)) emit_tsi(as, MIPSI_LW, type, base, ofs ^ (LJ_BE?0:4));
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4;
- lua_assert((ir+1)->o == IR_HIOP);
- if (LJ_LE) ir++;
- }
- for (;;) {
- Reg r = ra_alloc1z(as, ir->op2, allow);
- emit_tsi(as, MIPSI_SW, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; if (LJ_BE) ir++; else ir--;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- emit_tsi(as, MIPSI_SB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
- emit_tsi(as, MIPSI_SH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
- emit_ti(as, MIPSI_LI, RID_RET+1, ~LJ_TCDATA);
- emit_ti(as, MIPSI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg link = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_tsi(as, MIPSI_SW, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_tsi(as, MIPSI_SB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_setgl(as, tab, gc.grayagain);
- emit_getgl(as, link, gc.grayagain);
- emit_dst(as, MIPSI_XOR, mark, mark, RID_TMP); /* Clear black bit. */
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, mark, LJ_GC_BLACK);
- emit_tsi(as, MIPSI_LBU, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, tmp, tmp, LJ_GC_BLACK);
- emit_branch(as, MIPSI_BEQ, RID_TMP, RID_ZERO, l_end);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, RID_TMP, LJ_GC_WHITES);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_tsi(as, MIPSI_LBU, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_tsi(as, MIPSI_LBU, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-static void asm_fparith(ASMState *as, IRIns *ir, MIPSIns mi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- emit_fgh(as, mi, dest, left, right);
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, MIPSIns mi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_fg(as, mi, dest, left);
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
-
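
Editor's note: asm_fpjoin_pow above looks for the exp2(log2(x)*y) chain and collapses it into a single call to pow(). The identity it relies on, stated as plain C (valid for x > 0):

#include <math.h>

static double pow_via_exp2(double x, double y)
{
  return exp2(log2(x) * y);  /* == pow(x, y) for x > 0 */
}
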
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_ADD_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, MIPSI_ADDU, dest, left, right);
- }
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_SUB_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_SUBU, dest, left, right);
- }
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, MIPSI_MUL_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_MUL, dest, left, right);
- }
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, MIPSI_NEG_D);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
- }
-}
-
-static void asm_arithov(ASMState *as, IRIns *ir)
-{
- Reg right, left, tmp, dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int k = IR(ir->op2)->i;
- if (ir->o == IR_SUBOV) k = -k;
- if (checki16(k)) { /* (dest < left) == (k >= 0 ? 1 : 0) */
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- asm_guard(as, k >= 0 ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_dst(as, MIPSI_SLT, RID_TMP, dest, dest == left ? RID_TMP : left);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- if (dest == left) emit_move(as, RID_TMP, left);
- return;
- }
- }
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR, left),
- right), dest));
- asm_guard(as, MIPSI_BLTZ, RID_TMP, 0);
- emit_dst(as, MIPSI_AND, RID_TMP, RID_TMP, tmp);
- if (ir->o == IR_ADDOV) { /* ((dest^left) & (dest^right)) < 0 */
- emit_dst(as, MIPSI_XOR, RID_TMP, dest, dest == right ? RID_TMP : right);
- } else { /* ((dest^left) & (dest^~right)) < 0 */
- emit_dst(as, MIPSI_XOR, RID_TMP, RID_TMP, dest);
- emit_dst(as, MIPSI_NOR, RID_TMP, dest == right ? RID_TMP : right, RID_ZERO);
- }
- emit_dst(as, MIPSI_XOR, tmp, dest, dest == left ? RID_TMP : left);
- emit_dst(as, ir->o == IR_ADDOV ? MIPSI_ADDU : MIPSI_SUBU, dest, left, right);
- if (dest == left || dest == right)
- emit_move(as, RID_TMP, dest == left ? left : right);
-}
-
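
Editor's note: the sign-bit tests in asm_arithov above are the standard branch-free overflow checks for 32-bit signed add and subtract. The same checks as standalone C, with the wrap-around done in unsigned arithmetic so the model stays well-defined:

#include <stdint.h>

static int addov_model(int32_t a, int32_t b)  /* ((res^a) & (res^b)) < 0 */
{
  uint32_t res = (uint32_t)a + (uint32_t)b;
  return (int32_t)((res ^ (uint32_t)a) & (res ^ (uint32_t)b)) < 0;
}

static int subov_model(int32_t a, int32_t b)  /* ((res^a) & (res^~b)) < 0 */
{
  uint32_t res = (uint32_t)a - (uint32_t)b;
  return (int32_t)((res ^ (uint32_t)a) & (res ^ ~(uint32_t)b)) < 0;
}
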
-static void asm_mulov(ASMState *as, IRIns *ir)
-{
-#if LJ_DUALNUM
-#error "NYI: MULOV"
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused in single-number mode. */
-#endif
-}
-
-#if LJ_HASFFI
-static void asm_add64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0) {
- emit_dst(as, MIPSI_ADDU, dest, left, RID_TMP);
- goto loarith;
- } else if (checki16(k)) {
- emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- goto loarith;
- }
- }
- emit_dst(as, MIPSI_ADDU, dest, dest, RID_TMP);
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, MIPSI_ADDU, dest, left, right);
-loarith:
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0) {
- if (dest != left)
- emit_move(as, dest, left);
- return;
- } else if (checki16(k)) {
- if (dest == left) {
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, left));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, dest, left);
- emit_tsi(as, MIPSI_ADDIU, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- if (dest == left && dest == right) {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, dest, dest == left ? right : left);
- emit_dst(as, MIPSI_ADDU, dest, left, right);
-}
-
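
Editor's note: asm_add64 above (the hiword half of a split 64-bit add; the loword half is the preceding IR instruction) derives the carry with SLTU: the low-word sum carried iff it is smaller than one of its addends. A compact C model of the combined operation (the hi/lo pointers merely represent the two 32-bit IR halves):

#include <stdint.h>

static void add64_model(uint32_t *hi, uint32_t *lo, uint32_t khi, uint32_t klo)
{
  uint32_t sum = *lo + klo;
  *hi = *hi + khi + (sum < *lo);  /* SLTU-style carry out of the low word */
  *lo = sum;
}
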
-static void asm_sub64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SUBU, dest, left, right);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_move(as, dest, tmp);
- dest = tmp;
- }
- emit_dst(as, MIPSI_SLTU, RID_TMP, left, dest);
- emit_dst(as, MIPSI_SUBU, dest, left, right);
-}
-
-static void asm_neg64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_dst(as, MIPSI_SUBU, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_dst(as, MIPSI_SLTU, RID_TMP, RID_ZERO, dest);
- emit_dst(as, MIPSI_SUBU, dest, RID_ZERO, left);
-}
-#endif
-
-static void asm_bitnot(ASMState *as, IRIns *ir)
-{
- Reg left, right, dest = ra_dest(as, ir, RSET_GPR);
- IRIns *irl = IR(ir->op1);
- if (mayfuse(as, ir->op1) && irl->o == IR_BOR) {
- left = ra_alloc2(as, irl, RSET_GPR);
- right = (left >> 8); left &= 255;
- } else {
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- right = RID_ZERO;
- }
- emit_dst(as, MIPSI_NOR, dest, left, right);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, RID_TMP, 16);
- emit_dst(as, MIPSI_WSBH, RID_TMP, 0, left);
- } else {
- Reg tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), dest));
- emit_dst(as, MIPSI_OR, dest, dest, tmp);
- emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
- emit_tsi(as, MIPSI_ANDI, dest, dest, 0xff00);
- emit_dta(as, MIPSI_SLL, RID_TMP, RID_TMP, 8);
- emit_dta(as, MIPSI_SRL, dest, left, 8);
- emit_tsi(as, MIPSI_ANDI, RID_TMP, left, 0xff00);
- emit_dst(as, MIPSI_OR, tmp, tmp, RID_TMP);
- emit_dta(as, MIPSI_SRL, tmp, left, 24);
- emit_dta(as, MIPSI_SLL, RID_TMP, left, 24);
- }
-}
-
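
Editor's note: without MIPS32R2 (no WSBH/ROTR), asm_bitswap above synthesizes the byte swap from shifts, masks and ORs. The value it produces is the ordinary 32-bit byte swap:

#include <stdint.h>

static uint32_t bswap32_model(uint32_t x)
{
  return (x << 24) | ((x & 0xff00u) << 8) |
         ((x >> 8) & 0xff00u) | (x >> 24);
}
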
-static void asm_bitop(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checku16(k)) {
- emit_tsi(as, mik, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_dst(as, mi, dest, left, right);
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, MIPSIns mi, MIPSIns mik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
- emit_dta(as, mik, dest, ra_hintalloc(as, ir->op1, dest, RSET_GPR), shift);
- } else {
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, mi, dest, right, left); /* Shift amount is in rs. */
- }
-}
-
-static void asm_bitror(ASMState *as, IRIns *ir)
-{
- if ((as->flags & JIT_F_MIPS32R2)) {
- asm_bitshift(as, ir, MIPSI_ROTRV, MIPSI_ROTR);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- uint32_t shift = (uint32_t)(IR(ir->op2)->i & 31);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_rotr(as, dest, left, RID_TMP, shift);
- } else {
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_OR, dest, dest, RID_TMP);
- emit_dst(as, MIPSI_SRLV, dest, right, left);
- emit_dst(as, MIPSI_SLLV, RID_TMP, RID_TMP, left);
- emit_dst(as, MIPSI_SUBU, RID_TMP, ra_allock(as, 32, RSET_GPR), right);
- }
- }
-}
-
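
Editor's note: similarly, asm_bitror above falls back to the classic two-shift rotate when ROTR/ROTRV are unavailable. In C, with the &31 masks mirroring what the MIPS variable shifts do in hardware:

#include <stdint.h>

static uint32_t ror32_model(uint32_t x, uint32_t n)
{
  return (x >> (n & 31)) | (x << ((32 - n) & 31));
}
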
-static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
-{
- if (irt_isnum(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- emit_fg(as, MIPSI_MOVT_D, dest, right);
- } else {
- emit_fg(as, MIPSI_MOVF_D, dest, left);
- if (dest != right) emit_fg(as, MIPSI_MOV_D, dest, right);
- }
- emit_fgh(as, MIPSI_C_OLT_D, 0, ismax ? left : right, ismax ? right : left);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (dest == left) {
- emit_dst(as, MIPSI_MOVN, dest, right, RID_TMP);
- } else {
- emit_dst(as, MIPSI_MOVZ, dest, left, RID_TMP);
- if (dest != right) emit_move(as, dest, right);
- }
- emit_dst(as, MIPSI_SLT, RID_TMP,
- ismax ? left : right, ismax ? right : left);
- }
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-static void asm_comp(ASMState *as, IRIns *ir)
-{
- /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
- IROp op = ir->o;
- if (irt_isnum(ir->t)) {
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- asm_guard(as, (op&1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_OLT_D + ((op&3) ^ ((op>>2)&1)), 0, left, right);
- } else {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (op == IR_ABC) op = IR_UGT;
- if ((op&4) == 0 && irref_isk(ir->op2) && IR(ir->op2)->i == 0) {
- MIPSIns mi = (op&2) ? ((op&1) ? MIPSI_BLEZ : MIPSI_BGTZ) :
- ((op&1) ? MIPSI_BLTZ : MIPSI_BGEZ);
- asm_guard(as, mi, left, 0);
- } else {
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if ((op&2)) k++;
- if (checki16(k)) {
- asm_guard(as, (op&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_tsi(as, (op&4) ? MIPSI_SLTIU : MIPSI_SLTI,
- RID_TMP, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT,
- RID_TMP, (op&2) ? right : left, (op&2) ? left : right);
- }
- }
-}
-
-static void asm_compeq(ASMState *as, IRIns *ir)
-{
- Reg right, left = ra_alloc2(as, ir, irt_isnum(ir->t) ? RSET_FPR : RSET_GPR);
- right = (left >> 8); left &= 255;
- if (irt_isnum(ir->t)) {
- asm_guard(as, (ir->o & 1) ? MIPSI_BC1T : MIPSI_BC1F, 0, 0);
- emit_fgh(as, MIPSI_C_EQ_D, 0, left, right);
- } else {
- asm_guard(as, (ir->o & 1) ? MIPSI_BEQ : MIPSI_BNE, left, right);
- }
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_comp64(ASMState *as, IRIns *ir)
-{
- /* ORDER IR: LT GE LE GT ULT UGE ULE UGT. */
- IROp op = (ir-1)->o;
- MCLabel l_end;
- Reg rightlo, leftlo, righthi, lefthi = ra_alloc2(as, ir, RSET_GPR);
- righthi = (lefthi >> 8); lefthi &= 255;
- leftlo = ra_alloc2(as, ir-1,
- rset_exclude(rset_exclude(RSET_GPR, lefthi), righthi));
- rightlo = (leftlo >> 8); leftlo &= 255;
- asm_guard(as, ((op^(op>>1))&1) ? MIPSI_BNE : MIPSI_BEQ, RID_TMP, RID_ZERO);
- l_end = emit_label(as);
- if (lefthi != righthi)
- emit_dst(as, (op&4) ? MIPSI_SLTU : MIPSI_SLT, RID_TMP,
- (op&2) ? righthi : lefthi, (op&2) ? lefthi : righthi);
- emit_dst(as, MIPSI_SLTU, RID_TMP,
- (op&2) ? rightlo : leftlo, (op&2) ? leftlo : rightlo);
- if (lefthi != righthi)
- emit_branch(as, MIPSI_BEQ, lefthi, righthi, l_end);
-}
-
-static void asm_comp64eq(ASMState *as, IRIns *ir)
-{
- Reg tmp, right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- asm_guard(as, ((ir-1)->o & 1) ? MIPSI_BEQ : MIPSI_BNE, RID_TMP, RID_ZERO);
- tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR, left), right));
- emit_dst(as, MIPSI_OR, RID_TMP, RID_TMP, tmp);
- emit_dst(as, MIPSI_XOR, tmp, left, right);
- left = ra_alloc2(as, ir-1, RSET_GPR);
- right = (left >> 8); left &= 255;
- emit_dst(as, MIPSI_XOR, RID_TMP, left, right);
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
- } else if ((ir-1)->o < IR_EQ) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64(as, ir);
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64eq(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- as->curins--; /* Handle both stores here. */
- if ((ir-1)->r != RID_SINK) {
- asm_xstore(as, ir, LJ_LE ? 4 : 0);
- asm_xstore(as, ir-1, LJ_LE ? 0 : 4);
- }
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD: as->curins--; asm_add64(as, ir); break;
- case IR_SUB: as->curins--; asm_sub64(as, ir); break;
- case IR_NEG: as->curins--; asm_neg64(as, ir); break;
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
- Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
- ExitNo oldsnap = as->snapno;
- rset_clear(allow, pbase);
- tmp = allow ? rset_pickbot(allow) :
- (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
- as->snapno = exitno;
- asm_guard(as, MIPSI_BNE, RID_TMP, RID_ZERO);
- as->snapno = oldsnap;
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_tsi(as, MIPSI_LW, tmp, RID_SP, 0);
- else
- ra_modified(as, tmp);
- emit_tsi(as, MIPSI_SLTIU, RID_TMP, RID_TMP, (int32_t)(8*topslot));
- emit_dst(as, MIPSI_SUBU, RID_TMP, tmp, pbase);
- emit_tsi(as, MIPSI_LW, tmp, tmp, offsetof(lua_State, maxstack));
- if (pbase == RID_TMP)
- emit_getgl(as, RID_TMP, jit_base);
- emit_getgl(as, tmp, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_tsi(as, MIPSI_SW, tmp, RID_SP, 0);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_hsi(as, MIPSI_SDC1, src, RID_BASE, ofs);
- } else {
- Reg type;
- RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- rset_clear(allow, src);
- emit_tsi(as, MIPSI_SW, src, RID_BASE, ofs+(LJ_BE?4:0));
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), allow);
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- }
- emit_tsi(as, MIPSI_SW, type, RID_BASE, ofs+(LJ_BE?0:4));
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- /* Assumes asm_snap_prep() already done. */
- asm_guard(as, MIPSI_BNE, RID_RET, RID_ZERO);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- emit_tsi(as, MIPSI_ADDIU, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- tmp = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_branch(as, MIPSI_BNE, RID_TMP, RID_ZERO, l_end);
- emit_dst(as, MIPSI_SLTU, RID_TMP, RID_TMP, tmp);
- emit_getgl(as, tmp, gc.threshold);
- emit_getgl(as, RID_TMP, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- p[-1] = MIPSI_NOP;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guard already inverted the cond branch. Only patch the target. */
- p[-3] |= ((target-p+2) & 0x0000ffffu);
- } else {
- p[-2] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (as->loopinv) as->mctop--;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_move(as, r, RID_BASE);
- }
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (as->loopinv) as->mctop--;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_move(as, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *target = lnk ? traceref(as->J,lnk)->mcode : (MCode *)lj_vm_exit_interp;
- int32_t spadj = as->T->spadjust;
- MCode *p = as->mctop-1;
- *p = spadj ? (MIPSI_ADDIU|MIPSF_T(RID_SP)|MIPSF_S(RID_SP)|spadj) : MIPSI_NOP;
- p[-1] = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- as->mcp = as->mctop-2; /* Leave room for branch plus nop or stack adj. */
- as->invmcp = as->loopref ? as->mcp : NULL;
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE: asm_compeq(as, ir); break;
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
- asm_comp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitnot(as, ir); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitop(as, ir, MIPSI_AND, MIPSI_ANDI); break;
- case IR_BOR: asm_bitop(as, ir, MIPSI_OR, MIPSI_ORI); break;
- case IR_BXOR: asm_bitop(as, ir, MIPSI_XOR, MIPSI_XORI); break;
-
- case IR_BSHL: asm_bitshift(as, ir, MIPSI_SLLV, MIPSI_SLL); break;
- case IR_BSHR: asm_bitshift(as, ir, MIPSI_SRLV, MIPSI_SRL); break;
- case IR_BSAR: asm_bitshift(as, ir, MIPSI_SRAV, MIPSI_SRA); break;
- case IR_BROL: lua_assert(0); break;
- case IR_BROR: asm_bitror(as, ir); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB: asm_sub(as, ir); break;
- case IR_MUL: asm_mul(as, ir); break;
- case IR_DIV: asm_fparith(as, ir, MIPSI_DIV_D); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
- case IR_ABS: asm_fpunary(as, ir, MIPSI_ABS_D); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_MIN: asm_min_max(as, ir, 0); break;
- case IR_MAX: asm_min_max(as, ir, 1); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 <= IRFPM_TRUNC)
- asm_callround(as, ir, IRCALL_lj_vm_floor + ir->op2);
- else if (ir->op2 == IRFPM_SQRT)
- asm_fpunary(as, ir, MIPSI_SQRT_D);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
-
- /* Overflow-checking arithmetic ops. */
- case IR_ADDOV: asm_arithov(as, ir); break;
- case IR_SUBOV: asm_arithov(as, ir); break;
- case IR_MULOV: asm_mulov(as, ir); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 4, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++) {
- if (args[i] && irt_isfp(IR(args[i])->t) &&
- nfpr > 0 && !(ci->flags & CCI_VARARG)) {
- nfpr--;
- ngpr -= irt_isnum(IR(args[i])->t) ? 2 : 1;
- } else if (args[i] && irt_isnum(IR(args[i])->t)) {
- nfpr = 0;
- ngpr = ngpr & ~1;
- if (ngpr > 0) ngpr -= 2; else nslots = (nslots+3) & ~1;
- } else {
- nfpr = 0;
- if (ngpr > 0) ngpr--; else nslots++;
- }
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- asm_sparejump_setup(as);
- asm_exitstub_setup(as);
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *px = exitstub_trace_addr(T, exitno);
- MCode *cstart = NULL, *cstop = NULL;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MCode exitload = MIPSI_LI | MIPSF_T(RID_TMP) | exitno;
- MCode tjump = MIPSI_J|(((uintptr_t)target>>2)&0x03ffffffu);
- for (p++; p < pe; p++) {
- if (*p == exitload) { /* Look for load of exit number. */
- /* Look for exitstub branch. Yes, this covers all used branch variants. */
- if (((p[-1] ^ (px-p)) & 0xffffu) == 0 &&
- ((p[-1] & 0xf0000000u) == MIPSI_BEQ ||
- (p[-1] & 0xfc1e0000u) == MIPSI_BLTZ ||
- (p[-1] & 0xffe00000u) == MIPSI_BC1F)) {
- ptrdiff_t delta = target - p;
- if (((delta + 0x8000) >> 16) == 0) { /* Patch in-range branch. */
- patchbranch:
- p[-1] = (p[-1] & 0xffff0000u) | (delta & 0xffffu);
- *p = MIPSI_NOP; /* Replace the load of the exit number. */
- cstop = p;
- if (!cstart) cstart = p-1;
- } else { /* Branch out of range. Use spare jump slot in mcarea. */
- int i;
- for (i = (int)(sizeof(MCLink)/sizeof(MCode));
- i < (int)(sizeof(MCLink)/sizeof(MCode)+MIPS_SPAREJUMP*2);
- i += 2) {
- if (mcarea[i] == tjump) {
- delta = mcarea+i - p;
- goto patchbranch;
- } else if (mcarea[i] == MIPSI_NOP) {
- mcarea[i] = tjump;
- cstart = mcarea+i;
- delta = mcarea+i - p;
- goto patchbranch;
- }
- }
- /* Ignore jump slot overflow. Child trace is simply not attached. */
- }
- } else if (p+1 == pe) {
- /* Patch NOP after code for inverted loop branch. Use of J is ok. */
- lua_assert(p[1] == MIPSI_NOP);
- p[1] = tjump;
- *p = MIPSI_NOP; /* Replace the load of the exit number. */
- cstop = p+2;
- if (!cstart) cstart = p+1;
- }
- }
- }
- if (cstart) lj_mcode_sync(cstart, cstop);
- lj_mcode_patch(J, mcarea, 1);
-}
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_ppc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_ppc.h
deleted file mode 100644
index d8a14c8..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_ppc.h
+++ /dev/null
@@ -1,2168 +0,0 @@
-/*
-** PPC IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Register allocator extensions --------------------------------------- */
-
-/* Allocate a register with a hint. */
-static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
-{
- Reg r = IR(ref)->r;
- if (ra_noreg(r)) {
- if (!ra_hashint(r) && !iscrossref(as, ref))
- ra_sethint(IR(ref)->r, hint); /* Propagate register hint. */
- r = ra_allocref(as, ref, allow);
- }
- ra_noweak(as, r);
- return r;
-}
-
-/* Allocate two source registers for three-operand instructions. */
-static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
- Reg left = irl->r, right = irr->r;
- if (ra_hasreg(left)) {
- ra_noweak(as, left);
- if (ra_noreg(right))
- right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
- else
- ra_noweak(as, right);
- } else if (ra_hasreg(right)) {
- ra_noweak(as, right);
- left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
- } else if (ra_hashint(right)) {
- right = ra_allocref(as, ir->op2, allow);
- left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
- } else {
- left = ra_allocref(as, ir->op1, allow);
- right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
- }
- return left | (right << 8);
-}
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Setup exit stubs after the end of each trace. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- MCode *mxp = as->mctop;
- if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
- asm_mclimit(as);
- /* 1: mflr r0; bl ->vm_exit_handler; li r0, traceno; bl <1; bl <1; ... */
- for (i = nexits-1; (int32_t)i >= 0; i--)
- *--mxp = PPCI_BL|(((-3-i)&0x00ffffffu)<<2);
- *--mxp = PPCI_LI|PPCF_T(RID_TMP)|as->T->traceno; /* Read by exit handler. */
- mxp--;
- *mxp = PPCI_BL|((((MCode *)(void *)lj_vm_exit_handler-mxp)&0x00ffffffu)<<2);
- *--mxp = PPCI_MFLR|PPCF_T(RID_TMP);
- as->mctop = mxp;
-}
-
-static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
-{
- /* Keep this in-sync with exitstub_trace_addr(). */
- return as->mctop + exitno + 3;
-}
-
-/* Emit conditional branch to exit for guard. */
-static void asm_guardcc(ASMState *as, PPCCC cc)
-{
- MCode *target = asm_exitstub_addr(as, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *p = PPCI_B | (((target-p) & 0x00ffffffu) << 2);
- emit_condbranch(as, PPCI_BC, cc^4, p);
- return;
- }
- emit_condbranch(as, PPCI_BC, cc, target);
-}
-
-/* -- Operand fusion ------------------------------------------------------ */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if there's no conflicting instruction between curins and ref. */
-static int noconflict(ASMState *as, IRRef ref, IROp conflict)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref)
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse the array base of colocated arrays. */
-static int32_t asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
- return (int32_t)sizeof(GCtab);
- return 0;
-}
-
-/* Indicates load/store indexed is ok. */
-#define AHUREF_LSX ((int32_t)0x80000000)
-
-/* Fuse array/hash/upvalue reference into register+offset operand. */
-static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- if (ir->o == IR_AREF) {
- if (mayfuse(as, ref)) {
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, refa, allow);
- }
- }
- if (*ofsp == AHUREF_LSX) {
- Reg base = ra_alloc1(as, ir->op1, allow);
- Reg idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- return base | (idx << 8);
- }
- }
- } else if (ir->o == IR_HREFK) {
- if (mayfuse(as, ref)) {
- int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- if (checki16(ofs)) {
- *ofsp = ofs;
- return ra_alloc1(as, ir->op1, allow);
- }
- }
- } else if (ir->o == IR_UREFC) {
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- int32_t ofs = i32ptr(&gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.tv);
- int32_t jgl = (intptr_t)J2G(as->J);
- if ((uint32_t)(ofs-jgl) < 65536) {
- *ofsp = ofs-jgl-32768;
- return RID_JGL;
- } else {
- *ofsp = (int16_t)ofs;
- return ra_allock(as, ofs-(int16_t)ofs, allow);
- }
- }
- }
- }
- *ofsp = 0;
- return ra_alloc1(as, ref, allow);
-}
-
-/* Fuse XLOAD/XSTORE reference into load/store operand. */
-static void asm_fusexref(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
- RegSet allow, int32_t ofs)
-{
- IRIns *ir = IR(ref);
- Reg base;
- if (ra_noreg(ir->r) && canfuse(as, ir)) {
- if (ir->o == IR_ADD) {
- int32_t ofs2;
- if (irref_isk(ir->op2) && (ofs2 = ofs + IR(ir->op2)->i, checki16(ofs2))) {
- ofs = ofs2;
- ref = ir->op1;
- } else if (ofs == 0) {
- Reg right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
- return;
- }
- } else if (ir->o == IR_STRREF) {
- lua_assert(ofs == 0);
- ofs = (int32_t)sizeof(GCstr);
- if (irref_isk(ir->op2)) {
- ofs += IR(ir->op2)->i;
- ref = ir->op1;
- } else if (irref_isk(ir->op1)) {
- ofs += IR(ir->op1)->i;
- ref = ir->op2;
- } else {
- /* NYI: Fuse ADD with constant. */
- Reg tmp, right, left = ra_alloc2(as, ir, allow);
- right = (left >> 8); left &= 255;
- tmp = ra_scratch(as, rset_exclude(rset_exclude(allow, left), right));
- emit_fai(as, pi, rt, tmp, ofs);
- emit_tab(as, PPCI_ADD, tmp, left, right);
- return;
- }
- if (!checki16(ofs)) {
- Reg left = ra_alloc1(as, ref, allow);
- Reg right = ra_allock(as, ofs, rset_exclude(allow, left));
- emit_fab(as, PPCI_LWZX | ((pi >> 20) & 0x780), rt, left, right);
- return;
- }
- }
- }
- base = ra_alloc1(as, ref, allow);
- emit_fai(as, pi, rt, base, ofs);
-}
-
-/* Fuse XLOAD/XSTORE reference into indexed-only load/store operand. */
-static void asm_fusexrefx(ASMState *as, PPCIns pi, Reg rt, IRRef ref,
- RegSet allow)
-{
- IRIns *ira = IR(ref);
- Reg right, left;
- if (canfuse(as, ira) && ira->o == IR_ADD && ra_noreg(ira->r)) {
- left = ra_alloc2(as, ira, allow);
- right = (left >> 8); left &= 255;
- } else {
- right = ra_alloc1(as, ref, allow);
- left = RID_R0;
- }
- emit_tab(as, pi, rt, left, right);
-}
-
-/* Fuse to multiply-add/sub instruction. */
-static int asm_fusemadd(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pir)
-{
- IRRef lref = ir->op1, rref = ir->op2;
- IRIns *irm;
- if (lref != rref &&
- ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
- ra_noreg(irm->r)) ||
- (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
- (rref = lref, pi = pir, ra_noreg(irm->r))))) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg add = ra_alloc1(as, rref, RSET_FPR);
- Reg right, left = ra_alloc2(as, irm, rset_exclude(RSET_FPR, add));
- right = (left >> 8); left &= 255;
- emit_facb(as, pi, dest, left, right, add);
- return 1;
- }
- return 0;
-}
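
A sketch of what the fused instruction computes; the point of the fusion is that one fmadd replaces a separate multiply and add (names below are illustrative):

/* Illustrative only: dest = left*right + add for FMADD; the FMSUB/FNMSUB
** variants subtract the addend and optionally negate the result. A real
** fmadd also rounds once instead of twice, so this C expression is only
** an approximation of the hardware behavior.
*/
static double fmadd_sketch(double left, double right, double add)
{
  return left * right + add;
}
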
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = 8;
- Reg gpr = REGARG_FIRSTGPR, fpr = REGARG_FIRSTFPR;
- if ((void *)ci->func)
- emit_call(as, (void *)ci->func);
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- if (ref) {
- IRIns *ir = IR(ref);
- if (irt_isfp(ir->t)) {
- if (fpr <= REGARG_LASTFPR) {
- lua_assert(rset_test(as->freeset, fpr)); /* Already evicted. */
- ra_leftov(as, fpr, ref);
- fpr++;
- } else {
- Reg r = ra_alloc1(as, ref, RSET_FPR);
- if (irt_isnum(ir->t)) ofs = (ofs + 4) & ~4;
- emit_spstore(as, ir, r, ofs);
- ofs += irt_isnum(ir->t) ? 8 : 4;
- }
- } else {
- if (gpr <= REGARG_LASTGPR) {
- lua_assert(rset_test(as->freeset, gpr)); /* Already evicted. */
- ra_leftov(as, gpr, ref);
- gpr++;
- } else {
- Reg r = ra_alloc1(as, ref, RSET_GPR);
- emit_spstore(as, ir, r, ofs);
- ofs += 4;
- }
- }
- } else {
- if (gpr <= REGARG_LASTGPR)
- gpr++;
- else
- ofs += 4;
- }
- checkmclim(as);
- }
- if ((ci->flags & CCI_VARARG)) /* Vararg calls need to know about FPR use. */
- emit_tab(as, fpr == REGARG_FIRSTFPR ? PPCI_CRXOR : PPCI_CREQV, 6, 6, 6);
-}
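
The loop above implements the GPR/FPR argument split of the targeted PPC ABI: integer arguments consume GPRs, floating-point arguments consume FPRs, and whatever does not fit goes to stack slots, with doubles kept 8-byte aligned. A hedged sketch of that alignment step (for offsets that are already multiples of 4, the conventional round-up below agrees with the (ofs + 4) & ~4 form used above):

#include <stdint.h>
/* Illustrative helper: round a stack offset up to 8 for a double argument. */
static int32_t align_double_slot(int32_t ofs)
{
  return (ofs + 7) & ~7;
}
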
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- lua_assert(!irt_ispri(ir->t));
- if (irt_isfp(ir->t)) {
- if ((ci->flags & CCI_CASTU64)) {
- /* Use spill slot or temp slots. */
- int32_t ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_fai(as, PPCI_LFD, dest, RID_SP, ofs);
- }
- emit_tai(as, PPCI_STW, RID_RETHI, RID_SP, ofs);
- emit_tai(as, PPCI_STW, RID_RETLO, RID_SP, ofs+4);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
- } else if (hiop) {
- ra_destpair(as, ir);
- } else {
- ra_destreg(as, ir, RID_RET);
- }
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- if (irref_isk(func)) { /* Call to constant address. */
- ci.func = (ASMFunction)(void *)(irf->i);
- } else { /* Need a non-argument register for indirect calls. */
- RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
- Reg freg = ra_alloc1(as, func, allow);
- *--as->mcp = PPCI_BCTRL;
- *--as->mcp = PPCI_MTCTR | PPCF_T(freg);
- ci.func = (ASMFunction)(void *)0;
- }
- asm_gencall(as, &ci, args);
-}
-
-static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_ab(as, PPCI_CMPW, RID_TMP,
- ra_allock(as, i32ptr(pc), rset_exclude(RSET_GPR, base)));
- emit_tai(as, PPCI_LWZ, RID_TMP, base, -8);
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- RegSet allow = RSET_FPR;
- Reg tmp = ra_scratch(as, rset_clear(allow, left));
- Reg fbias = ra_scratch(as, rset_clear(allow, tmp));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg hibias = ra_allock(as, 0x43300000, rset_exclude(RSET_GPR, dest));
- asm_guardcc(as, CC_NE);
- emit_fab(as, PPCI_FCMPU, 0, tmp, left);
- emit_fab(as, PPCI_FSUB, tmp, tmp, fbias);
- emit_fai(as, PPCI_LFD, tmp, RID_SP, SPOFS_TMP);
- emit_tai(as, PPCI_STW, RID_TMP, RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- emit_asi(as, PPCI_XORIS, RID_TMP, dest, 0x8000);
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_lsptr(as, PPCI_LFS, (fbias & 31),
- (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
- RSET_GPR);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_FPR;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, allow);
- Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
- Reg tmp = ra_scratch(as, rset_clear(allow, right));
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fab(as, PPCI_FADD, tmp, left, right);
-}
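
TOBIT relies on the classic bias trick: adding a large power-of-two constant pushes the fractional bits out of the mantissa, and the low word of the biased double is the 32-bit result. A sketch under the assumption that the k64 constant supplied as ir->op2 is the usual 2^52 + 2^51 bias:

#include <stdint.h>
/* Sketch only; assumes the usual 2^52 + 2^51 bias and big-endian word order
** (as on PPC), where w[1] is the low word of the double.
*/
static int32_t tobit_sketch(double x)
{
  union { double d; uint32_t w[2]; } u;
  u.d = x + 6755399441055744.0;   /* 2^52 + 2^51 */
  return (int32_t)u.w[1];
}
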
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(irt_isint64(ir->t) ||
- (st == IRT_I64 || st == IRT_U64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- if (st == IRT_NUM) /* double -> float conversion. */
- emit_fb(as, PPCI_FRSP, dest, ra_alloc1(as, lref, RSET_FPR));
- else /* float -> double conversion is a no-op on PPC. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else { /* Integer to FP conversion. */
- /* IRT_INT: Flip hibit, bias with 2^52, subtract 2^52+2^31. */
- /* IRT_U32: Bias with 2^52, subtract 2^52. */
- RegSet allow = RSET_GPR;
- Reg left = ra_alloc1(as, lref, allow);
- Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, left));
- Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- const float *kbias;
- if (irt_isfloat(ir->t)) emit_fb(as, PPCI_FRSP, dest, dest);
- emit_fab(as, PPCI_FSUB, dest, dest, fbias);
- emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
- kbias = (const float *)lj_ir_k64_find(as->J, U64x(59800004,59800000));
- if (st == IRT_U32) kbias++;
- emit_lsptr(as, PPCI_LFS, (fbias & 31), (void *)kbias,
- rset_clear(allow, hibias));
- emit_tai(as, PPCI_STW, st == IRT_U32 ? left : RID_TMP,
- RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- if (st != IRT_U32) emit_asi(as, PPCI_XORIS, RID_TMP, left, 0x8000);
- }
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, lref, RSET_FPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- if (irt_isu32(ir->t)) {
- /* Convert both x and x-2^31 to int and merge results. */
- Reg tmpi = ra_scratch(as, rset_exclude(RSET_GPR, dest));
- emit_asb(as, PPCI_OR, dest, dest, tmpi); /* Select with mask idiom. */
- emit_asb(as, PPCI_AND, tmpi, tmpi, RID_TMP);
- emit_asb(as, PPCI_ANDC, dest, dest, RID_TMP);
- emit_tai(as, PPCI_LWZ, tmpi, RID_SP, SPOFS_TMPLO); /* tmp = (int)(x) */
- emit_tai(as, PPCI_ADDIS, dest, dest, 0x8000); /* dest += 2^31 */
- emit_asb(as, PPCI_SRAWI, RID_TMP, dest, 31); /* mask = -(dest < 0) */
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_tai(as, PPCI_LWZ, dest,
- RID_SP, SPOFS_TMPLO); /* dest = (int)(x-2^31) */
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, tmp);
- emit_fab(as, PPCI_FSUB, tmp, left, tmp);
- emit_lsptr(as, PPCI_LFS, (tmp & 31),
- (void *)lj_ir_k64_find(as->J, U64x(4f000000,00000000)),
- RSET_GPR);
- } else {
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- emit_fai(as, PPCI_STFD, tmp, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, tmp, left);
- }
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if ((ir->op2 & IRCONV_SEXT))
- emit_as(as, st == IRT_I8 ? PPCI_EXTSB : PPCI_EXTSH, dest, left);
- else
- emit_rot(as, PPCI_RLWINM, dest, left, 0, st == IRT_U8 ? 24 : 16, 31);
- } else { /* 32/64 bit integer conversions. */
- /* Only need to handle 32/32 bit no-op (cast) on 32 bit archs. */
- ra_leftov(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
-}
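
The integer-to-FP path above is the inverse bias trick spelled out in the comments: store 0x43300000 as the hiword and the (possibly sign-flipped) integer as the loword, then subtract the matching bias. A sketch of the unsigned case (big-endian word order assumed; for IRT_INT the hibit is flipped first and 2^52 + 2^31 is subtracted instead):

#include <stdint.h>
/* Sketch only: builds the double 2^52 + k, then removes the 2^52 bias. */
static double u32_to_num_sketch(uint32_t k)
{
  union { double d; uint32_t w[2]; } u;
  u.w[0] = 0x43300000;              /* hiword of 2^52 */
  u.w[1] = k;                       /* loword carries the integer */
  return u.d - 4503599627370496.0;  /* 2^52 */
}
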
-
-#if LJ_HASFFI
-static void asm_conv64(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- IRCallID id;
- const CCallInfo *ci;
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = (ir-1)->op1;
- if (st == IRT_NUM || st == IRT_FLOAT) {
- id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
- ir--;
- } else {
- id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
- }
- ci = &lj_ir_callinfo[id];
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- int32_t ofs;
- RegSet drop = RSET_SCRATCH;
- if (ra_hasreg(ir->r)) rset_set(drop, ir->r); /* Spill dest reg (if any). */
- ra_evictset(as, drop);
- asm_guardcc(as, CC_EQ);
- emit_ai(as, PPCI_CMPWI, RID_RET, 0); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- ofs = ir->s ? sps_scale(ir->s) : SPOFS_TMP;
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_SP, ofs);
-}
-
-/* Get pointer to TValue. */
-static void asm_tvptr(ASMState *as, Reg dest, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (irt_isnum(ir->t)) {
- if (irref_isk(ref)) /* Use the number constant itself as a TValue. */
- ra_allockreg(as, i32ptr(ir_knum(ir)), dest);
- else /* Otherwise force a spill and use the spill slot. */
- emit_tai(as, PPCI_ADDI, dest, RID_SP, ra_spill(as, ir));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- RegSet allow = rset_exclude(RSET_GPR, dest);
- Reg type;
- emit_tai(as, PPCI_ADDI, dest, RID_JGL, offsetof(global_State, tmptv)-32768);
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- emit_setgl(as, src, tmptv.gcr);
- }
- type = ra_allock(as, irt_toitype(ir->t), allow);
- emit_setgl(as, type, tmptv.it);
- }
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(IR(ir->op1)->t) || (ir+1)->o == IR_HIOP) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx, base;
- if (irref_isk(ir->op2)) {
- IRRef tab = IR(ir->op1)->op1;
- int32_t ofs = asm_fuseabase(as, tab);
- IRRef refa = ofs ? tab : ir->op1;
- ofs += 8*IR(ir->op2)->i;
- if (checki16(ofs)) {
- base = ra_alloc1(as, refa, RSET_GPR);
- emit_tai(as, PPCI_ADDI, dest, base, ofs);
- return;
- }
- }
- base = ra_alloc1(as, ir->op1, RSET_GPR);
- idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
- emit_tab(as, PPCI_ADD, dest, RID_TMP, base);
- emit_slwi(as, RID_TMP, idx, 3);
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-** Node *n = hashkey(t, key);
-** do {
-** if (lj_obj_equal(&n->key, key)) return &n->val;
-** } while ((n = nextnode(n)));
-** return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir, IROp merge)
-{
- RegSet allow = RSET_GPR;
- int destused = ra_used(ir);
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, tmp1 = RID_TMP, tmp2;
- Reg tisnum = RID_NONE, tmpnum = RID_NONE;
- IRRef refkey = ir->op2;
- IRIns *irkey = IR(refkey);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- rset_clear(allow, tab);
- if (irt_isnum(kt)) {
- key = ra_alloc1(as, refkey, RSET_FPR);
- tmpnum = ra_scratch(as, rset_exclude(RSET_FPR, key));
- tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
- rset_clear(allow, tisnum);
- } else if (!irt_ispri(kt)) {
- key = ra_alloc1(as, refkey, allow);
- rset_clear(allow, key);
- }
- tmp2 = ra_scratch(as, allow);
- rset_clear(allow, tmp2);
-
- /* Key not found in chain: jump to exit (if merged) or load niltv. */
- l_end = emit_label(as);
- as->invmcp = NULL;
- if (merge == IR_NE)
- asm_guardcc(as, CC_EQ);
- else if (destused)
- emit_loada(as, dest, niltvg(J2G(as->J)));
-
- /* Follow hash chain until the end. */
- l_loop = --as->mcp;
- emit_ai(as, PPCI_CMPWI, dest, 0);
- emit_tai(as, PPCI_LWZ, dest, dest, (int32_t)offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (merge == IR_EQ)
- asm_guardcc(as, CC_EQ);
- else
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- if (irt_isnum(kt)) {
- emit_fab(as, PPCI_FCMPU, 0, tmpnum, key);
- emit_condbranch(as, PPCI_BC, CC_GE, l_next);
- emit_ab(as, PPCI_CMPLW, tmp1, tisnum);
- emit_fai(as, PPCI_LFD, tmpnum, dest, (int32_t)offsetof(Node, key.n));
- } else {
- if (!irt_ispri(kt)) {
- emit_ab(as, PPCI_CMPW, tmp2, key);
- emit_condbranch(as, PPCI_BC, CC_NE, l_next);
- }
- emit_ai(as, PPCI_CMPWI, tmp1, irt_toitype(irkey->t));
- if (!irt_ispri(kt))
- emit_tai(as, PPCI_LWZ, tmp2, dest, (int32_t)offsetof(Node, key.gcr));
- }
- emit_tai(as, PPCI_LWZ, tmp1, dest, (int32_t)offsetof(Node, key.it));
- *l_loop = PPCI_BC | PPCF_Y | PPCF_CC(CC_NE) |
- (((char *)as->mcp-(char *)l_loop) & 0xffffu);
-
- /* Load main position relative to tab->node into dest. */
- khash = irref_isk(refkey) ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
- } else {
- Reg tmphash = tmp1;
- if (irref_isk(refkey))
- tmphash = ra_allock(as, khash, allow);
- emit_tab(as, PPCI_ADD, dest, dest, tmp1);
- emit_tai(as, PPCI_MULLI, tmp1, tmp1, sizeof(Node));
- emit_asb(as, PPCI_AND, tmp1, tmp2, tmphash);
- emit_tai(as, PPCI_LWZ, dest, tab, (int32_t)offsetof(GCtab, node));
- emit_tai(as, PPCI_LWZ, tmp2, tab, (int32_t)offsetof(GCtab, hmask));
- if (irref_isk(refkey)) {
- /* Nothing to do. */
- } else if (irt_isstr(kt)) {
- emit_tai(as, PPCI_LWZ, tmp1, key, (int32_t)offsetof(GCstr, hash));
- } else { /* Must match with hash*() in lj_tab.c. */
- emit_tab(as, PPCI_SUBF, tmp1, tmp2, tmp1);
- emit_rotlwi(as, tmp2, tmp2, HASH_ROT3);
- emit_asb(as, PPCI_XOR, tmp1, tmp1, tmp2);
- emit_rotlwi(as, tmp1, tmp1, (HASH_ROT2+HASH_ROT1)&31);
- emit_tab(as, PPCI_SUBF, tmp2, dest, tmp2);
- if (irt_isnum(kt)) {
- int32_t ofs = ra_spill(as, irkey);
- emit_asb(as, PPCI_XOR, tmp2, tmp2, tmp1);
- emit_rotlwi(as, dest, tmp1, HASH_ROT1);
- emit_tab(as, PPCI_ADD, tmp1, tmp1, tmp1);
- emit_tai(as, PPCI_LWZ, tmp2, RID_SP, ofs+4);
- emit_tai(as, PPCI_LWZ, tmp1, RID_SP, ofs);
- } else {
- emit_asb(as, PPCI_XOR, tmp2, key, tmp1);
- emit_rotlwi(as, dest, tmp1, HASH_ROT1);
- emit_tai(as, PPCI_ADDI, tmp1, tmp2, HASH_BIAS);
- emit_tai(as, PPCI_ADDIS, tmp2, key, (HASH_BIAS + 32768)>>16);
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- int32_t kofs = ofs + (int32_t)offsetof(Node, key);
- Reg dest = (ra_used(ir)||ofs > 32736) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg key = RID_NONE, type = RID_TMP, idx = node;
- RegSet allow = rset_exclude(RSET_GPR, node);
- lua_assert(ofs % sizeof(Node) == 0);
- if (ofs > 32736) {
- idx = dest;
- rset_clear(allow, dest);
- kofs = (int32_t)offsetof(Node, key);
- } else if (ra_hasreg(dest)) {
- emit_tai(as, PPCI_ADDI, dest, node, ofs);
- }
- asm_guardcc(as, CC_NE);
- if (!irt_ispri(irkey->t)) {
- key = ra_scratch(as, allow);
- rset_clear(allow, key);
- }
- rset_clear(allow, type);
- if (irt_isnum(irkey->t)) {
- emit_cmpi(as, key, (int32_t)ir_knum(irkey)->u32.lo);
- asm_guardcc(as, CC_NE);
- emit_cmpi(as, type, (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- if (ra_hasreg(key)) {
- emit_cmpi(as, key, irkey->i); /* May use RID_TMP, i.e. type. */
- asm_guardcc(as, CC_NE);
- }
- emit_ai(as, PPCI_CMPWI, type, irt_toitype(irkey->t));
- }
- if (ra_hasreg(key)) emit_tai(as, PPCI_LWZ, key, idx, kofs+4);
- emit_tai(as, PPCI_LWZ, type, idx, kofs);
- if (ofs > 32736) {
- emit_tai(as, PPCI_ADDIS, dest, dest, (ofs + 32768) >> 16);
- emit_tai(as, PPCI_ADDI, dest, node, ofs);
- }
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_lsptr(as, PPCI_LWZ, dest, v, RSET_GPR);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, RID_TMP, 1);
- emit_tai(as, PPCI_ADDI, dest, uv, (int32_t)offsetof(GCupval, tv));
- emit_tai(as, PPCI_LBZ, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
- } else {
- emit_tai(as, PPCI_LWZ, dest, uv, (int32_t)offsetof(GCupval, v));
- }
- emit_tai(as, PPCI_LWZ, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- UNUSED(as); UNUSED(ir);
- lua_assert(!ra_used(ir));
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRRef ref = ir->op2, refk = ir->op1;
- int32_t ofs = (int32_t)sizeof(GCstr);
- Reg r;
- if (irref_isk(ref)) {
- IRRef tmp = refk; refk = ref; ref = tmp;
- } else if (!irref_isk(refk)) {
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- IRIns *irr = IR(ir->op2);
- if (ra_hasreg(irr->r)) {
- ra_noweak(as, irr->r);
- right = irr->r;
- } else if (mayfuse(as, irr->op2) &&
- irr->o == IR_ADD && irref_isk(irr->op2) &&
- checki16(ofs + IR(irr->op2)->i)) {
- ofs += IR(irr->op2)->i;
- right = ra_alloc1(as, irr->op1, rset_exclude(RSET_GPR, left));
- } else {
- right = ra_allocref(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tai(as, PPCI_ADDI, dest, dest, ofs);
- emit_tab(as, PPCI_ADD, dest, left, right);
- return;
- }
- r = ra_alloc1(as, ref, RSET_GPR);
- ofs += IR(refk)->i;
- if (checki16(ofs))
- emit_tai(as, PPCI_ADDI, dest, r, ofs);
- else
- emit_tab(as, PPCI_ADD, dest, r,
- ra_allock(as, ofs, rset_exclude(RSET_GPR, r)));
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static PPCIns asm_fxloadins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: return PPCI_LBZ; /* Needs sign-extension. */
- case IRT_U8: return PPCI_LBZ;
- case IRT_I16: return PPCI_LHA;
- case IRT_U16: return PPCI_LHZ;
- case IRT_NUM: return PPCI_LFD;
- case IRT_FLOAT: return PPCI_LFS;
- default: return PPCI_LWZ;
- }
-}
-
-static PPCIns asm_fxstoreins(IRIns *ir)
-{
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: return PPCI_STB;
- case IRT_I16: case IRT_U16: return PPCI_STH;
- case IRT_NUM: return PPCI_STFD;
- case IRT_FLOAT: return PPCI_STFS;
- default: return PPCI_STW;
- }
-}
-
-static void asm_fload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg idx = ra_alloc1(as, ir->op1, RSET_GPR);
- PPCIns pi = asm_fxloadins(ir);
- int32_t ofs;
- if (ir->op2 == IRFL_TAB_ARRAY) {
- ofs = asm_fuseabase(as, ir->op1);
- if (ofs) { /* Turn the t->array load into an add for colocated arrays. */
- emit_tai(as, PPCI_ADDI, dest, idx, ofs);
- return;
- }
- }
- ofs = field_ofs[ir->op2];
- lua_assert(!irt_isi8(ir->t));
- emit_tai(as, pi, dest, idx, ofs);
-}
-
-static void asm_fstore(ASMState *as, IRIns *ir)
-{
- if (ir->r != RID_SINK) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- IRIns *irf = IR(ir->op1);
- Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
- int32_t ofs = field_ofs[irf->op2];
- PPCIns pi = asm_fxstoreins(ir);
- emit_tai(as, pi, src, idx, ofs);
- }
-}
-
-static void asm_xload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- lua_assert(!(ir->op2 & IRXLOAD_UNALIGNED));
- if (irt_isi8(ir->t))
- emit_as(as, PPCI_EXTSB, dest, dest);
- asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR, 0);
-}
-
-static void asm_xstore(ASMState *as, IRIns *ir, int32_t ofs)
-{
- IRIns *irb;
- if (ir->r == RID_SINK)
- return;
- if (ofs == 0 && mayfuse(as, ir->op2) && (irb = IR(ir->op2))->o == IR_BSWAP &&
- ra_noreg(irb->r) && (irt_isint(ir->t) || irt_isu32(ir->t))) {
- /* Fuse BSWAP with XSTORE to stwbrx. */
- Reg src = ra_alloc1(as, irb->op1, RSET_GPR);
- asm_fusexrefx(as, PPCI_STWBRX, src, ir->op1, rset_exclude(RSET_GPR, src));
- } else {
- Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
- rset_exclude(RSET_GPR, src), ofs);
- }
-}
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_TMP, tmp = RID_TMP, idx;
- RegSet allow = RSET_GPR;
- int32_t ofs = AHUREF_LSX;
- if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- if (!irt_isnum(t)) ofs = 0;
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(t)) {
- Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, rset_exclude(allow, idx));
- asm_guardcc(as, CC_GE);
- emit_ab(as, PPCI_CMPLW, type, tisnum);
- if (ra_hasreg(dest)) {
- if (ofs == AHUREF_LSX) {
- tmp = ra_scratch(as, rset_exclude(rset_exclude(RSET_GPR,
- (idx&255)), (idx>>8)));
- emit_fab(as, PPCI_LFDX, dest, (idx&255), tmp);
- } else {
- emit_fai(as, PPCI_LFD, dest, idx, ofs);
- }
- }
- } else {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, type, irt_toitype(t));
- if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, idx, ofs+4);
- }
- if (ofs == AHUREF_LSX) {
- emit_tab(as, PPCI_LWZX, type, (idx&255), tmp);
- emit_slwi(as, tmp, (idx>>8), 3);
- } else {
- emit_tai(as, PPCI_LWZ, type, idx, ofs);
- }
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg idx, src = RID_NONE, type = RID_NONE;
- int32_t ofs = AHUREF_LSX;
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- src = ra_alloc1(as, ir->op2, RSET_FPR);
- } else {
- if (!irt_ispri(ir->t)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- ofs = 0;
- }
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- rset_clear(allow, type);
- }
- idx = asm_fuseahuref(as, ir->op1, &ofs, allow);
- if (irt_isnum(ir->t)) {
- if (ofs == AHUREF_LSX) {
- emit_fab(as, PPCI_STFDX, src, (idx&255), RID_TMP);
- emit_slwi(as, RID_TMP, (idx>>8), 3);
- } else {
- emit_fai(as, PPCI_STFD, src, idx, ofs);
- }
- } else {
- if (ra_hasreg(src))
- emit_tai(as, PPCI_STW, src, idx, ofs+4);
- if (ofs == AHUREF_LSX) {
- emit_tab(as, PPCI_STWX, type, (idx&255), RID_TMP);
- emit_slwi(as, RID_TMP, (idx>>8), 3);
- } else {
- emit_tai(as, PPCI_STW, type, idx, ofs);
- }
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 0 : 4);
- IRType1 t = ir->t;
- Reg dest = RID_NONE, type = RID_NONE, base;
- RegSet allow = RSET_GPR;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(LJ_DUALNUM ||
- !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- dest = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, dest);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
- } else if (ra_used(ir)) {
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : RSET_GPR);
- rset_clear(allow, dest);
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- if (irt_isint(t)) {
- emit_tai(as, PPCI_LWZ, dest, RID_SP, SPOFS_TMPLO);
- dest = ra_scratch(as, RSET_FPR);
- emit_fai(as, PPCI_STFD, dest, RID_SP, SPOFS_TMP);
- emit_fb(as, PPCI_FCTIWZ, dest, dest);
- t.irt = IRT_NUM; /* Check for original type. */
- } else {
- Reg tmp = ra_scratch(as, allow);
- Reg hibias = ra_allock(as, 0x43300000, rset_clear(allow, tmp));
- Reg fbias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- emit_fab(as, PPCI_FSUB, dest, dest, fbias);
- emit_fai(as, PPCI_LFD, dest, RID_SP, SPOFS_TMP);
- emit_lsptr(as, PPCI_LFS, (fbias & 31),
- (void *)lj_ir_k64_find(as->J, U64x(59800004,59800000)),
- rset_clear(allow, hibias));
- emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPLO);
- emit_tai(as, PPCI_STW, hibias, RID_SP, SPOFS_TMPHI);
- emit_asi(as, PPCI_XORIS, tmp, tmp, 0x8000);
- dest = tmp;
- t.irt = IRT_INT; /* Check for original type. */
- }
- }
- goto dotypecheck;
- }
- base = ra_alloc1(as, REF_BASE, allow);
- rset_clear(allow, base);
-dotypecheck:
- if (irt_isnum(t)) {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- Reg tisnum = ra_allock(as, (int32_t)LJ_TISNUM, allow);
- asm_guardcc(as, CC_GE);
- emit_ab(as, PPCI_CMPLW, RID_TMP, tisnum);
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_fai(as, PPCI_LFD, dest, base, ofs-4);
- } else {
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- asm_guardcc(as, CC_NE);
- emit_ai(as, PPCI_CMPWI, RID_TMP, irt_toitype(t));
- type = RID_TMP;
- }
- if (ra_hasreg(dest)) emit_tai(as, PPCI_LWZ, dest, base, ofs);
- }
- if (ra_hasreg(type)) emit_tai(as, PPCI_LWZ, type, base, ofs-4);
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- RegSet drop = RSET_SCRATCH;
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
-
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- if (ra_used(ir))
- ra_destreg(as, ir, RID_RET); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- int32_t ofs = sizeof(GCcdata);
- lua_assert(sz == 4 || sz == 8);
- if (sz == 8) {
- ofs += 4;
- lua_assert((ir+1)->o == IR_HIOP);
- }
- for (;;) {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_tai(as, PPCI_STW, r, RID_RET, ofs);
- rset_clear(allow, r);
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir++;
- }
- }
- /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
- emit_tai(as, PPCI_STB, RID_RET+1, RID_RET, offsetof(GCcdata, gct));
- emit_tai(as, PPCI_STH, RID_TMP, RID_RET, offsetof(GCcdata, ctypeid));
- emit_ti(as, PPCI_LI, RID_RET+1, ~LJ_TCDATA);
- emit_ti(as, PPCI_LI, RID_TMP, ctypeid); /* Lower 16 bit used. Sign-ext ok. */
- asm_gencall(as, ci, args);
- ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
- ra_releasetmp(as, ASMREF_TMP1));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg mark = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- Reg link = RID_TMP;
- MCLabel l_end = emit_label(as);
- emit_tai(as, PPCI_STW, link, tab, (int32_t)offsetof(GCtab, gclist));
- emit_tai(as, PPCI_STB, mark, tab, (int32_t)offsetof(GCtab, marked));
- emit_setgl(as, tab, gc.grayagain);
- lua_assert(LJ_GC_BLACK == 0x04);
- emit_rot(as, PPCI_RLWINM, mark, mark, 0, 30, 28); /* Clear black bit. */
- emit_getgl(as, link, gc.grayagain);
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, RID_TMP, mark, LJ_GC_BLACK);
- emit_tai(as, PPCI_LBZ, mark, tab, (int32_t)offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj, val, tmp;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- obj = IR(ir->op1)->r;
- tmp = ra_scratch(as, rset_exclude(RSET_GPR, obj));
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, tmp, tmp, LJ_GC_BLACK);
- emit_condbranch(as, PPCI_BC, CC_EQ, l_end);
- emit_asi(as, PPCI_ANDIDOT, RID_TMP, RID_TMP, LJ_GC_WHITES);
- val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj));
- emit_tai(as, PPCI_LBZ, tmp, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
- emit_tai(as, PPCI_LBZ, RID_TMP, val, (int32_t)offsetof(GChead, marked));
-}
-
-/* -- Arithmetic and logic operations ------------------------------------- */
-
-static void asm_fparith(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (pi == PPCI_FMUL)
- emit_fac(as, pi, dest, left, right);
- else
- emit_fab(as, pi, dest, left, right);
-}
-
-static void asm_fpunary(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR);
- emit_fb(as, pi, dest, left);
-}
-
-static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
- IRRef args[2];
- args[0] = irpp->op1;
- args[1] = irp->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
- return 1;
- }
- }
- return 0;
-}
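
The fusion above recognizes EXP2(MUL(LOG2(x), y)) and folds it back into one pow() call, which is just the identity x^y = 2^(y*log2(x)). A sketch of that identity (C99 math.h):

#include <math.h>
/* Illustrative identity behind the fusion; not the emitted code. */
static double pow_via_exp2(double x, double y)
{
  return exp2(log2(x) * y);   /* equals pow(x, y) for x > 0 */
}
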
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, PPCI_FMADD, PPCI_FMADD))
- asm_fparith(as, ir, PPCI_FADD);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- PPCIns pi;
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- pi = PPCI_ADDI;
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi = PPCI_ADDICDOT;
- }
- emit_tai(as, pi, dest, left, k);
- return;
- } else if ((k & 0xffff) == 0) {
- emit_tai(as, PPCI_ADDIS, dest, left, (k >> 16));
- return;
- } else if (!as->sectref) {
- emit_tai(as, PPCI_ADDIS, dest, dest, (k + 32768) >> 16);
- emit_tai(as, PPCI_ADDI, dest, left, k);
- return;
- }
- }
- pi = PPCI_ADD;
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, left, right);
- }
-}
-
-static void asm_sub(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- if (!asm_fusemadd(as, ir, PPCI_FMSUB, PPCI_FNMSUB))
- asm_fparith(as, ir, PPCI_FSUB);
- } else {
- PPCIns pi = PPCI_SUBF;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left, right;
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (checki16(k)) {
- right = ra_alloc1(as, ir->op2, RSET_GPR);
- emit_tai(as, PPCI_SUBFIC, dest, right, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
- }
-}
-
-static void asm_mul(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fparith(as, ir, PPCI_FMUL);
- } else {
- PPCIns pi = PPCI_MULLW;
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_MULLI, dest, left, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, pi, dest, left, right);
- }
-}
-
-static void asm_neg(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t)) {
- asm_fpunary(as, ir, PPCI_FNEG);
- } else {
- Reg dest, left;
- PPCIns pi = PPCI_NEG;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- emit_tab(as, pi, dest, left, 0);
- }
-}
-
-static void asm_arithov(ASMState *as, IRIns *ir, PPCIns pi)
-{
- Reg dest, left, right;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- }
- asm_guardcc(as, CC_SO);
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (pi == PPCI_SUBFO) { Reg tmp = left; left = right; right = tmp; }
- emit_tab(as, pi|PPCF_DOT, dest, left, right);
-}
-
-#if LJ_HASFFI
-static void asm_add64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_alloc1(as, ir->op1, RSET_GPR);
- PPCIns pi = PPCI_ADDE;
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k == 0)
- pi = PPCI_ADDZE;
- else if (k == -1)
- pi = PPCI_ADDME;
- else
- goto needright;
- right = 0;
- } else {
- needright:
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- }
- emit_tab(as, pi, dest, left, right);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_ADDIC, dest, left, k);
- return;
- }
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_tab(as, PPCI_ADDC, dest, left, right);
-}
-
-static void asm_sub64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left, right = ra_alloc1(as, ir->op2, RSET_GPR);
- PPCIns pi = PPCI_SUBFE;
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (k == 0)
- pi = PPCI_SUBFZE;
- else if (k == -1)
- pi = PPCI_SUBFME;
- else
- goto needleft;
- left = 0;
- } else {
- needleft:
- left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
- }
- emit_tab(as, pi, dest, right, left); /* Subtract right _from_ left. */
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- right = ra_alloc1(as, ir->op2, RSET_GPR);
- if (irref_isk(ir->op1)) {
- int32_t k = IR(ir->op1)->i;
- if (checki16(k)) {
- emit_tai(as, PPCI_SUBFIC, dest, right, k);
- return;
- }
- }
- left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, right));
- emit_tab(as, PPCI_SUBFC, dest, right, left);
-}
-
-static void asm_neg64(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_tab(as, PPCI_SUBFZE, dest, left, 0);
- ir--;
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- emit_tai(as, PPCI_SUBFIC, dest, left, 0);
-}
-#endif
-
-static void asm_bitnot(ASMState *as, IRIns *ir)
-{
- Reg dest, left, right;
- PPCIns pi = PPCI_NOR;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- if (mayfuse(as, ir->op1)) {
- IRIns *irl = IR(ir->op1);
- if (irl->o == IR_BAND)
- pi ^= (PPCI_NOR ^ PPCI_NAND);
- else if (irl->o == IR_BXOR)
- pi ^= (PPCI_NOR ^ PPCI_EQV);
- else if (irl->o != IR_BOR)
- goto nofuse;
- left = ra_hintalloc(as, irl->op1, dest, RSET_GPR);
- right = ra_alloc1(as, irl->op2, rset_exclude(RSET_GPR, left));
- } else {
-nofuse:
- left = right = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- }
- emit_asb(as, pi, dest, left, right);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- IRIns *irx;
- if (mayfuse(as, ir->op1) && (irx = IR(ir->op1))->o == IR_XLOAD &&
- ra_noreg(irx->r) && (irt_isint(irx->t) || irt_isu32(irx->t))) {
- /* Fuse BSWAP with XLOAD to lwbrx. */
- asm_fusexrefx(as, PPCI_LWBRX, dest, irx->op1, RSET_GPR);
- } else {
- Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg tmp = dest;
- if (tmp == left) {
- tmp = RID_TMP;
- emit_mr(as, dest, RID_TMP);
- }
- emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 16, 23);
- emit_rot(as, PPCI_RLWIMI, tmp, left, 24, 0, 7);
- emit_rotlwi(as, tmp, left, 8);
- }
-}
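
When the lwbrx fusion does not apply, the rlwimi/rotlwi triple above assembles a plain 32-bit byte swap. The same result, written out in C:

#include <stdint.h>
/* What the three-rotate sequence computes (generic form, not PPC-specific). */
static uint32_t bswap32_sketch(uint32_t x)
{
  return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
         ((x << 8) & 0x00ff0000u) | (x << 24);
}
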
-
-static void asm_bitop(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg right, left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- Reg tmp = left;
- if ((checku16(k) || (k & 0xffff) == 0) || (tmp = dest, !as->sectref)) {
- if (!checku16(k)) {
- emit_asi(as, pik ^ (PPCI_ORI ^ PPCI_ORIS), dest, tmp, (k >> 16));
- if ((k & 0xffff) == 0) return;
- }
- emit_asi(as, pik, dest, left, k);
- return;
- }
- }
- /* May fail due to spills/restores above, but simplifies the logic. */
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- pi |= PPCF_DOT;
- }
- right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, pi, dest, left, right);
-}
-
-/* Fuse BAND with contiguous bitmask and a shift to rlwinm. */
-static void asm_fuseandsh(ASMState *as, PPCIns pi, int32_t mask, IRRef ref)
-{
- IRIns *ir;
- Reg left;
- if (mayfuse(as, ref) && (ir = IR(ref), ra_noreg(ir->r)) &&
- irref_isk(ir->op2) && ir->o >= IR_BSHL && ir->o <= IR_BROR) {
- int32_t sh = (IR(ir->op2)->i & 31);
- switch (ir->o) {
- case IR_BSHL:
- if ((mask & ((1u<<sh)-1))) goto nofuse;
- break;
- case IR_BSHR:
- if ((mask & ~((~0u)>>sh))) goto nofuse;
- sh = ((32-sh)&31);
- break;
- case IR_BROL:
- break;
- default:
- goto nofuse;
- }
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- *--as->mcp = pi | PPCF_T(left) | PPCF_B(sh);
- return;
- }
-nofuse:
- left = ra_alloc1(as, ref, RSET_GPR);
- *--as->mcp = pi | PPCF_T(left);
-}
-
-static void asm_bitand(ASMState *as, IRIns *ir)
-{
- Reg dest, left, right;
- IRRef lref = ir->op1;
- PPCIns dot = 0;
- IRRef op2;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- dot = PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op2)) {
- int32_t k = IR(ir->op2)->i;
- if (k) {
- /* First check for a contiguous bitmask as used by rlwinm. */
- uint32_t s1 = lj_ffs((uint32_t)k);
- uint32_t k1 = ((uint32_t)k >> s1);
- if ((k1 & (k1+1)) == 0) {
- asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
- PPCF_MB(31-lj_fls((uint32_t)k)) | PPCF_ME(31-s1),
- k, lref);
- return;
- }
- if (~(uint32_t)k) {
- uint32_t s2 = lj_ffs(~(uint32_t)k);
- uint32_t k2 = (~(uint32_t)k >> s2);
- if ((k2 & (k2+1)) == 0) {
- asm_fuseandsh(as, PPCI_RLWINM|dot | PPCF_A(dest) |
- PPCF_MB(32-s2) | PPCF_ME(30-lj_fls(~(uint32_t)k)),
- k, lref);
- return;
- }
- }
- }
- if (checku16(k)) {
- left = ra_alloc1(as, lref, RSET_GPR);
- emit_asi(as, PPCI_ANDIDOT, dest, left, k);
- return;
- } else if ((k & 0xffff) == 0) {
- left = ra_alloc1(as, lref, RSET_GPR);
- emit_asi(as, PPCI_ANDISDOT, dest, left, (k >> 16));
- return;
- }
- }
- op2 = ir->op2;
- if (mayfuse(as, op2) && IR(op2)->o == IR_BNOT && ra_noreg(IR(op2)->r)) {
- dot ^= (PPCI_AND ^ PPCI_ANDC);
- op2 = IR(op2)->op1;
- }
- left = ra_hintalloc(as, lref, dest, RSET_GPR);
- right = ra_alloc1(as, op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, PPCI_AND ^ dot, dest, left, right);
-}
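
The constant path above tests whether k is a contiguous run of ones (or the inverse of one), since only such masks can be expressed by a single rlwinm. The test shifts out the trailing zeros and checks that what remains has the form 2^n - 1:

#include <stdint.h>
/* Sketch of the contiguous-mask test: k1 & (k1 + 1) == 0 after dropping
** trailing zeros, mirroring the lj_ffs()/lj_fls() arithmetic above.
*/
static int is_contiguous_mask(uint32_t k)
{
  if (k == 0) return 0;
  while ((k & 1u) == 0) k >>= 1;   /* drop trailing zeros */
  return (k & (k + 1u)) == 0;      /* remaining bits must be a low run of ones */
}
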
-
-static void asm_bitshift(ASMState *as, IRIns *ir, PPCIns pi, PPCIns pik)
-{
- Reg dest, left;
- Reg dot = 0;
- if (as->flagmcp == as->mcp) {
- as->flagmcp = NULL;
- as->mcp++;
- dot = PPCF_DOT;
- }
- dest = ra_dest(as, ir, RSET_GPR);
- left = ra_alloc1(as, ir->op1, RSET_GPR);
- if (irref_isk(ir->op2)) { /* Constant shifts. */
- int32_t shift = (IR(ir->op2)->i & 31);
- if (pik == 0) /* SLWI */
- emit_rot(as, PPCI_RLWINM|dot, dest, left, shift, 0, 31-shift);
- else if (pik == 1) /* SRWI */
- emit_rot(as, PPCI_RLWINM|dot, dest, left, (32-shift)&31, shift, 31);
- else
- emit_asb(as, pik|dot, dest, left, shift);
- } else {
- Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
- emit_asb(as, pi|dot, dest, left, right);
- }
-}
-
-static void asm_min_max(ASMState *as, IRIns *ir, int ismax)
-{
- if (irt_isnum(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg tmp = dest;
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- if (tmp == left || tmp == right)
- tmp = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_FPR,
- dest), left), right));
- emit_facb(as, PPCI_FSEL, dest, tmp,
- ismax ? left : right, ismax ? right : left);
- emit_fab(as, PPCI_FSUB, tmp, left, right);
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg tmp1 = RID_TMP, tmp2 = dest;
- Reg right, left = ra_alloc2(as, ir, RSET_GPR);
- right = (left >> 8); left &= 255;
- if (tmp2 == left || tmp2 == right)
- tmp2 = ra_scratch(as, rset_exclude(rset_exclude(rset_exclude(RSET_GPR,
- dest), left), right));
- emit_tab(as, PPCI_ADD, dest, tmp2, right);
- emit_asb(as, ismax ? PPCI_ANDC : PPCI_AND, tmp2, tmp2, tmp1);
- emit_tab(as, PPCI_SUBFE, tmp1, tmp1, tmp1);
- emit_tab(as, PPCI_SUBFC, tmp2, tmp2, tmp1);
- emit_asi(as, PPCI_XORIS, tmp2, right, 0x8000);
- emit_asi(as, PPCI_XORIS, tmp1, left, 0x8000);
- }
-}
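
The integer path above is a branchless select: bias both operands by 0x80000000 so the signed compare becomes an unsigned subtract, derive an all-ones/zero mask from the carry, and add the masked difference back to one operand. The same select-by-mask idea in portable C (not the exact carry-based sequence):

#include <stdint.h>
/* Sketch only: returns min(a, b); use the opposite mask sense (a >= b) for max. */
static int32_t min_by_mask(int32_t a, int32_t b)
{
  uint32_t diff = (uint32_t)a - (uint32_t)b;      /* a - b with benign wraparound */
  uint32_t mask = (uint32_t)-(uint32_t)(a < b);   /* all ones iff a < b */
  return (int32_t)((uint32_t)b + (diff & mask));  /* a when a < b, else b */
}
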
-
-/* -- Comparisons --------------------------------------------------------- */
-
-#define CC_UNSIGNED 0x08 /* Unsigned integer comparison. */
-#define CC_TWO 0x80 /* Check two flags for FP comparison. */
-
-/* Map of comparisons to flags. ORDER IR. */
-static const uint8_t asm_compmap[IR_ABC+1] = {
- /* op int cc FP cc */
- /* LT */ CC_GE + (CC_GE<<4),
- /* GE */ CC_LT + (CC_LE<<4) + CC_TWO,
- /* LE */ CC_GT + (CC_GE<<4) + CC_TWO,
- /* GT */ CC_LE + (CC_LE<<4),
- /* ULT */ CC_GE + CC_UNSIGNED + (CC_GT<<4) + CC_TWO,
- /* UGE */ CC_LT + CC_UNSIGNED + (CC_LT<<4),
- /* ULE */ CC_GT + CC_UNSIGNED + (CC_GT<<4),
- /* UGT */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO,
- /* EQ */ CC_NE + (CC_NE<<4),
- /* NE */ CC_EQ + (CC_EQ<<4),
- /* ABC */ CC_LE + CC_UNSIGNED + (CC_LT<<4) + CC_TWO /* Same as UGT. */
-};
-
-static void asm_intcomp_(ASMState *as, IRRef lref, IRRef rref, Reg cr, PPCCC cc)
-{
- Reg right, left = ra_alloc1(as, lref, RSET_GPR);
- if (irref_isk(rref)) {
- int32_t k = IR(rref)->i;
- if ((cc & CC_UNSIGNED) == 0) { /* Signed comparison with constant. */
- if (checki16(k)) {
- emit_tai(as, PPCI_CMPWI, cr, left, k);
- /* Signed comparison with zero and referencing previous ins? */
- if (k == 0 && lref == as->curins-1)
- as->flagmcp = as->mcp; /* Allow elimination of the compare. */
- return;
- } else if ((cc & 3) == (CC_EQ & 3)) { /* Use CMPLWI for EQ or NE. */
- if (checku16(k)) {
- emit_tai(as, PPCI_CMPLWI, cr, left, k);
- return;
- } else if (!as->sectref && ra_noreg(IR(rref)->r)) {
- emit_tai(as, PPCI_CMPLWI, cr, RID_TMP, k);
- emit_asi(as, PPCI_XORIS, RID_TMP, left, (k >> 16));
- return;
- }
- }
- } else { /* Unsigned comparison with constant. */
- if (checku16(k)) {
- emit_tai(as, PPCI_CMPLWI, cr, left, k);
- return;
- }
- }
- }
- right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
- emit_tab(as, (cc & CC_UNSIGNED) ? PPCI_CMPLW : PPCI_CMPW, cr, left, right);
-}
-
-static void asm_comp(ASMState *as, IRIns *ir)
-{
- PPCCC cc = asm_compmap[ir->o];
- if (irt_isnum(ir->t)) {
- Reg right, left = ra_alloc2(as, ir, RSET_FPR);
- right = (left >> 8); left &= 255;
- asm_guardcc(as, (cc >> 4));
- if ((cc & CC_TWO))
- emit_tab(as, PPCI_CROR, ((cc>>4)&3), ((cc>>4)&3), (CC_EQ&3));
- emit_fab(as, PPCI_FCMPU, 0, left, right);
- } else {
- IRRef lref = ir->op1, rref = ir->op2;
- if (irref_isk(lref) && !irref_isk(rref)) {
- /* Swap constants to the right (only for ABC). */
- IRRef tmp = lref; lref = rref; rref = tmp;
- if ((cc & 2) == 0) cc ^= 1; /* LT <-> GT, LE <-> GE */
- }
- asm_guardcc(as, cc);
- asm_intcomp_(as, lref, rref, 0, cc);
- }
-}
-
-#if LJ_HASFFI
-/* 64 bit integer comparisons. */
-static void asm_comp64(ASMState *as, IRIns *ir)
-{
- PPCCC cc = asm_compmap[(ir-1)->o];
- if ((cc&3) == (CC_EQ&3)) {
- asm_guardcc(as, cc);
- emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CROR,
- (CC_EQ&3), (CC_EQ&3), 4+(CC_EQ&3));
- } else {
- asm_guardcc(as, CC_EQ);
- emit_tab(as, PPCI_CROR, (CC_EQ&3), (CC_EQ&3), ((cc^~(cc>>2))&1));
- emit_tab(as, (cc&4) ? PPCI_CRAND : PPCI_CRANDC,
- (CC_EQ&3), (CC_EQ&3), 4+(cc&3));
- }
- /* Loword comparison sets cr1 and is unsigned, except for equality. */
- asm_intcomp_(as, (ir-1)->op1, (ir-1)->op2, 4,
- cc | ((cc&3) == (CC_EQ&3) ? 0 : CC_UNSIGNED));
- /* Hiword comparison sets cr0. */
- asm_intcomp_(as, ir->op1, ir->op2, 0, cc);
- as->flagmcp = NULL; /* Doesn't work here. */
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- as->curins--; /* Always skip the CONV. */
- if (usehi || uselo)
- asm_conv64(as, ir);
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- as->curins--; /* Always skip the loword comparison. */
- asm_comp64(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- as->curins--; /* Handle both stores here. */
- if ((ir-1)->r != RID_SINK) {
- asm_xstore(as, ir, 0);
- asm_xstore(as, ir-1, 4);
- }
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD: as->curins--; asm_add64(as, ir); break;
- case IR_SUB: as->curins--; asm_sub64(as, ir); break;
- case IR_NEG: as->curins--; asm_neg64(as, ir); break;
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by lo op itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore RID_RET*. */
- Reg tmp, pbase = irp ? (ra_hasreg(irp->r) ? irp->r : RID_TMP) : RID_BASE;
- rset_clear(allow, pbase);
- tmp = allow ? rset_pickbot(allow) :
- (pbase == RID_RETHI ? RID_RETLO : RID_RETHI);
- emit_condbranch(as, PPCI_BC, CC_LT, asm_exitstub_addr(as, exitno));
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_tai(as, PPCI_LWZ, tmp, RID_SP, SPOFS_TMPW);
- else
- ra_modified(as, tmp);
- emit_ai(as, PPCI_CMPLWI, RID_TMP, (int32_t)(8*topslot));
- emit_tab(as, PPCI_SUBF, RID_TMP, pbase, tmp);
- emit_tai(as, PPCI_LWZ, tmp, tmp, offsetof(lua_State, maxstack));
- if (pbase == RID_TMP)
- emit_getgl(as, RID_TMP, jit_base);
- emit_getgl(as, tmp, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_tai(as, PPCI_STW, tmp, RID_SP, SPOFS_TMPW);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_fai(as, PPCI_STFD, src, RID_BASE, ofs);
- } else {
- Reg type;
- RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t));
- if (!irt_ispri(ir->t)) {
- Reg src = ra_alloc1(as, ref, allow);
- rset_clear(allow, src);
- emit_tai(as, PPCI_STW, src, RID_BASE, ofs+4);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s == 0) continue; /* Do not overwrite link to previous frame. */
- type = ra_allock(as, (int32_t)(*flinks--), allow);
- } else {
- type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
- }
- emit_tai(as, PPCI_STW, type, RID_BASE, ofs);
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_ai(as, PPCI_CMPWI, RID_RET, 0);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- emit_tai(as, PPCI_ADDI, ra_releasetmp(as, ASMREF_TMP1), RID_JGL, -32768);
- tmp = ra_releasetmp(as, ASMREF_TMP2);
- emit_loadi(as, tmp, as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_condbranch(as, PPCI_BC|PPCF_Y, CC_LT, l_end);
- emit_ab(as, PPCI_CMPLW, RID_TMP, tmp);
- emit_getgl(as, tmp, gc.threshold);
- emit_getgl(as, RID_TMP, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the cond branch and patched the final b. */
- p[-2] = (p[-2] & (0xffff0000u & ~PPCF_Y)) | (((target-p+2) & 0x3fffu) << 2);
- } else {
- p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_mr(as, r, RID_BASE);
- }
-}
-
-/* Coalesce BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_mr(as, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- MCode *p = as->mctop;
- MCode *target;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- *--p = PPCI_NOP;
- *--p = PPCI_NOP;
- as->mctop = p;
- } else {
- /* Patch stack adjustment. */
- lua_assert(checki16(CFRAME_SIZE+spadj));
- p[-3] = PPCI_ADDI | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | (CFRAME_SIZE+spadj);
- p[-2] = PPCI_STWU | PPCF_T(RID_TMP) | PPCF_A(RID_SP) | spadj;
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- p[-1] = PPCI_B|(((target-p+1)&0x00ffffffu)<<2);
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop - 1; /* Leave room for exit branch. */
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- as->mcp = p-2; /* Leave room for stack pointer adjustment. */
- as->invmcp = NULL;
- }
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_EQ: case IR_NE:
- if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
- as->curins--;
- asm_href(as, ir-1, (IROp)ir->o);
- break;
- }
- /* fallthrough */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_ABC:
- asm_comp(as, ir);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_bitnot(as, ir); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_bitand(as, ir); break;
- case IR_BOR: asm_bitop(as, ir, PPCI_OR, PPCI_ORI); break;
- case IR_BXOR: asm_bitop(as, ir, PPCI_XOR, PPCI_XORI); break;
-
- case IR_BSHL: asm_bitshift(as, ir, PPCI_SLW, 0); break;
- case IR_BSHR: asm_bitshift(as, ir, PPCI_SRW, 1); break;
- case IR_BSAR: asm_bitshift(as, ir, PPCI_SRAW, PPCI_SRAWI); break;
- case IR_BROL: asm_bitshift(as, ir, PPCI_RLWNM|PPCF_MB(0)|PPCF_ME(31),
- PPCI_RLWINM|PPCF_MB(0)|PPCF_ME(31)); break;
- case IR_BROR: lua_assert(0); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB: asm_sub(as, ir); break;
- case IR_MUL: asm_mul(as, ir); break;
- case IR_DIV: asm_fparith(as, ir, PPCI_FDIV); break;
- case IR_MOD: asm_callid(as, ir, IRCALL_lj_vm_modi); break;
- case IR_POW: asm_callid(as, ir, IRCALL_lj_vm_powi); break;
- case IR_NEG: asm_neg(as, ir); break;
-
- case IR_ABS: asm_fpunary(as, ir, PPCI_FABS); break;
- case IR_ATAN2: asm_callid(as, ir, IRCALL_atan2); break;
- case IR_LDEXP: asm_callid(as, ir, IRCALL_ldexp); break;
- case IR_MIN: asm_min_max(as, ir, 0); break;
- case IR_MAX: asm_min_max(as, ir, 1); break;
- case IR_FPMATH:
- if (ir->op2 == IRFPM_EXP2 && asm_fpjoin_pow(as, ir))
- break;
- if (ir->op2 == IRFPM_SQRT && (as->flags & JIT_F_SQRT))
- asm_fpunary(as, ir, PPCI_FSQRT);
- else
- asm_callid(as, ir, IRCALL_lj_vm_floor + ir->op2);
- break;
-
- /* Overflow-checking arithmetic ops. */
- case IR_ADDOV: asm_arithov(as, ir, PPCI_ADDO); break;
- case IR_SUBOV: asm_arithov(as, ir, PPCI_SUBFO); break;
- case IR_MULOV: asm_arithov(as, ir, PPCI_MULLWO); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir, 0); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: asm_fload(as, ir); break;
- case IR_XLOAD: asm_xload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: asm_fstore(as, ir); break;
- case IR_XSTORE: asm_xstore(as, ir, 0); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- uint32_t i, nargs = (int)CCI_NARGS(ci);
- int nslots = 2, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- asm_collectargs(as, ir, ci, args);
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--; else nslots = (nslots+3) & ~1;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-}
-
-static void asm_setup_target(ASMState *as)
-{
- asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *pe = (MCode *)((char *)p + T->szmcode);
- MCode *px = exitstub_trace_addr(T, exitno);
- MCode *cstart = NULL;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- int clearso = 0;
- for (; p < pe; p++) {
- /* Look for exitstub branch, try to replace with branch to target. */
- uint32_t ins = *p;
- if ((ins & 0xfc000000u) == 0x40000000u &&
- ((ins ^ ((char *)px-(char *)p)) & 0xffffu) == 0) {
- ptrdiff_t delta = (char *)target - (char *)p;
- if (((ins >> 16) & 3) == (CC_SO&3)) {
- clearso = sizeof(MCode);
- delta -= sizeof(MCode);
- }
- /* Many, but not all short-range branches can be patched directly. */
- if (((delta + 0x8000) >> 16) == 0) {
- *p = (ins & 0xffdf0000u) | ((uint32_t)delta & 0xffffu) |
- ((delta & 0x8000) * (PPCF_Y/0x8000));
- if (!cstart) cstart = p;
- }
- } else if ((ins & 0xfc000000u) == PPCI_B &&
- ((ins ^ ((char *)px-(char *)p)) & 0x03ffffffu) == 0) {
- ptrdiff_t delta = (char *)target - (char *)p;
- lua_assert(((delta + 0x02000000) >> 26) == 0);
- *p = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
- if (!cstart) cstart = p;
- }
- }
- { /* Always patch long-range branch in exit stub itself. */
- ptrdiff_t delta = (char *)target - (char *)px - clearso;
- lua_assert(((delta + 0x02000000) >> 26) == 0);
- *px = PPCI_B | ((uint32_t)delta & 0x03ffffffu);
- }
- if (!cstart) cstart = px;
- lj_mcode_sync(cstart, px+1);
- if (clearso) { /* Extend the current trace. Ugly workaround. */
- MCode *pp = J->cur.mcode;
- J->cur.szmcode += sizeof(MCode);
- *--pp = PPCI_MCRXR; /* Clear SO flag. */
- J->cur.mcode = pp;
- lj_mcode_sync(pp, pp+1);
- }
- lj_mcode_patch(J, mcarea, 1);
-}
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_x86.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_x86.h
deleted file mode 100644
index 10468bb..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_asm_x86.h
+++ /dev/null
@@ -1,2900 +0,0 @@
-/*
-** x86/x64 IR assembler (SSA IR -> machine code).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Guard handling ------------------------------------------------------ */
-
-/* Generate an exit stub group at the bottom of the reserved MCode memory. */
-static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
-{
- ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
- MCode *mxp = as->mcbot;
- MCode *mxpstart = mxp;
- if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
- asm_mclimit(as);
- /* Push low byte of exitno for each exit stub. */
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
- for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
- *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
- }
- /* Push the high byte of the exitno for each exit stub group. */
- *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
- /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
- *mxp++ = XI_MOVmi;
- *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
- *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- *mxp++ = 2*sizeof(void *);
- *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
- /* Jump to exit handler which fills in the ExitState. */
- *mxp++ = XI_JMP; mxp += 4;
- *((int32_t *)(mxp-4)) = jmprel(mxp, (MCode *)(void *)lj_vm_exit_handler);
- /* Commit the code for this group (even if assembly fails later on). */
- lj_mcode_commitbot(as->J, mxp);
- as->mcbot = mxp;
- as->mclim = as->mcbot + MCLIM_REDZONE;
- return mxpstart;
-}
-
-/* Setup all needed exit stubs. */
-static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
-{
- ExitNo i;
- if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
- lj_trace_err(as->J, LJ_TRERR_SNAPOV);
- for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
- if (as->J->exitstubgroup[i] == NULL)
- as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
-}
-
-/* Emit conditional branch to exit for guard.
-** It's important to emit this *after* all registers have been allocated,
-** because rematerializations may invalidate the flags.
-*/
-static void asm_guardcc(ASMState *as, int cc)
-{
- MCode *target = exitstub_addr(as->J, as->snapno);
- MCode *p = as->mcp;
- if (LJ_UNLIKELY(p == as->invmcp)) {
- as->loopinv = 1;
- *(int32_t *)(p+1) = jmprel(p+5, target);
- target = p;
- cc ^= 1;
- if (as->realign) {
- emit_sjcc(as, cc, target);
- return;
- }
- }
- emit_jcc(as, cc, target);
-}
-
-/* -- Memory operand fusion ----------------------------------------------- */
-
-/* Limit linear search to this distance. Avoids O(n^2) behavior. */
-#define CONFLICT_SEARCH_LIM 31
-
-/* Check if a reference is a signed 32 bit constant. */
-static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
-{
- if (irref_isk(ref)) {
- IRIns *ir = IR(ref);
- if (ir->o != IR_KINT64) {
- *k = ir->i;
- return 1;
- } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
- *k = (int32_t)ir_kint64(ir)->u64;
- return 1;
- }
- }
- return 0;
-}
-
-/* Check if there's no conflicting instruction between curins and ref.
-** Also avoid fusing loads if there are multiple references.
-*/
-static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
-{
- IRIns *ir = as->ir;
- IRRef i = as->curins;
- if (i > ref + CONFLICT_SEARCH_LIM)
- return 0; /* Give up, ref is too far away. */
- while (--i > ref) {
- if (ir[i].o == conflict)
- return 0; /* Conflict found. */
- else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
- return 0;
- }
- return 1; /* Ok, no conflict. */
-}
-
-/* Fuse array base into memory operand. */
-static IRRef asm_fuseabase(ASMState *as, IRRef ref)
-{
- IRIns *irb = IR(ref);
- as->mrm.ofs = 0;
- if (irb->o == IR_FLOAD) {
- IRIns *ira = IR(irb->op1);
- lua_assert(irb->op2 == IRFL_TAB_ARRAY);
- /* We can avoid the FLOAD of t->array for colocated arrays. */
- if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
- !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
- as->mrm.ofs = (int32_t)sizeof(GCtab); /* Ofs to colocated array. */
- return irb->op1; /* Table obj. */
- }
- } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
- /* Fuse base offset (vararg load). */
- as->mrm.ofs = IR(irb->op2)->i;
- return irb->op1;
- }
- return ref; /* Otherwise use the given array base. */
-}
-
-/* Fuse array reference into memory operand. */
-static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irx;
- lua_assert(ir->o == IR_AREF);
- as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
- irx = IR(ir->op2);
- if (irref_isk(ir->op2)) {
- as->mrm.ofs += 8*irx->i;
- as->mrm.idx = RID_NONE;
- } else {
- rset_clear(allow, as->mrm.base);
- as->mrm.scale = XM_SCALE8;
- /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
- ** Doesn't help much without ABCelim, but reduces register pressure.
- */
- if (!LJ_64 && /* Has bad effects with negative index on x64. */
- mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
- irx->o == IR_ADD && irref_isk(irx->op2)) {
- as->mrm.ofs += 8*IR(irx->op2)->i;
- as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
- } else {
- as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
- }
- }
-}
-
-/* Fuse array/hash/upvalue reference into memory operand.
-** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
-** pass the final allow mask, excluding any GPRs used for other inputs.
-** In particular: 2-operand GPR instructions need to call ra_dest() first!
-*/
-static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_noreg(ir->r)) {
- switch ((IROp)ir->o) {
- case IR_AREF:
- if (mayfuse(as, ref)) {
- asm_fusearef(as, ir, allow);
- return;
- }
- break;
- case IR_HREFK:
- if (mayfuse(as, ref)) {
- as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
- as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
- as->mrm.idx = RID_NONE;
- return;
- }
- break;
- case IR_UREFC:
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
- as->mrm.ofs = ptr2addr(&uv->tv);
- as->mrm.base = as->mrm.idx = RID_NONE;
- return;
- }
- break;
- default:
- lua_assert(ir->o == IR_HREF || ir->o == IR_NEWREF || ir->o == IR_UREFO ||
- ir->o == IR_KKPTR);
- break;
- }
- }
- as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
- as->mrm.ofs = 0;
- as->mrm.idx = RID_NONE;
-}
-
-/* Fuse FLOAD/FREF reference into memory operand. */
-static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
-{
- lua_assert(ir->o == IR_FLOAD || ir->o == IR_FREF);
- as->mrm.ofs = field_ofs[ir->op2];
- as->mrm.idx = RID_NONE;
- if (irref_isk(ir->op1)) {
- as->mrm.ofs += IR(ir->op1)->i;
- as->mrm.base = RID_NONE;
- } else {
- as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
- }
-}
-
-/* Fuse string reference into memory operand. */
-static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
-{
- IRIns *irr;
- lua_assert(ir->o == IR_STRREF);
- as->mrm.base = as->mrm.idx = RID_NONE;
- as->mrm.scale = XM_SCALE1;
- as->mrm.ofs = sizeof(GCstr);
- if (irref_isk(ir->op1)) {
- as->mrm.ofs += IR(ir->op1)->i;
- } else {
- Reg r = ra_alloc1(as, ir->op1, allow);
- rset_clear(allow, r);
- as->mrm.base = (uint8_t)r;
- }
- irr = IR(ir->op2);
- if (irref_isk(ir->op2)) {
- as->mrm.ofs += irr->i;
- } else {
- Reg r;
- /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
- if (!LJ_64 && /* Has bad effects with negative index on x64. */
- mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
- as->mrm.ofs += IR(irr->op2)->i;
- r = ra_alloc1(as, irr->op1, allow);
- } else {
- r = ra_alloc1(as, ir->op2, allow);
- }
- if (as->mrm.base == RID_NONE)
- as->mrm.base = (uint8_t)r;
- else
- as->mrm.idx = (uint8_t)r;
- }
-}
-
-static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- as->mrm.idx = RID_NONE;
- if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
- as->mrm.ofs = ir->i;
- as->mrm.base = RID_NONE;
- } else if (ir->o == IR_STRREF) {
- asm_fusestrref(as, ir, allow);
- } else {
- as->mrm.ofs = 0;
- if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
- /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
- IRIns *irx;
- IRRef idx;
- Reg r;
- if (asm_isk32(as, ir->op2, &as->mrm.ofs)) { /* Recognize x+ofs. */
- ref = ir->op1;
- ir = IR(ref);
- if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
- goto noadd;
- }
- as->mrm.scale = XM_SCALE1;
- idx = ir->op1;
- ref = ir->op2;
- irx = IR(idx);
- if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) { /* Try other operand. */
- idx = ir->op2;
- ref = ir->op1;
- irx = IR(idx);
- }
- if (canfuse(as, irx) && ra_noreg(irx->r)) {
- if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
- /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
- idx = irx->op1;
- as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
- } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
- /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
- idx = irx->op1;
- as->mrm.scale = XM_SCALE2;
- }
- }
- r = ra_alloc1(as, idx, allow);
- rset_clear(allow, r);
- as->mrm.idx = (uint8_t)r;
- }
- noadd:
- as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
- }
-}
-
-/* Fuse load into memory operand. */
-static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
-{
- IRIns *ir = IR(ref);
- if (ra_hasreg(ir->r)) {
- if (allow != RSET_EMPTY) { /* Fast path. */
- ra_noweak(as, ir->r);
- return ir->r;
- }
- fusespill:
- /* Force a spill if only memory operands are allowed (asm_x87load). */
- as->mrm.base = RID_ESP;
- as->mrm.ofs = ra_spill(as, ir);
- as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- if (ir->o == IR_KNUM) {
- RegSet avail = as->freeset & ~as->modset & RSET_FPR;
- lua_assert(allow != RSET_EMPTY);
- if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
- as->mrm.ofs = ptr2addr(ir_knum(ir));
- as->mrm.base = as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (ref == REF_BASE || ir->o == IR_KINT64) {
- RegSet avail = as->freeset & ~as->modset & RSET_GPR;
- lua_assert(allow != RSET_EMPTY);
- if (!(avail & (avail-1))) { /* Fuse if less than two regs available. */
- as->mrm.ofs = ptr2addr(ref == REF_BASE ? (void *)&J2G(as->J)->jit_base : (void *)ir_kint64(ir));
- as->mrm.base = as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (mayfuse(as, ref)) {
- RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
- if (ir->o == IR_SLOAD) {
- if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
- noconflict(as, ref, IR_RETF, 0)) {
- as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
- as->mrm.ofs = 8*((int32_t)ir->op1-1) + ((ir->op2&IRSLOAD_FRAME)?4:0);
- as->mrm.idx = RID_NONE;
- return RID_MRM;
- }
- } else if (ir->o == IR_FLOAD) {
- /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
- if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
- noconflict(as, ref, IR_FSTORE, 0)) {
- asm_fusefref(as, ir, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
- if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0)) {
- asm_fuseahuref(as, ir->op1, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_XLOAD) {
- /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
- ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
- */
- if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
- noconflict(as, ref, IR_XSTORE, 0)) {
- asm_fusexref(as, ir->op1, xallow);
- return RID_MRM;
- }
- } else if (ir->o == IR_VLOAD) {
- asm_fuseahuref(as, ir->op1, xallow);
- return RID_MRM;
- }
- }
- if (!(as->freeset & allow) && !emit_canremat(ref) &&
- (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
- goto fusespill;
- return ra_allocref(as, ref, allow);
-}
-
-#if LJ_64
-/* Don't fuse a 32 bit load into a 64 bit operation. */
-static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
-{
- if (is64 && !irt_is64(IR(ref)->t))
- return ra_alloc1(as, ref, allow);
- return asm_fuseload(as, ref, allow);
-}
-#else
-#define asm_fuseloadm(as, ref, allow, is64) asm_fuseload(as, (ref), (allow))
-#endif
-
-/* -- Calls --------------------------------------------------------------- */
-
-/* Count the required number of stack slots for a call. */
-static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t i, nargs = CCI_NARGS(ci);
- int nslots = 0;
-#if LJ_64
- if (LJ_ABI_WIN) {
- nslots = (int)(nargs*2); /* Only matters for more than four args. */
- } else {
- int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- if (nfpr > 0) nfpr--; else nslots += 2;
- } else {
- if (ngpr > 0) ngpr--; else nslots += 2;
- }
- }
-#else
- int ngpr = 0;
- if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
- ngpr = 2;
- else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
- ngpr = 1;
- for (i = 0; i < nargs; i++)
- if (args[i] && irt_isfp(IR(args[i])->t)) {
- nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
- } else {
- if (ngpr > 0) ngpr--; else nslots++;
- }
-#endif
- return nslots;
-}
-
-/* Generate a call to a C function. */
-static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
-{
- uint32_t n, nargs = CCI_NARGS(ci);
- int32_t ofs = STACKARG_OFS;
-#if LJ_64
- uint32_t gprs = REGARG_GPRS;
- Reg fpr = REGARG_FIRSTFPR;
-#if !LJ_ABI_WIN
- MCode *patchnfpr = NULL;
-#endif
-#else
- uint32_t gprs = 0;
- if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
- if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
- gprs = (REGARG_GPRS & 31);
- else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
- gprs = REGARG_GPRS;
- }
-#endif
- if ((void *)ci->func)
- emit_call(as, ci->func);
-#if LJ_64
- if ((ci->flags & CCI_VARARG)) { /* Special handling for vararg calls. */
-#if LJ_ABI_WIN
- for (n = 0; n < 4 && n < nargs; n++) {
- IRIns *ir = IR(args[n]);
- if (irt_isfp(ir->t)) /* Duplicate FPRs in GPRs. */
- emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
- ((gprs >> (n*5)) & 31)); /* Either MOVD or MOVQ. */
- }
-#else
- patchnfpr = --as->mcp; /* Indicate number of used FPRs in register al. */
- *--as->mcp = XI_MOVrib | RID_EAX;
-#endif
- }
-#endif
- for (n = 0; n < nargs; n++) { /* Setup args. */
- IRRef ref = args[n];
- IRIns *ir = IR(ref);
- Reg r;
-#if LJ_64 && LJ_ABI_WIN
- /* Windows/x64 argument registers are strictly positional. */
- r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
- fpr++; gprs >>= 5;
-#elif LJ_64
- /* POSIX/x64 argument registers are used in order of appearance. */
- if (irt_isfp(ir->t)) {
- r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
- } else {
- r = gprs & 31; gprs >>= 5;
- }
-#else
- if (ref && irt_isfp(ir->t)) {
- r = 0;
- } else {
- r = gprs & 31; gprs >>= 5;
- if (!ref) continue;
- }
-#endif
- if (r) { /* Argument is in a register. */
- if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
-#if LJ_64
- if (ir->o == IR_KINT64)
- emit_loadu64(as, r, ir_kint64(ir)->u64);
- else
-#endif
- emit_loadi(as, r, ir->i);
- } else {
- lua_assert(rset_test(as->freeset, r)); /* Must have been evicted. */
- if (ra_hasreg(ir->r)) {
- ra_noweak(as, ir->r);
- emit_movrr(as, ir, r, ir->r);
- } else {
- ra_allocref(as, ref, RID2RSET(r));
- }
- }
- } else if (irt_isfp(ir->t)) { /* FP argument is on stack. */
- lua_assert(!(irt_isfloat(ir->t) && irref_isk(ref))); /* No float k. */
- if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
- /* Split stores for unaligned FP consts. */
- emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
- emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
- } else {
- r = ra_alloc1(as, ref, RSET_FPR);
- emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
- r, RID_ESP, ofs);
- }
- ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
- } else { /* Non-FP argument is on stack. */
- if (LJ_32 && ref < ASMREF_TMP1) {
- emit_movmroi(as, RID_ESP, ofs, ir->i);
- } else {
- r = ra_alloc1(as, ref, RSET_GPR);
- emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
- }
- ofs += sizeof(intptr_t);
- }
- checkmclim(as);
- }
-#if LJ_64 && !LJ_ABI_WIN
- if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
-#endif
-}
-
-/* Setup result reg/sp for call. Evict scratch regs. */
-static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- RegSet drop = RSET_SCRATCH;
- int hiop = (LJ_32 && (ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
- if ((ci->flags & CCI_NOFPRCLOBBER))
- drop &= ~RSET_FPR;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- if (hiop && ra_hasreg((ir+1)->r))
- rset_clear(drop, (ir+1)->r); /* Dest reg handled below. */
- ra_evictset(as, drop); /* Evictions must be performed first. */
- if (ra_used(ir)) {
- if (irt_isfp(ir->t)) {
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
-#if LJ_64
- if ((ci->flags & CCI_CASTU64)) {
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rr(as, XO_MOVD, dest|REX_64, RID_RET); /* Really MOVQ. */
- }
- if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
- } else {
- ra_destreg(as, ir, RID_FPRET);
- }
-#else
- /* Number result is in x87 st0 for x86 calling convention. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
- dest, RID_ESP, ofs);
- }
- if ((ci->flags & CCI_CASTU64)) {
- emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
- emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
- } else {
- emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
- irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
- }
-#endif
-#if LJ_32
- } else if (hiop) {
- ra_destpair(as, ir);
-#endif
- } else {
- lua_assert(!irt_ispri(ir->t));
- ra_destreg(as, ir, RID_RET);
- }
- } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
- emit_x87op(as, XI_FPOP); /* Pop unused result from x87 st0. */
- }
-}
-
-static void asm_call(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX];
- const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
- asm_collectargs(as, ir, ci, args);
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-/* Return a constant function pointer or NULL for indirect calls. */
-static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
-{
-#if LJ_32
- UNUSED(as);
- if (irref_isk(func))
- return (void *)irf->i;
-#else
- if (irref_isk(func)) {
- MCode *p;
- if (irf->o == IR_KINT64)
- p = (MCode *)(void *)ir_k64(irf)->u64;
- else
- p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
- if (p - as->mcp == (int32_t)(p - as->mcp))
- return p; /* Call target is still in +-2GB range. */
- /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
- }
-#endif
- return NULL;
-}
-
-static void asm_callx(ASMState *as, IRIns *ir)
-{
- IRRef args[CCI_NARGS_MAX*2];
- CCallInfo ci;
- IRRef func;
- IRIns *irf;
- int32_t spadj = 0;
- ci.flags = asm_callx_flags(as, ir);
- asm_collectargs(as, ir, &ci, args);
- asm_setupresult(as, ir, &ci);
-#if LJ_32
- /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
- if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
- spadj = 4 * asm_count_call_slots(as, &ci, args);
-#endif
- func = ir->op2; irf = IR(func);
- if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
- ci.func = (ASMFunction)asm_callx_func(as, irf, func);
- if (!(void *)ci.func) {
- /* Use a (hoistable) non-scratch register for indirect calls. */
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
- Reg r = ra_alloc1(as, func, allow);
- if (LJ_32) emit_spsub(as, spadj); /* Above code may cause restores! */
- emit_rr(as, XO_GROUP5, XOg_CALL, r);
- } else if (LJ_32) {
- emit_spsub(as, spadj);
- }
- asm_gencall(as, &ci, args);
-}
-
-/* -- Returns ------------------------------------------------------------- */
-
-/* Return to lower frame. Guard that it goes to the right spot. */
-static void asm_retf(ASMState *as, IRIns *ir)
-{
- Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
- void *pc = ir_kptr(IR(ir->op2));
- int32_t delta = 1+bc_a(*((const BCIns *)pc - 1));
- as->topslot -= (BCReg)delta;
- if ((int32_t)as->topslot < 0) as->topslot = 0;
- irt_setmark(IR(REF_BASE)->t); /* Children must not coalesce with BASE reg. */
- emit_setgl(as, base, jit_base);
- emit_addptr(as, base, -8*delta);
- asm_guardcc(as, CC_NE);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
-}
-
-/* -- Type conversions ---------------------------------------------------- */
-
-static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
-{
- Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_guardcc(as, CC_P);
- asm_guardcc(as, CC_NE);
- emit_rr(as, XO_UCOMISD, left, tmp);
- emit_rr(as, XO_CVTSI2SD, tmp, dest);
- if (!(as->flags & JIT_F_SPLIT_XMM))
- emit_rr(as, XO_XORPS, tmp, tmp); /* Avoid partial register stall. */
- emit_rr(as, XO_CVTTSD2SI, dest, left);
- /* Can't fuse since left is needed twice. */
-}
-
-static void asm_tobit(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- Reg tmp = ra_noreg(IR(ir->op1)->r) ?
- ra_alloc1(as, ir->op1, RSET_FPR) :
- ra_scratch(as, RSET_FPR);
- Reg right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
- emit_rr(as, XO_MOVDto, tmp, dest);
- emit_mrm(as, XO_ADDSD, tmp, right);
- ra_left(as, tmp, ir->op1);
-}
-
-static void asm_conv(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
- int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
- int stfp = (st == IRT_NUM || st == IRT_FLOAT);
- IRRef lref = ir->op1;
- lua_assert(irt_type(ir->t) != st);
- lua_assert(!(LJ_32 && (irt_isint64(ir->t) || st64))); /* Handled by SPLIT. */
- if (irt_isfp(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- if (stfp) { /* FP to FP conversion. */
- Reg left = asm_fuseload(as, lref, RSET_FPR);
- emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
- if (left == dest) return; /* Avoid the XO_XORPS. */
- } else if (LJ_32 && st == IRT_U32) { /* U32 to FP conversion on x86. */
- /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
- cTValue *k = lj_ir_k64_find(as->J, U64x(43380000,00000000));
- Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
- if (irt_isfloat(ir->t))
- emit_rr(as, XO_CVTSD2SS, dest, dest);
- emit_rr(as, XO_SUBSD, dest, bias); /* Subtract 2^52+2^51 bias. */
- emit_rr(as, XO_XORPS, dest, bias); /* Merge bias and integer. */
- emit_loadn(as, bias, k);
- emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
- return;
- } else { /* Integer to FP conversion. */
- Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
- ra_alloc1(as, lref, RSET_GPR) :
- asm_fuseloadm(as, lref, RSET_GPR, st64);
- if (LJ_64 && st == IRT_U64) {
- MCLabel l_end = emit_label(as);
- const void *k = lj_ir_k64_find(as->J, U64x(43f00000,00000000));
- emit_rma(as, XO_ADDSD, dest, k); /* Add 2^64 to compensate. */
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, left|REX_64, left); /* Check if u64 >= 2^63. */
- }
- emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
- dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
- }
- if (!(as->flags & JIT_F_SPLIT_XMM))
- emit_rr(as, XO_XORPS, dest, dest); /* Avoid partial register stall. */
- } else if (stfp) { /* FP to integer conversion. */
- if (irt_isguard(ir->t)) {
- /* Checked conversions are only supported from number to int. */
- lua_assert(irt_isint(ir->t) && st == IRT_NUM);
- asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- x86Op op = st == IRT_NUM ?
- ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSD2SI : XO_CVTSD2SI) :
- ((ir->op2 & IRCONV_TRUNC) ? XO_CVTTSS2SI : XO_CVTSS2SI);
- if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
- /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
- /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
- Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
- ra_scratch(as, RSET_FPR);
- MCLabel l_end = emit_label(as);
- if (LJ_32)
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
- emit_rr(as, op, dest|REX_64, tmp);
- if (st == IRT_NUM)
- emit_rma(as, XO_ADDSD, tmp, lj_ir_k64_find(as->J,
- LJ_64 ? U64x(c3f00000,00000000) : U64x(c1e00000,00000000)));
- else
- emit_rma(as, XO_ADDSS, tmp, lj_ir_k64_find(as->J,
- LJ_64 ? U64x(00000000,df800000) : U64x(00000000,cf000000)));
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, dest|REX_64, dest); /* Check if dest negative. */
- emit_rr(as, op, dest|REX_64, tmp);
- ra_left(as, tmp, lref);
- } else {
- Reg left = asm_fuseload(as, lref, RSET_FPR);
- if (LJ_64 && irt_isu32(ir->t))
- emit_rr(as, XO_MOV, dest, dest); /* Zero hiword. */
- emit_mrm(as, op,
- dest|((LJ_64 &&
- (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
- left);
- }
- }
- } else if (st >= IRT_I8 && st <= IRT_U16) { /* Extend to 32 bit integer. */
- Reg left, dest = ra_dest(as, ir, RSET_GPR);
- RegSet allow = RSET_GPR;
- x86Op op;
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t));
- if (st == IRT_I8) {
- op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
- } else if (st == IRT_U8) {
- op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
- } else if (st == IRT_I16) {
- op = XO_MOVSXw;
- } else {
- op = XO_MOVZXw;
- }
- left = asm_fuseload(as, lref, allow);
- /* Add extra MOV if source is already in wrong register. */
- if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
- Reg tmp = ra_scratch(as, allow);
- emit_rr(as, op, dest, tmp);
- emit_rr(as, XO_MOV, tmp, left);
- } else {
- emit_mrm(as, op, dest, left);
- }
- } else { /* 32/64 bit integer conversions. */
- if (LJ_32) { /* Only need to handle 32/32 bit no-op (cast) on x86. */
- Reg dest = ra_dest(as, ir, RSET_GPR);
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else if (irt_is64(ir->t)) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st64 || !(ir->op2 & IRCONV_SEXT)) {
- /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- } else { /* 32 to 64 bit sign extension. */
- Reg left = asm_fuseload(as, lref, RSET_GPR);
- emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
- }
- } else {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (st64) {
- Reg left = asm_fuseload(as, lref, RSET_GPR);
- /* This is either a 32 bit reg/reg mov which zeroes the hiword
- ** or a load of the loword from a 64 bit address.
- */
- emit_mrm(as, XO_MOV, dest, left);
- } else { /* 32/32 bit no-op (cast). */
- ra_left(as, dest, lref); /* Do nothing, but may need to move regs. */
- }
- }
- }
-}
-
-#if LJ_32 && LJ_HASFFI
-/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */
-
-/* 64 bit integer to FP conversion in 32 bit mode. */
-static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
-{
- Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS,
- dest, RID_ESP, ofs);
- }
- emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
- irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
- if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
- /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
- MCLabel l_end = emit_label(as);
- emit_rma(as, XO_FADDq, XOg_FADDq,
- lj_ir_k64_find(as->J, U64x(43f00000,00000000)));
- emit_sjcc(as, CC_NS, l_end);
- emit_rr(as, XO_TEST, hi, hi); /* Check if u64 >= 2^63. */
- } else {
- lua_assert(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64);
- }
- emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
- /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
- emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
- emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
-}
-
-/* FP to 64 bit integer conversion in 32 bit mode. */
-static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
-{
- IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
- IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
- Reg lo, hi;
- lua_assert(st == IRT_NUM || st == IRT_FLOAT);
- lua_assert(dt == IRT_I64 || dt == IRT_U64);
- lua_assert(((ir-1)->op2 & IRCONV_TRUNC));
- hi = ra_dest(as, ir, RSET_GPR);
- lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
- if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
- /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
- if (!(as->flags & JIT_F_SSE3)) { /* Set FPU rounding mode to default. */
- emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
- emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
- emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
- }
- if (dt == IRT_U64) {
- /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
- MCLabel l_pop, l_end = emit_label(as);
- emit_x87op(as, XI_FPOP);
- l_pop = emit_label(as);
- emit_sjmp(as, l_end);
- emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
- if ((as->flags & JIT_F_SSE3))
- emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
- else
- emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
- emit_rma(as, XO_FADDq, XOg_FADDq,
- lj_ir_k64_find(as->J, U64x(c3f00000,00000000)));
- emit_sjcc(as, CC_NS, l_pop);
- emit_rr(as, XO_TEST, hi, hi); /* Check if out-of-range (2^63). */
- }
- emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
- if ((as->flags & JIT_F_SSE3)) { /* Truncation is easy with SSE3. */
- emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
- } else { /* Otherwise set FPU rounding mode to truncate before the store. */
- emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
- emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
- emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
- emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
- emit_loadi(as, lo, 0xc00);
- emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
- }
- if (dt == IRT_U64)
- emit_x87op(as, XI_FDUP);
- emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
- st == IRT_NUM ? XOg_FLDq: XOg_FLDd,
- asm_fuseload(as, ir->op1, RSET_EMPTY));
-}
-#endif
-
-static void asm_strto(ASMState *as, IRIns *ir)
-{
- /* Force a spill slot for the destination register (if any). */
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
- IRRef args[2];
- RegSet drop = RSET_SCRATCH;
- if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
- rset_set(drop, ir->r); /* WIN64 doesn't spill all FPRs. */
- ra_evictset(as, drop);
- asm_guardcc(as, CC_E);
- emit_rr(as, XO_TEST, RID_RET, RID_RET); /* Test return status. */
- args[0] = ir->op1; /* GCstr *str */
- args[1] = ASMREF_TMP1; /* TValue *n */
- asm_gencall(as, ci, args);
- /* Store the result to the spill slot or temp slots. */
- emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
- RID_ESP, sps_scale(ir->s));
-}
-
-static void asm_tostr(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRRef args[2];
- args[0] = ASMREF_L;
- as->gcsteps++;
- if (irt_isnum(irl->t)) {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromnum];
- args[1] = ASMREF_TMP1; /* const lua_Number * */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
- RID_ESP, ra_spill(as, irl));
- } else {
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_fromint];
- args[1] = ir->op1; /* int32_t k */
- asm_setupresult(as, ir, ci); /* GCstr * */
- asm_gencall(as, ci, args);
- }
-}
-
-/* -- Memory references --------------------------------------------------- */
-
-static void asm_aref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusearef(as, ir, RSET_GPR);
- if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
- emit_mrm(as, XO_LEA, dest, RID_MRM);
- else if (as->mrm.base != dest)
- emit_rr(as, XO_MOV, dest, as->mrm.base);
-}
-
-/* Merge NE(HREF, niltv) check. */
-static MCode *merge_href_niltv(ASMState *as, IRIns *ir)
-{
- /* Assumes nothing else generates NE of HREF. */
- if ((ir[1].o == IR_NE || ir[1].o == IR_EQ) && ir[1].op1 == as->curins &&
- ra_hasreg(ir->r)) {
- MCode *p = as->mcp;
- p += (LJ_64 && *p != XI_ARITHi) ? 7+6 : 6+6;
- /* Ensure no loop branch inversion happened. */
- if (p[-6] == 0x0f && p[-5] == XI_JCCn+(CC_NE^(ir[1].o & 1))) {
- as->mcp = p; /* Kill cmp reg, imm32 + jz exit. */
- return p + *(int32_t *)(p-4); /* Return exit address. */
- }
- }
- return NULL;
-}
-
-/* Inlined hash lookup. Specialized for key type and for const keys.
-** The equivalent C code is:
-**   Node *n = hashkey(t, key);
-**   do {
-**     if (lj_obj_equal(&n->key, key)) return &n->val;
-**   } while ((n = nextnode(n)));
-**   return niltv(L);
-*/
-static void asm_href(ASMState *as, IRIns *ir)
-{
- MCode *nilexit = merge_href_niltv(as, ir); /* Do this before any restores. */
- RegSet allow = RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
- Reg key = RID_NONE, tmp = RID_NONE;
- IRIns *irkey = IR(ir->op2);
- int isk = irref_isk(ir->op2);
- IRType1 kt = irkey->t;
- uint32_t khash;
- MCLabel l_end, l_loop, l_next;
-
- if (!isk) {
- rset_clear(allow, tab);
- key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
- if (!irt_isstr(kt))
- tmp = ra_scratch(as, rset_exclude(allow, key));
- }
-
- /* Key not found in chain: jump to exit (if merged with NE) or load niltv. */
- l_end = emit_label(as);
- if (nilexit && ir[1].o == IR_NE) {
- emit_jcc(as, CC_E, nilexit); /* XI_JMP is not found by lj_asm_patchexit. */
- nilexit = NULL;
- } else {
- emit_loada(as, dest, niltvg(J2G(as->J)));
- }
-
- /* Follow hash chain until the end. */
- l_loop = emit_sjcc_label(as, CC_NZ);
- emit_rr(as, XO_TEST, dest, dest);
- emit_rmro(as, XO_MOV, dest, dest, offsetof(Node, next));
- l_next = emit_label(as);
-
- /* Type and value comparison. */
- if (nilexit)
- emit_jcc(as, CC_E, nilexit);
- else
- emit_sjcc(as, CC_E, l_end);
- if (irt_isnum(kt)) {
- if (isk) {
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
- (int32_t)ir_knum(irkey)->u32.lo);
- emit_sjcc(as, CC_NE, l_next);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
- (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- emit_sjcc(as, CC_P, l_next);
- emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
- emit_sjcc(as, CC_AE, l_next);
- /* The type check avoids NaN penalties and complaints from Valgrind. */
-#if LJ_64
- emit_u32(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
-#else
- emit_i8(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
-#endif
- }
-#if LJ_64
- } else if (irt_islightud(kt)) {
- emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
-#endif
- } else {
- if (!irt_ispri(kt)) {
- lua_assert(irt_isaddr(kt));
- if (isk)
- emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
- ptr2addr(ir_kgc(irkey)));
- else
- emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
- emit_sjcc(as, CC_NE, l_next);
- }
- lua_assert(!irt_isnil(kt));
- emit_i8(as, irt_toitype(kt));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
- }
- emit_sfixup(as, l_loop);
- checkmclim(as);
-
- /* Load main position relative to tab->node into dest. */
- khash = isk ? ir_khash(irkey) : 1;
- if (khash == 0) {
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, node));
- } else {
- emit_rmro(as, XO_ARITH(XOg_ADD), dest, tab, offsetof(GCtab, node));
- if ((as->flags & JIT_F_PREFER_IMUL)) {
- emit_i8(as, sizeof(Node));
- emit_rr(as, XO_IMULi8, dest, dest);
- } else {
- emit_shifti(as, XOg_SHL, dest, 3);
- emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
- }
- if (isk) {
- emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
- } else if (irt_isstr(kt)) {
- emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, hash));
- emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
- } else { /* Must match with hashrot() in lj_tab.c. */
- emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
- emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
- emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
- emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
- emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
- emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
- emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
- emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
- if (irt_isnum(kt)) {
- emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
-#if LJ_64
- emit_shifti(as, XOg_SHR|REX_64, dest, 32);
- emit_rr(as, XO_MOV, tmp, dest);
- emit_rr(as, XO_MOVDto, key|REX_64, dest);
-#else
- emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
- emit_rr(as, XO_MOVDto, key, tmp);
-#endif
- } else {
- emit_rr(as, XO_MOV, tmp, key);
- emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
- }
- }
- }
-}
-
-static void asm_hrefk(ASMState *as, IRIns *ir)
-{
- IRIns *kslot = IR(ir->op2);
- IRIns *irkey = IR(kslot->op1);
- int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
- Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
- Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
-#if !LJ_64
- MCLabel l_exit;
-#endif
- lua_assert(ofs % sizeof(Node) == 0);
- if (ra_hasreg(dest)) {
- if (ofs != 0) {
- if (dest == node && !(as->flags & JIT_F_LEA_AGU))
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, ofs);
- else
- emit_rmro(as, XO_LEA, dest, node, ofs);
- } else if (dest != node) {
- emit_rr(as, XO_MOV, dest, node);
- }
- }
- asm_guardcc(as, CC_NE);
-#if LJ_64
- if (!irt_ispri(irkey->t)) {
- Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
- emit_rmro(as, XO_CMP, key|REX_64, node,
- ofs + (int32_t)offsetof(Node, key.u64));
- lua_assert(irt_isnum(irkey->t) || irt_isgcv(irkey->t));
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
- ((uint64_t)irt_toitype(irkey->t) << 32) |
- (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
- } else {
- lua_assert(!irt_isnil(irkey->t));
- emit_i8(as, irt_toitype(irkey->t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
- ofs + (int32_t)offsetof(Node, key.it));
- }
-#else
- l_exit = emit_label(as);
- if (irt_isnum(irkey->t)) {
- /* Assumes -0.0 is already canonicalized to +0.0. */
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.u32.lo),
- (int32_t)ir_knum(irkey)->u32.lo);
- emit_sjcc(as, CC_NE, l_exit);
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.u32.hi),
- (int32_t)ir_knum(irkey)->u32.hi);
- } else {
- if (!irt_ispri(irkey->t)) {
- lua_assert(irt_isgcv(irkey->t));
- emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
- ofs + (int32_t)offsetof(Node, key.gcr),
- ptr2addr(ir_kgc(irkey)));
- emit_sjcc(as, CC_NE, l_exit);
- }
- lua_assert(!irt_isnil(irkey->t));
- emit_i8(as, irt_toitype(irkey->t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
- ofs + (int32_t)offsetof(Node, key.it));
- }
-#endif
-}
-
-static void asm_newref(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
- IRRef args[3];
- IRIns *irkey;
- Reg tmp;
- if (ir->r == RID_SINK)
- return;
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ir->op1; /* GCtab *t */
- args[2] = ASMREF_TMP1; /* cTValue *key */
- asm_setupresult(as, ir, ci); /* TValue * */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- irkey = IR(ir->op2);
- if (irt_isnum(irkey->t)) {
- /* For numbers use the constant itself or a spill slot as a TValue. */
- if (irref_isk(ir->op2))
- emit_loada(as, tmp, ir_knum(irkey));
- else
- emit_rmro(as, XO_LEA, tmp|REX_64, RID_ESP, ra_spill(as, irkey));
- } else {
- /* Otherwise use g->tmptv to hold the TValue. */
- if (!irref_isk(ir->op2)) {
- Reg src = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, tmp));
- emit_movtomro(as, REX_64IR(irkey, src), tmp, 0);
- } else if (!irt_ispri(irkey->t)) {
- emit_movmroi(as, tmp, 0, irkey->i);
- }
- if (!(LJ_64 && irt_islightud(irkey->t)))
- emit_movmroi(as, tmp, 4, irt_toitype(irkey->t));
- emit_loada(as, tmp, &J2G(as->J)->tmptv);
- }
-}
-
-static void asm_uref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (irref_isk(ir->op1)) {
- GCfunc *fn = ir_kfunc(IR(ir->op1));
- MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
- emit_rma(as, XO_MOV, dest, v);
- } else {
- Reg uv = ra_scratch(as, RSET_GPR);
- Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
- if (ir->o == IR_UREFC) {
- emit_rmro(as, XO_LEA, dest, uv, offsetof(GCupval, tv));
- asm_guardcc(as, CC_NE);
- emit_i8(as, 1);
- emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
- } else {
- emit_rmro(as, XO_MOV, dest, uv, offsetof(GCupval, v));
- }
- emit_rmro(as, XO_MOV, uv, func,
- (int32_t)offsetof(GCfuncL, uvptr) + 4*(int32_t)(ir->op2 >> 8));
- }
-}
-
-static void asm_fref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusefref(as, ir, RSET_GPR);
- emit_mrm(as, XO_LEA, dest, RID_MRM);
-}
-
-static void asm_strref(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- asm_fusestrref(as, ir, RSET_GPR);
- if (as->mrm.base == RID_NONE)
- emit_loadi(as, dest, as->mrm.ofs);
- else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
- emit_gri(as, XG_ARITHi(XOg_ADD), dest, as->mrm.ofs);
- else
- emit_mrm(as, XO_LEA, dest, RID_MRM);
-}
-
-/* -- Loads and stores ---------------------------------------------------- */
-
-static void asm_fxload(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
- x86Op xo;
- if (ir->o == IR_FLOAD)
- asm_fusefref(as, ir, RSET_GPR);
- else
- asm_fusexref(as, ir->op1, RSET_GPR);
- /* ir->op2 is ignored -- unaligned loads are ok on x86. */
- switch (irt_type(ir->t)) {
- case IRT_I8: xo = XO_MOVSXb; break;
- case IRT_U8: xo = XO_MOVZXb; break;
- case IRT_I16: xo = XO_MOVSXw; break;
- case IRT_U16: xo = XO_MOVZXw; break;
- case IRT_NUM: xo = XMM_MOVRM(as); break;
- case IRT_FLOAT: xo = XO_MOVSS; break;
- default:
- if (LJ_64 && irt_is64(ir->t))
- dest |= REX_64;
- else
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- xo = XO_MOV;
- break;
- }
- emit_mrm(as, xo, dest, RID_MRM);
-}
-
-static void asm_fxstore(ASMState *as, IRIns *ir)
-{
- RegSet allow = RSET_GPR;
- Reg src = RID_NONE, osrc = RID_NONE;
- int32_t k = 0;
- if (ir->r == RID_SINK)
- return;
- /* The IRT_I16/IRT_U16 stores should never be simplified for constant
- ** values since mov word [mem], imm16 has a length-changing prefix.
- */
- if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
- !asm_isk32(as, ir->op2, &k)) {
- RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
- (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
- src = osrc = ra_alloc1(as, ir->op2, allow8);
- if (!LJ_64 && !rset_test(allow8, src)) { /* Already in wrong register. */
- rset_clear(allow, osrc);
- src = ra_scratch(as, allow8);
- }
- rset_clear(allow, src);
- }
- if (ir->o == IR_FSTORE) {
- asm_fusefref(as, IR(ir->op1), allow);
- } else {
- asm_fusexref(as, ir->op1, allow);
- if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
- }
- if (ra_hasreg(src)) {
- x86Op xo;
- switch (irt_type(ir->t)) {
- case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break;
- case IRT_I16: case IRT_U16: xo = XO_MOVtow; break;
- case IRT_NUM: xo = XO_MOVSDto; break;
- case IRT_FLOAT: xo = XO_MOVSSto; break;
-#if LJ_64
- case IRT_LIGHTUD: lua_assert(0); /* NYI: mask 64 bit lightuserdata. */
-#endif
- default:
- if (LJ_64 && irt_is64(ir->t))
- src |= REX_64;
- else
- lua_assert(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t));
- xo = XO_MOVto;
- break;
- }
- emit_mrm(as, xo, src, RID_MRM);
- if (!LJ_64 && src != osrc) {
- ra_noweak(as, osrc);
- emit_rr(as, XO_MOV, src, osrc);
- }
- } else {
- if (irt_isi8(ir->t) || irt_isu8(ir->t)) {
- emit_i8(as, k);
- emit_mrm(as, XO_MOVmib, 0, RID_MRM);
- } else {
- lua_assert(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) ||
- irt_isaddr(ir->t));
- emit_i32(as, k);
- emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM);
- }
- }
-}
-
-#if LJ_64
-static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck)
-{
- if (ra_used(ir) || typecheck) {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- if (typecheck) {
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest));
- asm_guardcc(as, CC_NE);
- emit_i8(as, -2);
- emit_rr(as, XO_ARITHi8, XOg_CMP, tmp);
- emit_shifti(as, XOg_SAR|REX_64, tmp, 47);
- emit_rr(as, XO_MOV, tmp|REX_64, dest);
- }
- return dest;
- } else {
- return RID_NONE;
- }
-}
-#endif
-
-static void asm_ahuvload(ASMState *as, IRIns *ir)
-{
- lua_assert(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
- (LJ_DUALNUM && irt_isint(ir->t)));
-#if LJ_64
- if (irt_islightud(ir->t)) {
- Reg dest = asm_load_lightud64(as, ir, 1);
- if (ra_hasreg(dest)) {
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM);
- }
- return;
- } else
-#endif
- if (ra_used(ir)) {
- RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XMM_MOVRM(as), dest, RID_MRM);
- } else {
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- }
- /* Always do the type check, even if the load result is unused. */
- as->mrm.ofs += 4;
- asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE);
- if (LJ_64 && irt_type(ir->t) >= IRT_NUM) {
- lua_assert(irt_isinteger(ir->t) || irt_isnum(ir->t));
- emit_u32(as, LJ_TISNUM);
- emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM);
- } else {
- emit_i8(as, irt_toitype(ir->t));
- emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM);
- }
-}
-
-static void asm_ahustore(ASMState *as, IRIns *ir)
-{
- if (ir->r == RID_SINK)
- return;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ir->op2, RSET_FPR);
- asm_fuseahuref(as, ir->op1, RSET_GPR);
- emit_mrm(as, XO_MOVSDto, src, RID_MRM);
-#if LJ_64
- } else if (irt_islightud(ir->t)) {
- Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
- asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src));
- emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM);
-#endif
- } else {
- IRIns *irr = IR(ir->op2);
- RegSet allow = RSET_GPR;
- Reg src = RID_NONE;
- if (!irref_isk(ir->op2)) {
- src = ra_alloc1(as, ir->op2, allow);
- rset_clear(allow, src);
- }
- asm_fuseahuref(as, ir->op1, allow);
- if (ra_hasreg(src)) {
- emit_mrm(as, XO_MOVto, src, RID_MRM);
- } else if (!irt_ispri(irr->t)) {
- lua_assert(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)));
- emit_i32(as, irr->i);
- emit_mrm(as, XO_MOVmi, 0, RID_MRM);
- }
- as->mrm.ofs += 4;
- emit_i32(as, (int32_t)irt_toitype(ir->t));
- emit_mrm(as, XO_MOVmi, 0, RID_MRM);
- }
-}
-
-static void asm_sload(ASMState *as, IRIns *ir)
-{
- int32_t ofs = 8*((int32_t)ir->op1-1) + ((ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
- IRType1 t = ir->t;
- Reg base;
- lua_assert(!(ir->op2 & IRSLOAD_PARENT)); /* Handled by asm_head_side(). */
- lua_assert(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK));
- lua_assert(LJ_DUALNUM ||
- !irt_isint(t) || (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME)));
- if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
- Reg left = ra_scratch(as, RSET_FPR);
- asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- emit_rmro(as, XMM_MOVRM(as), left, base, ofs);
- t.irt = IRT_NUM; /* Continue with a regular number type check. */
-#if LJ_64
- } else if (irt_islightud(t)) {
- Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK));
- if (ra_hasreg(dest)) {
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- emit_rmro(as, XO_MOV, dest|REX_64, base, ofs);
- }
- return;
-#endif
- } else if (ra_used(ir)) {
- RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR;
- Reg dest = ra_dest(as, ir, allow);
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- lua_assert(irt_isnum(t) || irt_isint(t) || irt_isaddr(t));
- if ((ir->op2 & IRSLOAD_CONVERT)) {
- t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */
- emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTSD2SI, dest, base, ofs);
- } else if (irt_isnum(t)) {
- emit_rmro(as, XMM_MOVRM(as), dest, base, ofs);
- } else {
- emit_rmro(as, XO_MOV, dest, base, ofs);
- }
- } else {
- if (!(ir->op2 & IRSLOAD_TYPECHECK))
- return; /* No type check: avoid base alloc. */
- base = ra_alloc1(as, REF_BASE, RSET_GPR);
- }
- if ((ir->op2 & IRSLOAD_TYPECHECK)) {
- /* Need type check, even if the load result is unused. */
- asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE);
- if (LJ_64 && irt_type(t) >= IRT_NUM) {
- lua_assert(irt_isinteger(t) || irt_isnum(t));
- emit_u32(as, LJ_TISNUM);
- emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4);
- } else {
- emit_i8(as, irt_toitype(t));
- emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4);
- }
- }
-}
-
-/* -- Allocations --------------------------------------------------------- */
-
-#if LJ_HASFFI
-static void asm_cnew(ASMState *as, IRIns *ir)
-{
- CTState *cts = ctype_ctsG(J2G(as->J));
- CTypeID ctypeid = (CTypeID)IR(ir->op1)->i;
- CTSize sz = (ir->o == IR_CNEWI || ir->op2 == REF_NIL) ?
- lj_ctype_size(cts, ctypeid) : (CTSize)IR(ir->op2)->i;
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
- IRRef args[2];
- lua_assert(sz != CTSIZE_INVALID);
-
- args[0] = ASMREF_L; /* lua_State *L */
- args[1] = ASMREF_TMP1; /* MSize size */
- as->gcsteps++;
- asm_setupresult(as, ir, ci); /* GCcdata * */
-
- /* Initialize immutable cdata object. */
- if (ir->o == IR_CNEWI) {
- RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
-#if LJ_64
- Reg r64 = sz == 8 ? REX_64 : 0;
- if (irref_isk(ir->op2)) {
- IRIns *irk = IR(ir->op2);
- uint64_t k = irk->o == IR_KINT64 ? ir_k64(irk)->u64 :
- (uint64_t)(uint32_t)irk->i;
- if (sz == 4 || checki32((int64_t)k)) {
- emit_i32(as, (int32_t)k);
- emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata));
- } else {
- emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata));
- emit_loadu64(as, RID_ECX, k);
- }
- } else {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata));
- }
-#else
- int32_t ofs = sizeof(GCcdata);
- if (sz == 8) {
- ofs += 4; ir++;
- lua_assert(ir->o == IR_HIOP);
- }
- do {
- if (irref_isk(ir->op2)) {
- emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i);
- } else {
- Reg r = ra_alloc1(as, ir->op2, allow);
- emit_movtomro(as, r, RID_RET, ofs);
- rset_clear(allow, r);
- }
- if (ofs == sizeof(GCcdata)) break;
- ofs -= 4; ir--;
- } while (1);
-#endif
- lua_assert(sz == 4 || sz == 8);
- }
-
- /* Combine initialization of marked, gct and ctypeid. */
- emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked));
- emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX,
- (int32_t)((~LJ_TCDATA<<8)+(ctypeid<<16)));
- emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES);
- emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite);
-
- asm_gencall(as, ci, args);
- emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata)));
-}
-#else
-#define asm_cnew(as, ir) ((void)0)
-#endif
-
-/* -- Write barriers ------------------------------------------------------ */
-
-static void asm_tbar(ASMState *as, IRIns *ir)
-{
- Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
- Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab));
- MCLabel l_end = emit_label(as);
- emit_movtomro(as, tmp, tab, offsetof(GCtab, gclist));
- emit_setgl(as, tab, gc.grayagain);
- emit_getgl(as, tmp, gc.grayagain);
- emit_i8(as, ~LJ_GC_BLACK);
- emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked));
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_BLACK);
- emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked));
-}
-
-static void asm_obar(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv];
- IRRef args[2];
- MCLabel l_end;
- Reg obj;
- /* No need for other object barriers (yet). */
- lua_assert(IR(ir->op1)->o == IR_UREFC);
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ir->op1; /* TValue *tv */
- asm_gencall(as, ci, args);
- emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J));
- obj = IR(ir->op1)->r;
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_WHITES);
- if (irref_isk(ir->op2)) {
- GCobj *vp = ir_kgc(IR(ir->op2));
- emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked);
- } else {
- Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj));
- emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked));
- }
- emit_sjcc(as, CC_Z, l_end);
- emit_i8(as, LJ_GC_BLACK);
- emit_rmro(as, XO_GROUP3b, XOg_TEST, obj,
- (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv));
-}
-
-/* -- FP/int arithmetic and logic operations ------------------------------ */
-
-/* Load reference onto x87 stack. Force a spill to memory if needed. */
-static void asm_x87load(ASMState *as, IRRef ref)
-{
- IRIns *ir = IR(ref);
- if (ir->o == IR_KNUM) {
- cTValue *tv = ir_knum(ir);
- if (tvispzero(tv)) /* Use fldz only for +0. */
- emit_x87op(as, XI_FLDZ);
- else if (tvispone(tv))
- emit_x87op(as, XI_FLD1);
- else
- emit_rma(as, XO_FLDq, XOg_FLDq, tv);
- } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) &&
- !irref_isk(ir->op1) && mayfuse(as, ir->op1)) {
- IRIns *iri = IR(ir->op1);
- emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri));
- } else {
- emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY));
- }
-}
-
-/* Try to rejoin pow from EXP2, MUL and LOG2 (if still unsplit). */
-static int fpmjoin_pow(ASMState *as, IRIns *ir)
-{
- IRIns *irp = IR(ir->op1);
- if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
- IRIns *irpp = IR(irp->op1);
- if (irpp == ir-2 && irpp->o == IR_FPMATH &&
- irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM2+1)|RID2RSET(RID_EAX);
- IRIns *irx;
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, lj_vm_pow_sse);
- irx = IR(irpp->op1);
- if (ra_noreg(irx->r) && ra_gethint(irx->r) == RID_XMM1)
- irx->r = RID_INIT; /* Avoid allocating xmm1 for x. */
- ra_left(as, RID_XMM0, irpp->op1);
- ra_left(as, RID_XMM1, irp->op2);
- return 1;
- }
- }
- return 0;
-}
-
-static void asm_fpmath(ASMState *as, IRIns *ir)
-{
- IRFPMathOp fpm = ir->o == IR_FPMATH ? (IRFPMathOp)ir->op2 : IRFPM_OTHER;
- if (fpm == IRFPM_SQRT) {
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
- emit_mrm(as, XO_SQRTSD, dest, left);
- } else if (fpm <= IRFPM_TRUNC) {
- if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */
- Reg dest = ra_dest(as, ir, RSET_FPR);
- Reg left = asm_fuseload(as, ir->op1, RSET_FPR);
- /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op.
- ** Let's pretend it's a 3-byte opcode, and compensate afterwards.
- ** This is atrocious, but the alternatives are much worse.
- */
- /* Round down/up/trunc == 1001/1010/1011. */
- emit_i8(as, 0x09 + fpm);
- emit_mrm(as, XO_ROUNDSD, dest, left);
- if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) {
- as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */
- }
- *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */
- } else { /* Call helper functions for SSE2 variant. */
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse :
- fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse);
- ra_left(as, RID_XMM0, ir->op1);
- }
- } else if (fpm == IRFPM_EXP2 && fpmjoin_pow(as, ir)) {
- /* Rejoined to pow(). */
- } else { /* Handle x87 ops. */
- int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */
- Reg dest = ir->r;
- if (ra_hasreg(dest)) {
- ra_free(as, dest);
- ra_modified(as, dest);
- emit_rmro(as, XMM_MOVRM(as), dest, RID_ESP, ofs);
- }
- emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs);
- switch (fpm) { /* st0 = lj_vm_*(st0) */
- case IRFPM_EXP: emit_call(as, lj_vm_exp_x87); break;
- case IRFPM_EXP2: emit_call(as, lj_vm_exp2_x87); break;
- case IRFPM_SIN: emit_x87op(as, XI_FSIN); break;
- case IRFPM_COS: emit_x87op(as, XI_FCOS); break;
- case IRFPM_TAN: emit_x87op(as, XI_FPOP); emit_x87op(as, XI_FPTAN); break;
- case IRFPM_LOG: case IRFPM_LOG2: case IRFPM_LOG10:
- /* Note: the use of fyl2xp1 would be pointless here. When computing
- ** log(1.0+eps) the precision is already lost after 1.0 is added.
- ** Subtracting 1.0 won't recover it. OTOH math.log1p would make sense.
- */
- emit_x87op(as, XI_FYL2X); break;
- case IRFPM_OTHER:
- switch (ir->o) {
- case IR_ATAN2:
- emit_x87op(as, XI_FPATAN); asm_x87load(as, ir->op2); break;
- case IR_LDEXP:
- emit_x87op(as, XI_FPOP1); emit_x87op(as, XI_FSCALE); break;
- default: lua_assert(0); break;
- }
- break;
- default: lua_assert(0); break;
- }
- asm_x87load(as, ir->op1);
- switch (fpm) {
- case IRFPM_LOG: emit_x87op(as, XI_FLDLN2); break;
- case IRFPM_LOG2: emit_x87op(as, XI_FLD1); break;
- case IRFPM_LOG10: emit_x87op(as, XI_FLDLG2); break;
- case IRFPM_OTHER:
- if (ir->o == IR_LDEXP) asm_x87load(as, ir->op2);
- break;
- default: break;
- }
- }
-}
-
-static void asm_fppowi(ASMState *as, IRIns *ir)
-{
- /* The modified regs must match with the *.dasc implementation. */
- RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM1+1)|RID2RSET(RID_EAX);
- if (ra_hasreg(ir->r))
- rset_clear(drop, ir->r); /* Dest reg handled below. */
- ra_evictset(as, drop);
- ra_destreg(as, ir, RID_XMM0);
- emit_call(as, lj_vm_powi_sse);
- ra_left(as, RID_XMM0, ir->op1);
- ra_left(as, RID_EAX, ir->op2);
-}
-
-#if LJ_64 && LJ_HASFFI
-static void asm_arith64(ASMState *as, IRIns *ir, IRCallID id)
-{
- const CCallInfo *ci = &lj_ir_callinfo[id];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-#endif
-
-static void asm_intmod(ASMState *as, IRIns *ir)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_vm_modi];
- IRRef args[2];
- args[0] = ir->op1;
- args[1] = ir->op2;
- asm_setupresult(as, ir, ci);
- asm_gencall(as, ci, args);
-}
-
-static int asm_swapops(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRIns *irr = IR(ir->op2);
- lua_assert(ra_noreg(irr->r));
- if (!irm_iscomm(lj_ir_mode[ir->o]))
- return 0; /* Can't swap non-commutative operations. */
- if (irref_isk(ir->op2))
- return 0; /* Don't swap constants to the left. */
- if (ra_hasreg(irl->r))
- return 1; /* Swap if left already has a register. */
- if (ra_samehint(ir->r, irr->r))
- return 1; /* Swap if dest and right have matching hints. */
- if (as->curins > as->loopref) { /* In variant part? */
- if (ir->op2 < as->loopref && !irt_isphi(irr->t))
- return 0; /* Keep invariants on the right. */
- if (ir->op1 < as->loopref && !irt_isphi(irl->t))
- return 1; /* Swap invariants to the right. */
- }
- if (opisfusableload(irl->o))
- return 1; /* Swap fusable loads to the right. */
- return 0; /* Otherwise don't swap. */
-}
-
-static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo)
-{
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- RegSet allow = RSET_FPR;
- Reg dest;
- Reg right = IR(rref)->r;
- if (ra_hasreg(right)) {
- rset_clear(allow, right);
- ra_noweak(as, right);
- }
- dest = ra_dest(as, ir, allow);
- if (lref == rref) {
- right = dest;
- } else if (ra_noreg(right)) {
- if (asm_swapops(as, ir)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- }
- right = asm_fuseload(as, rref, rset_clear(allow, dest));
- }
- emit_mrm(as, xo, dest, right);
- ra_left(as, dest, lref);
-}
-
-static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa)
-{
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- RegSet allow = RSET_GPR;
- Reg dest, right;
- int32_t k = 0;
- if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */
- MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2);
- if ((p[1] & 15) < 14) {
- if ((p[1] & 15) >= 12) p[1] -= 4; /* L <-> S, NL <-> NS */
- as->flagmcp = NULL;
- as->mcp = p;
- } /* else: cannot transform LE/NLE to cc without use of OF. */
- }
- right = IR(rref)->r;
- if (ra_hasreg(right)) {
- rset_clear(allow, right);
- ra_noweak(as, right);
- }
- dest = ra_dest(as, ir, allow);
- if (lref == rref) {
- right = dest;
- } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) {
- if (asm_swapops(as, ir)) {
- IRRef tmp = lref; lref = rref; rref = tmp;
- }
- right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t));
- }
- if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */
- asm_guardcc(as, CC_O);
- if (xa != XOg_X_IMUL) {
- if (ra_hasreg(right))
- emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right);
- else
- emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k);
- } else if (ra_hasreg(right)) { /* IMUL r, mrm. */
- emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right);
- } else { /* IMUL r, r, k. */
- /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */
- Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t));
- x86Op xo;
- if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8;
- } else { emit_i32(as, k); xo = XO_IMULi; }
- emit_mrm(as, xo, REX_64IR(ir, dest), left);
- return;
- }
- ra_left(as, dest, lref);
-}
-
-/* LEA is really a 4-operand ADD with an independent destination register,
-** up to two source registers and an immediate. One register can be scaled
-** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several
-** instructions.
-**
-** Currently only a few common cases are supported:
-** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated
-** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b
-** - Right ADD fusion: y = a+(b+k)
-** The omitted variants have already been reduced by FOLD.
-**
-** There are more fusion opportunities, like gathering shifts or joining
-** common references. But these are probably not worth the trouble, since
-** array indexing is not decomposed and already makes use of all fields
-** of the ModRM operand.
-*/
-static int asm_lea(ASMState *as, IRIns *ir)
-{
- IRIns *irl = IR(ir->op1);
- IRIns *irr = IR(ir->op2);
- RegSet allow = RSET_GPR;
- Reg dest;
- as->mrm.base = as->mrm.idx = RID_NONE;
- as->mrm.scale = XM_SCALE1;
- as->mrm.ofs = 0;
- if (ra_hasreg(irl->r)) {
- rset_clear(allow, irl->r);
- ra_noweak(as, irl->r);
- as->mrm.base = irl->r;
- if (irref_isk(ir->op2) || ra_hasreg(irr->r)) {
- /* The PHI renaming logic does a better job in some cases. */
- if (ra_hasreg(ir->r) &&
- ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) ||
- (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2)))
- return 0;
- if (irref_isk(ir->op2)) {
- as->mrm.ofs = irr->i;
- } else {
- rset_clear(allow, irr->r);
- ra_noweak(as, irr->r);
- as->mrm.idx = irr->r;
- }
- } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) &&
- irref_isk(irr->op2)) {
- Reg idx = ra_alloc1(as, irr->op1, allow);
- rset_clear(allow, idx);
- as->mrm.idx = (uint8_t)idx;
- as->mrm.ofs = IR(irr->op2)->i;
- } else {
- return 0;
- }
- } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) &&
- (irref_isk(ir->op2) || irref_isk(irl->op2))) {
- Reg idx, base = ra_alloc1(as, irl->op1, allow);
- rset_clear(allow, base);
- as->mrm.base = (uint8_t)base;
- if (irref_isk(ir->op2)) {
- as->mrm.ofs = irr->i;
- idx = ra_alloc1(as, irl->op2, allow);
- } else {
- as->mrm.ofs = IR(irl->op2)->i;
- idx = ra_alloc1(as, ir->op2, allow);
- }
- rset_clear(allow, idx);
- as->mrm.idx = (uint8_t)idx;
- } else {
- return 0;
- }
- dest = ra_dest(as, ir, allow);
- emit_mrm(as, XO_LEA, dest, RID_MRM);
- return 1; /* Success. */
-}
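/* Illustrative sketch (not part of the original file): the address form
** described in the comment above asm_lea, i.e. base + index*scale + disp
** with a scale of 1, 2, 4 or 8, written out as plain C for orientation.
** The function and its names are made up for this example.
*/
#include <stdint.h>

static intptr_t lea_effective_address(intptr_t base, intptr_t index,
                                      int scale, int32_t disp)
{
  return base + index*scale + disp;
}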
-
-static void asm_add(ASMState *as, IRIns *ir)
-{
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_ADDSD);
- else if ((as->flags & JIT_F_LEA_AGU) || as->flagmcp == as->mcp ||
- irt_is64(ir->t) || !asm_lea(as, ir))
- asm_intarith(as, ir, XOg_ADD);
-}
-
-static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest);
- ra_left(as, dest, ir->op1);
-}
-
-static void asm_min_max(ASMState *as, IRIns *ir, int cc)
-{
- Reg right, dest = ra_dest(as, ir, RSET_GPR);
- IRRef lref = ir->op1, rref = ir->op2;
- if (irref_isk(rref)) { lref = rref; rref = ir->op1; }
- right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest));
- emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right);
- emit_rr(as, XO_CMP, REX_64IR(ir, dest), right);
- ra_left(as, dest, lref);
-}
-
-static void asm_bitswap(ASMState *as, IRIns *ir)
-{
- Reg dest = ra_dest(as, ir, RSET_GPR);
- as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24),
- REX_64IR(ir, 0), dest, 0, as->mcp, 1);
- ra_left(as, dest, ir->op1);
-}
-
-static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs)
-{
- IRRef rref = ir->op2;
- IRIns *irr = IR(rref);
- Reg dest;
- if (irref_isk(rref)) { /* Constant shifts. */
- int shift;
- dest = ra_dest(as, ir, RSET_GPR);
- shift = irr->i & (irt_is64(ir->t) ? 63 : 31);
- switch (shift) {
- case 0: break;
- case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break;
- default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break;
- }
- } else { /* Variable shifts implicitly use register cl (i.e. ecx). */
- Reg right;
- dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX));
- if (dest == RID_ECX) {
- dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX));
- emit_rr(as, XO_MOV, RID_ECX, dest);
- }
- right = irr->r;
- if (ra_noreg(right))
- right = ra_allocref(as, rref, RID2RSET(RID_ECX));
- else if (right != RID_ECX)
- ra_scratch(as, RID2RSET(RID_ECX));
- emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest);
- ra_noweak(as, right);
- if (right != RID_ECX)
- emit_rr(as, XO_MOV, RID_ECX, right);
- }
- ra_left(as, dest, ir->op1);
- /*
- ** Note: avoid using the flags resulting from a shift or rotate!
- ** All of them cause a partial flag stall, except for r,1 shifts
- ** (but not rotates). And a shift count of 0 leaves the flags unmodified.
- */
-}
-
-/* -- Comparisons --------------------------------------------------------- */
-
-/* Virtual flags for unordered FP comparisons. */
-#define VCC_U 0x1000 /* Unordered. */
-#define VCC_P 0x2000 /* Needs extra CC_P branch. */
-#define VCC_S 0x4000 /* Swap avoids CC_P branch. */
-#define VCC_PS (VCC_P|VCC_S)
-
-/* Map of comparisons to flags. ORDER IR. */
-#define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf))
-static const uint16_t asm_compmap[IR_ABC+1] = {
- /* signed non-eq unsigned flags */
- /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS),
- /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0),
- /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS),
- /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0),
- /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U),
- /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS),
- /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U),
- /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS),
- /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P),
- /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P),
- /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */
-};
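/* Illustrative sketch (not part of the original file): how the consumers of
** asm_compmap unpack a COMPFLAGS() entry. asm_comp uses the low nibble for
** the exit guard and cc >> 4 for the unsigned/FP condition; asm_comp_int64
** additionally uses cc >> 8 for the hiword compare. Masks added for clarity.
*/
#include <stdint.h>

static void compflags_decode(uint16_t cc, int *cc_exit, int *cc_unsigned,
                             int *cc_noneq, int *vflags)
{
  *cc_exit     = cc & 15;         /* Condition for the exit guard. */
  *cc_unsigned = (cc >> 4) & 15;  /* Unsigned/FP condition. */
  *cc_noneq    = (cc >> 8) & 15;  /* Hiword condition without equality. */
  *vflags      = cc & 0xf000;     /* VCC_U/VCC_P/VCC_S virtual flags. */
}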
-
-/* FP and integer comparisons. */
-static void asm_comp(ASMState *as, IRIns *ir, uint32_t cc)
-{
- if (irt_isnum(ir->t)) {
- IRRef lref = ir->op1;
- IRRef rref = ir->op2;
- Reg left, right;
- MCLabel l_around;
- /*
- ** An extra CC_P branch is required to preserve ordered/unordered
- ** semantics for FP comparisons. This can be avoided by swapping
- ** the operands and inverting the condition (except for EQ and UNE).
- ** So always try to swap if possible.
- **
- ** Another option would be to swap operands to achieve better memory
- ** operand fusion. But it's unlikely that this outweighs the cost
- ** of the extra branches.
- */
- if (cc & VCC_S) { /* Swap? */
- IRRef tmp = lref; lref = rref; rref = tmp;
- cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */
- }
- left = ra_alloc1(as, lref, RSET_FPR);
- right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left));
- l_around = emit_label(as);
- asm_guardcc(as, cc >> 4);
- if (cc & VCC_P) { /* Extra CC_P branch required? */
- if (!(cc & VCC_U)) {
- asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */
- } else if (l_around != as->invmcp) {
- emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */
- } else {
- /* Patched to mcloop by asm_loop_fixup. */
- as->loopinv = 2;
- if (as->realign)
- emit_sjcc(as, CC_P, as->mcp);
- else
- emit_jcc(as, CC_P, as->mcp);
- }
- }
- emit_mrm(as, XO_UCOMISD, left, right);
- } else {
- IRRef lref = ir->op1, rref = ir->op2;
- IROp leftop = (IROp)(IR(lref)->o);
- Reg r64 = REX_64IR(ir, 0);
- int32_t imm = 0;
- lua_assert(irt_is64(ir->t) || irt_isint(ir->t) ||
- irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t));
- /* Swap constants (only for ABC) and fusable loads to the right. */
- if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) {
- if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */
- else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */
- lref = ir->op2; rref = ir->op1;
- }
- if (asm_isk32(as, rref, &imm)) {
- IRIns *irl = IR(lref);
- /* Check whether we can use test ins. Not for unsigned, since CF=0. */
- int usetest = (imm == 0 && (cc & 0xa) != 0x2);
- if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) {
- /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */
- Reg right, left = RID_NONE;
- RegSet allow = RSET_GPR;
- if (!asm_isk32(as, irl->op2, &imm)) {
- left = ra_alloc1(as, irl->op2, allow);
- rset_clear(allow, left);
- } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */
- IRIns *irll = IR(irl->op1);
- if (opisfusableload((IROp)irll->o) &&
- (irt_isi8(irll->t) || irt_isu8(irll->t))) {
- IRType1 origt = irll->t; /* Temporarily flip types. */
- irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT;
- as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
- right = asm_fuseload(as, irl->op1, RSET_GPR);
- as->curins++;
- irll->t = origt;
- if (right != RID_MRM) goto test_nofuse;
- /* Fusion succeeded, emit test byte mrm, imm8. */
- asm_guardcc(as, cc);
- emit_i8(as, (imm & 0xff));
- emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM);
- return;
- }
- }
- as->curins--; /* Skip to BAND to avoid failing in noconflict(). */
- right = asm_fuseloadm(as, irl->op1, allow, r64);
- as->curins++; /* Undo the above. */
- test_nofuse:
- asm_guardcc(as, cc);
- if (ra_noreg(left)) {
- emit_i32(as, imm);
- emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right);
- } else {
- emit_mrm(as, XO_TEST, r64 + left, right);
- }
- } else {
- Reg left;
- if (opisfusableload((IROp)irl->o) &&
- ((irt_isu8(irl->t) && checku8(imm)) ||
- ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) ||
- (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) {
- /* Only the IRT_INT case is fused by asm_fuseload.
- ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads
- ** are handled here.
- ** Note that cmp word [mem], imm16 should not be generated,
- ** since it has a length-changing prefix. Compares of a word
- ** against a sign-extended imm8 are ok, however.
- */
- IRType1 origt = irl->t; /* Temporarily flip types. */
- irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT;
- left = asm_fuseload(as, lref, RSET_GPR);
- irl->t = origt;
- if (left == RID_MRM) { /* Fusion succeeded? */
- if (irt_isu8(irl->t) || irt_isu16(irl->t))
- cc >>= 4; /* Need unsigned compare. */
- asm_guardcc(as, cc);
- emit_i8(as, imm);
- emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ?
- XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM);
- return;
- } /* Otherwise handle register case as usual. */
- } else {
- left = asm_fuseloadm(as, lref,
- irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64);
- }
- asm_guardcc(as, cc);
- if (usetest && left != RID_MRM) {
- /* Use test r,r instead of cmp r,0. */
- x86Op xo = XO_TEST;
- if (irt_isu8(ir->t)) {
- lua_assert(ir->o == IR_EQ || ir->o == IR_NE);
- xo = XO_TESTb;
- if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) {
- if (LJ_64) {
- left |= FORCE_REX;
- } else {
- emit_i32(as, 0xff);
- emit_mrm(as, XO_GROUP3, XOg_TEST, left);
- return;
- }
- }
- }
- emit_rr(as, xo, r64 + left, left);
- if (irl+1 == ir) /* Referencing previous ins? */
- as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */
- } else {
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm);
- }
- }
- } else {
- Reg left = ra_alloc1(as, lref, RSET_GPR);
- Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64);
- asm_guardcc(as, cc);
- emit_mrm(as, XO_CMP, r64 + left, right);
- }
- }
-}
-
-#if LJ_32 && LJ_HASFFI
-/* 64 bit integer comparisons in 32 bit mode. */
-static void asm_comp_int64(ASMState *as, IRIns *ir)
-{
- uint32_t cc = asm_compmap[(ir-1)->o];
- RegSet allow = RSET_GPR;
- Reg lefthi = RID_NONE, leftlo = RID_NONE;
- Reg righthi = RID_NONE, rightlo = RID_NONE;
- MCLabel l_around;
- x86ModRM mrm;
-
- as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */
-
- /* Allocate/fuse hiword operands. */
- if (irref_isk(ir->op2)) {
- lefthi = asm_fuseload(as, ir->op1, allow);
- } else {
- lefthi = ra_alloc1(as, ir->op1, allow);
- rset_clear(allow, lefthi);
- righthi = asm_fuseload(as, ir->op2, allow);
- if (righthi == RID_MRM) {
- if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base);
- if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx);
- } else {
- rset_clear(allow, righthi);
- }
- }
- mrm = as->mrm; /* Save state for hiword instruction. */
-
- /* Allocate/fuse loword operands. */
- if (irref_isk((ir-1)->op2)) {
- leftlo = asm_fuseload(as, (ir-1)->op1, allow);
- } else {
- leftlo = ra_alloc1(as, (ir-1)->op1, allow);
- rset_clear(allow, leftlo);
- rightlo = asm_fuseload(as, (ir-1)->op2, allow);
- }
-
- /* All register allocations must be performed _before_ this point. */
- l_around = emit_label(as);
- as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */
-
- /* Loword comparison and branch. */
- asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */
- if (ra_noreg(rightlo)) {
- int32_t imm = IR((ir-1)->op2)->i;
- if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM)
- emit_rr(as, XO_TEST, leftlo, leftlo);
- else
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm);
- } else {
- emit_mrm(as, XO_CMP, leftlo, rightlo);
- }
-
- /* Hiword comparison and branches. */
- if ((cc & 15) != CC_NE)
- emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */
- if ((cc & 15) != CC_E)
- asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */
- as->mrm = mrm; /* Restore state. */
- if (ra_noreg(righthi)) {
- int32_t imm = IR(ir->op2)->i;
- if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM)
- emit_rr(as, XO_TEST, lefthi, lefthi);
- else
- emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm);
- } else {
- emit_mrm(as, XO_CMP, lefthi, righthi);
- }
-}
-#endif
-
-/* -- Support for 64 bit ops in 32 bit mode ------------------------------- */
-
-/* Hiword op of a split 64 bit op. Previous op must be the loword op. */
-static void asm_hiop(ASMState *as, IRIns *ir)
-{
-#if LJ_32 && LJ_HASFFI
- /* HIOP is marked as a store because it needs its own DCE logic. */
- int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */
- if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
- if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */
- if (usehi || uselo) {
- if (irt_isfp(ir->t))
- asm_conv_fp_int64(as, ir);
- else
- asm_conv_int64_fp(as, ir);
- }
- as->curins--; /* Always skip the CONV. */
- return;
- } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */
- asm_comp_int64(as, ir);
- return;
- } else if ((ir-1)->o == IR_XSTORE) {
- if ((ir-1)->r != RID_SINK)
- asm_fxstore(as, ir);
- return;
- }
- if (!usehi) return; /* Skip unused hiword op for all remaining ops. */
- switch ((ir-1)->o) {
- case IR_ADD:
- as->flagmcp = NULL;
- as->curins--;
- asm_intarith(as, ir, XOg_ADC);
- asm_intarith(as, ir-1, XOg_ADD);
- break;
- case IR_SUB:
- as->flagmcp = NULL;
- as->curins--;
- asm_intarith(as, ir, XOg_SBB);
- asm_intarith(as, ir-1, XOg_SUB);
- break;
- case IR_NEG: {
- Reg dest = ra_dest(as, ir, RSET_GPR);
- emit_rr(as, XO_GROUP3, XOg_NEG, dest);
- emit_i8(as, 0);
- emit_rr(as, XO_ARITHi8, XOg_ADC, dest);
- ra_left(as, dest, ir->op1);
- as->curins--;
- asm_neg_not(as, ir-1, XOg_NEG);
- break;
- }
- case IR_CALLN:
- case IR_CALLXS:
- if (!uselo)
- ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */
- break;
- case IR_CNEWI:
- /* Nothing to do here. Handled by CNEWI itself. */
- break;
- default: lua_assert(0); break;
- }
-#else
- UNUSED(as); UNUSED(ir); lua_assert(0); /* Unused on x64 or without FFI. */
-#endif
-}
-
-/* -- Stack handling ------------------------------------------------------ */
-
-/* Check Lua stack size for overflow. Use exit handler as fallback. */
-static void asm_stack_check(ASMState *as, BCReg topslot,
- IRIns *irp, RegSet allow, ExitNo exitno)
-{
- /* Try to get an unused temp. register, otherwise spill/restore eax. */
- Reg pbase = irp ? irp->r : RID_BASE;
- Reg r = allow ? rset_pickbot(allow) : RID_EAX;
- emit_jcc(as, CC_B, exitstub_addr(as->J, exitno));
- if (allow == RSET_EMPTY) /* Restore temp. register. */
- emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0);
- else
- ra_modified(as, r);
- emit_gri(as, XG_ARITHi(XOg_CMP), r, (int32_t)(8*topslot));
- if (ra_hasreg(pbase) && pbase != r)
- emit_rr(as, XO_ARITH(XOg_SUB), r, pbase);
- else
- emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE,
- ptr2addr(&J2G(as->J)->jit_base));
- emit_rmro(as, XO_MOV, r, r, offsetof(lua_State, maxstack));
- emit_getgl(as, r, jit_L);
- if (allow == RSET_EMPTY) /* Spill temp. register. */
- emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0);
-}
-
-/* Restore Lua stack from on-trace state. */
-static void asm_stack_restore(ASMState *as, SnapShot *snap)
-{
- SnapEntry *map = &as->T->snapmap[snap->mapofs];
- SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1];
- MSize n, nent = snap->nent;
- /* Store the value of all modified slots to the Lua stack. */
- for (n = 0; n < nent; n++) {
- SnapEntry sn = map[n];
- BCReg s = snap_slot(sn);
- int32_t ofs = 8*((int32_t)s-1);
- IRRef ref = snap_ref(sn);
- IRIns *ir = IR(ref);
- if ((sn & SNAP_NORESTORE))
- continue;
- if (irt_isnum(ir->t)) {
- Reg src = ra_alloc1(as, ref, RSET_FPR);
- emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
- } else {
- lua_assert(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
- (LJ_DUALNUM && irt_isinteger(ir->t)));
- if (!irref_isk(ref)) {
- Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
- emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
- } else if (!irt_ispri(ir->t)) {
- emit_movmroi(as, RID_BASE, ofs, ir->i);
- }
- if ((sn & (SNAP_CONT|SNAP_FRAME))) {
- if (s != 0) /* Do not overwrite link to previous frame. */
- emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
- } else {
- if (!(LJ_64 && irt_islightud(ir->t)))
- emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
- }
- }
- checkmclim(as);
- }
- lua_assert(map + nent == flinks);
-}
-
-/* -- GC handling --------------------------------------------------------- */
-
-/* Check GC threshold and do one or more GC steps. */
-static void asm_gc_check(ASMState *as)
-{
- const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
- IRRef args[2];
- MCLabel l_end;
- Reg tmp;
- ra_evictset(as, RSET_SCRATCH);
- l_end = emit_label(as);
- /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
- asm_guardcc(as, CC_NE); /* Assumes asm_snap_prep() already done. */
- emit_rr(as, XO_TEST, RID_RET, RID_RET);
- args[0] = ASMREF_TMP1; /* global_State *g */
- args[1] = ASMREF_TMP2; /* MSize steps */
- asm_gencall(as, ci, args);
- tmp = ra_releasetmp(as, ASMREF_TMP1);
- emit_loada(as, tmp, J2G(as->J));
- emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
- /* Jump around GC step if GC total < GC threshold. */
- emit_sjcc(as, CC_B, l_end);
- emit_opgl(as, XO_ARITH(XOg_CMP), tmp, gc.threshold);
- emit_getgl(as, tmp, gc.total);
- as->gcsteps = 0;
- checkmclim(as);
-}
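/* Illustrative sketch (not part of the original file): the condition the
** cmp/jb pair above implements. The call to lj_gc_step_jit is skipped while
** the GC total is still below the threshold. Types are simplified stand-ins,
** not the real LuaJIT structures.
*/
#include <stddef.h>

typedef struct { size_t total, threshold; } GCStateSketch;

static int gc_step_needed(const GCStateSketch *gc)
{
  return gc->total >= gc->threshold;
}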
-
-/* -- Loop handling ------------------------------------------------------- */
-
-/* Fixup the loop branch. */
-static void asm_loop_fixup(ASMState *as)
-{
- MCode *p = as->mctop;
- MCode *target = as->mcp;
- if (as->realign) { /* Realigned loops use short jumps. */
- as->realign = NULL; /* Stop another retry. */
- lua_assert(((intptr_t)target & 15) == 0);
- if (as->loopinv) { /* Inverted loop branch? */
- p -= 5;
- p[0] = XI_JMP;
- lua_assert(target - p >= -128);
- p[-1] = (MCode)(target - p); /* Patch sjcc. */
- if (as->loopinv == 2)
- p[-3] = (MCode)(target - p + 2); /* Patch opt. short jp. */
- } else {
- lua_assert(target - p >= -128);
- p[-1] = (MCode)(int8_t)(target - p); /* Patch short jmp. */
- p[-2] = XI_JMPs;
- }
- } else {
- MCode *newloop;
- p[-5] = XI_JMP;
- if (as->loopinv) { /* Inverted loop branch? */
- /* asm_guardcc already inverted the jcc and patched the jmp. */
- p -= 5;
- newloop = target+4;
- *(int32_t *)(p-4) = (int32_t)(target - p); /* Patch jcc. */
- if (as->loopinv == 2) {
- *(int32_t *)(p-10) = (int32_t)(target - p + 6); /* Patch opt. jp. */
- newloop = target+8;
- }
- } else { /* Otherwise just patch jmp. */
- *(int32_t *)(p-4) = (int32_t)(target - p);
- newloop = target+3;
- }
- /* Realign small loops and shorten the loop branch. */
- if (newloop >= p - 128) {
- as->realign = newloop; /* Force a retry and remember alignment. */
- as->curins = as->stopins; /* Abort asm_trace now. */
- as->T->nins = as->orignins; /* Remove any added renames. */
- }
- }
-}
-
-/* -- Head of trace ------------------------------------------------------- */
-
-/* Coalesce BASE register for a root trace. */
-static void asm_head_root_base(ASMState *as)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (r != RID_BASE)
- emit_rr(as, XO_MOV, r, RID_BASE);
- }
-}
-
-/* Coalesce or reload BASE register for a side trace. */
-static RegSet asm_head_side_base(ASMState *as, IRIns *irp, RegSet allow)
-{
- IRIns *ir = IR(REF_BASE);
- Reg r = ir->r;
- if (ra_hasreg(r)) {
- ra_free(as, r);
- if (rset_test(as->modset, r) || irt_ismarked(ir->t))
- ir->r = RID_INIT; /* No inheritance for modified BASE register. */
- if (irp->r == r) {
- rset_clear(allow, r); /* Mark same BASE register as coalesced. */
- } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
- rset_clear(allow, irp->r);
- emit_rr(as, XO_MOV, r, irp->r); /* Move from coalesced parent reg. */
- } else {
- emit_getgl(as, r, jit_base); /* Otherwise reload BASE. */
- }
- }
- return allow;
-}
-
-/* -- Tail of trace ------------------------------------------------------- */
-
-/* Fixup the tail code. */
-static void asm_tail_fixup(ASMState *as, TraceNo lnk)
-{
- /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
- MCode *p = as->mctop;
- MCode *target, *q;
- int32_t spadj = as->T->spadjust;
- if (spadj == 0) {
- p -= ((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0);
- } else {
- MCode *p1;
- /* Patch stack adjustment. */
- if (checki8(spadj)) {
- p -= 3;
- p1 = p-6;
- *p1 = (MCode)spadj;
- } else {
- p1 = p-9;
- *(int32_t *)p1 = spadj;
- }
- if ((as->flags & JIT_F_LEA_AGU)) {
-#if LJ_64
- p1[-4] = 0x48;
-#endif
- p1[-3] = (MCode)XI_LEA;
- p1[-2] = MODRM(checki8(spadj) ? XM_OFS8 : XM_OFS32, RID_ESP, RID_ESP);
- p1[-1] = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- } else {
-#if LJ_64
- p1[-3] = 0x48;
-#endif
- p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
- p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
- }
- }
- /* Patch exit branch. */
- target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_JMP;
- /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
- for (q = as->mctop-1; q >= p; q--)
- *q = XI_NOP;
- as->mctop = p;
-}
-
-/* Prepare tail of code. */
-static void asm_tail_prep(ASMState *as)
-{
- MCode *p = as->mctop;
- /* Realign and leave room for backwards loop branch or exit branch. */
- if (as->realign) {
- int i = ((int)(intptr_t)as->realign) & 15;
- /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
- while (i-- > 0)
- *--p = XI_NOP;
- as->mctop = p;
- p -= (as->loopinv ? 5 : 2); /* Space for short/near jmp. */
- } else {
- p -= 5; /* Space for exit branch (near jmp). */
- }
- if (as->loopref) {
- as->invmcp = as->mcp = p;
- } else {
- /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
- as->mcp = p - (((as->flags & JIT_F_LEA_AGU) ? 7 : 6) + (LJ_64 ? 1 : 0));
- as->invmcp = NULL;
- }
-}
-
-/* -- Instruction dispatch ------------------------------------------------ */
-
-/* Assemble a single instruction. */
-static void asm_ir(ASMState *as, IRIns *ir)
-{
- switch ((IROp)ir->o) {
- /* Miscellaneous ops. */
- case IR_LOOP: asm_loop(as); break;
- case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
- case IR_USE:
- ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
- case IR_PHI: asm_phi(as, ir); break;
- case IR_HIOP: asm_hiop(as, ir); break;
- case IR_GCSTEP: asm_gcstep(as, ir); break;
-
- /* Guarded assertions. */
- case IR_LT: case IR_GE: case IR_LE: case IR_GT:
- case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
- case IR_EQ: case IR_NE: case IR_ABC:
- asm_comp(as, ir, asm_compmap[ir->o]);
- break;
-
- case IR_RETF: asm_retf(as, ir); break;
-
- /* Bit ops. */
- case IR_BNOT: asm_neg_not(as, ir, XOg_NOT); break;
- case IR_BSWAP: asm_bitswap(as, ir); break;
-
- case IR_BAND: asm_intarith(as, ir, XOg_AND); break;
- case IR_BOR: asm_intarith(as, ir, XOg_OR); break;
- case IR_BXOR: asm_intarith(as, ir, XOg_XOR); break;
-
- case IR_BSHL: asm_bitshift(as, ir, XOg_SHL); break;
- case IR_BSHR: asm_bitshift(as, ir, XOg_SHR); break;
- case IR_BSAR: asm_bitshift(as, ir, XOg_SAR); break;
- case IR_BROL: asm_bitshift(as, ir, XOg_ROL); break;
- case IR_BROR: asm_bitshift(as, ir, XOg_ROR); break;
-
- /* Arithmetic ops. */
- case IR_ADD: asm_add(as, ir); break;
- case IR_SUB:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_SUBSD);
- else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */
- asm_intarith(as, ir, XOg_SUB);
- break;
- case IR_MUL:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MULSD);
- else
- asm_intarith(as, ir, XOg_X_IMUL);
- break;
- case IR_DIV:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isnum(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
- IRCALL_lj_carith_divu64);
- else
-#endif
- asm_fparith(as, ir, XO_DIVSD);
- break;
- case IR_MOD:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isint(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
- IRCALL_lj_carith_modu64);
- else
-#endif
- asm_intmod(as, ir);
- break;
-
- case IR_NEG:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_XORPS);
- else
- asm_neg_not(as, ir, XOg_NEG);
- break;
- case IR_ABS: asm_fparith(as, ir, XO_ANDPS); break;
-
- case IR_MIN:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MINSD);
- else
- asm_min_max(as, ir, CC_G);
- break;
- case IR_MAX:
- if (irt_isnum(ir->t))
- asm_fparith(as, ir, XO_MAXSD);
- else
- asm_min_max(as, ir, CC_L);
- break;
-
- case IR_FPMATH: case IR_ATAN2: case IR_LDEXP:
- asm_fpmath(as, ir);
- break;
- case IR_POW:
-#if LJ_64 && LJ_HASFFI
- if (!irt_isnum(ir->t))
- asm_arith64(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
- IRCALL_lj_carith_powu64);
- else
-#endif
- asm_fppowi(as, ir);
- break;
-
- /* Overflow-checking arithmetic ops. Note: don't use LEA here! */
- case IR_ADDOV: asm_intarith(as, ir, XOg_ADD); break;
- case IR_SUBOV: asm_intarith(as, ir, XOg_SUB); break;
- case IR_MULOV: asm_intarith(as, ir, XOg_X_IMUL); break;
-
- /* Memory references. */
- case IR_AREF: asm_aref(as, ir); break;
- case IR_HREF: asm_href(as, ir); break;
- case IR_HREFK: asm_hrefk(as, ir); break;
- case IR_NEWREF: asm_newref(as, ir); break;
- case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
- case IR_FREF: asm_fref(as, ir); break;
- case IR_STRREF: asm_strref(as, ir); break;
-
- /* Loads and stores. */
- case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
- asm_ahuvload(as, ir);
- break;
- case IR_FLOAD: case IR_XLOAD: asm_fxload(as, ir); break;
- case IR_SLOAD: asm_sload(as, ir); break;
-
- case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
- case IR_FSTORE: case IR_XSTORE: asm_fxstore(as, ir); break;
-
- /* Allocations. */
- case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
- case IR_TNEW: asm_tnew(as, ir); break;
- case IR_TDUP: asm_tdup(as, ir); break;
- case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;
-
- /* Write barriers. */
- case IR_TBAR: asm_tbar(as, ir); break;
- case IR_OBAR: asm_obar(as, ir); break;
-
- /* Type conversions. */
- case IR_TOBIT: asm_tobit(as, ir); break;
- case IR_CONV: asm_conv(as, ir); break;
- case IR_TOSTR: asm_tostr(as, ir); break;
- case IR_STRTO: asm_strto(as, ir); break;
-
- /* Calls. */
- case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
- case IR_CALLXS: asm_callx(as, ir); break;
- case IR_CARG: break;
-
- default:
- setintV(&as->J->errinfo, ir->o);
- lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
- break;
- }
-}
-
-/* -- Trace setup --------------------------------------------------------- */
-
-/* Ensure there are enough stack slots for call arguments. */
-static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
-{
- IRRef args[CCI_NARGS_MAX*2];
- int nslots;
- asm_collectargs(as, ir, ci, args);
- nslots = asm_count_call_slots(as, ci, args);
- if (nslots > as->evenspill) /* Leave room for args in stack slots. */
- as->evenspill = nslots;
-#if LJ_64
- return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
-#else
- return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
-#endif
-}
-
-/* Target-specific setup. */
-static void asm_setup_target(ASMState *as)
-{
- asm_exitstub_setup(as, as->T->nsnap);
-}
-
-/* -- Trace patching ------------------------------------------------------ */
-
-static const uint8_t map_op1[256] = {
-0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
-0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
-0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
-0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
-#if LJ_64
-0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
-#else
-0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
-#endif
-0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
-0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
-0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
-0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
-0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
-#if LJ_64
-0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
-#else
-0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
-#endif
-0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
-0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
-0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
-0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
-0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
-};
-
-static const uint8_t map_op2[256] = {
-0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
-0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
-0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
-};
-
-static uint32_t asm_x86_inslen(const uint8_t* p)
-{
- uint32_t result = 0;
- uint32_t prefixes = 0;
- uint32_t x = map_op1[*p];
- for (;;) {
- switch (x >> 4) {
- case 0: return result + x + (prefixes & 4);
- case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
- case 2: x = map_op2[*++p]; break;
- case 3: p++; goto mrm;
- case 4: result -= (prefixes & 2); /* fallthrough */
- case 5: return result + (x & 15);
- case 6: /* Group 3. */
- if (p[1] & 0x38) x = 2;
- else if ((prefixes & 2) && (x == 0x66)) x = 4;
- goto mrm;
- case 7: /* VEX c4/c5. */
- if (LJ_32 && p[1] < 0xc0) {
- x = 2;
- goto mrm;
- }
- if (x == 0x70) {
- x = *++p & 0x1f;
- result++;
- if (x >= 2) {
- p += 2;
- result += 2;
- goto mrm;
- }
- }
- p++;
- result++;
- x = map_op2[*++p];
- break;
- case 8: result -= (prefixes & 2); /* fallthrough */
- case 9: mrm: /* ModR/M and possibly SIB. */
- result += (x & 15);
- x = *++p;
- switch (x >> 6) {
- case 0: if ((x & 7) == 5) return result + 4; break;
- case 1: result++; break;
- case 2: result += 4; break;
- case 3: return result;
- }
- if ((x & 7) == 4) {
- result++;
- if (x < 0x40 && (p[1] & 7) == 5) result += 4;
- }
- return result;
- }
- }
-}
-
-/* Patch exit jumps of existing machine code to a new target. */
-void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
-{
- MCode *p = T->mcode;
- MCode *mcarea = lj_mcode_patch(J, p, 0);
- MSize len = T->szmcode;
- MCode *px = exitstub_addr(J, exitno) - 6;
- MCode *pe = p+len-6;
- uint32_t stateaddr = u32ptr(&J2G(J)->vmstate);
- if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
- *(int32_t *)(p+len-4) = jmprel(p+len, target);
- /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
- for (; p < pe; p += asm_x86_inslen(p))
- if (*(uint32_t *)(p+(LJ_64 ? 3 : 2)) == stateaddr && p[0] == XI_MOVmi)
- break;
- lua_assert(p < pe);
- for (; p < pe; p += asm_x86_inslen(p))
- if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px)
- *(int32_t *)(p+2) = jmprel(p+6, target);
- lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
- lj_mcode_patch(J, mcarea, 1);
-}
-
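One detail worth keeping in mind when reading asm_tail_fixup and lj_asm_patchexit above: x86 near jumps store a displacement relative to the end of the jump instruction, which is why jmprel is passed p+len or p+6 rather than the start of the instruction. A minimal self-contained sketch of that computation (the helper name is made up for illustration):

#include <stdint.h>

static int32_t rel32_to(const uint8_t *ins_end, const uint8_t *target)
{
  /* Displacement written into the rel32 field of a near jmp/jcc:
  ** target minus the address of the following instruction.
  ** Assumes the distance fits into 32 bits.
  */
  return (int32_t)(target - ins_end);
}
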
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bc.h
deleted file mode 100644
index 108c10f..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bc.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
-** Bytecode instruction format.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_BC_H
-#define _LJ_BC_H
-
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* Bytecode instruction format, 32 bit wide, fields of 8 or 16 bit:
-**
-** +----+----+----+----+
-** | B | C | A | OP | Format ABC
-** +----+----+----+----+
-** | D | A | OP | Format AD
-** +--------------------
-** MSB LSB
-**
-** In-memory instructions are always stored in host byte order.
-*/
-
-/* Operand ranges and related constants. */
-#define BCMAX_A 0xff
-#define BCMAX_B 0xff
-#define BCMAX_C 0xff
-#define BCMAX_D 0xffff
-#define BCBIAS_J 0x8000
-#define NO_REG BCMAX_A
-#define NO_JMP (~(BCPos)0)
-
-/* Macros to get instruction fields. */
-#define bc_op(i) ((BCOp)((i)&0xff))
-#define bc_a(i) ((BCReg)(((i)>>8)&0xff))
-#define bc_b(i) ((BCReg)((i)>>24))
-#define bc_c(i) ((BCReg)(((i)>>16)&0xff))
-#define bc_d(i) ((BCReg)((i)>>16))
-#define bc_j(i) ((ptrdiff_t)bc_d(i)-BCBIAS_J)
-
-/* Macros to set instruction fields. */
-#define setbc_byte(p, x, ofs) \
- ((uint8_t *)(p))[LJ_ENDIAN_SELECT(ofs, 3-ofs)] = (uint8_t)(x)
-#define setbc_op(p, x) setbc_byte(p, (x), 0)
-#define setbc_a(p, x) setbc_byte(p, (x), 1)
-#define setbc_b(p, x) setbc_byte(p, (x), 3)
-#define setbc_c(p, x) setbc_byte(p, (x), 2)
-#define setbc_d(p, x) \
- ((uint16_t *)(p))[LJ_ENDIAN_SELECT(1, 0)] = (uint16_t)(x)
-#define setbc_j(p, x) setbc_d(p, (BCPos)((int32_t)(x)+BCBIAS_J))
-
-/* Macros to compose instructions. */
-#define BCINS_ABC(o, a, b, c) \
- (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(b)<<24)|((BCIns)(c)<<16))
-#define BCINS_AD(o, a, d) \
- (((BCIns)(o))|((BCIns)(a)<<8)|((BCIns)(d)<<16))
-#define BCINS_AJ(o, a, j) BCINS_AD(o, a, (BCPos)((int32_t)(j)+BCBIAS_J))
-
-/* Bytecode instruction definition. Order matters, see below.
-**
-** (name, filler, Amode, Bmode, Cmode or Dmode, metamethod)
-**
-** The opcode name suffixes specify the type for RB/RC or RD:
-** V = variable slot
-** S = string const
-** N = number const
-** P = primitive type (~itype)
-** B = unsigned byte literal
-** M = multiple args/results
-*/
-#define BCDEF(_) \
- /* Comparison ops. ORDER OPR. */ \
- _(ISLT, var, ___, var, lt) \
- _(ISGE, var, ___, var, lt) \
- _(ISLE, var, ___, var, le) \
- _(ISGT, var, ___, var, le) \
- \
- _(ISEQV, var, ___, var, eq) \
- _(ISNEV, var, ___, var, eq) \
- _(ISEQS, var, ___, str, eq) \
- _(ISNES, var, ___, str, eq) \
- _(ISEQN, var, ___, num, eq) \
- _(ISNEN, var, ___, num, eq) \
- _(ISEQP, var, ___, pri, eq) \
- _(ISNEP, var, ___, pri, eq) \
- \
- /* Unary test and copy ops. */ \
- _(ISTC, dst, ___, var, ___) \
- _(ISFC, dst, ___, var, ___) \
- _(IST, ___, ___, var, ___) \
- _(ISF, ___, ___, var, ___) \
- \
- /* Unary ops. */ \
- _(MOV, dst, ___, var, ___) \
- _(NOT, dst, ___, var, ___) \
- _(UNM, dst, ___, var, unm) \
- _(LEN, dst, ___, var, len) \
- \
- /* Binary ops. ORDER OPR. VV last, POW must be next. */ \
- _(ADDVN, dst, var, num, add) \
- _(SUBVN, dst, var, num, sub) \
- _(MULVN, dst, var, num, mul) \
- _(DIVVN, dst, var, num, div) \
- _(MODVN, dst, var, num, mod) \
- \
- _(ADDNV, dst, var, num, add) \
- _(SUBNV, dst, var, num, sub) \
- _(MULNV, dst, var, num, mul) \
- _(DIVNV, dst, var, num, div) \
- _(MODNV, dst, var, num, mod) \
- \
- _(ADDVV, dst, var, var, add) \
- _(SUBVV, dst, var, var, sub) \
- _(MULVV, dst, var, var, mul) \
- _(DIVVV, dst, var, var, div) \
- _(MODVV, dst, var, var, mod) \
- \
- _(POW, dst, var, var, pow) \
- _(CAT, dst, rbase, rbase, concat) \
- \
- /* Constant ops. */ \
- _(KSTR, dst, ___, str, ___) \
- _(KCDATA, dst, ___, cdata, ___) \
- _(KSHORT, dst, ___, lits, ___) \
- _(KNUM, dst, ___, num, ___) \
- _(KPRI, dst, ___, pri, ___) \
- _(KNIL, base, ___, base, ___) \
- \
- /* Upvalue and function ops. */ \
- _(UGET, dst, ___, uv, ___) \
- _(USETV, uv, ___, var, ___) \
- _(USETS, uv, ___, str, ___) \
- _(USETN, uv, ___, num, ___) \
- _(USETP, uv, ___, pri, ___) \
- _(UCLO, rbase, ___, jump, ___) \
- _(FNEW, dst, ___, func, gc) \
- \
- /* Table ops. */ \
- _(TNEW, dst, ___, lit, gc) \
- _(TDUP, dst, ___, tab, gc) \
- _(GGET, dst, ___, str, index) \
- _(GSET, var, ___, str, newindex) \
- _(TGETV, dst, var, var, index) \
- _(TGETS, dst, var, str, index) \
- _(TGETB, dst, var, lit, index) \
- _(TSETV, var, var, var, newindex) \
- _(TSETS, var, var, str, newindex) \
- _(TSETB, var, var, lit, newindex) \
- _(TSETM, base, ___, num, newindex) \
- \
- /* Calls and vararg handling. T = tail call. */ \
- _(CALLM, base, lit, lit, call) \
- _(CALL, base, lit, lit, call) \
- _(CALLMT, base, ___, lit, call) \
- _(CALLT, base, ___, lit, call) \
- _(ITERC, base, lit, lit, call) \
- _(ITERN, base, lit, lit, call) \
- _(VARG, base, lit, lit, ___) \
- _(ISNEXT, base, ___, jump, ___) \
- \
- /* Returns. */ \
- _(RETM, base, ___, lit, ___) \
- _(RET, rbase, ___, lit, ___) \
- _(RET0, rbase, ___, lit, ___) \
- _(RET1, rbase, ___, lit, ___) \
- \
- /* Loops and branches. I/J = interp/JIT, I/C/L = init/call/loop. */ \
- _(FORI, base, ___, jump, ___) \
- _(JFORI, base, ___, jump, ___) \
- \
- _(FORL, base, ___, jump, ___) \
- _(IFORL, base, ___, jump, ___) \
- _(JFORL, base, ___, lit, ___) \
- \
- _(ITERL, base, ___, jump, ___) \
- _(IITERL, base, ___, jump, ___) \
- _(JITERL, base, ___, lit, ___) \
- \
- _(LOOP, rbase, ___, jump, ___) \
- _(ILOOP, rbase, ___, jump, ___) \
- _(JLOOP, rbase, ___, lit, ___) \
- \
- _(JMP, rbase, ___, jump, ___) \
- \
- /* Function headers. I/J = interp/JIT, F/V/C = fixarg/vararg/C func. */ \
- _(FUNCF, rbase, ___, ___, ___) \
- _(IFUNCF, rbase, ___, ___, ___) \
- _(JFUNCF, rbase, ___, lit, ___) \
- _(FUNCV, rbase, ___, ___, ___) \
- _(IFUNCV, rbase, ___, ___, ___) \
- _(JFUNCV, rbase, ___, lit, ___) \
- _(FUNCC, rbase, ___, ___, ___) \
- _(FUNCCW, rbase, ___, ___, ___)
-
-/* Bytecode opcode numbers. */
-typedef enum {
-#define BCENUM(name, ma, mb, mc, mt) BC_##name,
-BCDEF(BCENUM)
-#undef BCENUM
- BC__MAX
-} BCOp;
-
-LJ_STATIC_ASSERT((int)BC_ISEQV+1 == (int)BC_ISNEV);
-LJ_STATIC_ASSERT(((int)BC_ISEQV^1) == (int)BC_ISNEV);
-LJ_STATIC_ASSERT(((int)BC_ISEQS^1) == (int)BC_ISNES);
-LJ_STATIC_ASSERT(((int)BC_ISEQN^1) == (int)BC_ISNEN);
-LJ_STATIC_ASSERT(((int)BC_ISEQP^1) == (int)BC_ISNEP);
-LJ_STATIC_ASSERT(((int)BC_ISLT^1) == (int)BC_ISGE);
-LJ_STATIC_ASSERT(((int)BC_ISLE^1) == (int)BC_ISGT);
-LJ_STATIC_ASSERT(((int)BC_ISLT^3) == (int)BC_ISGT);
-LJ_STATIC_ASSERT((int)BC_IST-(int)BC_ISTC == (int)BC_ISF-(int)BC_ISFC);
-LJ_STATIC_ASSERT((int)BC_CALLT-(int)BC_CALL == (int)BC_CALLMT-(int)BC_CALLM);
-LJ_STATIC_ASSERT((int)BC_CALLMT + 1 == (int)BC_CALLT);
-LJ_STATIC_ASSERT((int)BC_RETM + 1 == (int)BC_RET);
-LJ_STATIC_ASSERT((int)BC_FORL + 1 == (int)BC_IFORL);
-LJ_STATIC_ASSERT((int)BC_FORL + 2 == (int)BC_JFORL);
-LJ_STATIC_ASSERT((int)BC_ITERL + 1 == (int)BC_IITERL);
-LJ_STATIC_ASSERT((int)BC_ITERL + 2 == (int)BC_JITERL);
-LJ_STATIC_ASSERT((int)BC_LOOP + 1 == (int)BC_ILOOP);
-LJ_STATIC_ASSERT((int)BC_LOOP + 2 == (int)BC_JLOOP);
-LJ_STATIC_ASSERT((int)BC_FUNCF + 1 == (int)BC_IFUNCF);
-LJ_STATIC_ASSERT((int)BC_FUNCF + 2 == (int)BC_JFUNCF);
-LJ_STATIC_ASSERT((int)BC_FUNCV + 1 == (int)BC_IFUNCV);
-LJ_STATIC_ASSERT((int)BC_FUNCV + 2 == (int)BC_JFUNCV);
-
-/* This solves a circular dependency problem, change as needed. */
-#define FF_next_N 4
-
-/* Stack slots used by FORI/FORL, relative to operand A. */
-enum {
- FORL_IDX, FORL_STOP, FORL_STEP, FORL_EXT
-};
-
-/* Bytecode operand modes. ORDER BCMode */
-typedef enum {
- BCMnone, BCMdst, BCMbase, BCMvar, BCMrbase, BCMuv, /* Mode A must be <= 7 */
- BCMlit, BCMlits, BCMpri, BCMnum, BCMstr, BCMtab, BCMfunc, BCMjump, BCMcdata,
- BCM_max
-} BCMode;
-#define BCM___ BCMnone
-
-#define bcmode_a(op) ((BCMode)(lj_bc_mode[op] & 7))
-#define bcmode_b(op) ((BCMode)((lj_bc_mode[op]>>3) & 15))
-#define bcmode_c(op) ((BCMode)((lj_bc_mode[op]>>7) & 15))
-#define bcmode_d(op) bcmode_c(op)
-#define bcmode_hasd(op) ((lj_bc_mode[op] & (15<<3)) == (BCMnone<<3))
-#define bcmode_mm(op) ((MMS)(lj_bc_mode[op]>>11))
-
-#define BCMODE(name, ma, mb, mc, mm) \
- (BCM##ma|(BCM##mb<<3)|(BCM##mc<<7)|(MM_##mm<<11)),
-#define BCMODE_FF 0
-
-static LJ_AINLINE int bc_isret(BCOp op)
-{
- return (op == BC_RETM || op == BC_RET || op == BC_RET0 || op == BC_RET1);
-}
-
-LJ_DATA const uint16_t lj_bc_mode[];
-LJ_DATA const uint16_t lj_bc_ofs[];
-
-#endif
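To make the instruction layout above concrete, here is a small self-contained round trip through an AD-format instruction. It mirrors the macro bodies directly instead of including the LuaJIT headers, so the type name and the operand values are purely illustrative.

#include <assert.h>
#include <stdint.h>

typedef uint32_t BCInsSketch;  /* 32 bit instruction word, as documented. */

static void bc_ad_roundtrip(void)
{
  /* Compose like BCINS_AD(op, a, d), read back like bc_op/bc_a/bc_d. */
  BCInsSketch ins = (BCInsSketch)42 | ((BCInsSketch)3 << 8) |
                    ((BCInsSketch)0x1234 << 16);
  assert((ins & 0xff) == 42);        /* op */
  assert(((ins >> 8) & 0xff) == 3);  /* A  */
  assert((ins >> 16) == 0x1234);     /* D  */
}
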
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bcdump.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bcdump.h
deleted file mode 100644
index ba53c0a..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_bcdump.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
-** Bytecode dump definitions.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_BCDUMP_H
-#define _LJ_BCDUMP_H
-
-#include "lj_obj.h"
-#include "lj_lex.h"
-
-/* -- Bytecode dump format ------------------------------------------------ */
-
-/*
-** dump = header proto+ 0U
-** header = ESC 'L' 'J' versionB flagsU [namelenU nameB*]
-** proto = lengthU pdata
-** pdata = phead bcinsW* uvdataH* kgc* knum* [debugB*]
-** phead = flagsB numparamsB framesizeB numuvB numkgcU numknU numbcU
-** [debuglenU [firstlineU numlineU]]
-** kgc = kgctypeU { ktab | (loU hiU) | (rloU rhiU iloU ihiU) | strB* }
-** knum = intU0 | (loU1 hiU)
-** ktab = narrayU nhashU karray* khash*
-** karray = ktabk
-** khash = ktabk ktabk
-** ktabk = ktabtypeU { intU | (loU hiU) | strB* }
-**
-** B = 8 bit, H = 16 bit, W = 32 bit, U = ULEB128 of W, U0/U1 = ULEB128 of W+1
-*/
-
-/* Bytecode dump header. */
-#define BCDUMP_HEAD1 0x1b
-#define BCDUMP_HEAD2 0x4c
-#define BCDUMP_HEAD3 0x4a
-
-/* If you perform *any* kind of private modifications to the bytecode itself
-** or to the dump format, you *must* set BCDUMP_VERSION to 0x80 or higher.
-*/
-#define BCDUMP_VERSION 1
-
-/* Compatibility flags. */
-#define BCDUMP_F_BE 0x01
-#define BCDUMP_F_STRIP 0x02
-#define BCDUMP_F_FFI 0x04
-
-#define BCDUMP_F_KNOWN (BCDUMP_F_FFI*2-1)
-
-/* Type codes for the GC constants of a prototype. Plus length for strings. */
-enum {
- BCDUMP_KGC_CHILD, BCDUMP_KGC_TAB, BCDUMP_KGC_I64, BCDUMP_KGC_U64,
- BCDUMP_KGC_COMPLEX, BCDUMP_KGC_STR
-};
-
-/* Type codes for the keys/values of a constant table. */
-enum {
- BCDUMP_KTAB_NIL, BCDUMP_KTAB_FALSE, BCDUMP_KTAB_TRUE,
- BCDUMP_KTAB_INT, BCDUMP_KTAB_NUM, BCDUMP_KTAB_STR
-};
-
-/* -- Bytecode reader/writer ---------------------------------------------- */
-
-LJ_FUNC int lj_bcwrite(lua_State *L, GCproto *pt, lua_Writer writer,
- void *data, int strip);
-LJ_FUNC GCproto *lj_bcread(LexState *ls);
-
-#endif
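The 'U' items in the grammar above are ULEB128-encoded values. For reference, a generic encoder is sketched below; it only illustrates the encoding itself and is not the writer behind lj_bcwrite.

#include <stddef.h>
#include <stdint.h>

static size_t uleb128_write(uint8_t *p, uint32_t v)
{
  size_t n = 0;
  while (v >= 0x80) {
    p[n++] = (uint8_t)((v & 0x7f) | 0x80);  /* 7 data bits + continuation. */
    v >>= 7;
  }
  p[n++] = (uint8_t)v;  /* Last byte has the high bit clear. */
  return n;
}
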
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_carith.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_carith.h
deleted file mode 100644
index 82fc824..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_carith.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
-** C data arithmetic.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CARITH_H
-#define _LJ_CARITH_H
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-LJ_FUNC int lj_carith_op(lua_State *L, MMS mm);
-LJ_FUNC int lj_carith_len(lua_State *L);
-
-#if LJ_32 && LJ_HASJIT
-LJ_FUNC int64_t lj_carith_mul64(int64_t x, int64_t k);
-#endif
-LJ_FUNC uint64_t lj_carith_divu64(uint64_t a, uint64_t b);
-LJ_FUNC int64_t lj_carith_divi64(int64_t a, int64_t b);
-LJ_FUNC uint64_t lj_carith_modu64(uint64_t a, uint64_t b);
-LJ_FUNC int64_t lj_carith_modi64(int64_t a, int64_t b);
-LJ_FUNC uint64_t lj_carith_powu64(uint64_t x, uint64_t k);
-LJ_FUNC int64_t lj_carith_powi64(int64_t x, int64_t k);
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccall.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccall.h
deleted file mode 100644
index 9089e6c..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccall.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
-** FFI C call handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCALL_H
-#define _LJ_CCALL_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* -- C calling conventions ----------------------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-
-#if LJ_TARGET_X86
-#define CCALL_NARG_GPR 2 /* For fastcall arguments. */
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 1 /* For FP results on x87 stack. */
-#define CCALL_ALIGN_STACKARG 0 /* Don't align argument on stack. */
-#elif LJ_ABI_WIN
-#define CCALL_NARG_GPR 4
-#define CCALL_NARG_FPR 4
-#define CCALL_NRET_GPR 1
-#define CCALL_NRET_FPR 1
-#define CCALL_SPS_EXTRA 4
-#else
-#define CCALL_NARG_GPR 6
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 2
-#define CCALL_VECTOR_REG 1 /* Pass vectors in registers. */
-#endif
-
-#define CCALL_SPS_FREE 1
-#define CCALL_ALIGN_CALLSTATE 16
-
-typedef LJ_ALIGN(16) union FPRArg {
- double d[2];
- float f[4];
- uint8_t b[16];
- uint16_t s[8];
- int i[4];
- int64_t l[2];
-} FPRArg;
-
-typedef intptr_t GPRArg;
-
-#elif LJ_TARGET_ARM
-
-#define CCALL_NARG_GPR 4
-#define CCALL_NRET_GPR 2 /* For softfp double. */
-#if LJ_ABI_SOFTFP
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_FPR 0
-#else
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_FPR 4
-#endif
-#define CCALL_SPS_FREE 0
-
-typedef intptr_t GPRArg;
-typedef union FPRArg {
- double d;
- float f[2];
-} FPRArg;
-
-#elif LJ_TARGET_PPC
-
-#define CCALL_NARG_GPR 8
-#define CCALL_NARG_FPR 8
-#define CCALL_NRET_GPR 4 /* For complex double. */
-#define CCALL_NRET_FPR 1
-#define CCALL_SPS_EXTRA 4
-#define CCALL_SPS_FREE 0
-
-typedef intptr_t GPRArg;
-typedef double FPRArg;
-
-#elif LJ_TARGET_PPCSPE
-
-#define CCALL_NARG_GPR 8
-#define CCALL_NARG_FPR 0
-#define CCALL_NRET_GPR 4 /* For softfp complex double. */
-#define CCALL_NRET_FPR 0
-#define CCALL_SPS_FREE 0 /* NYI */
-
-typedef intptr_t GPRArg;
-
-#elif LJ_TARGET_MIPS
-
-#define CCALL_NARG_GPR 4
-#define CCALL_NARG_FPR 2
-#define CCALL_NRET_GPR 2
-#define CCALL_NRET_FPR 2
-#define CCALL_SPS_EXTRA 7
-#define CCALL_SPS_FREE 1
-
-typedef intptr_t GPRArg;
-typedef union FPRArg {
- double d;
- struct { LJ_ENDIAN_LOHI(float f; , float g;) };
-} FPRArg;
-
-#else
-#error "Missing calling convention definitions for this architecture"
-#endif
-
-#ifndef CCALL_SPS_EXTRA
-#define CCALL_SPS_EXTRA 0
-#endif
-#ifndef CCALL_VECTOR_REG
-#define CCALL_VECTOR_REG 0
-#endif
-#ifndef CCALL_ALIGN_STACKARG
-#define CCALL_ALIGN_STACKARG 1
-#endif
-#ifndef CCALL_ALIGN_CALLSTATE
-#define CCALL_ALIGN_CALLSTATE 8
-#endif
-
-#define CCALL_NUM_GPR \
- (CCALL_NARG_GPR > CCALL_NRET_GPR ? CCALL_NARG_GPR : CCALL_NRET_GPR)
-#define CCALL_NUM_FPR \
- (CCALL_NARG_FPR > CCALL_NRET_FPR ? CCALL_NARG_FPR : CCALL_NRET_FPR)
-
-/* Check against constants in lj_ctype.h. */
-LJ_STATIC_ASSERT(CCALL_NUM_GPR <= CCALL_MAX_GPR);
-LJ_STATIC_ASSERT(CCALL_NUM_FPR <= CCALL_MAX_FPR);
-
-#define CCALL_MAXSTACK 32
-
-/* -- C call state -------------------------------------------------------- */
-
-typedef LJ_ALIGN(CCALL_ALIGN_CALLSTATE) struct CCallState {
- void (*func)(void); /* Pointer to called function. */
- uint32_t spadj; /* Stack pointer adjustment. */
- uint8_t nsp; /* Number of stack slots. */
- uint8_t retref; /* Return value by reference. */
-#if LJ_TARGET_X64
- uint8_t ngpr; /* Number of arguments in GPRs. */
- uint8_t nfpr; /* Number of arguments in FPRs. */
-#elif LJ_TARGET_X86
- uint8_t resx87; /* Result on x87 stack: 1:float, 2:double. */
-#elif LJ_TARGET_PPC
- uint8_t nfpr; /* Number of arguments in FPRs. */
-#endif
-#if LJ_32
- int32_t align1;
-#endif
-#if CCALL_NUM_FPR
- FPRArg fpr[CCALL_NUM_FPR]; /* Arguments/results in FPRs. */
-#endif
- GPRArg gpr[CCALL_NUM_GPR]; /* Arguments/results in GPRs. */
- GPRArg stack[CCALL_MAXSTACK]; /* Stack slots. */
-} CCallState;
-
-/* -- C call handling ----------------------------------------------------- */
-
-/* Really belongs to lj_vm.h. */
-LJ_ASMF void LJ_FASTCALL lj_vm_ffi_call(CCallState *cc);
-
-LJ_FUNC CTypeID lj_ccall_ctid_vararg(CTState *cts, cTValue *o);
-LJ_FUNC int lj_ccall_func(lua_State *L, GCcdata *cd);
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccallback.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccallback.h
deleted file mode 100644
index a8cdad3..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ccallback.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-** FFI C callback handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCALLBACK_H
-#define _LJ_CCALLBACK_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Really belongs to lj_vm.h. */
-LJ_ASMF void lj_vm_ffi_callback(void);
-
-LJ_FUNC MSize lj_ccallback_ptr2slot(CTState *cts, void *p);
-LJ_FUNCA lua_State * LJ_FASTCALL lj_ccallback_enter(CTState *cts, void *cf);
-LJ_FUNCA void LJ_FASTCALL lj_ccallback_leave(CTState *cts, TValue *o);
-LJ_FUNC void *lj_ccallback_new(CTState *cts, CType *ct, GCfunc *fn);
-LJ_FUNC void lj_ccallback_mcode_free(CTState *cts);
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cconv.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cconv.h
deleted file mode 100644
index 0a0b66c..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cconv.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-** C type conversions.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CCONV_H
-#define _LJ_CCONV_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Compressed C type index. ORDER CCX. */
-enum {
- CCX_B, /* Bool. */
- CCX_I, /* Integer. */
- CCX_F, /* Floating-point number. */
- CCX_C, /* Complex. */
- CCX_V, /* Vector. */
- CCX_P, /* Pointer. */
- CCX_A, /* Refarray. */
- CCX_S /* Struct/union. */
-};
-
-/* Convert C type info to compressed C type index. ORDER CT. ORDER CCX. */
-static LJ_AINLINE uint32_t cconv_idx(CTInfo info)
-{
- uint32_t idx = ((info >> 26) & 15u); /* Dispatch bits. */
- lua_assert(ctype_type(info) <= CT_MAYCONVERT);
-#if LJ_64
- idx = ((uint32_t)(U64x(f436fff5,fff7f021) >> 4*idx) & 15u);
-#else
- idx = (((idx < 8 ? 0xfff7f021u : 0xf436fff5) >> 4*(idx & 7u)) & 15u);
-#endif
- lua_assert(idx < 8);
- return idx;
-}
-
-#define cconv_idx2(dinfo, sinfo) \
- ((cconv_idx((dinfo)) << 3) + cconv_idx((sinfo)))
-
-#define CCX(dst, src) ((CCX_##dst << 3) + CCX_##src)
-
-/* Conversion flags. */
-#define CCF_CAST 0x00000001u
-#define CCF_FROMTV 0x00000002u
-#define CCF_SAME 0x00000004u
-#define CCF_IGNQUAL 0x00000008u
-
-#define CCF_ARG_SHIFT 8
-#define CCF_ARG(n) ((n) << CCF_ARG_SHIFT)
-#define CCF_GETARG(f) ((f) >> CCF_ARG_SHIFT)
-
-LJ_FUNC int lj_cconv_compatptr(CTState *cts, CType *d, CType *s, CTInfo flags);
-LJ_FUNC void lj_cconv_ct_ct(CTState *cts, CType *d, CType *s,
- uint8_t *dp, uint8_t *sp, CTInfo flags);
-LJ_FUNC int lj_cconv_tv_ct(CTState *cts, CType *s, CTypeID sid,
- TValue *o, uint8_t *sp);
-LJ_FUNC int lj_cconv_tv_bf(CTState *cts, CType *s, TValue *o, uint8_t *sp);
-LJ_FUNC void lj_cconv_ct_tv(CTState *cts, CType *d,
- uint8_t *dp, TValue *o, CTInfo flags);
-LJ_FUNC void lj_cconv_bf_tv(CTState *cts, CType *d, uint8_t *dp, TValue *o);
-LJ_FUNC int lj_cconv_multi_init(CTState *cts, CType *d, TValue *o);
-LJ_FUNC void lj_cconv_ct_init(CTState *cts, CType *d, CTSize sz,
- uint8_t *dp, TValue *o, MSize len);
-
-#endif
-
-#endif
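/*
** Editor's sketch (not from the deleted header): the dispatch idea behind
** CCX() and cconv_idx2() above -- pack a destination class and a source
** class into one 6-bit code so a single switch can handle every (dst, src)
** combination. The class names mirror CCX_*; the case bodies are
** placeholders, not the real conversion logic.
*/
#include <stdio.h>

enum { X_B, X_I, X_F, X_C, X_V, X_P, X_A, X_S };    /* Mirrors CCX_B..CCX_S. */
#define XPAIR(dst, src)  ((X_##dst << 3) + X_##src) /* Same shape as CCX(). */

static const char *sketch_convert(int dcls, int scls)
{
  switch ((dcls << 3) + scls) {          /* Same packing as cconv_idx2(). */
  case XPAIR(I, I): return "int <- int";
  case XPAIR(I, F): return "int <- fp (truncate)";
  case XPAIR(F, I): return "fp <- int";
  case XPAIR(P, P): return "pointer <- pointer";
  default:          return "unhandled pair";
  }
}

int main(void)
{
  printf("%s\n", sketch_convert(X_I, X_F));  /* Prints: int <- fp (truncate) */
  return 0;
}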
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cdata.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cdata.h
deleted file mode 100644
index 4bb65db..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cdata.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-** C data management.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CDATA_H
-#define _LJ_CDATA_H
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* Get C data pointer. */
-static LJ_AINLINE void *cdata_getptr(void *p, CTSize sz)
-{
- if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
- return ((void *)(uintptr_t)*(uint32_t *)p);
- } else {
- lua_assert(sz == CTSIZE_PTR);
- return *(void **)p;
- }
-}
-
-/* Set C data pointer. */
-static LJ_AINLINE void cdata_setptr(void *p, CTSize sz, const void *v)
-{
- if (LJ_64 && sz == 4) { /* Support 32 bit pointers on 64 bit targets. */
- *(uint32_t *)p = (uint32_t)(uintptr_t)v;
- } else {
- lua_assert(sz == CTSIZE_PTR);
- *(void **)p = (void *)v;
- }
-}
-
-/* Allocate fixed-size C data object. */
-static LJ_AINLINE GCcdata *lj_cdata_new(CTState *cts, CTypeID id, CTSize sz)
-{
- GCcdata *cd;
-#ifdef LUA_USE_ASSERT
- CType *ct = ctype_raw(cts, id);
- lua_assert((ctype_hassize(ct->info) ? ct->size : CTSIZE_PTR) == sz);
-#endif
- cd = (GCcdata *)lj_mem_newgco(cts->L, sizeof(GCcdata) + sz);
- cd->gct = ~LJ_TCDATA;
- cd->ctypeid = ctype_check(cts, id);
- return cd;
-}
-
-/* Variant which works without a valid CTState. */
-static LJ_AINLINE GCcdata *lj_cdata_new_(lua_State *L, CTypeID id, CTSize sz)
-{
- GCcdata *cd = (GCcdata *)lj_mem_newgco(L, sizeof(GCcdata) + sz);
- cd->gct = ~LJ_TCDATA;
- cd->ctypeid = id;
- return cd;
-}
-
-LJ_FUNC GCcdata *lj_cdata_newref(CTState *cts, const void *pp, CTypeID id);
-LJ_FUNC GCcdata *lj_cdata_newv(CTState *cts, CTypeID id, CTSize sz,
- CTSize align);
-
-LJ_FUNC void LJ_FASTCALL lj_cdata_free(global_State *g, GCcdata *cd);
-LJ_FUNCA TValue * LJ_FASTCALL lj_cdata_setfin(lua_State *L, GCcdata *cd);
-
-LJ_FUNC CType *lj_cdata_index(CTState *cts, GCcdata *cd, cTValue *key,
- uint8_t **pp, CTInfo *qual);
-LJ_FUNC int lj_cdata_get(CTState *cts, CType *s, TValue *o, uint8_t *sp);
-LJ_FUNC void lj_cdata_set(CTState *cts, CType *d, uint8_t *dp, TValue *o,
- CTInfo qual);
-
-#endif
-
-#endif
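/*
** Editor's sketch (not from the deleted header): the idea behind
** cdata_getptr()/cdata_setptr() above -- a 4-byte pointer slot still
** round-trips on a 64-bit target as long as the address fits in 32 bits.
** The helpers below are illustrative stand-ins, not the real functions.
*/
#include <stdint.h>
#include <string.h>

static void sketch_setptr(void *slot, size_t sz, const void *v)
{
  if (sizeof(void *) == 8 && sz == 4) {
    uint32_t u = (uint32_t)(uintptr_t)v;   /* Truncating store. */
    memcpy(slot, &u, 4);
  } else {
    memcpy(slot, &v, sizeof(void *));      /* Full-width store. */
  }
}

static void *sketch_getptr(const void *slot, size_t sz)
{
  if (sizeof(void *) == 8 && sz == 4) {
    uint32_t u;
    memcpy(&u, slot, 4);
    return (void *)(uintptr_t)u;           /* Zero-extending load. */
  } else {
    void *p;
    memcpy(&p, slot, sizeof(void *));
    return p;
  }
}

int main(void)
{
  unsigned char slot[8];
  int x = 42;
  sketch_setptr(slot, sizeof(void *), &x);  /* Native-width slot. */
  return *(int *)sketch_getptr(slot, sizeof(void *)) == 42 ? 0 : 1;
}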
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_char.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_char.h
deleted file mode 100644
index c3c86d3..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_char.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-** Character types.
-** Donated to the public domain.
-*/
-
-#ifndef _LJ_CHAR_H
-#define _LJ_CHAR_H
-
-#include "lj_def.h"
-
-#define LJ_CHAR_CNTRL 0x01
-#define LJ_CHAR_SPACE 0x02
-#define LJ_CHAR_PUNCT 0x04
-#define LJ_CHAR_DIGIT 0x08
-#define LJ_CHAR_XDIGIT 0x10
-#define LJ_CHAR_UPPER 0x20
-#define LJ_CHAR_LOWER 0x40
-#define LJ_CHAR_IDENT 0x80
-#define LJ_CHAR_ALPHA (LJ_CHAR_LOWER|LJ_CHAR_UPPER)
-#define LJ_CHAR_ALNUM (LJ_CHAR_ALPHA|LJ_CHAR_DIGIT)
-#define LJ_CHAR_GRAPH (LJ_CHAR_ALNUM|LJ_CHAR_PUNCT)
-
-/* Only pass -1 or 0..255 to these macros. Never pass a signed char! */
-#define lj_char_isa(c, t) ((lj_char_bits+1)[(c)] & t)
-#define lj_char_iscntrl(c) lj_char_isa((c), LJ_CHAR_CNTRL)
-#define lj_char_isspace(c) lj_char_isa((c), LJ_CHAR_SPACE)
-#define lj_char_ispunct(c) lj_char_isa((c), LJ_CHAR_PUNCT)
-#define lj_char_isdigit(c) lj_char_isa((c), LJ_CHAR_DIGIT)
-#define lj_char_isxdigit(c) lj_char_isa((c), LJ_CHAR_XDIGIT)
-#define lj_char_isupper(c) lj_char_isa((c), LJ_CHAR_UPPER)
-#define lj_char_islower(c) lj_char_isa((c), LJ_CHAR_LOWER)
-#define lj_char_isident(c) lj_char_isa((c), LJ_CHAR_IDENT)
-#define lj_char_isalpha(c) lj_char_isa((c), LJ_CHAR_ALPHA)
-#define lj_char_isalnum(c) lj_char_isa((c), LJ_CHAR_ALNUM)
-#define lj_char_isgraph(c) lj_char_isa((c), LJ_CHAR_GRAPH)
-
-#define lj_char_toupper(c) ((c) - (lj_char_islower(c) >> 1))
-#define lj_char_tolower(c) ((c) + lj_char_isupper(c))
-
-LJ_DATA const uint8_t lj_char_bits[257];
-
-#endif
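/*
** Editor's sketch (not from the deleted header): why lj_char_bits has 257
** entries and is indexed through (lj_char_bits+1)[c]. Slot 0 serves c == -1
** (end of input), so the lexer can classify EOF without a separate branch.
** The tiny table below only marks the ASCII digits.
*/
#include <stdio.h>

static const unsigned char sketch_bits[257] = {
  /* [0] is the c == -1 slot: EOF matches no character class. */
  [1 + '0'] = 0x08, [1 + '1'] = 0x08, [1 + '2'] = 0x08, [1 + '3'] = 0x08,
  [1 + '4'] = 0x08, [1 + '5'] = 0x08, [1 + '6'] = 0x08, [1 + '7'] = 0x08,
  [1 + '8'] = 0x08, [1 + '9'] = 0x08   /* 0x08 == LJ_CHAR_DIGIT. */
};

#define sketch_isdigit(c)  ((sketch_bits+1)[(c)] & 0x08)

int main(void)
{
  printf("%d %d %d\n", sketch_isdigit('7'), sketch_isdigit('x'),
         sketch_isdigit(-1));  /* 8 0 0 -- EOF is safely "not a digit". */
  return 0;
}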
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_clib.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_clib.h
deleted file mode 100644
index fcc9dac..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_clib.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
-** FFI C library loader.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CLIB_H
-#define _LJ_CLIB_H
-
-#include "lj_obj.h"
-
-#if LJ_HASFFI
-
-/* Namespace for C library indexing. */
-#define CLNS_INDEX ((1u<<CT_FUNC)|(1u<<CT_EXTERN)|(1u<<CT_CONSTVAL))
-
-/* C library namespace. */
-typedef struct CLibrary {
- void *handle; /* Opaque handle for dynamic library loader. */
- GCtab *cache; /* Cache for resolved symbols. Anchored in ud->env. */
-} CLibrary;
-
-LJ_FUNC TValue *lj_clib_index(lua_State *L, CLibrary *cl, GCstr *name);
-LJ_FUNC void lj_clib_load(lua_State *L, GCtab *mt, GCstr *name, int global);
-LJ_FUNC void lj_clib_unload(CLibrary *cl);
-LJ_FUNC void lj_clib_default(lua_State *L, GCtab *mt);
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cparse.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cparse.h
deleted file mode 100644
index bad1060..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_cparse.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
-** C declaration parser.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CPARSE_H
-#define _LJ_CPARSE_H
-
-#include "lj_obj.h"
-#include "lj_ctype.h"
-
-#if LJ_HASFFI
-
-/* C parser limits. */
-#define CPARSE_MAX_BUF 32768 /* Max. token buffer size. */
-#define CPARSE_MAX_DECLSTACK 100 /* Max. declaration stack depth. */
-#define CPARSE_MAX_DECLDEPTH 20 /* Max. recursive declaration depth. */
-#define CPARSE_MAX_PACKSTACK 7 /* Max. pack pragma stack depth. */
-
-/* Flags for C parser mode. */
-#define CPARSE_MODE_MULTI 1 /* Process multiple declarations. */
-#define CPARSE_MODE_ABSTRACT 2 /* Accept abstract declarators. */
-#define CPARSE_MODE_DIRECT 4 /* Accept direct declarators. */
-#define CPARSE_MODE_FIELD 8 /* Accept field width in bits, too. */
-#define CPARSE_MODE_NOIMPLICIT 16 /* Reject implicit declarations. */
-#define CPARSE_MODE_SKIP 32 /* Skip definitions, ignore errors. */
-
-typedef int CPChar; /* C parser character. Unsigned ext. from char. */
-typedef int CPToken; /* C parser token. */
-
-/* C parser internal value representation. */
-typedef struct CPValue {
- union {
- int32_t i32; /* Value for CTID_INT32. */
- uint32_t u32; /* Value for CTID_UINT32. */
- };
- CTypeID id; /* C Type ID of the value. */
-} CPValue;
-
-/* C parser state. */
-typedef struct CPState {
- CPChar c; /* Current character. */
- CPToken tok; /* Current token. */
- CPValue val; /* Token value. */
- GCstr *str; /* Interned string of identifier/keyword. */
- CType *ct; /* C type table entry. */
- const char *p; /* Current position in input buffer. */
- SBuf sb; /* String buffer for tokens. */
- lua_State *L; /* Lua state. */
- CTState *cts; /* C type state. */
- TValue *param; /* C type parameters. */
- const char *srcname; /* Current source name. */
- BCLine linenumber; /* Input line counter. */
- int depth; /* Recursive declaration depth. */
- uint32_t tmask; /* Type mask for next identifier. */
- uint32_t mode; /* C parser mode. */
- uint8_t packstack[CPARSE_MAX_PACKSTACK]; /* Stack for pack pragmas. */
- uint8_t curpack; /* Current position in pack pragma stack. */
-} CPState;
-
-LJ_FUNC int lj_cparse(CPState *cp);
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_crecord.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_crecord.h
deleted file mode 100644
index 8e0afd1..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_crecord.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
-** Trace recorder for C data operations.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CRECORD_H
-#define _LJ_CRECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-#include "lj_ffrecord.h"
-
-#if LJ_HASJIT && LJ_HASFFI
-LJ_FUNC void LJ_FASTCALL recff_cdata_index(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_cdata_call(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_cdata_arith(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_clib_index(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_new(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_errno(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_string(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_copy(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_fill(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_typeof(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_istype(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_abi(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_xof(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL recff_ffi_gc(jit_State *J, RecordFFData *rd);
-LJ_FUNC void LJ_FASTCALL lj_crecord_tonumber(jit_State *J, RecordFFData *rd);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ctype.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ctype.h
deleted file mode 100644
index 2aefd3b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ctype.h
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
-** C type management.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_CTYPE_H
-#define _LJ_CTYPE_H
-
-#include "lj_obj.h"
-#include "lj_gc.h"
-
-#if LJ_HASFFI
-
-/* -- C type definitions -------------------------------------------------- */
-
-/* C type numbers. Highest 4 bits of C type info. ORDER CT. */
-enum {
- /* Externally visible types. */
- CT_NUM, /* Integer or floating-point numbers. */
- CT_STRUCT, /* Struct or union. */
- CT_PTR, /* Pointer or reference. */
- CT_ARRAY, /* Array or complex type. */
- CT_MAYCONVERT = CT_ARRAY,
- CT_VOID, /* Void type. */
- CT_ENUM, /* Enumeration. */
- CT_HASSIZE = CT_ENUM, /* Last type where ct->size holds the actual size. */
- CT_FUNC, /* Function. */
- CT_TYPEDEF, /* Typedef. */
- CT_ATTRIB, /* Miscellaneous attributes. */
- /* Internal element types. */
- CT_FIELD, /* Struct/union field or function parameter. */
- CT_BITFIELD, /* Struct/union bitfield. */
- CT_CONSTVAL, /* Constant value. */
- CT_EXTERN, /* External reference. */
- CT_KW /* Keyword. */
-};
-
-LJ_STATIC_ASSERT(((int)CT_PTR & (int)CT_ARRAY) == CT_PTR);
-LJ_STATIC_ASSERT(((int)CT_STRUCT & (int)CT_ARRAY) == CT_STRUCT);
-
-/*
-** ---------- info ------------
-** |type flags... A cid | size | sib | next | name |
-** +----------------------------+--------+-------+-------+-------+--
-** |NUM BFcvUL.. A | size | | type | |
-** |STRUCT ..cvU..V A | size | field | name? | name? |
-** |PTR ..cvR... A cid | size | | type | |
-** |ARRAY VCcv...V A cid | size | | type | |
-** |VOID ..cv.... A | size | | type | |
-** |ENUM A cid | size | const | name? | name? |
-** |FUNC ....VS.. cc cid | nargs | field | name? | name? |
-** |TYPEDEF cid | | | name | name |
-** |ATTRIB attrnum cid | attr | sib? | type? | |
-** |FIELD cid | offset | field | | name? |
-** |BITFIELD B.cvU csz bsz pos | offset | field | | name? |
-** |CONSTVAL c cid | value | const | name | name |
-** |EXTERN cid | | sib? | name | name |
-** |KW tok | size | | name | name |
-** +----------------------------+--------+-------+-------+-------+--
-** ^^ ^^--- bits used for C type conversion dispatch
-*/
-
-/* C type info flags. TFFArrrr */
-#define CTF_BOOL 0x08000000u /* Boolean: NUM, BITFIELD. */
-#define CTF_FP 0x04000000u /* Floating-point: NUM. */
-#define CTF_CONST 0x02000000u /* Const qualifier. */
-#define CTF_VOLATILE 0x01000000u /* Volatile qualifier. */
-#define CTF_UNSIGNED 0x00800000u /* Unsigned: NUM, BITFIELD. */
-#define CTF_LONG 0x00400000u /* Long: NUM. */
-#define CTF_VLA 0x00100000u /* Variable-length: ARRAY, STRUCT. */
-#define CTF_REF 0x00800000u /* Reference: PTR. */
-#define CTF_VECTOR 0x08000000u /* Vector: ARRAY. */
-#define CTF_COMPLEX 0x04000000u /* Complex: ARRAY. */
-#define CTF_UNION 0x00800000u /* Union: STRUCT. */
-#define CTF_VARARG 0x00800000u /* Vararg: FUNC. */
-#define CTF_SSEREGPARM 0x00400000u /* SSE register parameters: FUNC. */
-
-#define CTF_QUAL (CTF_CONST|CTF_VOLATILE)
-#define CTF_ALIGN (CTMASK_ALIGN<<CTSHIFT_ALIGN)
-#define CTF_UCHAR ((char)-1 > 0 ? CTF_UNSIGNED : 0)
-
-/* Flags used in parser. .F.Ammvf cp->attr */
-#define CTFP_ALIGNED 0x00000001u /* cp->attr + ALIGN */
-#define CTFP_PACKED 0x00000002u /* cp->attr */
-/* ...C...f cp->fattr */
-#define CTFP_CCONV 0x00000001u /* cp->fattr + CCONV/[SSE]REGPARM */
-
-/* C type info bitfields. */
-#define CTMASK_CID 0x0000ffffu /* Max. 65536 type IDs. */
-#define CTMASK_NUM 0xf0000000u /* Max. 16 type numbers. */
-#define CTSHIFT_NUM 28
-#define CTMASK_ALIGN 15 /* Max. alignment is 2^15. */
-#define CTSHIFT_ALIGN 16
-#define CTMASK_ATTRIB 255 /* Max. 256 attributes. */
-#define CTSHIFT_ATTRIB 16
-#define CTMASK_CCONV 3 /* Max. 4 calling conventions. */
-#define CTSHIFT_CCONV 16
-#define CTMASK_REGPARM 3 /* Max. 0-3 regparms. */
-#define CTSHIFT_REGPARM 18
-/* Bitfields only used in parser. */
-#define CTMASK_VSIZEP 15 /* Max. vector size is 2^15. */
-#define CTSHIFT_VSIZEP 4
-#define CTMASK_MSIZEP 255 /* Max. type size (via mode) is 128. */
-#define CTSHIFT_MSIZEP 8
-
-/* Info bits for BITFIELD. Max. size of bitfield is 64 bits. */
-#define CTBSZ_MAX 32 /* Max. size of bitfield is 32 bit. */
-#define CTBSZ_FIELD 127 /* Temp. marker for regular field. */
-#define CTMASK_BITPOS 127
-#define CTMASK_BITBSZ 127
-#define CTMASK_BITCSZ 127
-#define CTSHIFT_BITPOS 0
-#define CTSHIFT_BITBSZ 8
-#define CTSHIFT_BITCSZ 16
-
-#define CTF_INSERT(info, field, val) \
- info = (info & ~(CTMASK_##field<<CTSHIFT_##field)) | \
- (((CTSize)(val) & CTMASK_##field) << CTSHIFT_##field)
-
-/* Calling conventions. ORDER CC */
-enum { CTCC_CDECL, CTCC_THISCALL, CTCC_FASTCALL, CTCC_STDCALL };
-
-/* Attribute numbers. */
-enum {
- CTA_NONE, /* Ignored attribute. Must be zero. */
- CTA_QUAL, /* Unmerged qualifiers. */
- CTA_ALIGN, /* Alignment override. */
- CTA_SUBTYPE, /* Transparent sub-type. */
- CTA_REDIR, /* Redirected symbol name. */
- CTA_BAD, /* To catch bad IDs. */
- CTA__MAX
-};
-
-/* Special sizes. */
-#define CTSIZE_INVALID 0xffffffffu
-
-typedef uint32_t CTInfo; /* Type info. */
-typedef uint32_t CTSize; /* Type size. */
-typedef uint32_t CTypeID; /* Type ID. */
-typedef uint16_t CTypeID1; /* Minimum-sized type ID. */
-
-/* C type table element. */
-typedef struct CType {
- CTInfo info; /* Type info. */
- CTSize size; /* Type size or other info. */
- CTypeID1 sib; /* Sibling element. */
- CTypeID1 next; /* Next element in hash chain. */
- GCRef name; /* Element name (GCstr). */
-} CType;
-
-#define CTHASH_SIZE 128 /* Number of hash anchors. */
-#define CTHASH_MASK (CTHASH_SIZE-1)
-
-/* Simplify target-specific configuration. Checked in lj_ccall.h. */
-#define CCALL_MAX_GPR 8
-#define CCALL_MAX_FPR 8
-
-typedef LJ_ALIGN(8) union FPRCBArg { double d; float f[2]; } FPRCBArg;
-
-/* C callback state. Defined here, to avoid dragging in lj_ccall.h. */
-
-typedef LJ_ALIGN(8) struct CCallback {
- FPRCBArg fpr[CCALL_MAX_FPR]; /* Arguments/results in FPRs. */
- intptr_t gpr[CCALL_MAX_GPR]; /* Arguments/results in GPRs. */
- intptr_t *stack; /* Pointer to arguments on stack. */
- void *mcode; /* Machine code for callback func. pointers. */
- CTypeID1 *cbid; /* Callback type table. */
- MSize sizeid; /* Size of callback type table. */
- MSize topid; /* Highest unused callback type table slot. */
- MSize slot; /* Current callback slot. */
-} CCallback;
-
-/* C type state. */
-typedef struct CTState {
- CType *tab; /* C type table. */
- CTypeID top; /* Current top of C type table. */
- MSize sizetab; /* Size of C type table. */
- lua_State *L; /* Lua state (needed for errors and allocations). */
- global_State *g; /* Global state. */
- GCtab *finalizer; /* Map of cdata to finalizer. */
- GCtab *miscmap; /* Map of -CTypeID to metatable and cb slot to func. */
- CCallback cb; /* Temporary callback state. */
- CTypeID1 hash[CTHASH_SIZE]; /* Hash anchors for C type table. */
-} CTState;
-
-#define CTINFO(ct, flags) (((CTInfo)(ct) << CTSHIFT_NUM) + (flags))
-#define CTALIGN(al) ((CTSize)(al) << CTSHIFT_ALIGN)
-#define CTATTRIB(at) ((CTInfo)(at) << CTSHIFT_ATTRIB)
-
-#define ctype_type(info) ((info) >> CTSHIFT_NUM)
-#define ctype_cid(info) ((CTypeID)((info) & CTMASK_CID))
-#define ctype_align(info) (((info) >> CTSHIFT_ALIGN) & CTMASK_ALIGN)
-#define ctype_attrib(info) (((info) >> CTSHIFT_ATTRIB) & CTMASK_ATTRIB)
-#define ctype_bitpos(info) (((info) >> CTSHIFT_BITPOS) & CTMASK_BITPOS)
-#define ctype_bitbsz(info) (((info) >> CTSHIFT_BITBSZ) & CTMASK_BITBSZ)
-#define ctype_bitcsz(info) (((info) >> CTSHIFT_BITCSZ) & CTMASK_BITCSZ)
-#define ctype_vsizeP(info) (((info) >> CTSHIFT_VSIZEP) & CTMASK_VSIZEP)
-#define ctype_msizeP(info) (((info) >> CTSHIFT_MSIZEP) & CTMASK_MSIZEP)
-#define ctype_cconv(info) (((info) >> CTSHIFT_CCONV) & CTMASK_CCONV)
-
-/* Simple type checks. */
-#define ctype_isnum(info) (ctype_type((info)) == CT_NUM)
-#define ctype_isvoid(info) (ctype_type((info)) == CT_VOID)
-#define ctype_isptr(info) (ctype_type((info)) == CT_PTR)
-#define ctype_isarray(info) (ctype_type((info)) == CT_ARRAY)
-#define ctype_isstruct(info) (ctype_type((info)) == CT_STRUCT)
-#define ctype_isfunc(info) (ctype_type((info)) == CT_FUNC)
-#define ctype_isenum(info) (ctype_type((info)) == CT_ENUM)
-#define ctype_istypedef(info) (ctype_type((info)) == CT_TYPEDEF)
-#define ctype_isattrib(info) (ctype_type((info)) == CT_ATTRIB)
-#define ctype_isfield(info) (ctype_type((info)) == CT_FIELD)
-#define ctype_isbitfield(info) (ctype_type((info)) == CT_BITFIELD)
-#define ctype_isconstval(info) (ctype_type((info)) == CT_CONSTVAL)
-#define ctype_isextern(info) (ctype_type((info)) == CT_EXTERN)
-#define ctype_hassize(info) (ctype_type((info)) <= CT_HASSIZE)
-
-/* Combined type and flag checks. */
-#define ctype_isinteger(info) \
- (((info) & (CTMASK_NUM|CTF_BOOL|CTF_FP)) == CTINFO(CT_NUM, 0))
-#define ctype_isinteger_or_bool(info) \
- (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, 0))
-#define ctype_isbool(info) \
- (((info) & (CTMASK_NUM|CTF_BOOL)) == CTINFO(CT_NUM, CTF_BOOL))
-#define ctype_isfp(info) \
- (((info) & (CTMASK_NUM|CTF_FP)) == CTINFO(CT_NUM, CTF_FP))
-
-#define ctype_ispointer(info) \
- ((ctype_type(info) >> 1) == (CT_PTR >> 1)) /* Pointer or array. */
-#define ctype_isref(info) \
- (((info) & (CTMASK_NUM|CTF_REF)) == CTINFO(CT_PTR, CTF_REF))
-
-#define ctype_isrefarray(info) \
- (((info) & (CTMASK_NUM|CTF_VECTOR|CTF_COMPLEX)) == CTINFO(CT_ARRAY, 0))
-#define ctype_isvector(info) \
- (((info) & (CTMASK_NUM|CTF_VECTOR)) == CTINFO(CT_ARRAY, CTF_VECTOR))
-#define ctype_iscomplex(info) \
- (((info) & (CTMASK_NUM|CTF_COMPLEX)) == CTINFO(CT_ARRAY, CTF_COMPLEX))
-
-#define ctype_isvltype(info) \
- (((info) & ((CTMASK_NUM|CTF_VLA) - (2u<<CTSHIFT_NUM))) == \
- CTINFO(CT_STRUCT, CTF_VLA)) /* VL array or VL struct. */
-#define ctype_isvlarray(info) \
- (((info) & (CTMASK_NUM|CTF_VLA)) == CTINFO(CT_ARRAY, CTF_VLA))
-
-#define ctype_isxattrib(info, at) \
- (((info) & (CTMASK_NUM|CTATTRIB(CTMASK_ATTRIB))) == \
- CTINFO(CT_ATTRIB, CTATTRIB(at)))
-
-/* Target-dependent sizes and alignments. */
-#if LJ_64
-#define CTSIZE_PTR 8
-#define CTALIGN_PTR CTALIGN(3)
-#else
-#define CTSIZE_PTR 4
-#define CTALIGN_PTR CTALIGN(2)
-#endif
-
-#define CTINFO_REF(ref) \
- CTINFO(CT_PTR, (CTF_CONST|CTF_REF|CTALIGN_PTR) + (ref))
-
-#define CT_MEMALIGN 3 /* Alignment guaranteed by memory allocator. */
-
-/* -- Predefined types ---------------------------------------------------- */
-
-/* Target-dependent types. */
-#if LJ_TARGET_PPC || LJ_TARGET_PPCSPE
-#define CTTYDEFP(_) \
- _(LINT32, 4, CT_NUM, CTF_LONG|CTALIGN(2))
-#else
-#define CTTYDEFP(_)
-#endif
-
-/* Common types. */
-#define CTTYDEF(_) \
- _(NONE, 0, CT_ATTRIB, CTATTRIB(CTA_BAD)) \
- _(VOID, -1, CT_VOID, CTALIGN(0)) \
- _(CVOID, -1, CT_VOID, CTF_CONST|CTALIGN(0)) \
- _(BOOL, 1, CT_NUM, CTF_BOOL|CTF_UNSIGNED|CTALIGN(0)) \
- _(CCHAR, 1, CT_NUM, CTF_CONST|CTF_UCHAR|CTALIGN(0)) \
- _(INT8, 1, CT_NUM, CTALIGN(0)) \
- _(UINT8, 1, CT_NUM, CTF_UNSIGNED|CTALIGN(0)) \
- _(INT16, 2, CT_NUM, CTALIGN(1)) \
- _(UINT16, 2, CT_NUM, CTF_UNSIGNED|CTALIGN(1)) \
- _(INT32, 4, CT_NUM, CTALIGN(2)) \
- _(UINT32, 4, CT_NUM, CTF_UNSIGNED|CTALIGN(2)) \
- _(INT64, 8, CT_NUM, CTF_LONG|CTALIGN(3)) \
- _(UINT64, 8, CT_NUM, CTF_UNSIGNED|CTF_LONG|CTALIGN(3)) \
- _(FLOAT, 4, CT_NUM, CTF_FP|CTALIGN(2)) \
- _(DOUBLE, 8, CT_NUM, CTF_FP|CTALIGN(3)) \
- _(COMPLEX_FLOAT, 8, CT_ARRAY, CTF_COMPLEX|CTALIGN(2)|CTID_FLOAT) \
- _(COMPLEX_DOUBLE, 16, CT_ARRAY, CTF_COMPLEX|CTALIGN(3)|CTID_DOUBLE) \
- _(P_VOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_VOID) \
- _(P_CVOID, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CVOID) \
- _(P_CCHAR, CTSIZE_PTR, CT_PTR, CTALIGN_PTR|CTID_CCHAR) \
- _(A_CCHAR, -1, CT_ARRAY, CTF_CONST|CTALIGN(0)|CTID_CCHAR) \
- _(CTYPEID, 4, CT_ENUM, CTALIGN(2)|CTID_INT32) \
- CTTYDEFP(_) \
- /* End of type list. */
-
-/* Public predefined type IDs. */
-enum {
-#define CTTYIDDEF(id, sz, ct, info) CTID_##id,
-CTTYDEF(CTTYIDDEF)
-#undef CTTYIDDEF
- /* Predefined typedefs and keywords follow. */
- CTID_MAX = 65536
-};
-
-/* Target-dependent type IDs. */
-#if LJ_64
-#define CTID_INT_PSZ CTID_INT64
-#define CTID_UINT_PSZ CTID_UINT64
-#else
-#define CTID_INT_PSZ CTID_INT32
-#define CTID_UINT_PSZ CTID_UINT32
-#endif
-
-#if LJ_ABI_WIN
-#define CTID_WCHAR CTID_UINT16
-#elif LJ_TARGET_PPC
-#define CTID_WCHAR CTID_LINT32
-#else
-#define CTID_WCHAR CTID_INT32
-#endif
-
-/* -- C tokens and keywords ----------------------------------------------- */
-
-/* C lexer keywords. */
-#define CTOKDEF(_) \
- _(IDENT, "<identifier>") _(STRING, "<string>") \
- _(INTEGER, "<integer>") _(EOF, "<eof>") \
- _(OROR, "||") _(ANDAND, "&&") _(EQ, "==") _(NE, "!=") \
- _(LE, "<=") _(GE, ">=") _(SHL, "<<") _(SHR, ">>") _(DEREF, "->")
-
-/* Simple declaration specifiers. */
-#define CDSDEF(_) \
- _(VOID) _(BOOL) _(CHAR) _(INT) _(FP) \
- _(LONG) _(LONGLONG) _(SHORT) _(COMPLEX) _(SIGNED) _(UNSIGNED) \
- _(CONST) _(VOLATILE) _(RESTRICT) _(INLINE) \
- _(TYPEDEF) _(EXTERN) _(STATIC) _(AUTO) _(REGISTER)
-
-/* C keywords. */
-#define CKWDEF(_) \
- CDSDEF(_) _(EXTENSION) _(ASM) _(ATTRIBUTE) \
- _(DECLSPEC) _(CCDECL) _(PTRSZ) \
- _(STRUCT) _(UNION) _(ENUM) \
- _(SIZEOF) _(ALIGNOF)
-
-/* C token numbers. */
-enum {
- CTOK_OFS = 255,
-#define CTOKNUM(name, sym) CTOK_##name,
-#define CKWNUM(name) CTOK_##name,
-CTOKDEF(CTOKNUM)
-CKWDEF(CKWNUM)
-#undef CTOKNUM
-#undef CKWNUM
- CTOK_FIRSTDECL = CTOK_VOID,
- CTOK_FIRSTSCL = CTOK_TYPEDEF,
- CTOK_LASTDECLFLAG = CTOK_REGISTER,
- CTOK_LASTDECL = CTOK_ENUM
-};
-
-/* Declaration specifier flags. */
-enum {
-#define CDSFLAG(name) CDF_##name = (1u << (CTOK_##name - CTOK_FIRSTDECL)),
-CDSDEF(CDSFLAG)
-#undef CDSFLAG
- CDF__END
-};
-
-#define CDF_SCL (CDF_TYPEDEF|CDF_EXTERN|CDF_STATIC|CDF_AUTO|CDF_REGISTER)
-
-/* -- C type management --------------------------------------------------- */
-
-#define ctype_ctsG(g) (mref((g)->ctype_state, CTState))
-
-/* Get C type state. */
-static LJ_AINLINE CTState *ctype_cts(lua_State *L)
-{
- CTState *cts = ctype_ctsG(G(L));
- cts->L = L; /* Save L for errors and allocations. */
- return cts;
-}
-
-/* Save and restore state of C type table. */
-#define LJ_CTYPE_SAVE(cts) CTState savects_ = *(cts)
-#define LJ_CTYPE_RESTORE(cts) \
- ((cts)->top = savects_.top, \
- memcpy((cts)->hash, savects_.hash, sizeof(savects_.hash)))
-
-/* Check C type ID for validity when assertions are enabled. */
-static LJ_AINLINE CTypeID ctype_check(CTState *cts, CTypeID id)
-{
- lua_assert(id > 0 && id < cts->top); UNUSED(cts);
- return id;
-}
-
-/* Get C type for C type ID. */
-static LJ_AINLINE CType *ctype_get(CTState *cts, CTypeID id)
-{
- return &cts->tab[ctype_check(cts, id)];
-}
-
-/* Get C type ID for a C type. */
-#define ctype_typeid(cts, ct) ((CTypeID)((ct) - (cts)->tab))
-
-/* Get child C type. */
-static LJ_AINLINE CType *ctype_child(CTState *cts, CType *ct)
-{
- lua_assert(!(ctype_isvoid(ct->info) || ctype_isstruct(ct->info) ||
- ctype_isbitfield(ct->info))); /* These don't have children. */
- return ctype_get(cts, ctype_cid(ct->info));
-}
-
-/* Get raw type for a C type ID. */
-static LJ_AINLINE CType *ctype_raw(CTState *cts, CTypeID id)
-{
- CType *ct = ctype_get(cts, id);
- while (ctype_isattrib(ct->info)) ct = ctype_child(cts, ct);
- return ct;
-}
-
-/* Get raw type of the child of a C type. */
-static LJ_AINLINE CType *ctype_rawchild(CTState *cts, CType *ct)
-{
- do { ct = ctype_child(cts, ct); } while (ctype_isattrib(ct->info));
- return ct;
-}
-
-/* Set the name of a C type table element. */
-static LJ_AINLINE void ctype_setname(CType *ct, GCstr *s)
-{
- /* NOBARRIER: mark string as fixed -- the C type table is never collected. */
- fixstring(s);
- setgcref(ct->name, obj2gco(s));
-}
-
-LJ_FUNC CTypeID lj_ctype_new(CTState *cts, CType **ctp);
-LJ_FUNC CTypeID lj_ctype_intern(CTState *cts, CTInfo info, CTSize size);
-LJ_FUNC void lj_ctype_addname(CTState *cts, CType *ct, CTypeID id);
-LJ_FUNC CTypeID lj_ctype_getname(CTState *cts, CType **ctp, GCstr *name,
- uint32_t tmask);
-LJ_FUNC CType *lj_ctype_getfieldq(CTState *cts, CType *ct, GCstr *name,
- CTSize *ofs, CTInfo *qual);
-#define lj_ctype_getfield(cts, ct, name, ofs) \
- lj_ctype_getfieldq((cts), (ct), (name), (ofs), NULL)
-LJ_FUNC CType *lj_ctype_rawref(CTState *cts, CTypeID id);
-LJ_FUNC CTSize lj_ctype_size(CTState *cts, CTypeID id);
-LJ_FUNC CTSize lj_ctype_vlsize(CTState *cts, CType *ct, CTSize nelem);
-LJ_FUNC CTInfo lj_ctype_info(CTState *cts, CTypeID id, CTSize *szp);
-LJ_FUNC cTValue *lj_ctype_meta(CTState *cts, CTypeID id, MMS mm);
-LJ_FUNC GCstr *lj_ctype_repr(lua_State *L, CTypeID id, GCstr *name);
-LJ_FUNC GCstr *lj_ctype_repr_int64(lua_State *L, uint64_t n, int isunsigned);
-LJ_FUNC GCstr *lj_ctype_repr_complex(lua_State *L, void *sp, CTSize size);
-LJ_FUNC CTState *lj_ctype_init(lua_State *L);
-LJ_FUNC void lj_ctype_freestate(global_State *g);
-
-#endif
-
-#endif
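/*
** Editor's sketch (not from the deleted header): how the CTInfo word above
** is packed and unpacked for sized types -- the top 4 bits carry the C type
** number, the low 16 bits the child type ID, and bits 16..19 the log2 of
** the alignment, per the CTSHIFT_*/CTMASK_* definitions. The concrete
** values used below are made up for illustration.
*/
#include <stdint.h>
#include <stdio.h>

#define SK_CTINFO(ct, flags)  (((uint32_t)(ct) << 28) + (flags))
#define SK_TYPE(info)         ((info) >> 28)
#define SK_CID(info)          ((info) & 0xffffu)
#define SK_ALIGN(info)        (((info) >> 16) & 15u)

int main(void)
{
  /* A pointer type (CT_PTR == 2) to child ID 9, 8-byte aligned (2^3). */
  uint32_t info = SK_CTINFO(2, (3u << 16) | 9u);
  printf("type=%u cid=%u align=%u\n", (unsigned)SK_TYPE(info),
         (unsigned)SK_CID(info), 1u << SK_ALIGN(info));
  /* Prints: type=2 cid=9 align=8 */
  return 0;
}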
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_debug.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_debug.h
deleted file mode 100644
index d10d0da..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_debug.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-** Debugging and introspection.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DEBUG_H
-#define _LJ_DEBUG_H
-
-#include "lj_obj.h"
-
-typedef struct lj_Debug {
- /* Common fields. Must be in the same order as in lua.h. */
- int event;
- const char *name;
- const char *namewhat;
- const char *what;
- const char *source;
- int currentline;
- int nups;
- int linedefined;
- int lastlinedefined;
- char short_src[LUA_IDSIZE];
- int i_ci;
- /* Extended fields. Only valid if lj_debug_getinfo() is called with ext = 1.*/
- int nparams;
- int isvararg;
-} lj_Debug;
-
-LJ_FUNC cTValue *lj_debug_frame(lua_State *L, int level, int *size);
-LJ_FUNC BCLine LJ_FASTCALL lj_debug_line(GCproto *pt, BCPos pc);
-LJ_FUNC const char *lj_debug_uvname(GCproto *pt, uint32_t idx);
-LJ_FUNC const char *lj_debug_uvnamev(cTValue *o, uint32_t idx, TValue **tvp);
-LJ_FUNC const char *lj_debug_slotname(GCproto *pt, const BCIns *pc,
- BCReg slot, const char **name);
-LJ_FUNC const char *lj_debug_funcname(lua_State *L, TValue *frame,
- const char **name);
-LJ_FUNC void lj_debug_shortname(char *out, GCstr *str);
-LJ_FUNC void lj_debug_addloc(lua_State *L, const char *msg,
- cTValue *frame, cTValue *nextframe);
-LJ_FUNC void lj_debug_pushloc(lua_State *L, GCproto *pt, BCPos pc);
-LJ_FUNC int lj_debug_getinfo(lua_State *L, const char *what, lj_Debug *ar,
- int ext);
-
-/* Fixed internal variable names. */
-#define VARNAMEDEF(_) \
- _(FOR_IDX, "(for index)") \
- _(FOR_STOP, "(for limit)") \
- _(FOR_STEP, "(for step)") \
- _(FOR_GEN, "(for generator)") \
- _(FOR_STATE, "(for state)") \
- _(FOR_CTL, "(for control)")
-
-enum {
- VARNAME_END,
-#define VARNAMEENUM(name, str) VARNAME_##name,
- VARNAMEDEF(VARNAMEENUM)
-#undef VARNAMEENUM
- VARNAME__MAX
-};
-
-#endif
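/*
** Editor's sketch (not from the deleted header): the X-macro pattern used
** by VARNAMEDEF above. One list expands both into an enum and into a
** parallel string table, so the two can never drift apart. The string
** table below is purely illustrative.
*/
#include <stdio.h>

#define SK_VARNAMEDEF(_) \
  _(FOR_IDX, "(for index)") \
  _(FOR_STOP, "(for limit)") \
  _(FOR_STEP, "(for step)")

enum {
  SK_VARNAME_END,
#define SK_ENUM(name, str) SK_VARNAME_##name,
SK_VARNAMEDEF(SK_ENUM)
#undef SK_ENUM
  SK_VARNAME__MAX
};

static const char *const sk_varnames[] = {
  NULL,
#define SK_STR(name, str) str,
SK_VARNAMEDEF(SK_STR)
#undef SK_STR
};

int main(void)
{
  printf("%d %s\n", SK_VARNAME_FOR_STEP, sk_varnames[SK_VARNAME_FOR_STEP]);
  /* Prints: 3 (for step) */
  return 0;
}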
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_def.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_def.h
deleted file mode 100644
index 21cc59a..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_def.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
-** LuaJIT common internal definitions.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DEF_H
-#define _LJ_DEF_H
-
-#include "lua.h"
-
-#if defined(_MSC_VER)
-/* MSVC is stuck in the last century and doesn't have C99's stdint.h. */
-typedef __int8 int8_t;
-typedef __int16 int16_t;
-typedef __int32 int32_t;
-typedef __int64 int64_t;
-typedef unsigned __int8 uint8_t;
-typedef unsigned __int16 uint16_t;
-typedef unsigned __int32 uint32_t;
-typedef unsigned __int64 uint64_t;
-#ifdef _WIN64
-typedef __int64 intptr_t;
-typedef unsigned __int64 uintptr_t;
-#else
-typedef __int32 intptr_t;
-typedef unsigned __int32 uintptr_t;
-#endif
-#elif defined(__symbian__)
-/* Cough. */
-typedef signed char int8_t;
-typedef short int int16_t;
-typedef int int32_t;
-typedef long long int64_t;
-typedef unsigned char uint8_t;
-typedef unsigned short int uint16_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-typedef int intptr_t;
-typedef unsigned int uintptr_t;
-#else
-#include <stdint.h>
-#endif
-
-/* Needed everywhere. */
-#include <string.h>
-#include <stdlib.h>
-
-/* Various VM limits. */
-#define LJ_MAX_MEM 0x7fffff00 /* Max. total memory allocation. */
-#define LJ_MAX_ALLOC LJ_MAX_MEM /* Max. individual allocation length. */
-#define LJ_MAX_STR LJ_MAX_MEM /* Max. string length. */
-#define LJ_MAX_UDATA LJ_MAX_MEM /* Max. userdata length. */
-
-#define LJ_MAX_STRTAB (1<<26) /* Max. string table size. */
-#define LJ_MAX_HBITS 26 /* Max. hash bits. */
-#define LJ_MAX_ABITS 28 /* Max. bits of array key. */
-#define LJ_MAX_ASIZE ((1<<(LJ_MAX_ABITS-1))+1) /* Max. array part size. */
-#define LJ_MAX_COLOSIZE 16 /* Max. elems for colocated array. */
-
-#define LJ_MAX_LINE LJ_MAX_MEM /* Max. source code line number. */
-#define LJ_MAX_XLEVEL 200 /* Max. syntactic nesting level. */
-#define LJ_MAX_BCINS (1<<26) /* Max. # of bytecode instructions. */
-#define LJ_MAX_SLOTS 250 /* Max. # of slots in a Lua func. */
-#define LJ_MAX_LOCVAR 200 /* Max. # of local variables. */
-#define LJ_MAX_UPVAL 60 /* Max. # of upvalues. */
-
-#define LJ_MAX_IDXCHAIN 100 /* __index/__newindex chain limit. */
-#define LJ_STACK_EXTRA 5 /* Extra stack space (metamethods). */
-
-#define LJ_NUM_CBPAGE 1 /* Number of FFI callback pages. */
-
-/* Minimum table/buffer sizes. */
-#define LJ_MIN_GLOBAL 6 /* Min. global table size (hbits). */
-#define LJ_MIN_REGISTRY 2 /* Min. registry size (hbits). */
-#define LJ_MIN_STRTAB 256 /* Min. string table size (pow2). */
-#define LJ_MIN_SBUF 32 /* Min. string buffer length. */
-#define LJ_MIN_VECSZ 8 /* Min. size for growable vectors. */
-#define LJ_MIN_IRSZ 32 /* Min. size for growable IR. */
-#define LJ_MIN_K64SZ 16 /* Min. size for chained K64Array. */
-
-/* JIT compiler limits. */
-#define LJ_MAX_JSLOTS 250 /* Max. # of stack slots for a trace. */
-#define LJ_MAX_PHI 64 /* Max. # of PHIs for a loop. */
-#define LJ_MAX_EXITSTUBGR 16 /* Max. # of exit stub groups. */
-
-/* Various macros. */
-#ifndef UNUSED
-#define UNUSED(x) ((void)(x)) /* to avoid warnings */
-#endif
-
-#define U64x(hi, lo) (((uint64_t)0x##hi << 32) + (uint64_t)0x##lo)
-#define i32ptr(p) ((int32_t)(intptr_t)(void *)(p))
-#define u32ptr(p) ((uint32_t)(intptr_t)(void *)(p))
-
-#define checki8(x) ((x) == (int32_t)(int8_t)(x))
-#define checku8(x) ((x) == (int32_t)(uint8_t)(x))
-#define checki16(x) ((x) == (int32_t)(int16_t)(x))
-#define checku16(x) ((x) == (int32_t)(uint16_t)(x))
-#define checki32(x) ((x) == (int32_t)(x))
-#define checku32(x) ((x) == (uint32_t)(x))
-#define checkptr32(x) ((uintptr_t)(x) == (uint32_t)(uintptr_t)(x))
-
-/* Every half-decent C compiler transforms this into a rotate instruction. */
-#define lj_rol(x, n) (((x)<<(n)) | ((x)>>(-(int)(n)&(8*sizeof(x)-1))))
-#define lj_ror(x, n) (((x)<<(-(int)(n)&(8*sizeof(x)-1))) | ((x)>>(n)))
-
-/* A really naive Bloom filter. But sufficient for our needs. */
-typedef uintptr_t BloomFilter;
-#define BLOOM_MASK (8*sizeof(BloomFilter) - 1)
-#define bloombit(x) ((uintptr_t)1 << ((x) & BLOOM_MASK))
-#define bloomset(b, x) ((b) |= bloombit((x)))
-#define bloomtest(b, x) ((b) & bloombit((x)))
-
-#if defined(__GNUC__) || defined(__psp2__)
-
-#define LJ_NORET __attribute__((noreturn))
-#define LJ_ALIGN(n) __attribute__((aligned(n)))
-#define LJ_INLINE inline
-#define LJ_AINLINE inline __attribute__((always_inline))
-#define LJ_NOINLINE __attribute__((noinline))
-
-#if defined(__ELF__) || defined(__MACH__) || defined(__psp2__)
-#if !((defined(__sun__) && defined(__svr4__)) || defined(__CELLOS_LV2__))
-#define LJ_NOAPI extern __attribute__((visibility("hidden")))
-#endif
-#endif
-
-/* Note: it's only beneficial to use fastcall on x86 and then only for up to
-** two non-FP args. The amalgamated compile covers all LJ_FUNC cases. Only
-** indirect calls and related tail-called C functions are marked as fastcall.
-*/
-#if defined(__i386__)
-#define LJ_FASTCALL __attribute__((fastcall))
-#endif
-
-#define LJ_LIKELY(x) __builtin_expect(!!(x), 1)
-#define LJ_UNLIKELY(x) __builtin_expect(!!(x), 0)
-
-#define lj_ffs(x) ((uint32_t)__builtin_ctz(x))
-/* Don't ask ... */
-#if defined(__INTEL_COMPILER) && (defined(__i386__) || defined(__x86_64__))
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- uint32_t r; __asm__("bsrl %1, %0" : "=r" (r) : "rm" (x) : "cc"); return r;
-}
-#else
-#define lj_fls(x) ((uint32_t)(__builtin_clz(x)^31))
-#endif
-
-#if defined(__arm__)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
-#if defined(__psp2__)
- return __builtin_rev(x);
-#else
- uint32_t r;
-#if __ARM_ARCH_6__ || __ARM_ARCH_6J__ || __ARM_ARCH_6T2__ || __ARM_ARCH_6Z__ ||\
- __ARM_ARCH_6ZK__ || __ARM_ARCH_7__ || __ARM_ARCH_7A__ || __ARM_ARCH_7R__
- __asm__("rev %0, %1" : "=r" (r) : "r" (x));
- return r;
-#else
-#ifdef __thumb__
- r = x ^ lj_ror(x, 16);
-#else
- __asm__("eor %0, %1, %1, ror #16" : "=r" (r) : "r" (x));
-#endif
- return ((r & 0xff00ffffu) >> 8) ^ lj_ror(x, 8);
-#endif
-#endif
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
-}
-#elif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- return (uint32_t)__builtin_bswap32((int32_t)x);
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return (uint64_t)__builtin_bswap64((int64_t)x);
-}
-#elif defined(__i386__) || defined(__x86_64__)
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- uint32_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
-}
-
-#if defined(__i386__)
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return ((uint64_t)lj_bswap((uint32_t)x)<<32) | lj_bswap((uint32_t)(x>>32));
-}
-#else
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- uint64_t r; __asm__("bswap %0" : "=r" (r) : "0" (x)); return r;
-}
-#endif
-#else
-static LJ_AINLINE uint32_t lj_bswap(uint32_t x)
-{
- return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24);
-}
-
-static LJ_AINLINE uint64_t lj_bswap64(uint64_t x)
-{
- return (uint64_t)lj_bswap((uint32_t)(x >> 32)) |
- ((uint64_t)lj_bswap((uint32_t)x) << 32);
-}
-#endif
-
-typedef union __attribute__((packed)) Unaligned16 {
- uint16_t u;
- uint8_t b[2];
-} Unaligned16;
-
-typedef union __attribute__((packed)) Unaligned32 {
- uint32_t u;
- uint8_t b[4];
-} Unaligned32;
-
-/* Unaligned load of uint16_t. */
-static LJ_AINLINE uint16_t lj_getu16(const void *p)
-{
- return ((const Unaligned16 *)p)->u;
-}
-
-/* Unaligned load of uint32_t. */
-static LJ_AINLINE uint32_t lj_getu32(const void *p)
-{
- return ((const Unaligned32 *)p)->u;
-}
-
-#elif defined(_MSC_VER)
-
-#define LJ_NORET __declspec(noreturn)
-#define LJ_ALIGN(n) __declspec(align(n))
-#define LJ_INLINE __inline
-#define LJ_AINLINE __forceinline
-#define LJ_NOINLINE __declspec(noinline)
-#if defined(_M_IX86)
-#define LJ_FASTCALL __fastcall
-#endif
-
-#ifdef _M_PPC
-unsigned int _CountLeadingZeros(long);
-#pragma intrinsic(_CountLeadingZeros)
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- return _CountLeadingZeros(x) ^ 31;
-}
-#else
-unsigned char _BitScanForward(uint32_t *, unsigned long);
-unsigned char _BitScanReverse(uint32_t *, unsigned long);
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-static LJ_AINLINE uint32_t lj_ffs(uint32_t x)
-{
- uint32_t r; _BitScanForward(&r, x); return r;
-}
-
-static LJ_AINLINE uint32_t lj_fls(uint32_t x)
-{
- uint32_t r; _BitScanReverse(&r, x); return r;
-}
-#endif
-
-unsigned long _byteswap_ulong(unsigned long);
-uint64_t _byteswap_uint64(uint64_t);
-#define lj_bswap(x) (_byteswap_ulong((x)))
-#define lj_bswap64(x) (_byteswap_uint64((x)))
-
-#if defined(_M_PPC) && defined(LUAJIT_NO_UNALIGNED)
-/*
-** Replacement for unaligned loads on Xbox 360. Disabled by default since it's
-** usually more costly than the occasional stall when crossing a cache-line.
-*/
-static LJ_AINLINE uint16_t lj_getu16(const void *v)
-{
- const uint8_t *p = (const uint8_t *)v;
- return (uint16_t)((p[0]<<8) | p[1]);
-}
-static LJ_AINLINE uint32_t lj_getu32(const void *v)
-{
- const uint8_t *p = (const uint8_t *)v;
- return (uint32_t)((p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]);
-}
-#else
-/* Unaligned loads are generally ok on x86/x64. */
-#define lj_getu16(p) (*(uint16_t *)(p))
-#define lj_getu32(p) (*(uint32_t *)(p))
-#endif
-
-#else
-#error "missing defines for your compiler"
-#endif
-
-/* Optional defines. */
-#ifndef LJ_FASTCALL
-#define LJ_FASTCALL
-#endif
-#ifndef LJ_NORET
-#define LJ_NORET
-#endif
-#ifndef LJ_NOAPI
-#define LJ_NOAPI extern
-#endif
-#ifndef LJ_LIKELY
-#define LJ_LIKELY(x) (x)
-#define LJ_UNLIKELY(x) (x)
-#endif
-
-/* Attributes for internal functions. */
-#define LJ_DATA LJ_NOAPI
-#define LJ_DATADEF
-#define LJ_ASMF LJ_NOAPI
-#define LJ_FUNCA LJ_NOAPI
-#if defined(ljamalg_c)
-#define LJ_FUNC static
-#else
-#define LJ_FUNC LJ_NOAPI
-#endif
-#define LJ_FUNC_NORET LJ_FUNC LJ_NORET
-#define LJ_FUNCA_NORET LJ_FUNCA LJ_NORET
-#define LJ_ASMF_NORET LJ_ASMF LJ_NORET
-
-/* Runtime assertions. */
-#ifdef lua_assert
-#define check_exp(c, e) (lua_assert(c), (e))
-#define api_check(l, e) lua_assert(e)
-#else
-#define lua_assert(c) ((void)0)
-#define check_exp(c, e) (e)
-#define api_check luai_apicheck
-#endif
-
-/* Static assertions. */
-#define LJ_ASSERT_NAME2(name, line) name ## line
-#define LJ_ASSERT_NAME(line) LJ_ASSERT_NAME2(lj_assert_, line)
-#ifdef __COUNTER__
-#define LJ_STATIC_ASSERT(cond) \
- extern void LJ_ASSERT_NAME(__COUNTER__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
-#else
-#define LJ_STATIC_ASSERT(cond) \
- extern void LJ_ASSERT_NAME(__LINE__)(int STATIC_ASSERTION_FAILED[(cond)?1:-1])
-#endif
-
-#endif
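/*
** Editor's sketch (not from the deleted header): using the Bloom filter
** macros defined above. As the header's comment says, it is a really naive
** Bloom filter -- membership tests may report false positives (two values
** hashing to the same bit) but never false negatives.
*/
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t BloomSketch;
#define SK_MASK        (8*sizeof(BloomSketch) - 1)
#define SK_BIT(x)      ((uintptr_t)1 << ((x) & SK_MASK))
#define SK_SET(b, x)   ((b) |= SK_BIT(x))
#define SK_TEST(b, x)  ((b) & SK_BIT(x))

int main(void)
{
  BloomSketch seen = 0;
  SK_SET(seen, 17);
  SK_SET(seen, 42);
  printf("%d %d\n",
         SK_TEST(seen, 42) != 0,                              /* 1: was set. */
         SK_TEST(seen, 17 + 8*(int)sizeof(BloomSketch)) != 0); /* 1: alias
                                         of 17 -- a harmless false positive. */
  return 0;
}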
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_dispatch.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_dispatch.h
deleted file mode 100644
index e46a0ee..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_dispatch.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
-** Instruction dispatch handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_DISPATCH_H
-#define _LJ_DISPATCH_H
-
-#include "lj_obj.h"
-#include "lj_bc.h"
-#if LJ_HASJIT
-#include "lj_jit.h"
-#endif
-
-#if LJ_TARGET_MIPS
-/* Need our own global offset table for the dreaded MIPS calling conventions. */
-#if LJ_HASJIT
-#define JITGOTDEF(_) _(lj_trace_exit) _(lj_trace_hot)
-#else
-#define JITGOTDEF(_)
-#endif
-#if LJ_HASFFI
-#define FFIGOTDEF(_) \
- _(lj_meta_equal_cd) _(lj_ccallback_enter) _(lj_ccallback_leave)
-#else
-#define FFIGOTDEF(_)
-#endif
-#define GOTDEF(_) \
- _(floor) _(ceil) _(trunc) _(log) _(log10) _(exp) _(sin) _(cos) _(tan) \
- _(asin) _(acos) _(atan) _(sinh) _(cosh) _(tanh) _(frexp) _(modf) _(atan2) \
- _(pow) _(fmod) _(ldexp) \
- _(lj_dispatch_call) _(lj_dispatch_ins) _(lj_err_throw) \
- _(lj_ffh_coroutine_wrap_err) _(lj_func_closeuv) _(lj_func_newL_gc) \
- _(lj_gc_barrieruv) _(lj_gc_step) _(lj_gc_step_fixtop) _(lj_meta_arith) \
- _(lj_meta_call) _(lj_meta_cat) _(lj_meta_comp) _(lj_meta_equal) \
- _(lj_meta_for) _(lj_meta_len) _(lj_meta_tget) _(lj_meta_tset) \
- _(lj_state_growstack) _(lj_str_fromnum) _(lj_str_fromnumber) _(lj_str_new) \
- _(lj_tab_dup) _(lj_tab_get) _(lj_tab_getinth) _(lj_tab_len) _(lj_tab_new) \
- _(lj_tab_newkey) _(lj_tab_next) _(lj_tab_reasize) \
- JITGOTDEF(_) FFIGOTDEF(_)
-
-enum {
-#define GOTENUM(name) LJ_GOT_##name,
-GOTDEF(GOTENUM)
-#undef GOTENUM
- LJ_GOT__MAX
-};
-#endif
-
-/* Type of hot counter. Must match the code in the assembler VM. */
-/* 16 bits are sufficient. Only 0.0015% overhead with maximum slot penalty. */
-typedef uint16_t HotCount;
-
-/* Number of hot counter hash table entries (must be a power of two). */
-#define HOTCOUNT_SIZE 64
-#define HOTCOUNT_PCMASK ((HOTCOUNT_SIZE-1)*sizeof(HotCount))
-
-/* Hotcount decrements. */
-#define HOTCOUNT_LOOP 2
-#define HOTCOUNT_CALL 1
-
-/* This solves a circular dependency problem -- bump as needed. Sigh. */
-#define GG_NUM_ASMFF 62
-
-#define GG_LEN_DDISP (BC__MAX + GG_NUM_ASMFF)
-#define GG_LEN_SDISP BC_FUNCF
-#define GG_LEN_DISP (GG_LEN_DDISP + GG_LEN_SDISP)
-
-/* Global state, main thread and extra fields are allocated together. */
-typedef struct GG_State {
- lua_State L; /* Main thread. */
- global_State g; /* Global state. */
-#if LJ_TARGET_MIPS
- ASMFunction got[LJ_GOT__MAX]; /* Global offset table. */
-#endif
-#if LJ_HASJIT
- jit_State J; /* JIT state. */
- HotCount hotcount[HOTCOUNT_SIZE]; /* Hot counters. */
-#endif
- ASMFunction dispatch[GG_LEN_DISP]; /* Instruction dispatch tables. */
- BCIns bcff[GG_NUM_ASMFF]; /* Bytecode for ASM fast functions. */
-} GG_State;
-
-#define GG_OFS(field) ((int)offsetof(GG_State, field))
-#define G2GG(gl) ((GG_State *)((char *)(gl) - GG_OFS(g)))
-#define J2GG(j) ((GG_State *)((char *)(j) - GG_OFS(J)))
-#define L2GG(L) (G2GG(G(L)))
-#define J2G(J) (&J2GG(J)->g)
-#define G2J(gl) (&G2GG(gl)->J)
-#define L2J(L) (&L2GG(L)->J)
-#define GG_G2DISP (GG_OFS(dispatch) - GG_OFS(g))
-#define GG_DISP2G (GG_OFS(g) - GG_OFS(dispatch))
-#define GG_DISP2J (GG_OFS(J) - GG_OFS(dispatch))
-#define GG_DISP2HOT (GG_OFS(hotcount) - GG_OFS(dispatch))
-#define GG_DISP2STATIC (GG_LEN_DDISP*(int)sizeof(ASMFunction))
-
-#define hotcount_get(gg, pc) \
- (gg)->hotcount[(u32ptr(pc)>>2) & (HOTCOUNT_SIZE-1)]
-#define hotcount_set(gg, pc, val) \
- (hotcount_get((gg), (pc)) = (HotCount)(val))
-
-/* Dispatch table management. */
-LJ_FUNC void lj_dispatch_init(GG_State *GG);
-#if LJ_HASJIT
-LJ_FUNC void lj_dispatch_init_hotcount(global_State *g);
-#endif
-LJ_FUNC void lj_dispatch_update(global_State *g);
-
-/* Instruction dispatch callback for hooks or when recording. */
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_ins(lua_State *L, const BCIns *pc);
-LJ_FUNCA ASMFunction LJ_FASTCALL lj_dispatch_call(lua_State *L, const BCIns*pc);
-LJ_FUNCA void LJ_FASTCALL lj_dispatch_return(lua_State *L, const BCIns *pc);
-
-#if LJ_HASFFI && !defined(_BUILDVM_H)
-/* Save/restore errno and GetLastError() around hooks, exits and recording. */
-#include <errno.h>
-#if LJ_TARGET_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#define ERRNO_SAVE int olderr = errno; DWORD oldwerr = GetLastError();
-#define ERRNO_RESTORE errno = olderr; SetLastError(oldwerr);
-#else
-#define ERRNO_SAVE int olderr = errno;
-#define ERRNO_RESTORE errno = olderr;
-#endif
-#else
-#define ERRNO_SAVE
-#define ERRNO_RESTORE
-#endif
-
-#endif
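/*
** Editor's sketch (not from the deleted header): the shape of the hot
** counter scheme above. Each bytecode PC hashes into one of HOTCOUNT_SIZE
** counters (instructions are 4 bytes wide, hence the >>2); a counter
** running out is what marks a loop as hot. The start value below is
** invented -- only the decrement constants appear in this header, the
** initialization lives elsewhere in the VM.
*/
#include <stdint.h>
#include <stdio.h>

#define SK_HOTSIZE 64

static uint16_t sk_hotcount[SK_HOTSIZE];

/* Decrement the counter for this PC; return 1 when it trips. */
static int sk_hotloop(uintptr_t pc, uint16_t dec)
{
  uint16_t *c = &sk_hotcount[((uint32_t)pc >> 2) & (SK_HOTSIZE - 1)];
  if (*c <= dec) return 1;   /* Hot: hand the loop to the JIT compiler. */
  *c -= dec;
  return 0;
}

int main(void)
{
  int i;
  for (i = 0; i < SK_HOTSIZE; i++) sk_hotcount[i] = 56;  /* Invented start. */
  for (i = 1; i <= 100; i++) {
    if (sk_hotloop(0x10000a0, 2 /* HOTCOUNT_LOOP */)) {
      printf("loop became hot on backedge %d\n", i);
      break;
    }
  }
  return 0;
}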
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_arm.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_arm.h
deleted file mode 100644
index 285c98d..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_arm.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
-** ARM instruction emitter.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Constant encoding --------------------------------------------------- */
-
-static uint8_t emit_invai[16] = {
- /* AND */ (ARMI_AND^ARMI_BIC) >> 21,
- /* EOR */ 0,
- /* SUB */ (ARMI_SUB^ARMI_ADD) >> 21,
- /* RSB */ 0,
- /* ADD */ (ARMI_ADD^ARMI_SUB) >> 21,
- /* ADC */ (ARMI_ADC^ARMI_SBC) >> 21,
- /* SBC */ (ARMI_SBC^ARMI_ADC) >> 21,
- /* RSC */ 0,
- /* TST */ 0,
- /* TEQ */ 0,
- /* CMP */ (ARMI_CMP^ARMI_CMN) >> 21,
- /* CMN */ (ARMI_CMN^ARMI_CMP) >> 21,
- /* ORR */ 0,
- /* MOV */ (ARMI_MOV^ARMI_MVN) >> 21,
- /* BIC */ (ARMI_BIC^ARMI_AND) >> 21,
- /* MVN */ (ARMI_MVN^ARMI_MOV) >> 21
-};
-
-/* Encode constant in K12 format for data processing instructions. */
-static uint32_t emit_isk12(ARMIns ai, int32_t n)
-{
- uint32_t invai, i, m = (uint32_t)n;
- /* K12: unsigned 8 bit value, rotated in steps of two bits. */
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|m|i;
- /* Otherwise try negation/complement with the inverse instruction. */
- invai = emit_invai[((ai >> 21) & 15)];
- if (!invai) return 0; /* Failed. No inverse instruction. */
- m = ~(uint32_t)n;
- if (invai == ((ARMI_SUB^ARMI_ADD) >> 21) ||
- invai == (ARMI_CMP^ARMI_CMN) >> 21) m++;
- for (i = 0; i < 4096; i += 256, m = lj_rol(m, 2))
- if (m <= 255) return ARMI_K12|(invai<<21)|m|i;
- return 0; /* Failed. */
-}
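
/*
** Editor's sketch (not part of the deleted file): a standalone check for
** the plain K12 form that emit_isk12() above searches for -- an 8-bit value
** rotated right by an even amount. It ignores the negation/complement
** fallback via emit_invai. Rotating n left in steps of two and testing
** against 255 is the same test the loop above performs with lj_rol.
*/
#include <stdint.h>
#include <stdio.h>

static uint32_t sk_rol32(uint32_t x, unsigned n)
{
  return n ? ((x << n) | (x >> (32 - n))) : x;
}

/* Return 1 if n fits a plain ARM data-processing immediate. */
static int sk_is_k12(uint32_t n)
{
  unsigned r;
  for (r = 0; r < 32; r += 2)
    if (sk_rol32(n, r) <= 255u) return 1;
  return 0;
}

int main(void)
{
  printf("%d %d %d\n",
         sk_is_k12(0x000000ffu),   /* 1: plain 8-bit value. */
         sk_is_k12(0x00ff0000u),   /* 1: 0xff rotated into the high half. */
         sk_is_k12(0x00000101u));  /* 0: bits span more than 8 positions. */
  return 0;
}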
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_dnm(ASMState *as, ARMIns ai, Reg rd, Reg rn, Reg rm)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn) | ARMF_M(rm);
-}
-
-static void emit_dm(ASMState *as, ARMIns ai, Reg rd, Reg rm)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_M(rm);
-}
-
-static void emit_dn(ASMState *as, ARMIns ai, Reg rd, Reg rn)
-{
- *--as->mcp = ai | ARMF_D(rd) | ARMF_N(rn);
-}
-
-static void emit_nm(ASMState *as, ARMIns ai, Reg rn, Reg rm)
-{
- *--as->mcp = ai | ARMF_N(rn) | ARMF_M(rm);
-}
-
-static void emit_d(ASMState *as, ARMIns ai, Reg rd)
-{
- *--as->mcp = ai | ARMF_D(rd);
-}
-
-static void emit_n(ASMState *as, ARMIns ai, Reg rn)
-{
- *--as->mcp = ai | ARMF_N(rn);
-}
-
-static void emit_m(ASMState *as, ARMIns ai, Reg rm)
-{
- *--as->mcp = ai | ARMF_M(rm);
-}
-
-static void emit_lsox(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -255 && ofs <= 255);
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMI_LSX_I | ARMF_D(rd) | ARMF_N(rn) |
- ((ofs & 0xf0) << 4) | (ofs & 0x0f);
-}
-
-static void emit_lso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -4095 && ofs <= 4095);
- /* Combine LDR/STR pairs to LDRD/STRD. */
- if (*as->mcp == (ai|ARMI_LS_P|ARMI_LS_U|ARMF_D(rd^1)|ARMF_N(rn)|(ofs^4)) &&
- (ai & ~(ARMI_LDR^ARMI_STR)) == ARMI_STR && rd != rn &&
- (uint32_t)ofs <= 252 && !(ofs & 3) && !((rd ^ (ofs >>2)) & 1) &&
- as->mcp != as->mcloop) {
- as->mcp++;
- emit_lsox(as, ai == ARMI_LDR ? ARMI_LDRD : ARMI_STRD, rd&~1, rn, ofs&~4);
- return;
- }
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd) | ARMF_N(rn) | ofs;
-}
-
-#if !LJ_SOFTFP
-static void emit_vlso(ASMState *as, ARMIns ai, Reg rd, Reg rn, int32_t ofs)
-{
- lua_assert(ofs >= -1020 && ofs <= 1020 && (ofs&3) == 0);
- if (ofs < 0) ofs = -ofs; else ai |= ARMI_LS_U;
- *--as->mcp = ai | ARMI_LS_P | ARMF_D(rd & 15) | ARMF_N(rn) | (ofs >> 2);
-}
-#endif
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer spills of BASE/L. */
-#define emit_canremat(ref) ((ref) < ASMREF_L)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg d, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != d);
- if (emit_canremat(ref)) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- uint32_t k = emit_isk12(ARMI_ADD, delta);
- if (k) {
- if (k == ARMI_K12)
- emit_dm(as, ARMI_MOV, d, r);
- else
- emit_dn(as, ARMI_ADD^k, d, r);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Try to find a two step delta relative to another constant. */
-static int emit_kdelta2(ASMState *as, Reg d, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != d);
- if (emit_canremat(ref)) {
- int32_t other = ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i;
- if (other) {
- int32_t delta = i - other;
- uint32_t sh, inv = 0, k2, k;
- if (delta < 0) { delta = -delta; inv = ARMI_ADD^ARMI_SUB; }
- sh = lj_ffs(delta) & ~1;
- k2 = emit_isk12(0, delta & (255 << sh));
- k = emit_isk12(0, delta & ~(255 << sh));
- if (k) {
- emit_dn(as, ARMI_ADD^k2^inv, d, d);
- emit_dn(as, ARMI_ADD^k^inv, d, r);
- return 1;
- }
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- uint32_t k = emit_isk12(ARMI_MOV, i);
- lua_assert(rset_test(as->freeset, r) || r == RID_TMP);
- if (k) {
- /* Standard K12 constant. */
- emit_d(as, ARMI_MOV^k, r);
- } else if ((as->flags & JIT_F_ARMV6T2) && (uint32_t)i < 0x00010000u) {
- /* 16 bit loword constant for ARMv6T2. */
- emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
- } else if (emit_kdelta1(as, r, i)) {
- /* One step delta relative to another constant. */
- } else if ((as->flags & JIT_F_ARMV6T2)) {
- /* 32 bit hiword/loword constant for ARMv6T2. */
- emit_d(as, ARMI_MOVT|((i>>16) & 0x0fff)|(((i>>16) & 0xf000)<<4), r);
- emit_d(as, ARMI_MOVW|(i & 0x0fff)|((i & 0xf000)<<4), r);
- } else if (emit_kdelta2(as, r, i)) {
- /* Two step delta relative to another constant. */
- } else {
- /* Otherwise construct the constant with up to 4 instructions. */
- /* NYI: use mvn+bic, use pc-relative loads. */
- for (;;) {
- uint32_t sh = lj_ffs(i) & ~1;
- int32_t m = i & (255 << sh);
- i &= ~(255 << sh);
- if (i == 0) {
- emit_d(as, ARMI_MOV ^ emit_isk12(0, m), r);
- break;
- }
- emit_dn(as, ARMI_ORR ^ emit_isk12(0, m), r, r);
- }
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, ARMIns ai, Reg r, void *p)
-{
- int32_t i = i32ptr(p);
- emit_lso(as, ai, r, ra_allock(as, (i & ~4095), rset_exclude(RSET_GPR, r)),
- (i & 4095));
-}
-
-#if !LJ_SOFTFP
-/* Load a number constant into an FPR. */
-static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
-{
- int32_t i;
- if ((as->flags & JIT_F_VFPV3) && !tv->u32.lo) {
- uint32_t hi = tv->u32.hi;
- uint32_t b = ((hi >> 22) & 0x1ff);
- if (!(hi & 0xffff) && (b == 0x100 || b == 0x0ff)) {
- *--as->mcp = ARMI_VMOVI_D | ARMF_D(r & 15) |
- ((tv->u32.hi >> 12) & 0x00080000) |
- ((tv->u32.hi >> 4) & 0x00070000) |
- ((tv->u32.hi >> 16) & 0x0000000f);
- return;
- }
- }
- i = i32ptr(tv);
- emit_vlso(as, ARMI_VLDR_D, r,
- ra_allock(as, (i & ~1020), RSET_GPR), (i & 1020));
-}
-#endif
-
-/* Get/set global_State fields. */
-#define emit_getgl(as, r, field) \
- emit_lsptr(as, ARMI_LDR, (r), (void *)&J2G(as->J)->field)
-#define emit_setgl(as, r, field) \
- emit_lsptr(as, ARMI_STR, (r), (void *)&J2G(as->J)->field)
-
-/* Trace number is determined from pc of exit instruction. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_branch(ASMState *as, ARMIns ai, MCode *target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = (target - p) - 1;
- lua_assert(((delta + 0x00800000) >> 24) == 0);
- *--p = ai | ((uint32_t)delta & 0x00ffffffu);
- as->mcp = p;
-}
-
-#define emit_jmp(as, target) emit_branch(as, ARMI_B, (target))
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = ((char *)target - (char *)p) - 8;
- if ((((delta>>2) + 0x00800000) >> 24) == 0) {
- if ((delta & 1))
- *p = ARMI_BLX | ((uint32_t)(delta>>2) & 0x00ffffffu) | ((delta&2) << 23);
- else
- *p = ARMI_BL | ((uint32_t)(delta>>2) & 0x00ffffffu);
- } else { /* Target out of range: need indirect call. But don't use R0-R3. */
- Reg r = ra_allock(as, i32ptr(target), RSET_RANGE(RID_R4, RID_R12+1));
- *p = ARMI_BLXr | ARMF_M(r);
- }
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (dst >= RID_MAX_GPR) {
- emit_dm(as, irt_isnum(ir->t) ? ARMI_VMOV_D : ARMI_VMOV_S,
- (dst & 15), (src & 15));
- return;
- }
-#endif
- if (as->mcp != as->mcloop) { /* Swap early registers for loads/stores. */
- MCode ins = *as->mcp, swp = (src^dst);
- if ((ins & 0x0c000000) == 0x04000000 && (ins & 0x02000010) != 0x02000010) {
- if (!((ins ^ (dst << 16)) & 0x000f0000))
- *as->mcp = ins ^ (swp << 16); /* Swap N in load/store. */
- if (!(ins & 0x00100000) && !((ins ^ (dst << 12)) & 0x0000f000))
- *as->mcp = ins ^ (swp << 12); /* Swap D in store. */
- }
- }
- emit_dm(as, ARMI_MOV, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (r >= RID_MAX_GPR)
- emit_vlso(as, irt_isnum(ir->t) ? ARMI_VLDR_D : ARMI_VLDR_S, r, RID_SP, ofs);
- else
-#endif
- emit_lso(as, ARMI_LDR, r, RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
-#if LJ_SOFTFP
- lua_assert(!irt_isnum(ir->t)); UNUSED(ir);
-#else
- if (r >= RID_MAX_GPR)
- emit_vlso(as, irt_isnum(ir->t) ? ARMI_VSTR_D : ARMI_VSTR_S, r, RID_SP, ofs);
- else
-#endif
- emit_lso(as, ARMI_STR, r, RID_SP, ofs);
-}
-
-/* Emit an arithmetic/logic operation with a constant operand. */
-static void emit_opk(ASMState *as, ARMIns ai, Reg dest, Reg src,
- int32_t i, RegSet allow)
-{
- uint32_t k = emit_isk12(ai, i);
- if (k)
- emit_dn(as, ai^k, dest, src);
- else
- emit_dnm(as, ai, dest, src, ra_allock(as, i, allow));
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs)
- emit_opk(as, ARMI_ADD, r, r, ofs, rset_exclude(RSET_GPR, r));
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_mips.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_mips.h
deleted file mode 100644
index ed62608..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_mips.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
-** MIPS instruction emitter.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_dst(ASMState *as, MIPSIns mi, Reg rd, Reg rs, Reg rt)
-{
- *--as->mcp = mi | MIPSF_D(rd) | MIPSF_S(rs) | MIPSF_T(rt);
-}
-
-static void emit_dta(ASMState *as, MIPSIns mi, Reg rd, Reg rt, uint32_t a)
-{
- *--as->mcp = mi | MIPSF_D(rd) | MIPSF_T(rt) | MIPSF_A(a);
-}
-
-#define emit_ds(as, mi, rd, rs) emit_dst(as, (mi), (rd), (rs), 0)
-#define emit_tg(as, mi, rt, rg) emit_dst(as, (mi), (rg)&31, 0, (rt))
-
-static void emit_tsi(ASMState *as, MIPSIns mi, Reg rt, Reg rs, int32_t i)
-{
- *--as->mcp = mi | MIPSF_T(rt) | MIPSF_S(rs) | (i & 0xffff);
-}
-
-#define emit_ti(as, mi, rt, i) emit_tsi(as, (mi), (rt), 0, (i))
-#define emit_hsi(as, mi, rh, rs, i) emit_tsi(as, (mi), (rh) & 31, (rs), (i))
-
-static void emit_fgh(ASMState *as, MIPSIns mi, Reg rf, Reg rg, Reg rh)
-{
- *--as->mcp = mi | MIPSF_F(rf&31) | MIPSF_G(rg&31) | MIPSF_H(rh&31);
-}
-
-#define emit_fg(as, mi, rf, rg) emit_fgh(as, (mi), (rf), (rg), 0)
-
-static void emit_rotr(ASMState *as, Reg dest, Reg src, Reg tmp, uint32_t shift)
-{
- if ((as->flags & JIT_F_MIPS32R2)) {
- emit_dta(as, MIPSI_ROTR, dest, src, shift);
- } else {
- emit_dst(as, MIPSI_OR, dest, dest, tmp);
- emit_dta(as, MIPSI_SLL, dest, src, (-shift)&31);
- emit_dta(as, MIPSI_SRL, tmp, src, shift);
- }
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != t);
- if (ref < ASMREF_L) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- if (checki16(delta)) {
- emit_tsi(as, MIPSI_ADDIU, t, r, delta);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- if (checki16(i)) {
- emit_ti(as, MIPSI_LI, r, i);
- } else {
- if ((i & 0xffff)) {
- int32_t jgl = i32ptr(J2G(as->J));
- if ((uint32_t)(i-jgl) < 65536) {
- emit_tsi(as, MIPSI_ADDIU, r, RID_JGL, i-jgl-32768);
- return;
- } else if (emit_kdelta1(as, r, i)) {
- return;
- } else if ((i >> 16) == 0) {
- emit_tsi(as, MIPSI_ORI, r, RID_ZERO, i);
- return;
- }
- emit_tsi(as, MIPSI_ORI, r, r, i);
- }
- emit_ti(as, MIPSI_LUI, r, (i >> 16));
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-static void ra_allockreg(ASMState *as, int32_t k, Reg r);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, MIPSIns mi, Reg r, void *p, RegSet allow)
-{
- int32_t jgl = i32ptr(J2G(as->J));
- int32_t i = i32ptr(p);
- Reg base;
- if ((uint32_t)(i-jgl) < 65536) {
- i = i-jgl-32768;
- base = RID_JGL;
- } else {
- base = ra_allock(as, i-(int16_t)i, allow);
- }
- emit_tsi(as, mi, r, base, i);
-}
-
-#define emit_loadn(as, r, tv) \
- emit_lsptr(as, MIPSI_LDC1, ((r) & 31), (void *)(tv), RSET_GPR)
-
-/* Get/set global_State fields. */
-static void emit_lsglptr(ASMState *as, MIPSIns mi, Reg r, int32_t ofs)
-{
- emit_tsi(as, mi, r, RID_JGL, ofs-32768);
-}
-
-#define emit_getgl(as, r, field) \
- emit_lsglptr(as, MIPSI_LW, (r), (int32_t)offsetof(global_State, field))
-#define emit_setgl(as, r, field) \
- emit_lsglptr(as, MIPSI_SW, (r), (int32_t)offsetof(global_State, field))
-
-/* Trace number is determined from per-trace exit stubs. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_branch(ASMState *as, MIPSIns mi, Reg rs, Reg rt, MCode *target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(((delta + 0x8000) >> 16) == 0);
- *--p = mi | MIPSF_S(rs) | MIPSF_T(rt) | ((uint32_t)delta & 0xffffu);
- as->mcp = p;
-}
-
-static void emit_jmp(ASMState *as, MCode *target)
-{
- *--as->mcp = MIPSI_NOP;
- emit_branch(as, MIPSI_B, RID_ZERO, RID_ZERO, (target));
-}
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = as->mcp;
- *--p = MIPSI_NOP;
- if ((((uintptr_t)target ^ (uintptr_t)p) >> 28) == 0)
- *--p = MIPSI_JAL | (((uintptr_t)target >>2) & 0x03ffffffu);
- else /* Target out of range: need indirect call. */
- *--p = MIPSI_JALR | MIPSF_S(RID_CFUNCADDR);
- as->mcp = p;
- ra_allockreg(as, i32ptr(target), RID_CFUNCADDR);
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-#define emit_move(as, dst, src) \
- emit_ds(as, MIPSI_MOVE, (dst), (src))
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- if (dst < RID_MAX_GPR)
- emit_move(as, dst, src);
- else
- emit_fg(as, irt_isnum(ir->t) ? MIPSI_MOV_D : MIPSI_MOV_S, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tsi(as, MIPSI_LW, r, RID_SP, ofs);
- else
- emit_tsi(as, irt_isnum(ir->t) ? MIPSI_LDC1 : MIPSI_LWC1,
- (r & 31), RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tsi(as, MIPSI_SW, r, RID_SP, ofs);
- else
- emit_tsi(as, irt_isnum(ir->t) ? MIPSI_SDC1 : MIPSI_SWC1,
- (r&31), RID_SP, ofs);
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- lua_assert(checki16(ofs));
- emit_tsi(as, MIPSI_ADDIU, r, r, ofs);
- }
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_SP, -(ofs))
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_ppc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_ppc.h
deleted file mode 100644
index 1409930..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_ppc.h
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
-** PPC instruction emitter.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-static void emit_tab(ASMState *as, PPCIns pi, Reg rt, Reg ra, Reg rb)
-{
- *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | PPCF_B(rb);
-}
-
-#define emit_asb(as, pi, ra, rs, rb) emit_tab(as, (pi), (rs), (ra), (rb))
-#define emit_as(as, pi, ra, rs) emit_tab(as, (pi), (rs), (ra), 0)
-#define emit_ab(as, pi, ra, rb) emit_tab(as, (pi), 0, (ra), (rb))
-
-static void emit_tai(ASMState *as, PPCIns pi, Reg rt, Reg ra, int32_t i)
-{
- *--as->mcp = pi | PPCF_T(rt) | PPCF_A(ra) | (i & 0xffff);
-}
-
-#define emit_ti(as, pi, rt, i) emit_tai(as, (pi), (rt), 0, (i))
-#define emit_ai(as, pi, ra, i) emit_tai(as, (pi), 0, (ra), (i))
-#define emit_asi(as, pi, ra, rs, i) emit_tai(as, (pi), (rs), (ra), (i))
-
-#define emit_fab(as, pi, rf, ra, rb) \
- emit_tab(as, (pi), (rf)&31, (ra)&31, (rb)&31)
-#define emit_fb(as, pi, rf, rb) emit_tab(as, (pi), (rf)&31, 0, (rb)&31)
-#define emit_fac(as, pi, rf, ra, rc) \
- emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, 0)
-#define emit_facb(as, pi, rf, ra, rc, rb) \
- emit_tab(as, (pi) | PPCF_C((rc) & 31), (rf)&31, (ra)&31, (rb)&31)
-#define emit_fai(as, pi, rf, ra, i) emit_tai(as, (pi), (rf)&31, (ra), (i))
-
-static void emit_rot(ASMState *as, PPCIns pi, Reg ra, Reg rs,
- int32_t n, int32_t b, int32_t e)
-{
- *--as->mcp = pi | PPCF_T(rs) | PPCF_A(ra) | PPCF_B(n) |
- PPCF_MB(b) | PPCF_ME(e);
-}
-
-static void emit_slwi(ASMState *as, Reg ra, Reg rs, int32_t n)
-{
- lua_assert(n >= 0 && n < 32);
- emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31-n);
-}
-
-static void emit_rotlwi(ASMState *as, Reg ra, Reg rs, int32_t n)
-{
- lua_assert(n >= 0 && n < 32);
- emit_rot(as, PPCI_RLWINM, ra, rs, n, 0, 31);
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
-/* Try to find a one step delta relative to another constant. */
-static int emit_kdelta1(ASMState *as, Reg t, int32_t i)
-{
- RegSet work = ~as->freeset & RSET_GPR;
- while (work) {
- Reg r = rset_picktop(work);
- IRRef ref = regcost_ref(as->cost[r]);
- lua_assert(r != t);
- if (ref < ASMREF_L) {
- int32_t delta = i - (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i);
- if (checki16(delta)) {
- emit_tai(as, PPCI_ADDI, t, r, delta);
- return 1;
- }
- }
- rset_clear(work, r);
- }
- return 0; /* Failed. */
-}
-
-/* Load a 32 bit constant into a GPR. */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- if (checki16(i)) {
- emit_ti(as, PPCI_LI, r, i);
- } else {
- if ((i & 0xffff)) {
- int32_t jgl = i32ptr(J2G(as->J));
- if ((uint32_t)(i-jgl) < 65536) {
- emit_tai(as, PPCI_ADDI, r, RID_JGL, i-jgl-32768);
- return;
- } else if (emit_kdelta1(as, r, i)) {
- return;
- }
- emit_asi(as, PPCI_ORI, r, r, i);
- }
- emit_ti(as, PPCI_LIS, r, (i >> 16));
- }
-}
-
-#define emit_loada(as, r, addr) emit_loadi(as, (r), i32ptr((addr)))
-
-static Reg ra_allock(ASMState *as, int32_t k, RegSet allow);
-
-/* Get/set from constant pointer. */
-static void emit_lsptr(ASMState *as, PPCIns pi, Reg r, void *p, RegSet allow)
-{
- int32_t jgl = i32ptr(J2G(as->J));
- int32_t i = i32ptr(p);
- Reg base;
- if ((uint32_t)(i-jgl) < 65536) {
- i = i-jgl-32768;
- base = RID_JGL;
- } else {
- base = ra_allock(as, i-(int16_t)i, allow);
- }
- emit_tai(as, pi, r, base, i);
-}
-
-#define emit_loadn(as, r, tv) \
- emit_lsptr(as, PPCI_LFD, ((r) & 31), (void *)(tv), RSET_GPR)
-
-/* Get/set global_State fields. */
-static void emit_lsglptr(ASMState *as, PPCIns pi, Reg r, int32_t ofs)
-{
- emit_tai(as, pi, r, RID_JGL, ofs-32768);
-}
-
-#define emit_getgl(as, r, field) \
- emit_lsglptr(as, PPCI_LWZ, (r), (int32_t)offsetof(global_State, field))
-#define emit_setgl(as, r, field) \
- emit_lsglptr(as, PPCI_STW, (r), (int32_t)offsetof(global_State, field))
-
-/* Trace number is determined from per-trace exit stubs. */
-#define emit_setvmstate(as, i) UNUSED(i)
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for internal jumps. */
-typedef MCode *MCLabel;
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-static void emit_condbranch(ASMState *as, PPCIns pi, PPCCC cc, MCode *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- lua_assert(((delta + 0x8000) >> 16) == 0);
- pi ^= (delta & 0x8000) * (PPCF_Y/0x8000);
- *p = pi | PPCF_CC(cc) | ((uint32_t)delta & 0xffffu);
-}
-
-static void emit_jmp(ASMState *as, MCode *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- *p = PPCI_B | (delta & 0x03fffffcu);
-}
-
-static void emit_call(ASMState *as, void *target)
-{
- MCode *p = --as->mcp;
- ptrdiff_t delta = (char *)target - (char *)p;
- if ((((delta>>2) + 0x00800000) >> 24) == 0) {
- *p = PPCI_BL | (delta & 0x03fffffcu);
- } else { /* Target out of range: need indirect call. Don't use arg reg. */
- RegSet allow = RSET_GPR & ~RSET_RANGE(RID_R0, REGARG_LASTGPR+1);
- Reg r = ra_allock(as, i32ptr(target), allow);
- *p = PPCI_BCTRL;
- p[-1] = PPCI_MTCTR | PPCF_T(r);
- as->mcp = p-1;
- }
-}
-
-/* -- Emit generic operations --------------------------------------------- */
-
-#define emit_mr(as, dst, src) \
- emit_asb(as, PPCI_MR, (dst), (src), (src))
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- UNUSED(ir);
- if (dst < RID_MAX_GPR)
- emit_mr(as, dst, src);
- else
- emit_fb(as, PPCI_FMR, dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tai(as, PPCI_LWZ, r, RID_SP, ofs);
- else
- emit_fai(as, irt_isnum(ir->t) ? PPCI_LFD : PPCI_LFS, r, RID_SP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_tai(as, PPCI_STW, r, RID_SP, ofs);
- else
- emit_fai(as, irt_isnum(ir->t) ? PPCI_STFD : PPCI_STFS, r, RID_SP, ofs);
-}
-
-/* Emit a compare (for equality) with a constant operand. */
-static void emit_cmpi(ASMState *as, Reg r, int32_t k)
-{
- if (checki16(k)) {
- emit_ai(as, PPCI_CMPWI, r, k);
- } else if (checku16(k)) {
- emit_ai(as, PPCI_CMPLWI, r, k);
- } else {
- emit_ai(as, PPCI_CMPLWI, RID_TMP, k);
- emit_asi(as, PPCI_XORIS, RID_TMP, r, (k >> 16));
- }
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- emit_tai(as, PPCI_ADDI, r, r, ofs);
- if (!checki16(ofs))
- emit_tai(as, PPCI_ADDIS, r, r, (ofs + 32768) >> 16);
- }
-}
-
-static void emit_spsub(ASMState *as, int32_t ofs)
-{
- if (ofs) {
- emit_tai(as, PPCI_STWU, RID_TMP, RID_SP, -ofs);
- emit_tai(as, PPCI_ADDI, RID_TMP, RID_SP,
- CFRAME_SIZE + (as->parent ? as->parent->spadjust : 0));
- }
-}
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_x86.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_x86.h
deleted file mode 100644
index 9c371a9..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_emit_x86.h
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
-** x86/x64 instruction emitter.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* -- Emit basic instructions --------------------------------------------- */
-
-#define MODRM(mode, r1, r2) ((MCode)((mode)+(((r1)&7)<<3)+((r2)&7)))
-
-#if LJ_64
-#define REXRB(p, rr, rb) \
- { MCode rex = 0x40 + (((rr)>>1)&4) + (((rb)>>3)&1); \
- if (rex != 0x40) *--(p) = rex; }
-#define FORCE_REX 0x200
-#define REX_64 (FORCE_REX|0x080000)
-#else
-#define REXRB(p, rr, rb) ((void)0)
-#define FORCE_REX 0
-#define REX_64 0
-#endif
-
-#define emit_i8(as, i) (*--as->mcp = (MCode)(i))
-#define emit_i32(as, i) (*(int32_t *)(as->mcp-4) = (i), as->mcp -= 4)
-#define emit_u32(as, u) (*(uint32_t *)(as->mcp-4) = (u), as->mcp -= 4)
-
-#define emit_x87op(as, xo) \
- (*(uint16_t *)(as->mcp-2) = (uint16_t)(xo), as->mcp -= 2)
-
-/* op */
-static LJ_AINLINE MCode *emit_op(x86Op xo, Reg rr, Reg rb, Reg rx,
- MCode *p, int delta)
-{
- int n = (int8_t)xo;
-#if defined(__GNUC__)
- if (__builtin_constant_p(xo) && n == -2)
- p[delta-2] = (MCode)(xo >> 24);
- else if (__builtin_constant_p(xo) && n == -3)
- *(uint16_t *)(p+delta-3) = (uint16_t)(xo >> 16);
- else
-#endif
- *(uint32_t *)(p+delta-5) = (uint32_t)xo;
- p += n + delta;
-#if LJ_64
- {
- uint32_t rex = 0x40 + ((rr>>1)&(4+(FORCE_REX>>1)))+((rx>>2)&2)+((rb>>3)&1);
- if (rex != 0x40) {
- rex |= (rr >> 16);
- if (n == -4) { *p = (MCode)rex; rex = (MCode)(xo >> 8); }
- else if ((xo & 0xffffff) == 0x6600fd) { *p = (MCode)rex; rex = 0x66; }
- *--p = (MCode)rex;
- }
- }
-#else
- UNUSED(rr); UNUSED(rb); UNUSED(rx);
-#endif
- return p;
-}
-
-/* op + modrm */
-#define emit_opm(xo, mode, rr, rb, p, delta) \
- (p[(delta)-1] = MODRM((mode), (rr), (rb)), \
- emit_op((xo), (rr), (rb), 0, (p), (delta)))
-
-/* op + modrm + sib */
-#define emit_opmx(xo, mode, scale, rr, rb, rx, p) \
- (p[-1] = MODRM((scale), (rx), (rb)), \
- p[-2] = MODRM((mode), (rr), RID_ESP), \
- emit_op((xo), (rr), (rb), (rx), (p), -1))
-
-/* op r1, r2 */
-static void emit_rr(ASMState *as, x86Op xo, Reg r1, Reg r2)
-{
- MCode *p = as->mcp;
- as->mcp = emit_opm(xo, XM_REG, r1, r2, p, 0);
-}
-
-#if LJ_64 && defined(LUA_USE_ASSERT)
-/* [addr] is sign-extended in x64 and must be in lower 2G (not 4G). */
-static int32_t ptr2addr(const void *p)
-{
- lua_assert((uintptr_t)p < (uintptr_t)0x80000000);
- return i32ptr(p);
-}
-#else
-#define ptr2addr(p) (i32ptr((p)))
-#endif
-
-/* op r, [addr] */
-static void emit_rma(ASMState *as, x86Op xo, Reg rr, const void *addr)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = ptr2addr(addr);
-#if LJ_64
- p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- as->mcp = emit_opm(xo, XM_OFS0, rr, RID_ESP, p, -5);
-#else
- as->mcp = emit_opm(xo, XM_OFS0, rr, RID_EBP, p, -4);
-#endif
-}
-
-/* op r, [base+ofs] */
-static void emit_rmro(ASMState *as, x86Op xo, Reg rr, Reg rb, int32_t ofs)
-{
- MCode *p = as->mcp;
- x86Mode mode;
- if (ra_hasreg(rb)) {
- if (ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(ofs)) {
- *--p = (MCode)ofs;
- mode = XM_OFS8;
- } else {
- p -= 4;
- *(int32_t *)p = ofs;
- mode = XM_OFS32;
- }
- if ((rb&7) == RID_ESP)
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- } else {
- *(int32_t *)(p-4) = ofs;
-#if LJ_64
- p[-5] = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- p -= 5;
- rb = RID_ESP;
-#else
- p -= 4;
- rb = RID_EBP;
-#endif
- mode = XM_OFS0;
- }
- as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op r, [base+idx*scale+ofs] */
-static void emit_rmrxo(ASMState *as, x86Op xo, Reg rr, Reg rb, Reg rx,
- x86Mode scale, int32_t ofs)
-{
- MCode *p = as->mcp;
- x86Mode mode;
- if (ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(ofs)) {
- mode = XM_OFS8;
- *--p = (MCode)ofs;
- } else {
- mode = XM_OFS32;
- p -= 4;
- *(int32_t *)p = ofs;
- }
- as->mcp = emit_opmx(xo, mode, scale, rr, rb, rx, p);
-}
-
-/* op r, i */
-static void emit_gri(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
- MCode *p = as->mcp;
- x86Op xo;
- if (checki8(i)) {
- *--p = (MCode)i;
- xo = XG_TOXOi8(xg);
- } else {
- p -= 4;
- *(int32_t *)p = i;
- xo = XG_TOXOi(xg);
- }
- as->mcp = emit_opm(xo, XM_REG, (Reg)(xg & 7) | (rb & REX_64), rb, p, 0);
-}
-
-/* op [base+ofs], i */
-static void emit_gmroi(ASMState *as, x86Group xg, Reg rb, int32_t ofs,
- int32_t i)
-{
- x86Op xo;
- if (checki8(i)) {
- emit_i8(as, i);
- xo = XG_TOXOi8(xg);
- } else {
- emit_i32(as, i);
- xo = XG_TOXOi(xg);
- }
- emit_rmro(as, xo, (Reg)(xg & 7), rb, ofs);
-}
-
-#define emit_shifti(as, xg, r, i) \
- (emit_i8(as, (i)), emit_rr(as, XO_SHIFTi, (Reg)(xg), (r)))
-
-/* op r, rm/mrm */
-static void emit_mrm(ASMState *as, x86Op xo, Reg rr, Reg rb)
-{
- MCode *p = as->mcp;
- x86Mode mode = XM_REG;
- if (rb == RID_MRM) {
- rb = as->mrm.base;
- if (rb == RID_NONE) {
- rb = RID_EBP;
- mode = XM_OFS0;
- p -= 4;
- *(int32_t *)p = as->mrm.ofs;
- if (as->mrm.idx != RID_NONE)
- goto mrmidx;
-#if LJ_64
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_EBP);
- rb = RID_ESP;
-#endif
- } else {
- if (as->mrm.ofs == 0 && (rb&7) != RID_EBP) {
- mode = XM_OFS0;
- } else if (checki8(as->mrm.ofs)) {
- *--p = (MCode)as->mrm.ofs;
- mode = XM_OFS8;
- } else {
- p -= 4;
- *(int32_t *)p = as->mrm.ofs;
- mode = XM_OFS32;
- }
- if (as->mrm.idx != RID_NONE) {
- mrmidx:
- as->mcp = emit_opmx(xo, mode, as->mrm.scale, rr, rb, as->mrm.idx, p);
- return;
- }
- if ((rb&7) == RID_ESP)
- *--p = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
- }
- }
- as->mcp = emit_opm(xo, mode, rr, rb, p, 0);
-}
-
-/* op rm/mrm, i */
-static void emit_gmrmi(ASMState *as, x86Group xg, Reg rb, int32_t i)
-{
- x86Op xo;
- if (checki8(i)) {
- emit_i8(as, i);
- xo = XG_TOXOi8(xg);
- } else {
- emit_i32(as, i);
- xo = XG_TOXOi(xg);
- }
- emit_mrm(as, xo, (Reg)(xg & 7) | (rb & REX_64), (rb & ~REX_64));
-}
-
-/* -- Emit loads/stores --------------------------------------------------- */
-
-/* Instruction selection for XMM moves. */
-#define XMM_MOVRR(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVSD : XO_MOVAPS)
-#define XMM_MOVRM(as) ((as->flags & JIT_F_SPLIT_XMM) ? XO_MOVLPD : XO_MOVSD)
-
-/* mov [base+ofs], i */
-static void emit_movmroi(ASMState *as, Reg base, int32_t ofs, int32_t i)
-{
- emit_i32(as, i);
- emit_rmro(as, XO_MOVmi, 0, base, ofs);
-}
-
-/* mov [base+ofs], r */
-#define emit_movtomro(as, r, base, ofs) \
- emit_rmro(as, XO_MOVto, (r), (base), (ofs))
-
-/* Get/set global_State fields. */
-#define emit_opgl(as, xo, r, field) \
- emit_rma(as, (xo), (r), (void *)&J2G(as->J)->field)
-#define emit_getgl(as, r, field) emit_opgl(as, XO_MOV, (r), field)
-#define emit_setgl(as, r, field) emit_opgl(as, XO_MOVto, (r), field)
-
-#define emit_setvmstate(as, i) \
- (emit_i32(as, i), emit_opgl(as, XO_MOVmi, 0, vmstate))
-
-/* mov r, i / xor r, r */
-static void emit_loadi(ASMState *as, Reg r, int32_t i)
-{
- /* XOR r,r is shorter, but modifies the flags. This is bad for HIOP/jcc. */
- if (i == 0 && !(LJ_32 && (IR(as->curins)->o == IR_HIOP ||
- (as->curins+1 < as->T->nins &&
- IR(as->curins+1)->o == IR_HIOP))) &&
- !((*as->mcp == 0x0f && (as->mcp[1] & 0xf0) == XI_JCCn) ||
- (*as->mcp & 0xf0) == XI_JCCs)) {
- emit_rr(as, XO_ARITH(XOg_XOR), r, r);
- } else {
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = i;
- p[-5] = (MCode)(XI_MOVri+(r&7));
- p -= 5;
- REXRB(p, 0, r);
- as->mcp = p;
- }
-}
-
-/* mov r, addr */
-#define emit_loada(as, r, addr) \
- emit_loadi(as, (r), ptr2addr((addr)))
-
-#if LJ_64
-/* mov r, imm64 or shorter 32 bit extended load. */
-static void emit_loadu64(ASMState *as, Reg r, uint64_t u64)
-{
- if (checku32(u64)) { /* 32 bit load clears upper 32 bits. */
- emit_loadi(as, r, (int32_t)u64);
- } else if (checki32((int64_t)u64)) { /* Sign-extended 32 bit load. */
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = (int32_t)u64;
- as->mcp = emit_opm(XO_MOVmi, XM_REG, REX_64, r, p, -4);
- } else { /* Full-size 64 bit load. */
- MCode *p = as->mcp;
- *(uint64_t *)(p-8) = u64;
- p[-9] = (MCode)(XI_MOVri+(r&7));
- p[-10] = 0x48 + ((r>>3)&1);
- p -= 10;
- as->mcp = p;
- }
-}
-#endif
-
-/* movsd r, [&tv->n] / xorps r, r */
-static void emit_loadn(ASMState *as, Reg r, cTValue *tv)
-{
- if (tvispzero(tv)) /* Use xor only for +0. */
- emit_rr(as, XO_XORPS, r, r);
- else
- emit_rma(as, XMM_MOVRM(as), r, &tv->n);
-}
-
-/* -- Emit control-flow instructions -------------------------------------- */
-
-/* Label for short jumps. */
-typedef MCode *MCLabel;
-
-#if LJ_32 && LJ_HASFFI
-/* jmp short target */
-static void emit_sjmp(ASMState *as, MCLabel target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int8_t)delta);
- p[-1] = (MCode)(int8_t)delta;
- p[-2] = XI_JMPs;
- as->mcp = p - 2;
-}
-#endif
-
-/* jcc short target */
-static void emit_sjcc(ASMState *as, int cc, MCLabel target)
-{
- MCode *p = as->mcp;
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int8_t)delta);
- p[-1] = (MCode)(int8_t)delta;
- p[-2] = (MCode)(XI_JCCs+(cc&15));
- as->mcp = p - 2;
-}
-
-/* jcc short (pending target) */
-static MCLabel emit_sjcc_label(ASMState *as, int cc)
-{
- MCode *p = as->mcp;
- p[-1] = 0;
- p[-2] = (MCode)(XI_JCCs+(cc&15));
- as->mcp = p - 2;
- return p;
-}
-
-/* Fixup jcc short target. */
-static void emit_sfixup(ASMState *as, MCLabel source)
-{
- source[-1] = (MCode)(as->mcp-source);
-}
-
-/* Return label pointing to current PC. */
-#define emit_label(as) ((as)->mcp)
-
-/* Compute relative 32 bit offset for jump and call instructions. */
-static LJ_AINLINE int32_t jmprel(MCode *p, MCode *target)
-{
- ptrdiff_t delta = target - p;
- lua_assert(delta == (int32_t)delta);
- return (int32_t)delta;
-}
-
-/* jcc target */
-static void emit_jcc(ASMState *as, int cc, MCode *target)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = (MCode)(XI_JCCn+(cc&15));
- p[-6] = 0x0f;
- as->mcp = p - 6;
-}
-
-/* jmp target */
-static void emit_jmp(ASMState *as, MCode *target)
-{
- MCode *p = as->mcp;
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_JMP;
- as->mcp = p - 5;
-}
-
-/* call target */
-static void emit_call_(ASMState *as, MCode *target)
-{
- MCode *p = as->mcp;
-#if LJ_64
- if (target-p != (int32_t)(target-p)) {
- /* Assumes RID_RET is never an argument to calls and always clobbered. */
- emit_rr(as, XO_GROUP5, XOg_CALL, RID_RET);
- emit_loadu64(as, RID_RET, (uint64_t)target);
- return;
- }
-#endif
- *(int32_t *)(p-4) = jmprel(p, target);
- p[-5] = XI_CALL;
- as->mcp = p - 5;
-}
-
-#define emit_call(as, f) emit_call_(as, (MCode *)(void *)(f))
-
-/* -- Emit generic operations --------------------------------------------- */
-
-/* Use 64 bit operations to handle 64 bit IR types. */
-#if LJ_64
-#define REX_64IR(ir, r) ((r) + (irt_is64((ir)->t) ? REX_64 : 0))
-#else
-#define REX_64IR(ir, r) (r)
-#endif
-
-/* Generic move between two regs. */
-static void emit_movrr(ASMState *as, IRIns *ir, Reg dst, Reg src)
-{
- UNUSED(ir);
- if (dst < RID_MAX_GPR)
- emit_rr(as, XO_MOV, REX_64IR(ir, dst), src);
- else
- emit_rr(as, XMM_MOVRR(as), dst, src);
-}
-
-/* Generic load of register from stack slot. */
-static void emit_spload(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_rmro(as, XO_MOV, REX_64IR(ir, r), RID_ESP, ofs);
- else
- emit_rmro(as, irt_isnum(ir->t) ? XMM_MOVRM(as) : XO_MOVSS, r, RID_ESP, ofs);
-}
-
-/* Generic store of register to stack slot. */
-static void emit_spstore(ASMState *as, IRIns *ir, Reg r, int32_t ofs)
-{
- if (r < RID_MAX_GPR)
- emit_rmro(as, XO_MOVto, REX_64IR(ir, r), RID_ESP, ofs);
- else
- emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto, r, RID_ESP, ofs);
-}
-
-/* Add offset to pointer. */
-static void emit_addptr(ASMState *as, Reg r, int32_t ofs)
-{
- if (ofs) {
- if ((as->flags & JIT_F_LEA_AGU))
- emit_rmro(as, XO_LEA, r, r, ofs);
- else
- emit_gri(as, XG_ARITHi(XOg_ADD), r, ofs);
- }
-}
-
-#define emit_spsub(as, ofs) emit_addptr(as, RID_ESP|REX_64, -(ofs))
-
-/* Prefer rematerialization of BASE/L from global_State over spills. */
-#define emit_canremat(ref) ((ref) <= REF_BASE)
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_err.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_err.h
deleted file mode 100644
index cba5fb7..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_err.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-** Error handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_ERR_H
-#define _LJ_ERR_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-
-typedef enum {
-#define ERRDEF(name, msg) \
- LJ_ERR_##name, LJ_ERR_##name##_ = LJ_ERR_##name + sizeof(msg)-1,
-#include "lj_errmsg.h"
- LJ_ERR__MAX
-} ErrMsg;
-
-LJ_DATA const char *lj_err_allmsg;
-#define err2msg(em) (lj_err_allmsg+(int)(em))
-
-LJ_FUNC GCstr *lj_err_str(lua_State *L, ErrMsg em);
-LJ_FUNCA_NORET void LJ_FASTCALL lj_err_throw(lua_State *L, int errcode);
-LJ_FUNC_NORET void lj_err_mem(lua_State *L);
-LJ_FUNC_NORET void lj_err_run(lua_State *L);
-LJ_FUNC_NORET void lj_err_msg(lua_State *L, ErrMsg em);
-LJ_FUNC_NORET void lj_err_lex(lua_State *L, GCstr *src, const char *tok,
- BCLine line, ErrMsg em, va_list argp);
-LJ_FUNC_NORET void lj_err_optype(lua_State *L, cTValue *o, ErrMsg opm);
-LJ_FUNC_NORET void lj_err_comp(lua_State *L, cTValue *o1, cTValue *o2);
-LJ_FUNC_NORET void lj_err_optype_call(lua_State *L, TValue *o);
-LJ_FUNC_NORET void lj_err_callermsg(lua_State *L, const char *msg);
-LJ_FUNC_NORET void lj_err_callerv(lua_State *L, ErrMsg em, ...);
-LJ_FUNC_NORET void lj_err_caller(lua_State *L, ErrMsg em);
-LJ_FUNC_NORET void lj_err_arg(lua_State *L, int narg, ErrMsg em);
-LJ_FUNC_NORET void lj_err_argv(lua_State *L, int narg, ErrMsg em, ...);
-LJ_FUNC_NORET void lj_err_argtype(lua_State *L, int narg, const char *xname);
-LJ_FUNC_NORET void lj_err_argt(lua_State *L, int narg, int tt);
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_errmsg.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_errmsg.h
deleted file mode 100644
index ac552f4..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_errmsg.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-** VM error messages.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* This file may be included multiple times with different ERRDEF macros. */
-
-/* Basic error handling. */
-ERRDEF(ERRMEM, "not enough memory")
-ERRDEF(ERRERR, "error in error handling")
-ERRDEF(ERRCPP, "C++ exception")
-
-/* Allocations. */
-ERRDEF(STROV, "string length overflow")
-ERRDEF(UDATAOV, "userdata length overflow")
-ERRDEF(STKOV, "stack overflow")
-ERRDEF(STKOVM, "stack overflow (%s)")
-ERRDEF(TABOV, "table overflow")
-
-/* Table indexing. */
-ERRDEF(NANIDX, "table index is NaN")
-ERRDEF(NILIDX, "table index is nil")
-ERRDEF(NEXTIDX, "invalid key to " LUA_QL("next"))
-
-/* Metamethod resolving. */
-ERRDEF(BADCALL, "attempt to call a %s value")
-ERRDEF(BADOPRT, "attempt to %s %s " LUA_QS " (a %s value)")
-ERRDEF(BADOPRV, "attempt to %s a %s value")
-ERRDEF(BADCMPT, "attempt to compare %s with %s")
-ERRDEF(BADCMPV, "attempt to compare two %s values")
-ERRDEF(GETLOOP, "loop in gettable")
-ERRDEF(SETLOOP, "loop in settable")
-ERRDEF(OPCALL, "call")
-ERRDEF(OPINDEX, "index")
-ERRDEF(OPARITH, "perform arithmetic on")
-ERRDEF(OPCAT, "concatenate")
-ERRDEF(OPLEN, "get length of")
-
-/* Type checks. */
-ERRDEF(BADSELF, "calling " LUA_QS " on bad self (%s)")
-ERRDEF(BADARG, "bad argument #%d to " LUA_QS " (%s)")
-ERRDEF(BADTYPE, "%s expected, got %s")
-ERRDEF(BADVAL, "invalid value")
-ERRDEF(NOVAL, "value expected")
-ERRDEF(NOCORO, "coroutine expected")
-ERRDEF(NOTABN, "nil or table expected")
-ERRDEF(NOLFUNC, "Lua function expected")
-ERRDEF(NOFUNCL, "function or level expected")
-ERRDEF(NOSFT, "string/function/table expected")
-ERRDEF(NOPROXY, "boolean or proxy expected")
-ERRDEF(FORINIT, LUA_QL("for") " initial value must be a number")
-ERRDEF(FORLIM, LUA_QL("for") " limit must be a number")
-ERRDEF(FORSTEP, LUA_QL("for") " step must be a number")
-
-/* C API checks. */
-ERRDEF(NOENV, "no calling environment")
-ERRDEF(CYIELD, "attempt to yield across C-call boundary")
-ERRDEF(BADLU, "bad light userdata pointer")
-ERRDEF(NOGCMM, "bad action while in __gc metamethod")
-#if LJ_TARGET_WINDOWS
-ERRDEF(BADFPU, "bad FPU precision (use D3DCREATE_FPU_PRESERVE with DirectX)")
-#endif
-
-/* Standard library function errors. */
-ERRDEF(ASSERT, "assertion failed!")
-ERRDEF(PROTMT, "cannot change a protected metatable")
-ERRDEF(UNPACK, "too many results to unpack")
-ERRDEF(RDRSTR, "reader function must return a string")
-ERRDEF(PRTOSTR, LUA_QL("tostring") " must return a string to " LUA_QL("print"))
-ERRDEF(IDXRNG, "index out of range")
-ERRDEF(BASERNG, "base out of range")
-ERRDEF(LVLRNG, "level out of range")
-ERRDEF(INVLVL, "invalid level")
-ERRDEF(INVOPT, "invalid option")
-ERRDEF(INVOPTM, "invalid option " LUA_QS)
-ERRDEF(INVFMT, "invalid format")
-ERRDEF(SETFENV, LUA_QL("setfenv") " cannot change environment of given object")
-ERRDEF(CORUN, "cannot resume running coroutine")
-ERRDEF(CODEAD, "cannot resume dead coroutine")
-ERRDEF(COSUSP, "cannot resume non-suspended coroutine")
-ERRDEF(TABINS, "wrong number of arguments to " LUA_QL("insert"))
-ERRDEF(TABCAT, "invalid value (%s) at index %d in table for " LUA_QL("concat"))
-ERRDEF(TABSORT, "invalid order function for sorting")
-ERRDEF(IOCLFL, "attempt to use a closed file")
-ERRDEF(IOSTDCL, "standard file is closed")
-ERRDEF(OSUNIQF, "unable to generate a unique filename")
-ERRDEF(OSDATEF, "field " LUA_QS " missing in date table")
-ERRDEF(STRDUMP, "unable to dump given function")
-ERRDEF(STRSLC, "string slice too long")
-ERRDEF(STRPATB, "missing " LUA_QL("[") " after " LUA_QL("%f") " in pattern")
-ERRDEF(STRPATC, "invalid pattern capture")
-ERRDEF(STRPATE, "malformed pattern (ends with " LUA_QL("%") ")")
-ERRDEF(STRPATM, "malformed pattern (missing " LUA_QL("]") ")")
-ERRDEF(STRPATU, "unbalanced pattern")
-ERRDEF(STRPATX, "pattern too complex")
-ERRDEF(STRCAPI, "invalid capture index")
-ERRDEF(STRCAPN, "too many captures")
-ERRDEF(STRCAPU, "unfinished capture")
-ERRDEF(STRFMTO, "invalid option " LUA_QL("%%%c") " to " LUA_QL("format"))
-ERRDEF(STRFMTR, "invalid format (repeated flags)")
-ERRDEF(STRFMTW, "invalid format (width or precision too long)")
-ERRDEF(STRGSRV, "invalid replacement value (a %s)")
-ERRDEF(BADMODN, "name conflict for module " LUA_QS)
-#if LJ_HASJIT
-ERRDEF(JITPROT, "runtime code generation failed, restricted kernel?")
-#if LJ_TARGET_X86ORX64
-ERRDEF(NOJIT, "JIT compiler disabled, CPU does not support SSE2")
-#else
-ERRDEF(NOJIT, "JIT compiler disabled")
-#endif
-#elif defined(LJ_ARCH_NOJIT)
-ERRDEF(NOJIT, "no JIT compiler for this architecture (yet)")
-#else
-ERRDEF(NOJIT, "JIT compiler permanently disabled by build option")
-#endif
-ERRDEF(JITOPT, "unknown or malformed optimization flag " LUA_QS)
-
-/* Lexer/parser errors. */
-ERRDEF(XMODE, "attempt to load chunk with wrong mode")
-ERRDEF(XNEAR, "%s near " LUA_QS)
-ERRDEF(XELEM, "lexical element too long")
-ERRDEF(XLINES, "chunk has too many lines")
-ERRDEF(XLEVELS, "chunk has too many syntax levels")
-ERRDEF(XNUMBER, "malformed number")
-ERRDEF(XLSTR, "unfinished long string")
-ERRDEF(XLCOM, "unfinished long comment")
-ERRDEF(XSTR, "unfinished string")
-ERRDEF(XESC, "invalid escape sequence")
-ERRDEF(XLDELIM, "invalid long string delimiter")
-ERRDEF(XTOKEN, LUA_QS " expected")
-ERRDEF(XJUMP, "control structure too long")
-ERRDEF(XSLOTS, "function or expression too complex")
-ERRDEF(XLIMC, "chunk has more than %d local variables")
-ERRDEF(XLIMM, "main function has more than %d %s")
-ERRDEF(XLIMF, "function at line %d has more than %d %s")
-ERRDEF(XMATCH, LUA_QS " expected (to close " LUA_QS " at line %d)")
-ERRDEF(XFIXUP, "function too long for return fixup")
-ERRDEF(XPARAM, "<name> or " LUA_QL("...") " expected")
-#if !LJ_52
-ERRDEF(XAMBIG, "ambiguous syntax (function call x new statement)")
-#endif
-ERRDEF(XFUNARG, "function arguments expected")
-ERRDEF(XSYMBOL, "unexpected symbol")
-ERRDEF(XDOTS, "cannot use " LUA_QL("...") " outside a vararg function")
-ERRDEF(XSYNTAX, "syntax error")
-ERRDEF(XFOR, LUA_QL("=") " or " LUA_QL("in") " expected")
-ERRDEF(XBREAK, "no loop to break")
-ERRDEF(XLUNDEF, "undefined label " LUA_QS)
-ERRDEF(XLDUP, "duplicate label " LUA_QS)
-ERRDEF(XGSCOPE, "<goto %s> jumps into the scope of local " LUA_QS)
-
-/* Bytecode reader errors. */
-ERRDEF(BCFMT, "cannot load incompatible bytecode")
-ERRDEF(BCBAD, "cannot load malformed bytecode")
-
-#if LJ_HASFFI
-/* FFI errors. */
-ERRDEF(FFI_INVTYPE, "invalid C type")
-ERRDEF(FFI_INVSIZE, "size of C type is unknown or too large")
-ERRDEF(FFI_BADSCL, "bad storage class")
-ERRDEF(FFI_DECLSPEC, "declaration specifier expected")
-ERRDEF(FFI_BADTAG, "undeclared or implicit tag " LUA_QS)
-ERRDEF(FFI_REDEF, "attempt to redefine " LUA_QS)
-ERRDEF(FFI_NUMPARAM, "wrong number of type parameters")
-ERRDEF(FFI_INITOV, "too many initializers for " LUA_QS)
-ERRDEF(FFI_BADCONV, "cannot convert " LUA_QS " to " LUA_QS)
-ERRDEF(FFI_BADLEN, "attempt to get length of " LUA_QS)
-ERRDEF(FFI_BADCONCAT, "attempt to concatenate " LUA_QS " and " LUA_QS)
-ERRDEF(FFI_BADARITH, "attempt to perform arithmetic on " LUA_QS " and " LUA_QS)
-ERRDEF(FFI_BADCOMP, "attempt to compare " LUA_QS " with " LUA_QS)
-ERRDEF(FFI_BADCALL, LUA_QS " is not callable")
-ERRDEF(FFI_NUMARG, "wrong number of arguments for function call")
-ERRDEF(FFI_BADMEMBER, LUA_QS " has no member named " LUA_QS)
-ERRDEF(FFI_BADIDX, LUA_QS " cannot be indexed")
-ERRDEF(FFI_BADIDXW, LUA_QS " cannot be indexed with " LUA_QS)
-ERRDEF(FFI_BADMM, LUA_QS " has no " LUA_QS " metamethod")
-ERRDEF(FFI_WRCONST, "attempt to write to constant location")
-ERRDEF(FFI_NODECL, "missing declaration for symbol " LUA_QS)
-ERRDEF(FFI_BADCBACK, "bad callback")
-#if LJ_OS_NOJIT
-ERRDEF(FFI_CBACKOV, "no support for callbacks on this OS")
-#else
-ERRDEF(FFI_CBACKOV, "too many callbacks")
-#endif
-ERRDEF(FFI_NYIPACKBIT, "NYI: packed bit fields")
-ERRDEF(FFI_NYICALL, "NYI: cannot call this C function (yet)")
-#endif
-
-#undef ERRDEF
-
-/* Detecting unused error messages:
- awk -F, '/^ERRDEF/ { gsub(/ERRDEF./, ""); printf "grep -q LJ_ERR_%s *.[ch] || echo %s\n", $1, $1}' lj_errmsg.h | sh
-*/
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ff.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ff.h
deleted file mode 100644
index 31d65a0..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ff.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-** Fast function IDs.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FF_H
-#define _LJ_FF_H
-
-/* Fast function ID. */
-typedef enum {
- FF_LUA_ = FF_LUA, /* Lua function (must be 0). */
- FF_C_ = FF_C, /* Regular C function (must be 1). */
-#define FFDEF(name) FF_##name,
-#include "lj_ffdef.h"
- FF__MAX
-} FastFunc;
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ffrecord.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ffrecord.h
deleted file mode 100644
index 3b40745..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ffrecord.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-** Fast function call recorder.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FFRECORD_H
-#define _LJ_FFRECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* Data used by handlers to record a fast function. */
-typedef struct RecordFFData {
- TValue *argv; /* Runtime argument values. */
- ptrdiff_t nres; /* Number of returned results (defaults to 1). */
- uint32_t data; /* Per-ffid auxiliary data (opcode, literal etc.). */
-} RecordFFData;
-
-LJ_FUNC int32_t lj_ffrecord_select_mode(jit_State *J, TRef tr, TValue *tv);
-LJ_FUNC void lj_ffrecord_func(jit_State *J);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_frame.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_frame.h
deleted file mode 100644
index eb5df30..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_frame.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
-** Stack frames.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FRAME_H
-#define _LJ_FRAME_H
-
-#include "lj_obj.h"
-#include "lj_bc.h"
-
-/* -- Lua stack frame ----------------------------------------------------- */
-
-/* Frame type markers in callee function slot (callee base-1). */
-enum {
- FRAME_LUA, FRAME_C, FRAME_CONT, FRAME_VARG,
- FRAME_LUAP, FRAME_CP, FRAME_PCALL, FRAME_PCALLH
-};
-#define FRAME_TYPE 3
-#define FRAME_P 4
-#define FRAME_TYPEP (FRAME_TYPE|FRAME_P)
-
-/* Macros to access and modify Lua frames. */
-#define frame_gc(f) (gcref((f)->fr.func))
-#define frame_func(f) (&frame_gc(f)->fn)
-#define frame_ftsz(f) ((f)->fr.tp.ftsz)
-
-#define frame_type(f) (frame_ftsz(f) & FRAME_TYPE)
-#define frame_typep(f) (frame_ftsz(f) & FRAME_TYPEP)
-#define frame_islua(f) (frame_type(f) == FRAME_LUA)
-#define frame_isc(f) (frame_type(f) == FRAME_C)
-#define frame_iscont(f) (frame_typep(f) == FRAME_CONT)
-#define frame_isvarg(f) (frame_typep(f) == FRAME_VARG)
-#define frame_ispcall(f) ((frame_ftsz(f) & 6) == FRAME_PCALL)
-
-#define frame_pc(f) (mref((f)->fr.tp.pcr, const BCIns))
-#define frame_contpc(f) (frame_pc((f)-1))
-#if LJ_64
-#define frame_contf(f) \
- ((ASMFunction)(void *)((intptr_t)lj_vm_asm_begin + \
- (intptr_t)(int32_t)((f)-1)->u32.lo))
-#else
-#define frame_contf(f) ((ASMFunction)gcrefp(((f)-1)->gcr, void))
-#endif
-#define frame_delta(f) (frame_ftsz(f) >> 3)
-#define frame_sized(f) (frame_ftsz(f) & ~FRAME_TYPEP)
-
-#define frame_prevl(f) ((f) - (1+bc_a(frame_pc(f)[-1])))
-#define frame_prevd(f) ((TValue *)((char *)(f) - frame_sized(f)))
-#define frame_prev(f) (frame_islua(f)?frame_prevl(f):frame_prevd(f))
-/* Note: this macro does not skip over FRAME_VARG. */
-
-#define setframe_pc(f, pc) (setmref((f)->fr.tp.pcr, (pc)))
-#define setframe_ftsz(f, sz) ((f)->fr.tp.ftsz = (sz))
-#define setframe_gc(f, p) (setgcref((f)->fr.func, (p)))
-
-/* -- C stack frame ------------------------------------------------------- */
-
-/* Macros to access and modify the C stack frame chain. */
-
-/* These definitions must match with the arch-specific *.dasc files. */
-#if LJ_TARGET_X86
-#define CFRAME_OFS_ERRF (15*4)
-#define CFRAME_OFS_NRES (14*4)
-#define CFRAME_OFS_PREV (13*4)
-#define CFRAME_OFS_L (12*4)
-#define CFRAME_OFS_PC (6*4)
-#define CFRAME_OFS_MULTRES (5*4)
-#define CFRAME_SIZE (12*4)
-#define CFRAME_SHIFT_MULTRES 0
-#elif LJ_TARGET_X64
-#if LJ_ABI_WIN
-#define CFRAME_OFS_PREV (13*8)
-#define CFRAME_OFS_PC (25*4)
-#define CFRAME_OFS_L (24*4)
-#define CFRAME_OFS_ERRF (23*4)
-#define CFRAME_OFS_NRES (22*4)
-#define CFRAME_OFS_MULTRES (21*4)
-#define CFRAME_SIZE (10*8)
-#define CFRAME_SIZE_JIT (CFRAME_SIZE + 9*16 + 4*8)
-#define CFRAME_SHIFT_MULTRES 0
-#else
-#define CFRAME_OFS_PREV (4*8)
-#define CFRAME_OFS_PC (7*4)
-#define CFRAME_OFS_L (6*4)
-#define CFRAME_OFS_ERRF (5*4)
-#define CFRAME_OFS_NRES (4*4)
-#define CFRAME_OFS_MULTRES (1*4)
-#if LJ_NO_UNWIND
-#define CFRAME_SIZE (12*8)
-#else
-#define CFRAME_SIZE (10*8)
-#endif
-#define CFRAME_SIZE_JIT (CFRAME_SIZE + 16)
-#define CFRAME_SHIFT_MULTRES 0
-#endif
-#elif LJ_TARGET_ARM
-#define CFRAME_OFS_ERRF 24
-#define CFRAME_OFS_NRES 20
-#define CFRAME_OFS_PREV 16
-#define CFRAME_OFS_L 12
-#define CFRAME_OFS_PC 8
-#define CFRAME_OFS_MULTRES 4
-#if LJ_ARCH_HASFPU
-#define CFRAME_SIZE 128
-#else
-#define CFRAME_SIZE 64
-#endif
-#define CFRAME_SHIFT_MULTRES 3
-#elif LJ_TARGET_PPC
-#if LJ_TARGET_XBOX360
-#define CFRAME_OFS_ERRF 424
-#define CFRAME_OFS_NRES 420
-#define CFRAME_OFS_PREV 400
-#define CFRAME_OFS_L 416
-#define CFRAME_OFS_PC 412
-#define CFRAME_OFS_MULTRES 408
-#define CFRAME_SIZE 384
-#define CFRAME_SHIFT_MULTRES 3
-#elif LJ_ARCH_PPC64
-#define CFRAME_OFS_ERRF 472
-#define CFRAME_OFS_NRES 468
-#define CFRAME_OFS_PREV 448
-#define CFRAME_OFS_L 464
-#define CFRAME_OFS_PC 460
-#define CFRAME_OFS_MULTRES 456
-#define CFRAME_SIZE 400
-#define CFRAME_SHIFT_MULTRES 3
-#else
-#define CFRAME_OFS_ERRF 48
-#define CFRAME_OFS_NRES 44
-#define CFRAME_OFS_PREV 40
-#define CFRAME_OFS_L 36
-#define CFRAME_OFS_PC 32
-#define CFRAME_OFS_MULTRES 28
-#define CFRAME_SIZE 272
-#define CFRAME_SHIFT_MULTRES 3
-#endif
-#elif LJ_TARGET_PPCSPE
-#define CFRAME_OFS_ERRF 28
-#define CFRAME_OFS_NRES 24
-#define CFRAME_OFS_PREV 20
-#define CFRAME_OFS_L 16
-#define CFRAME_OFS_PC 12
-#define CFRAME_OFS_MULTRES 8
-#define CFRAME_SIZE 184
-#define CFRAME_SHIFT_MULTRES 3
-#elif LJ_TARGET_MIPS
-#define CFRAME_OFS_ERRF 124
-#define CFRAME_OFS_NRES 120
-#define CFRAME_OFS_PREV 116
-#define CFRAME_OFS_L 112
-#define CFRAME_OFS_PC 20
-#define CFRAME_OFS_MULTRES 16
-#define CFRAME_SIZE 112
-#define CFRAME_SHIFT_MULTRES 3
-#else
-#error "Missing CFRAME_* definitions for this architecture"
-#endif
-
-#ifndef CFRAME_SIZE_JIT
-#define CFRAME_SIZE_JIT CFRAME_SIZE
-#endif
-
-#define CFRAME_RESUME 1
-#define CFRAME_UNWIND_FF 2 /* Only used in unwinder. */
-#define CFRAME_RAWMASK (~(intptr_t)(CFRAME_RESUME|CFRAME_UNWIND_FF))
-
-#define cframe_errfunc(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_ERRF))
-#define cframe_nres(cf) (*(int32_t *)(((char *)(cf))+CFRAME_OFS_NRES))
-#define cframe_prev(cf) (*(void **)(((char *)(cf))+CFRAME_OFS_PREV))
-#define cframe_multres(cf) (*(uint32_t *)(((char *)(cf))+CFRAME_OFS_MULTRES))
-#define cframe_multres_n(cf) (cframe_multres((cf)) >> CFRAME_SHIFT_MULTRES)
-#define cframe_L(cf) \
- (&gcref(*(GCRef *)(((char *)(cf))+CFRAME_OFS_L))->th)
-#define cframe_pc(cf) \
- (mref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), const BCIns))
-#define setcframe_L(cf, L) \
- (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_L), (L)))
-#define setcframe_pc(cf, pc) \
- (setmref(*(MRef *)(((char *)(cf))+CFRAME_OFS_PC), (pc)))
-#define cframe_canyield(cf) ((intptr_t)(cf) & CFRAME_RESUME)
-#define cframe_unwind_ff(cf) ((intptr_t)(cf) & CFRAME_UNWIND_FF)
-#define cframe_raw(cf) ((void *)((intptr_t)(cf) & CFRAME_RAWMASK))
-#define cframe_Lpc(L) cframe_pc(cframe_raw(L->cframe))
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_func.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_func.h
deleted file mode 100644
index 901751b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_func.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-** Function handling (prototypes, functions and upvalues).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_FUNC_H
-#define _LJ_FUNC_H
-
-#include "lj_obj.h"
-
-/* Prototypes. */
-LJ_FUNC void LJ_FASTCALL lj_func_freeproto(global_State *g, GCproto *pt);
-
-/* Upvalues. */
-LJ_FUNCA void LJ_FASTCALL lj_func_closeuv(lua_State *L, TValue *level);
-LJ_FUNC void LJ_FASTCALL lj_func_freeuv(global_State *g, GCupval *uv);
-
-/* Functions (closures). */
-LJ_FUNC GCfunc *lj_func_newC(lua_State *L, MSize nelems, GCtab *env);
-LJ_FUNC GCfunc *lj_func_newL_empty(lua_State *L, GCproto *pt, GCtab *env);
-LJ_FUNCA GCfunc *lj_func_newL_gc(lua_State *L, GCproto *pt, GCfuncL *parent);
-LJ_FUNC void LJ_FASTCALL lj_func_free(global_State *g, GCfunc *c);
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gc.h
deleted file mode 100644
index 57ac714..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gc.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
-** Garbage collector.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_GC_H
-#define _LJ_GC_H
-
-#include "lj_obj.h"
-
-/* Garbage collector states. Order matters. */
-enum {
- GCSpause, GCSpropagate, GCSatomic, GCSsweepstring, GCSsweep, GCSfinalize
-};
-
-/* Bitmasks for marked field of GCobj. */
-#define LJ_GC_WHITE0 0x01
-#define LJ_GC_WHITE1 0x02
-#define LJ_GC_BLACK 0x04
-#define LJ_GC_FINALIZED 0x08
-#define LJ_GC_WEAKKEY 0x08
-#define LJ_GC_WEAKVAL 0x10
-#define LJ_GC_CDATA_FIN 0x10
-#define LJ_GC_FIXED 0x20
-#define LJ_GC_SFIXED 0x40
-
-#define LJ_GC_WHITES (LJ_GC_WHITE0 | LJ_GC_WHITE1)
-#define LJ_GC_COLORS (LJ_GC_WHITES | LJ_GC_BLACK)
-#define LJ_GC_WEAK (LJ_GC_WEAKKEY | LJ_GC_WEAKVAL)
-
-/* Macros to test and set GCobj colors. */
-#define iswhite(x) ((x)->gch.marked & LJ_GC_WHITES)
-#define isblack(x) ((x)->gch.marked & LJ_GC_BLACK)
-#define isgray(x) (!((x)->gch.marked & (LJ_GC_BLACK|LJ_GC_WHITES)))
-#define tviswhite(x) (tvisgcv(x) && iswhite(gcV(x)))
-#define otherwhite(g) (g->gc.currentwhite ^ LJ_GC_WHITES)
-#define isdead(g, v) ((v)->gch.marked & otherwhite(g) & LJ_GC_WHITES)
-
-#define curwhite(g) ((g)->gc.currentwhite & LJ_GC_WHITES)
-#define newwhite(g, x) (obj2gco(x)->gch.marked = (uint8_t)curwhite(g))
-#define makewhite(g, x) \
- ((x)->gch.marked = ((x)->gch.marked & (uint8_t)~LJ_GC_COLORS) | curwhite(g))
-#define flipwhite(x) ((x)->gch.marked ^= LJ_GC_WHITES)
-#define black2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_BLACK)
-#define fixstring(s) ((s)->marked |= LJ_GC_FIXED)
-#define markfinalized(x) ((x)->gch.marked |= LJ_GC_FINALIZED)
-
-/* Collector. */
-LJ_FUNC size_t lj_gc_separateudata(global_State *g, int all);
-LJ_FUNC void lj_gc_finalize_udata(lua_State *L);
-#if LJ_HASFFI
-LJ_FUNC void lj_gc_finalize_cdata(lua_State *L);
-#else
-#define lj_gc_finalize_cdata(L) UNUSED(L)
-#endif
-LJ_FUNC void lj_gc_freeall(global_State *g);
-LJ_FUNCA int LJ_FASTCALL lj_gc_step(lua_State *L);
-LJ_FUNCA void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L);
-#if LJ_HASJIT
-LJ_FUNC int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps);
-#endif
-LJ_FUNC void lj_gc_fullgc(lua_State *L);
-
-/* GC check: drive collector forward if the GC threshold has been reached. */
-#define lj_gc_check(L) \
- { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
- lj_gc_step(L); }
-#define lj_gc_check_fixtop(L) \
- { if (LJ_UNLIKELY(G(L)->gc.total >= G(L)->gc.threshold)) \
- lj_gc_step_fixtop(L); }
-
-/* Write barriers. */
-LJ_FUNC void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v);
-LJ_FUNCA void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv);
-LJ_FUNC void lj_gc_closeuv(global_State *g, GCupval *uv);
-#if LJ_HASJIT
-LJ_FUNC void lj_gc_barriertrace(global_State *g, uint32_t traceno);
-#endif
-
-/* Move the GC propagation frontier back for tables (make it gray again). */
-static LJ_AINLINE void lj_gc_barrierback(global_State *g, GCtab *t)
-{
- GCobj *o = obj2gco(t);
- lua_assert(isblack(o) && !isdead(g, o));
- lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
- black2gray(o);
- setgcrefr(t->gclist, g->gc.grayagain);
- setgcref(g->gc.grayagain, o);
-}
-
-/* Barrier for stores to table objects. TValue and GCobj variant. */
-#define lj_gc_anybarriert(L, t) \
- { if (LJ_UNLIKELY(isblack(obj2gco(t)))) lj_gc_barrierback(G(L), (t)); }
-#define lj_gc_barriert(L, t, tv) \
- { if (tviswhite(tv) && isblack(obj2gco(t))) \
- lj_gc_barrierback(G(L), (t)); }
-#define lj_gc_objbarriert(L, t, o) \
- { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) \
- lj_gc_barrierback(G(L), (t)); }
-
-/* Barrier for stores to any other object. TValue and GCobj variant. */
-#define lj_gc_barrier(L, p, tv) \
- { if (tviswhite(tv) && isblack(obj2gco(p))) \
- lj_gc_barrierf(G(L), obj2gco(p), gcV(tv)); }
-#define lj_gc_objbarrier(L, p, o) \
- { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \
- lj_gc_barrierf(G(L), obj2gco(p), obj2gco(o)); }
-
-/* Allocator. */
-LJ_FUNC void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz);
-LJ_FUNC void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size);
-LJ_FUNC void *lj_mem_grow(lua_State *L, void *p,
- MSize *szp, MSize lim, MSize esz);
-
-#define lj_mem_new(L, s) lj_mem_realloc(L, NULL, 0, (s))
-
-static LJ_AINLINE void lj_mem_free(global_State *g, void *p, size_t osize)
-{
- g->gc.total -= (MSize)osize;
- g->allocf(g->allocd, p, osize, 0);
-}
-
-#define lj_mem_newvec(L, n, t) ((t *)lj_mem_new(L, (MSize)((n)*sizeof(t))))
-#define lj_mem_reallocvec(L, p, on, n, t) \
- ((p) = (t *)lj_mem_realloc(L, p, (on)*sizeof(t), (MSize)((n)*sizeof(t))))
-#define lj_mem_growvec(L, p, n, m, t) \
- ((p) = (t *)lj_mem_grow(L, (p), &(n), (m), (MSize)sizeof(t)))
-#define lj_mem_freevec(g, p, n, t) lj_mem_free(g, (p), (n)*sizeof(t))
-
-#define lj_mem_newobj(L, t) ((t *)lj_mem_newgco(L, sizeof(t)))
-#define lj_mem_newt(L, s, t) ((t *)lj_mem_new(L, (s)))
-#define lj_mem_freet(g, p) lj_mem_free(g, (p), sizeof(*(p)))
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gdbjit.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gdbjit.h
deleted file mode 100644
index bbaa156..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_gdbjit.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
-** Client for the GDB JIT API.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_GDBJIT_H
-#define _LJ_GDBJIT_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT && defined(LUAJIT_USE_GDBJIT)
-
-LJ_FUNC void lj_gdbjit_addtrace(jit_State *J, GCtrace *T);
-LJ_FUNC void lj_gdbjit_deltrace(jit_State *J, GCtrace *T);
-
-#else
-#define lj_gdbjit_addtrace(J, T) UNUSED(T)
-#define lj_gdbjit_deltrace(J, T) UNUSED(T)
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ir.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ir.h
deleted file mode 100644
index ded3e0e..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ir.h
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
-** SSA IR (Intermediate Representation) format.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IR_H
-#define _LJ_IR_H
-
-#include "lj_obj.h"
-
-/* -- IR instructions ----------------------------------------------------- */
-
-/* IR instruction definition. Order matters, see below. ORDER IR */
-#define IRDEF(_) \
- /* Guarded assertions. */ \
- /* Must be properly aligned to flip opposites (^1) and (un)ordered (^4). */ \
- _(LT, N , ref, ref) \
- _(GE, N , ref, ref) \
- _(LE, N , ref, ref) \
- _(GT, N , ref, ref) \
- \
- _(ULT, N , ref, ref) \
- _(UGE, N , ref, ref) \
- _(ULE, N , ref, ref) \
- _(UGT, N , ref, ref) \
- \
- _(EQ, C , ref, ref) \
- _(NE, C , ref, ref) \
- \
- _(ABC, N , ref, ref) \
- _(RETF, S , ref, ref) \
- \
- /* Miscellaneous ops. */ \
- _(NOP, N , ___, ___) \
- _(BASE, N , lit, lit) \
- _(PVAL, N , lit, ___) \
- _(GCSTEP, S , ___, ___) \
- _(HIOP, S , ref, ref) \
- _(LOOP, S , ___, ___) \
- _(USE, S , ref, ___) \
- _(PHI, S , ref, ref) \
- _(RENAME, S , ref, lit) \
- \
- /* Constants. */ \
- _(KPRI, N , ___, ___) \
- _(KINT, N , cst, ___) \
- _(KGC, N , cst, ___) \
- _(KPTR, N , cst, ___) \
- _(KKPTR, N , cst, ___) \
- _(KNULL, N , cst, ___) \
- _(KNUM, N , cst, ___) \
- _(KINT64, N , cst, ___) \
- _(KSLOT, N , ref, lit) \
- \
- /* Bit ops. */ \
- _(BNOT, N , ref, ___) \
- _(BSWAP, N , ref, ___) \
- _(BAND, C , ref, ref) \
- _(BOR, C , ref, ref) \
- _(BXOR, C , ref, ref) \
- _(BSHL, N , ref, ref) \
- _(BSHR, N , ref, ref) \
- _(BSAR, N , ref, ref) \
- _(BROL, N , ref, ref) \
- _(BROR, N , ref, ref) \
- \
- /* Arithmetic ops. ORDER ARITH */ \
- _(ADD, C , ref, ref) \
- _(SUB, N , ref, ref) \
- _(MUL, C , ref, ref) \
- _(DIV, N , ref, ref) \
- _(MOD, N , ref, ref) \
- _(POW, N , ref, ref) \
- _(NEG, N , ref, ref) \
- \
- _(ABS, N , ref, ref) \
- _(ATAN2, N , ref, ref) \
- _(LDEXP, N , ref, ref) \
- _(MIN, C , ref, ref) \
- _(MAX, C , ref, ref) \
- _(FPMATH, N , ref, lit) \
- \
- /* Overflow-checking arithmetic ops. */ \
- _(ADDOV, CW, ref, ref) \
- _(SUBOV, NW, ref, ref) \
- _(MULOV, CW, ref, ref) \
- \
- /* Memory ops. A = array, H = hash, U = upvalue, F = field, S = stack. */ \
- \
- /* Memory references. */ \
- _(AREF, R , ref, ref) \
- _(HREFK, R , ref, ref) \
- _(HREF, L , ref, ref) \
- _(NEWREF, S , ref, ref) \
- _(UREFO, LW, ref, lit) \
- _(UREFC, LW, ref, lit) \
- _(FREF, R , ref, lit) \
- _(STRREF, N , ref, ref) \
- \
- /* Loads and Stores. These must be in the same order. */ \
- _(ALOAD, L , ref, ___) \
- _(HLOAD, L , ref, ___) \
- _(ULOAD, L , ref, ___) \
- _(FLOAD, L , ref, lit) \
- _(XLOAD, L , ref, lit) \
- _(SLOAD, L , lit, lit) \
- _(VLOAD, L , ref, ___) \
- \
- _(ASTORE, S , ref, ref) \
- _(HSTORE, S , ref, ref) \
- _(USTORE, S , ref, ref) \
- _(FSTORE, S , ref, ref) \
- _(XSTORE, S , ref, ref) \
- \
- /* Allocations. */ \
- _(SNEW, N , ref, ref) /* CSE is ok, not marked as A. */ \
- _(XSNEW, A , ref, ref) \
- _(TNEW, AW, lit, lit) \
- _(TDUP, AW, ref, ___) \
- _(CNEW, AW, ref, ref) \
- _(CNEWI, NW, ref, ref) /* CSE is ok, not marked as A. */ \
- \
- /* Barriers. */ \
- _(TBAR, S , ref, ___) \
- _(OBAR, S , ref, ref) \
- _(XBAR, S , ___, ___) \
- \
- /* Type conversions. */ \
- _(CONV, NW, ref, lit) \
- _(TOBIT, N , ref, ref) \
- _(TOSTR, N , ref, ___) \
- _(STRTO, N , ref, ___) \
- \
- /* Calls. */ \
- _(CALLN, N , ref, lit) \
- _(CALLL, L , ref, lit) \
- _(CALLS, S , ref, lit) \
- _(CALLXS, S , ref, ref) \
- _(CARG, N , ref, ref) \
- \
- /* End of list. */
-
-/* IR opcodes (max. 256). */
-typedef enum {
-#define IRENUM(name, m, m1, m2) IR_##name,
-IRDEF(IRENUM)
-#undef IRENUM
- IR__MAX
-} IROp;
-
-/* Stored opcode. */
-typedef uint8_t IROp1;
-
-LJ_STATIC_ASSERT(((int)IR_EQ^1) == (int)IR_NE);
-LJ_STATIC_ASSERT(((int)IR_LT^1) == (int)IR_GE);
-LJ_STATIC_ASSERT(((int)IR_LE^1) == (int)IR_GT);
-LJ_STATIC_ASSERT(((int)IR_LT^3) == (int)IR_GT);
-LJ_STATIC_ASSERT(((int)IR_LT^4) == (int)IR_ULT);
-
-/* Delta between xLOAD and xSTORE. */
-#define IRDELTA_L2S ((int)IR_ASTORE - (int)IR_ALOAD)
-
-LJ_STATIC_ASSERT((int)IR_HLOAD + IRDELTA_L2S == (int)IR_HSTORE);
-LJ_STATIC_ASSERT((int)IR_ULOAD + IRDELTA_L2S == (int)IR_USTORE);
-LJ_STATIC_ASSERT((int)IR_FLOAD + IRDELTA_L2S == (int)IR_FSTORE);
-LJ_STATIC_ASSERT((int)IR_XLOAD + IRDELTA_L2S == (int)IR_XSTORE);
-
-/* -- Named IR literals --------------------------------------------------- */
-
-/* FPMATH sub-functions. ORDER FPM. */
-#define IRFPMDEF(_) \
- _(FLOOR) _(CEIL) _(TRUNC) /* Must be first and in this order. */ \
- _(SQRT) _(EXP) _(EXP2) _(LOG) _(LOG2) _(LOG10) \
- _(SIN) _(COS) _(TAN) \
- _(OTHER)
-
-typedef enum {
-#define FPMENUM(name) IRFPM_##name,
-IRFPMDEF(FPMENUM)
-#undef FPMENUM
- IRFPM__MAX
-} IRFPMathOp;
-
-/* FLOAD fields. */
-#define IRFLDEF(_) \
- _(STR_LEN, offsetof(GCstr, len)) \
- _(FUNC_ENV, offsetof(GCfunc, l.env)) \
- _(FUNC_PC, offsetof(GCfunc, l.pc)) \
- _(TAB_META, offsetof(GCtab, metatable)) \
- _(TAB_ARRAY, offsetof(GCtab, array)) \
- _(TAB_NODE, offsetof(GCtab, node)) \
- _(TAB_ASIZE, offsetof(GCtab, asize)) \
- _(TAB_HMASK, offsetof(GCtab, hmask)) \
- _(TAB_NOMM, offsetof(GCtab, nomm)) \
- _(UDATA_META, offsetof(GCudata, metatable)) \
- _(UDATA_UDTYPE, offsetof(GCudata, udtype)) \
- _(UDATA_FILE, sizeof(GCudata)) \
- _(CDATA_CTYPEID, offsetof(GCcdata, ctypeid)) \
- _(CDATA_PTR, sizeof(GCcdata)) \
- _(CDATA_INT, sizeof(GCcdata)) \
- _(CDATA_INT64, sizeof(GCcdata)) \
- _(CDATA_INT64_4, sizeof(GCcdata) + 4)
-
-typedef enum {
-#define FLENUM(name, ofs) IRFL_##name,
-IRFLDEF(FLENUM)
-#undef FLENUM
- IRFL__MAX
-} IRFieldID;
-
-/* SLOAD mode bits, stored in op2. */
-#define IRSLOAD_PARENT 0x01 /* Coalesce with parent trace. */
-#define IRSLOAD_FRAME 0x02 /* Load hiword of frame. */
-#define IRSLOAD_TYPECHECK 0x04 /* Needs type check. */
-#define IRSLOAD_CONVERT 0x08 /* Number to integer conversion. */
-#define IRSLOAD_READONLY 0x10 /* Read-only, omit slot store. */
-#define IRSLOAD_INHERIT 0x20 /* Inherited by exits/side traces. */
-
-/* XLOAD mode, stored in op2. */
-#define IRXLOAD_READONLY 1 /* Load from read-only data. */
-#define IRXLOAD_VOLATILE 2 /* Load from volatile data. */
-#define IRXLOAD_UNALIGNED 4 /* Unaligned load. */
-
-/* CONV mode, stored in op2. */
-#define IRCONV_SRCMASK 0x001f /* Source IRType. */
-#define IRCONV_DSTMASK 0x03e0 /* Dest. IRType (also in ir->t). */
-#define IRCONV_DSH 5
-#define IRCONV_NUM_INT ((IRT_NUM<<IRCONV_DSH)|IRT_INT)
-#define IRCONV_INT_NUM ((IRT_INT<<IRCONV_DSH)|IRT_NUM)
-#define IRCONV_TRUNC 0x0400 /* Truncate number to integer. */
-#define IRCONV_SEXT 0x0800 /* Sign-extend integer to integer. */
-#define IRCONV_MODEMASK 0x0fff
-#define IRCONV_CONVMASK 0xf000
-#define IRCONV_CSH 12
-/* Number to integer conversion mode. Ordered by strength of the checks. */
-#define IRCONV_TOBIT (0<<IRCONV_CSH) /* None. Cache only: TOBIT conv. */
-#define IRCONV_ANY (1<<IRCONV_CSH) /* Any FP number is ok. */
-#define IRCONV_INDEX (2<<IRCONV_CSH) /* Check + special backprop rules. */
-#define IRCONV_CHECK (3<<IRCONV_CSH) /* Number checked for integerness. */
-
-/* -- IR operands --------------------------------------------------------- */
-
-/* IR operand mode (2 bit). */
-typedef enum {
- IRMref, /* IR reference. */
- IRMlit, /* 16 bit unsigned literal. */
- IRMcst, /* Constant literal: i, gcr or ptr. */
- IRMnone /* Unused operand. */
-} IRMode;
-#define IRM___ IRMnone
-
-/* Mode bits: Commutative, {Normal/Ref, Alloc, Load, Store}, Non-weak guard. */
-#define IRM_C 0x10
-
-#define IRM_N 0x00
-#define IRM_R IRM_N
-#define IRM_A 0x20
-#define IRM_L 0x40
-#define IRM_S 0x60
-
-#define IRM_W 0x80
-
-#define IRM_NW (IRM_N|IRM_W)
-#define IRM_CW (IRM_C|IRM_W)
-#define IRM_AW (IRM_A|IRM_W)
-#define IRM_LW (IRM_L|IRM_W)
-
-#define irm_op1(m) ((IRMode)((m)&3))
-#define irm_op2(m) ((IRMode)(((m)>>2)&3))
-#define irm_iscomm(m) ((m) & IRM_C)
-#define irm_kind(m) ((m) & IRM_S)
-
-#define IRMODE(name, m, m1, m2) (((IRM##m1)|((IRM##m2)<<2)|(IRM_##m))^IRM_W),
-
-LJ_DATA const uint8_t lj_ir_mode[IR__MAX+1];
-
-/* -- IR instruction types ------------------------------------------------ */
-
-/* Map of itypes to non-negative numbers. ORDER LJ_T.
-** LJ_TUPVAL/LJ_TTRACE never appear in a TValue. Use these itypes for
-** IRT_P32 and IRT_P64, which never escape the IR.
-** The various integers are only used in the IR and can only escape to
-** a TValue after implicit or explicit conversion. Their types must be
-** contiguous and next to IRT_NUM (see the typerange macros below).
-*/
-#define IRTDEF(_) \
- _(NIL, 4) _(FALSE, 4) _(TRUE, 4) _(LIGHTUD, LJ_64 ? 8 : 4) _(STR, 4) \
- _(P32, 4) _(THREAD, 4) _(PROTO, 4) _(FUNC, 4) _(P64, 8) _(CDATA, 4) \
- _(TAB, 4) _(UDATA, 4) \
- _(FLOAT, 4) _(NUM, 8) _(I8, 1) _(U8, 1) _(I16, 2) _(U16, 2) \
- _(INT, 4) _(U32, 4) _(I64, 8) _(U64, 8) \
- _(SOFTFP, 4) /* There is room for 9 more types. */
-
-/* IR result type and flags (8 bit). */
-typedef enum {
-#define IRTENUM(name, size) IRT_##name,
-IRTDEF(IRTENUM)
-#undef IRTENUM
- IRT__MAX,
-
- /* Native pointer type and the corresponding integer type. */
- IRT_PTR = LJ_64 ? IRT_P64 : IRT_P32,
- IRT_INTP = LJ_64 ? IRT_I64 : IRT_INT,
- IRT_UINTP = LJ_64 ? IRT_U64 : IRT_U32,
-
- /* Additional flags. */
- IRT_MARK = 0x20, /* Marker for misc. purposes. */
- IRT_ISPHI = 0x40, /* Instruction is left or right PHI operand. */
- IRT_GUARD = 0x80, /* Instruction is a guard. */
-
- /* Masks. */
- IRT_TYPE = 0x1f,
- IRT_T = 0xff
-} IRType;
-
-#define irtype_ispri(irt) ((uint32_t)(irt) <= IRT_TRUE)
-
-/* Stored IRType. */
-typedef struct IRType1 { uint8_t irt; } IRType1;
-
-#define IRT(o, t) ((uint32_t)(((o)<<8) | (t)))
-#define IRTI(o) (IRT((o), IRT_INT))
-#define IRTN(o) (IRT((o), IRT_NUM))
-#define IRTG(o, t) (IRT((o), IRT_GUARD|(t)))
-#define IRTGI(o) (IRT((o), IRT_GUARD|IRT_INT))
-
-#define irt_t(t) ((IRType)(t).irt)
-#define irt_type(t) ((IRType)((t).irt & IRT_TYPE))
-#define irt_sametype(t1, t2) ((((t1).irt ^ (t2).irt) & IRT_TYPE) == 0)
-#define irt_typerange(t, first, last) \
- ((uint32_t)((t).irt & IRT_TYPE) - (uint32_t)(first) <= (uint32_t)(last-first))
-
-#define irt_isnil(t) (irt_type(t) == IRT_NIL)
-#define irt_ispri(t) ((uint32_t)irt_type(t) <= IRT_TRUE)
-#define irt_islightud(t) (irt_type(t) == IRT_LIGHTUD)
-#define irt_isstr(t) (irt_type(t) == IRT_STR)
-#define irt_istab(t) (irt_type(t) == IRT_TAB)
-#define irt_iscdata(t) (irt_type(t) == IRT_CDATA)
-#define irt_isfloat(t) (irt_type(t) == IRT_FLOAT)
-#define irt_isnum(t) (irt_type(t) == IRT_NUM)
-#define irt_isint(t) (irt_type(t) == IRT_INT)
-#define irt_isi8(t) (irt_type(t) == IRT_I8)
-#define irt_isu8(t) (irt_type(t) == IRT_U8)
-#define irt_isi16(t) (irt_type(t) == IRT_I16)
-#define irt_isu16(t) (irt_type(t) == IRT_U16)
-#define irt_isu32(t) (irt_type(t) == IRT_U32)
-#define irt_isi64(t) (irt_type(t) == IRT_I64)
-#define irt_isu64(t) (irt_type(t) == IRT_U64)
-
-#define irt_isfp(t) (irt_isnum(t) || irt_isfloat(t))
-#define irt_isinteger(t) (irt_typerange((t), IRT_I8, IRT_INT))
-#define irt_isgcv(t) (irt_typerange((t), IRT_STR, IRT_UDATA))
-#define irt_isaddr(t) (irt_typerange((t), IRT_LIGHTUD, IRT_UDATA))
-#define irt_isint64(t) (irt_typerange((t), IRT_I64, IRT_U64))
-
-#if LJ_64
-#define IRT_IS64 \
- ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64)|(1u<<IRT_P64)|(1u<<IRT_LIGHTUD))
-#else
-#define IRT_IS64 \
- ((1u<<IRT_NUM)|(1u<<IRT_I64)|(1u<<IRT_U64))
-#endif
-
-#define irt_is64(t) ((IRT_IS64 >> irt_type(t)) & 1)
-#define irt_is64orfp(t) (((IRT_IS64|(1u<<IRT_FLOAT))>>irt_type(t)) & 1)
-
-#define irt_size(t) (lj_ir_type_size[irt_t((t))])
-
-LJ_DATA const uint8_t lj_ir_type_size[];
-
-static LJ_AINLINE IRType itype2irt(const TValue *tv)
-{
- if (tvisint(tv))
- return IRT_INT;
- else if (tvisnum(tv))
- return IRT_NUM;
-#if LJ_64
- else if (tvislightud(tv))
- return IRT_LIGHTUD;
-#endif
- else
- return (IRType)~itype(tv);
-}
-
-static LJ_AINLINE uint32_t irt_toitype_(IRType t)
-{
- lua_assert(!LJ_64 || t != IRT_LIGHTUD);
- if (LJ_DUALNUM && t > IRT_NUM) {
- return LJ_TISNUM;
- } else {
- lua_assert(t <= IRT_NUM);
- return ~(uint32_t)t;
- }
-}
-
-#define irt_toitype(t) irt_toitype_(irt_type((t)))
-
-#define irt_isguard(t) ((t).irt & IRT_GUARD)
-#define irt_ismarked(t) ((t).irt & IRT_MARK)
-#define irt_setmark(t) ((t).irt |= IRT_MARK)
-#define irt_clearmark(t) ((t).irt &= ~IRT_MARK)
-#define irt_isphi(t) ((t).irt & IRT_ISPHI)
-#define irt_setphi(t) ((t).irt |= IRT_ISPHI)
-#define irt_clearphi(t) ((t).irt &= ~IRT_ISPHI)
-
-/* Stored combined IR opcode and type. */
-typedef uint16_t IROpT;
-
-/* -- IR references ------------------------------------------------------- */
-
-/* IR references. */
-typedef uint16_t IRRef1; /* One stored reference. */
-typedef uint32_t IRRef2; /* Two stored references. */
-typedef uint32_t IRRef; /* Used to pass around references. */
-
-/* Fixed references. */
-enum {
- REF_BIAS = 0x8000,
- REF_TRUE = REF_BIAS-3,
- REF_FALSE = REF_BIAS-2,
- REF_NIL = REF_BIAS-1, /* \--- Constants grow downwards. */
- REF_BASE = REF_BIAS, /* /--- IR grows upwards. */
- REF_FIRST = REF_BIAS+1,
- REF_DROP = 0xffff
-};
-
-/* Note: IRMlit operands must be < REF_BIAS, too!
-** This allows for fast and uniform manipulation of all operands
-** without looking up the operand mode in lj_ir_mode:
-** - CSE calculates the maximum reference of two operands.
-** This must work with mixed reference/literal operands, too.
-** - DCE marking only checks for operand >= REF_BIAS.
-** - LOOP needs to substitute reference operands.
-** Constant references and literals must not be modified.
-*/
-
-#define IRREF2(lo, hi) ((IRRef2)(lo) | ((IRRef2)(hi) << 16))
-
-#define irref_isk(ref) ((ref) < REF_BIAS)
-
-/* Tagged IR references (32 bit).
-**
-** +-------+-------+---------------+
-** | irt | flags | ref |
-** +-------+-------+---------------+
-**
-** The tag holds a copy of the IRType and speeds up IR type checks.
-*/
-typedef uint32_t TRef;
-
-#define TREF_REFMASK 0x0000ffff
-#define TREF_FRAME 0x00010000
-#define TREF_CONT 0x00020000
-
-#define TREF(ref, t) ((TRef)((ref) + ((t)<<24)))
-
-#define tref_ref(tr) ((IRRef1)(tr))
-#define tref_t(tr) ((IRType)((tr)>>24))
-#define tref_type(tr) ((IRType)(((tr)>>24) & IRT_TYPE))
-#define tref_typerange(tr, first, last) \
- ((((tr)>>24) & IRT_TYPE) - (TRef)(first) <= (TRef)(last-first))
-
-#define tref_istype(tr, t) (((tr) & (IRT_TYPE<<24)) == ((t)<<24))
-#define tref_isnil(tr) (tref_istype((tr), IRT_NIL))
-#define tref_isfalse(tr) (tref_istype((tr), IRT_FALSE))
-#define tref_istrue(tr) (tref_istype((tr), IRT_TRUE))
-#define tref_isstr(tr) (tref_istype((tr), IRT_STR))
-#define tref_isfunc(tr) (tref_istype((tr), IRT_FUNC))
-#define tref_iscdata(tr) (tref_istype((tr), IRT_CDATA))
-#define tref_istab(tr) (tref_istype((tr), IRT_TAB))
-#define tref_isudata(tr) (tref_istype((tr), IRT_UDATA))
-#define tref_isnum(tr) (tref_istype((tr), IRT_NUM))
-#define tref_isint(tr) (tref_istype((tr), IRT_INT))
-
-#define tref_isbool(tr) (tref_typerange((tr), IRT_FALSE, IRT_TRUE))
-#define tref_ispri(tr) (tref_typerange((tr), IRT_NIL, IRT_TRUE))
-#define tref_istruecond(tr) (!tref_typerange((tr), IRT_NIL, IRT_FALSE))
-#define tref_isinteger(tr) (tref_typerange((tr), IRT_I8, IRT_INT))
-#define tref_isnumber(tr) (tref_typerange((tr), IRT_NUM, IRT_INT))
-#define tref_isnumber_str(tr) (tref_isnumber((tr)) || tref_isstr((tr)))
-#define tref_isgcv(tr) (tref_typerange((tr), IRT_STR, IRT_UDATA))
-
-#define tref_isk(tr) (irref_isk(tref_ref((tr))))
-#define tref_isk2(tr1, tr2) (irref_isk(tref_ref((tr1) | (tr2))))
-
-#define TREF_PRI(t) (TREF(REF_NIL-(t), (t)))
-#define TREF_NIL (TREF_PRI(IRT_NIL))
-#define TREF_FALSE (TREF_PRI(IRT_FALSE))
-#define TREF_TRUE (TREF_PRI(IRT_TRUE))
-
-/* -- IR format ----------------------------------------------------------- */
-
-/* IR instruction format (64 bit).
-**
-** 16 16 8 8 8 8
-** +-------+-------+---+---+---+---+
-** | op1 | op2 | t | o | r | s |
-** +-------+-------+---+---+---+---+
-** | op12/i/gco | ot | prev | (alternative fields in union)
-** +---------------+-------+-------+
-** 32 16 16
-**
-** prev is only valid prior to register allocation and then reused for r + s.
-*/
-
-typedef union IRIns {
- struct {
- LJ_ENDIAN_LOHI(
- IRRef1 op1; /* IR operand 1. */
- , IRRef1 op2; /* IR operand 2. */
- )
- IROpT ot; /* IR opcode and type (overlaps t and o). */
- IRRef1 prev; /* Previous ins in same chain (overlaps r and s). */
- };
- struct {
- IRRef2 op12; /* IR operand 1 and 2 (overlaps op1 and op2). */
- LJ_ENDIAN_LOHI(
- IRType1 t; /* IR type. */
- , IROp1 o; /* IR opcode. */
- )
- LJ_ENDIAN_LOHI(
- uint8_t r; /* Register allocation (overlaps prev). */
- , uint8_t s; /* Spill slot allocation (overlaps prev). */
- )
- };
- int32_t i; /* 32 bit signed integer literal (overlaps op12). */
- GCRef gcr; /* GCobj constant (overlaps op12). */
- MRef ptr; /* Pointer constant (overlaps op12). */
-} IRIns;
-
-#define ir_kgc(ir) check_exp((ir)->o == IR_KGC, gcref((ir)->gcr))
-#define ir_kstr(ir) (gco2str(ir_kgc((ir))))
-#define ir_ktab(ir) (gco2tab(ir_kgc((ir))))
-#define ir_kfunc(ir) (gco2func(ir_kgc((ir))))
-#define ir_kcdata(ir) (gco2cd(ir_kgc((ir))))
-#define ir_knum(ir) check_exp((ir)->o == IR_KNUM, mref((ir)->ptr, cTValue))
-#define ir_kint64(ir) check_exp((ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
-#define ir_k64(ir) \
- check_exp((ir)->o == IR_KNUM || (ir)->o == IR_KINT64, mref((ir)->ptr,cTValue))
-#define ir_kptr(ir) \
- check_exp((ir)->o == IR_KPTR || (ir)->o == IR_KKPTR, mref((ir)->ptr, void))
-
-/* A store or any other op with a non-weak guard has a side-effect. */
-static LJ_AINLINE int ir_sideeff(IRIns *ir)
-{
- return (((ir->t.irt | ~IRT_GUARD) & lj_ir_mode[ir->o]) >= IRM_S);
-}
-
-LJ_STATIC_ASSERT((int)IRT_GUARD == (int)IRM_W);
-
-#endif
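The comment at the top of IRDEF notes that the guarded comparisons must be "properly aligned to flip opposites (^1) and (un)ordered (^4)", and the LJ_STATIC_ASSERT lines pin that layout down. The standalone illustration below is not part of the original header; it uses a hypothetical CMP_* enum with the same bit layout to show the payoff: negating a comparison, swapping its operands, or switching to the unsigned form is a single XOR on the opcode instead of a lookup table.

#include <stdio.h>

/* Standalone sketch (not from the deleted header): a mini enum laid out
** like the guarded comparisons above -- the opposite differs in bit 0,
** the operand-swapped form in bits 0+1, the unsigned form in bit 2. */
enum { CMP_LT, CMP_GE, CMP_LE, CMP_GT, CMP_ULT, CMP_UGE, CMP_ULE, CMP_UGT };
static const char *const cmp_name[] =
  { "LT", "GE", "LE", "GT", "ULT", "UGE", "ULE", "UGT" };

int main(void)
{
  int op = CMP_LT;
  printf("negated:          %s\n", cmp_name[op ^ 1]);  /* LT -> GE */
  printf("operands swapped: %s\n", cmp_name[op ^ 3]);  /* LT -> GT */
  printf("unsigned form:    %s\n", cmp_name[op ^ 4]);  /* LT -> ULT */
  return 0;
}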
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ircall.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ircall.h
deleted file mode 100644
index 44afde3..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_ircall.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
-** IR CALL* instruction definitions.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IRCALL_H
-#define _LJ_IRCALL_H
-
-#include "lj_obj.h"
-#include "lj_ir.h"
-#include "lj_jit.h"
-
-/* C call info for CALL* instructions. */
-typedef struct CCallInfo {
- ASMFunction func; /* Function pointer. */
- uint32_t flags; /* Number of arguments and flags. */
-} CCallInfo;
-
-#define CCI_NARGS(ci) ((ci)->flags & 0xff) /* Extract # of args. */
-#define CCI_NARGS_MAX 32 /* Max. # of args. */
-
-#define CCI_OTSHIFT 16
-#define CCI_OPTYPE(ci) ((ci)->flags >> CCI_OTSHIFT) /* Get op/type. */
-#define CCI_OPSHIFT 24
-#define CCI_OP(ci) ((ci)->flags >> CCI_OPSHIFT) /* Get op. */
-
-#define CCI_CALL_N (IR_CALLN << CCI_OPSHIFT)
-#define CCI_CALL_L (IR_CALLL << CCI_OPSHIFT)
-#define CCI_CALL_S (IR_CALLS << CCI_OPSHIFT)
-#define CCI_CALL_FN (CCI_CALL_N|CCI_CC_FASTCALL)
-#define CCI_CALL_FL (CCI_CALL_L|CCI_CC_FASTCALL)
-#define CCI_CALL_FS (CCI_CALL_S|CCI_CC_FASTCALL)
-
-/* C call info flags. */
-#define CCI_L 0x0100 /* Implicit L arg. */
-#define CCI_CASTU64 0x0200 /* Cast u64 result to number. */
-#define CCI_NOFPRCLOBBER 0x0400 /* Does not clobber any FPRs. */
-#define CCI_VARARG 0x0800 /* Vararg function. */
-
-#define CCI_CC_MASK 0x3000 /* Calling convention mask. */
-#define CCI_CC_SHIFT 12
-/* ORDER CC */
-#define CCI_CC_CDECL 0x0000 /* Default cdecl calling convention. */
-#define CCI_CC_THISCALL 0x1000 /* Thiscall calling convention. */
-#define CCI_CC_FASTCALL 0x2000 /* Fastcall calling convention. */
-#define CCI_CC_STDCALL 0x3000 /* Stdcall calling convention. */
-
-/* Helpers for conditional function definitions. */
-#define IRCALLCOND_ANY(x) x
-
-#if LJ_TARGET_X86ORX64
-#define IRCALLCOND_FPMATH(x) NULL
-#else
-#define IRCALLCOND_FPMATH(x) x
-#endif
-
-#if LJ_SOFTFP
-#define IRCALLCOND_SOFTFP(x) x
-#if LJ_HASFFI
-#define IRCALLCOND_SOFTFP_FFI(x) x
-#else
-#define IRCALLCOND_SOFTFP_FFI(x) NULL
-#endif
-#else
-#define IRCALLCOND_SOFTFP(x) NULL
-#define IRCALLCOND_SOFTFP_FFI(x) NULL
-#endif
-
-#define LJ_NEED_FP64 (LJ_TARGET_ARM || LJ_TARGET_PPC || LJ_TARGET_MIPS)
-
-#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
-#define IRCALLCOND_FP64_FFI(x) x
-#else
-#define IRCALLCOND_FP64_FFI(x) NULL
-#endif
-
-#if LJ_HASFFI
-#define IRCALLCOND_FFI(x) x
-#if LJ_32
-#define IRCALLCOND_FFI32(x) x
-#else
-#define IRCALLCOND_FFI32(x) NULL
-#endif
-#else
-#define IRCALLCOND_FFI(x) NULL
-#define IRCALLCOND_FFI32(x) NULL
-#endif
-
-#if LJ_SOFTFP
-#define ARG1_FP 2 /* Treat as 2 32 bit arguments. */
-#else
-#define ARG1_FP 1
-#endif
-
-#if LJ_32
-#define ARG2_64 4 /* Treat as 4 32 bit arguments. */
-#else
-#define ARG2_64 2
-#endif
-
-/* Function definitions for CALL* instructions. */
-#define IRCALLDEF(_) \
- _(ANY, lj_str_cmp, 2, FN, INT, CCI_NOFPRCLOBBER) \
- _(ANY, lj_str_new, 3, S, STR, CCI_L) \
- _(ANY, lj_strscan_num, 2, FN, INT, 0) \
- _(ANY, lj_str_fromint, 2, FN, STR, CCI_L) \
- _(ANY, lj_str_fromnum, 2, FN, STR, CCI_L) \
- _(ANY, lj_tab_new1, 2, FS, TAB, CCI_L) \
- _(ANY, lj_tab_dup, 2, FS, TAB, CCI_L) \
- _(ANY, lj_tab_newkey, 3, S, P32, CCI_L) \
- _(ANY, lj_tab_len, 1, FL, INT, 0) \
- _(ANY, lj_gc_step_jit, 2, FS, NIL, CCI_L) \
- _(ANY, lj_gc_barrieruv, 2, FS, NIL, 0) \
- _(ANY, lj_mem_newgco, 2, FS, P32, CCI_L) \
- _(ANY, lj_math_random_step, 1, FS, NUM, CCI_CASTU64) \
- _(ANY, lj_vm_modi, 2, FN, INT, 0) \
- _(ANY, sinh, ARG1_FP, N, NUM, 0) \
- _(ANY, cosh, ARG1_FP, N, NUM, 0) \
- _(ANY, tanh, ARG1_FP, N, NUM, 0) \
- _(ANY, fputc, 2, S, INT, 0) \
- _(ANY, fwrite, 4, S, INT, 0) \
- _(ANY, fflush, 1, S, INT, 0) \
- /* ORDER FPM */ \
- _(FPMATH, lj_vm_floor, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_ceil, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_trunc, ARG1_FP, N, NUM, 0) \
- _(FPMATH, sqrt, ARG1_FP, N, NUM, 0) \
- _(FPMATH, exp, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_exp2, ARG1_FP, N, NUM, 0) \
- _(FPMATH, log, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_log2, ARG1_FP, N, NUM, 0) \
- _(FPMATH, log10, ARG1_FP, N, NUM, 0) \
- _(FPMATH, sin, ARG1_FP, N, NUM, 0) \
- _(FPMATH, cos, ARG1_FP, N, NUM, 0) \
- _(FPMATH, tan, ARG1_FP, N, NUM, 0) \
- _(FPMATH, lj_vm_powi, ARG1_FP+1, N, NUM, 0) \
- _(FPMATH, pow, ARG1_FP*2, N, NUM, 0) \
- _(FPMATH, atan2, ARG1_FP*2, N, NUM, 0) \
- _(FPMATH, ldexp, ARG1_FP+1, N, NUM, 0) \
- _(SOFTFP, lj_vm_tobit, 2, N, INT, 0) \
- _(SOFTFP, softfp_add, 4, N, NUM, 0) \
- _(SOFTFP, softfp_sub, 4, N, NUM, 0) \
- _(SOFTFP, softfp_mul, 4, N, NUM, 0) \
- _(SOFTFP, softfp_div, 4, N, NUM, 0) \
- _(SOFTFP, softfp_cmp, 4, N, NIL, 0) \
- _(SOFTFP, softfp_i2d, 1, N, NUM, 0) \
- _(SOFTFP, softfp_d2i, 2, N, INT, 0) \
- _(SOFTFP_FFI, softfp_ui2d, 1, N, NUM, 0) \
- _(SOFTFP_FFI, softfp_f2d, 1, N, NUM, 0) \
- _(SOFTFP_FFI, softfp_d2ui, 2, N, INT, 0) \
- _(SOFTFP_FFI, softfp_d2f, 2, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_i2f, 1, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_ui2f, 1, N, FLOAT, 0) \
- _(SOFTFP_FFI, softfp_f2i, 1, N, INT, 0) \
- _(SOFTFP_FFI, softfp_f2ui, 1, N, INT, 0) \
- _(FP64_FFI, fp64_l2d, 2, N, NUM, 0) \
- _(FP64_FFI, fp64_ul2d, 2, N, NUM, 0) \
- _(FP64_FFI, fp64_l2f, 2, N, FLOAT, 0) \
- _(FP64_FFI, fp64_ul2f, 2, N, FLOAT, 0) \
- _(FP64_FFI, fp64_d2l, ARG1_FP, N, I64, 0) \
- _(FP64_FFI, fp64_d2ul, ARG1_FP, N, U64, 0) \
- _(FP64_FFI, fp64_f2l, 1, N, I64, 0) \
- _(FP64_FFI, fp64_f2ul, 1, N, U64, 0) \
- _(FFI, lj_carith_divi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_divu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_modi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_modu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_powi64, ARG2_64, N, I64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_carith_powu64, ARG2_64, N, U64, CCI_NOFPRCLOBBER) \
- _(FFI, lj_cdata_setfin, 2, FN, P32, CCI_L) \
- _(FFI, strlen, 1, L, INTP, 0) \
- _(FFI, memcpy, 3, S, PTR, 0) \
- _(FFI, memset, 3, S, PTR, 0) \
- _(FFI, lj_vm_errno, 0, S, INT, CCI_NOFPRCLOBBER) \
- _(FFI32, lj_carith_mul64, ARG2_64, N, I64, CCI_NOFPRCLOBBER)
- \
- /* End of list. */
-
-typedef enum {
-#define IRCALLENUM(cond, name, nargs, kind, type, flags) IRCALL_##name,
-IRCALLDEF(IRCALLENUM)
-#undef IRCALLENUM
- IRCALL__MAX
-} IRCallID;
-
-LJ_FUNC TRef lj_ir_call(jit_State *J, IRCallID id, ...);
-
-LJ_DATA const CCallInfo lj_ir_callinfo[IRCALL__MAX+1];
-
-/* Soft-float declarations. */
-#if LJ_SOFTFP
-#if LJ_TARGET_ARM
-#define softfp_add __aeabi_dadd
-#define softfp_sub __aeabi_dsub
-#define softfp_mul __aeabi_dmul
-#define softfp_div __aeabi_ddiv
-#define softfp_cmp __aeabi_cdcmple
-#define softfp_i2d __aeabi_i2d
-#define softfp_d2i __aeabi_d2iz
-#define softfp_ui2d __aeabi_ui2d
-#define softfp_f2d __aeabi_f2d
-#define softfp_d2ui __aeabi_d2uiz
-#define softfp_d2f __aeabi_d2f
-#define softfp_i2f __aeabi_i2f
-#define softfp_ui2f __aeabi_ui2f
-#define softfp_f2i __aeabi_f2iz
-#define softfp_f2ui __aeabi_f2uiz
-#define fp64_l2d __aeabi_l2d
-#define fp64_ul2d __aeabi_ul2d
-#define fp64_l2f __aeabi_l2f
-#define fp64_ul2f __aeabi_ul2f
-#if LJ_TARGET_IOS
-#define fp64_d2l __fixdfdi
-#define fp64_d2ul __fixunsdfdi
-#define fp64_f2l __fixsfdi
-#define fp64_f2ul __fixunssfdi
-#else
-#define fp64_d2l __aeabi_d2lz
-#define fp64_d2ul __aeabi_d2ulz
-#define fp64_f2l __aeabi_f2lz
-#define fp64_f2ul __aeabi_f2ulz
-#endif
-#else
-#error "Missing soft-float definitions for target architecture"
-#endif
-extern double softfp_add(double a, double b);
-extern double softfp_sub(double a, double b);
-extern double softfp_mul(double a, double b);
-extern double softfp_div(double a, double b);
-extern void softfp_cmp(double a, double b);
-extern double softfp_i2d(int32_t a);
-extern int32_t softfp_d2i(double a);
-#if LJ_HASFFI
-extern double softfp_ui2d(uint32_t a);
-extern double softfp_f2d(float a);
-extern uint32_t softfp_d2ui(double a);
-extern float softfp_d2f(double a);
-extern float softfp_i2f(int32_t a);
-extern float softfp_ui2f(uint32_t a);
-extern int32_t softfp_f2i(float a);
-extern uint32_t softfp_f2ui(float a);
-#endif
-#endif
-
-#if LJ_HASFFI && LJ_NEED_FP64 && !(LJ_TARGET_ARM && LJ_SOFTFP)
-#ifdef __GNUC__
-#define fp64_l2d __floatdidf
-#define fp64_ul2d __floatundidf
-#define fp64_l2f __floatdisf
-#define fp64_ul2f __floatundisf
-#define fp64_d2l __fixdfdi
-#define fp64_d2ul __fixunsdfdi
-#define fp64_f2l __fixsfdi
-#define fp64_f2ul __fixunssfdi
-#else
-#error "Missing fp64 helper definitions for this compiler"
-#endif
-#endif
-
-#if LJ_HASFFI && (LJ_SOFTFP || LJ_NEED_FP64)
-extern double fp64_l2d(int64_t a);
-extern double fp64_ul2d(uint64_t a);
-extern float fp64_l2f(int64_t a);
-extern float fp64_ul2f(uint64_t a);
-extern int64_t fp64_d2l(double a);
-extern uint64_t fp64_d2ul(double a);
-extern int64_t fp64_f2l(float a);
-extern uint64_t fp64_f2ul(float a);
-#endif
-
-#endif
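IRCALLDEF above is a single X-macro list: expanded once with IRCALLENUM it yields the IRCallID enum, and the header then declares a parallel lj_ir_callinfo[] table that is defined elsewhere, presumably from the same list. The reduced demo below is not part of the original header and shows the technique on its own; DEMO_CALLDEF, its entries and the DemoCallInfo table are hypothetical stand-ins.

#include <stdio.h>

/* Standalone sketch (not from the deleted header): one X-macro list
** expanded twice -- once into an enum, once into a matching info table,
** the same pattern IRCALLDEF uses for IRCallID and lj_ir_callinfo. */
#define DEMO_CALLDEF(_) \
  _(strlen, 1) \
  _(memcpy, 3) \
  _(memset, 3)

typedef enum {
#define DEMO_ENUM(name, nargs) DEMOCALL_##name,
DEMO_CALLDEF(DEMO_ENUM)
#undef DEMO_ENUM
  DEMOCALL__MAX
} DemoCallID;

typedef struct { const char *name; int nargs; } DemoCallInfo;

static const DemoCallInfo demo_callinfo[DEMOCALL__MAX] = {
#define DEMO_INFO(name, nargs) { #name, nargs },
DEMO_CALLDEF(DEMO_INFO)
#undef DEMO_INFO
};

int main(void)
{
  int i;
  for (i = 0; i < (int)DEMOCALL__MAX; i++)
    printf("call %d: %s, %d args\n", i, demo_callinfo[i].name,
           demo_callinfo[i].nargs);
  return 0;
}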
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_iropt.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_iropt.h
deleted file mode 100644
index b5081e0..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_iropt.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
-** Common header for IR emitter and optimizations.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_IROPT_H
-#define _LJ_IROPT_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* IR emitter. */
-LJ_FUNC void LJ_FASTCALL lj_ir_growtop(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_emit(jit_State *J);
-
-/* Save current IR in J->fold.ins, but do not emit it (yet). */
-static LJ_AINLINE void lj_ir_set_(jit_State *J, uint16_t ot, IRRef1 a, IRRef1 b)
-{
- J->fold.ins.ot = ot; J->fold.ins.op1 = a; J->fold.ins.op2 = b;
-}
-
-#define lj_ir_set(J, ot, a, b) \
- lj_ir_set_(J, (uint16_t)(ot), (IRRef1)(a), (IRRef1)(b))
-
-/* Get ref of next IR instruction and optionally grow IR.
-** Note: this may invalidate all IRIns*!
-*/
-static LJ_AINLINE IRRef lj_ir_nextins(jit_State *J)
-{
- IRRef ref = J->cur.nins;
- if (LJ_UNLIKELY(ref >= J->irtoplim)) lj_ir_growtop(J);
- J->cur.nins = ref + 1;
- return ref;
-}
-
-/* Interning of constants. */
-LJ_FUNC TRef LJ_FASTCALL lj_ir_kint(jit_State *J, int32_t k);
-LJ_FUNC void lj_ir_k64_freeall(jit_State *J);
-LJ_FUNC TRef lj_ir_k64(jit_State *J, IROp op, cTValue *tv);
-LJ_FUNC cTValue *lj_ir_k64_find(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_knum_u64(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_knumint(jit_State *J, lua_Number n);
-LJ_FUNC TRef lj_ir_kint64(jit_State *J, uint64_t u64);
-LJ_FUNC TRef lj_ir_kgc(jit_State *J, GCobj *o, IRType t);
-LJ_FUNC TRef lj_ir_kptr_(jit_State *J, IROp op, void *ptr);
-LJ_FUNC TRef lj_ir_knull(jit_State *J, IRType t);
-LJ_FUNC TRef lj_ir_kslot(jit_State *J, TRef key, IRRef slot);
-
-#if LJ_64
-#define lj_ir_kintp(J, k) lj_ir_kint64(J, (uint64_t)(k))
-#else
-#define lj_ir_kintp(J, k) lj_ir_kint(J, (int32_t)(k))
-#endif
-
-static LJ_AINLINE TRef lj_ir_knum(jit_State *J, lua_Number n)
-{
- TValue tv;
- tv.n = n;
- return lj_ir_knum_u64(J, tv.u64);
-}
-
-#define lj_ir_kstr(J, str) lj_ir_kgc(J, obj2gco((str)), IRT_STR)
-#define lj_ir_ktab(J, tab) lj_ir_kgc(J, obj2gco((tab)), IRT_TAB)
-#define lj_ir_kfunc(J, func) lj_ir_kgc(J, obj2gco((func)), IRT_FUNC)
-#define lj_ir_kptr(J, ptr) lj_ir_kptr_(J, IR_KPTR, (ptr))
-#define lj_ir_kkptr(J, ptr) lj_ir_kptr_(J, IR_KKPTR, (ptr))
-
-/* Special FP constants. */
-#define lj_ir_knum_zero(J) lj_ir_knum_u64(J, U64x(00000000,00000000))
-#define lj_ir_knum_one(J) lj_ir_knum_u64(J, U64x(3ff00000,00000000))
-#define lj_ir_knum_tobit(J) lj_ir_knum_u64(J, U64x(43380000,00000000))
-
-/* Special 128 bit SIMD constants. */
-#define lj_ir_knum_abs(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_ABS))
-#define lj_ir_knum_neg(J) lj_ir_k64(J, IR_KNUM, LJ_KSIMD(J, LJ_KSIMD_NEG))
-
-/* Access to constants. */
-LJ_FUNC void lj_ir_kvalue(lua_State *L, TValue *tv, const IRIns *ir);
-
-/* Convert IR operand types. */
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tonumber(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tonum(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_ir_tostr(jit_State *J, TRef tr);
-
-/* Miscellaneous IR ops. */
-LJ_FUNC int lj_ir_numcmp(lua_Number a, lua_Number b, IROp op);
-LJ_FUNC int lj_ir_strcmp(GCstr *a, GCstr *b, IROp op);
-LJ_FUNC void lj_ir_rollback(jit_State *J, IRRef ref);
-
-/* Emit IR instructions with on-the-fly optimizations. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fold(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_cse(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_cselim(jit_State *J, IRRef lim);
-
-/* Special return values for the fold functions. */
-enum {
- NEXTFOLD, /* Couldn't fold, pass on. */
- RETRYFOLD, /* Retry fold with modified fins. */
- KINTFOLD, /* Return ref for int constant in fins->i. */
- FAILFOLD, /* Guard would always fail. */
- DROPFOLD, /* Guard eliminated. */
- MAX_FOLD
-};
-
-#define INTFOLD(k) ((J->fold.ins.i = (k)), (TRef)KINTFOLD)
-#define INT64FOLD(k) (lj_ir_kint64(J, (k)))
-#define CONDFOLD(cond) ((TRef)FAILFOLD + (TRef)(cond))
-#define LEFTFOLD (J->fold.ins.op1)
-#define RIGHTFOLD (J->fold.ins.op2)
-#define CSEFOLD (lj_opt_cse(J))
-#define EMITFOLD (lj_ir_emit(J))
-
-/* Load/store forwarding. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_aload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_uload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_fload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_xload(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_tab_len(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_fwd_hrefk(jit_State *J);
-LJ_FUNC int LJ_FASTCALL lj_opt_fwd_href_nokey(jit_State *J);
-LJ_FUNC int LJ_FASTCALL lj_opt_fwd_tptr(jit_State *J, IRRef lim);
-LJ_FUNC int lj_opt_fwd_wasnonnil(jit_State *J, IROpT loadop, IRRef xref);
-
-/* Dead-store elimination. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ahstore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_ustore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_fstore(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_dse_xstore(jit_State *J);
-
-/* Narrowing. */
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef key);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr);
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr);
-#if LJ_HASFFI
-LJ_FUNC TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef key);
-#endif
-LJ_FUNC TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
- TValue *vb, TValue *vc, IROp op);
-LJ_FUNC TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc);
-LJ_FUNC TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc);
-LJ_FUNC TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc);
-LJ_FUNC IRType lj_opt_narrow_forl(jit_State *J, cTValue *forbase);
-
-/* Optimization passes. */
-LJ_FUNC void lj_opt_dce(jit_State *J);
-LJ_FUNC int lj_opt_loop(jit_State *J);
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
-LJ_FUNC void lj_opt_split(jit_State *J);
-#else
-#define lj_opt_split(J) UNUSED(J)
-#endif
-LJ_FUNC void lj_opt_sink(jit_State *J);
-
-#endif
-
-#endif
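The fold functions above either return a real reference or one of the special codes in the enum, and the convenience macros encode common outcomes: CONDFOLD(cond) is literally FAILFOLD plus the condition, so a guard that is statically false folds to FAILFOLD and one that is statically true folds to DROPFOLD. The standalone sketch below is not part of the original header and models only that return discipline; fold_eq_kint() and its driver are hypothetical stand-ins for a real fold rule.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch (not from the deleted header): small codes below
** MAX_FOLD are fold actions; CONDFOLD-style folding maps a constant
** guard outcome to FAILFOLD (always fails) or DROPFOLD (eliminated). */
typedef uint32_t Ref;
enum { NEXTFOLD, RETRYFOLD, KINTFOLD, FAILFOLD, DROPFOLD, MAX_FOLD };

/* Hypothetical rule: constant-fold an EQ guard on two integer constants. */
static Ref fold_eq_kint(int32_t k1, int32_t k2)
{
  return (Ref)FAILFOLD + (Ref)(k1 == k2);  /* Same shape as CONDFOLD(cond). */
}

int main(void)
{
  printf("EQ 3,3 -> %s\n", fold_eq_kint(3, 3) == DROPFOLD ?
         "guard eliminated" : "guard always fails");
  printf("EQ 3,4 -> %s\n", fold_eq_kint(3, 4) == DROPFOLD ?
         "guard eliminated" : "guard always fails");
  return 0;
}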
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_jit.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_jit.h
deleted file mode 100644
index 3f38d28..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_jit.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
-** Common definitions for the JIT compiler.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_JIT_H
-#define _LJ_JIT_H
-
-#include "lj_obj.h"
-#include "lj_ir.h"
-
-/* JIT engine flags. */
-#define JIT_F_ON 0x00000001
-
-/* CPU-specific JIT engine flags. */
-#if LJ_TARGET_X86ORX64
-#define JIT_F_CMOV 0x00000010
-#define JIT_F_SSE2 0x00000020
-#define JIT_F_SSE3 0x00000040
-#define JIT_F_SSE4_1 0x00000080
-#define JIT_F_P4 0x00000100
-#define JIT_F_PREFER_IMUL 0x00000200
-#define JIT_F_SPLIT_XMM 0x00000400
-#define JIT_F_LEA_AGU 0x00000800
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_CMOV
-#define JIT_F_CPUSTRING "\4CMOV\4SSE2\4SSE3\6SSE4.1\2P4\3AMD\2K8\4ATOM"
-#elif LJ_TARGET_ARM
-#define JIT_F_ARMV6_ 0x00000010
-#define JIT_F_ARMV6T2_ 0x00000020
-#define JIT_F_ARMV7 0x00000040
-#define JIT_F_VFPV2 0x00000080
-#define JIT_F_VFPV3 0x00000100
-
-#define JIT_F_ARMV6 (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7)
-#define JIT_F_ARMV6T2 (JIT_F_ARMV6T2_|JIT_F_ARMV7)
-#define JIT_F_VFP (JIT_F_VFPV2|JIT_F_VFPV3)
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_ARMV6_
-#define JIT_F_CPUSTRING "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
-#elif LJ_TARGET_PPC
-#define JIT_F_SQRT 0x00000010
-#define JIT_F_ROUND 0x00000020
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_SQRT
-#define JIT_F_CPUSTRING "\4SQRT\5ROUND"
-#elif LJ_TARGET_MIPS
-#define JIT_F_MIPS32R2 0x00000010
-
-/* Names for the CPU-specific flags. Must match the order above. */
-#define JIT_F_CPU_FIRST JIT_F_MIPS32R2
-#define JIT_F_CPUSTRING "\010MIPS32R2"
-#else
-#define JIT_F_CPU_FIRST 0
-#define JIT_F_CPUSTRING ""
-#endif
-
-/* Optimization flags. */
-#define JIT_F_OPT_MASK 0x0fff0000
-
-#define JIT_F_OPT_FOLD 0x00010000
-#define JIT_F_OPT_CSE 0x00020000
-#define JIT_F_OPT_DCE 0x00040000
-#define JIT_F_OPT_FWD 0x00080000
-#define JIT_F_OPT_DSE 0x00100000
-#define JIT_F_OPT_NARROW 0x00200000
-#define JIT_F_OPT_LOOP 0x00400000
-#define JIT_F_OPT_ABC 0x00800000
-#define JIT_F_OPT_SINK 0x01000000
-#define JIT_F_OPT_FUSE 0x02000000
-
-/* Optimizations names for -O. Must match the order above. */
-#define JIT_F_OPT_FIRST JIT_F_OPT_FOLD
-#define JIT_F_OPTSTRING \
- "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
-
-/* Optimization levels set a fixed combination of flags. */
-#define JIT_F_OPT_0 0
-#define JIT_F_OPT_1 (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
-#define JIT_F_OPT_2 (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
-#define JIT_F_OPT_3 (JIT_F_OPT_2|\
- JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
-#define JIT_F_OPT_DEFAULT JIT_F_OPT_3
-
-#if LJ_TARGET_WINDOWS || LJ_64
-/* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
-#define JIT_P_sizemcode_DEFAULT 64
-#else
-/* Could go as low as 4K, but the mmap() overhead would be rather high. */
-#define JIT_P_sizemcode_DEFAULT 32
-#endif
-
-/* Optimization parameters and their defaults. Length is a char in octal! */
-#define JIT_PARAMDEF(_) \
- _(\010, maxtrace, 1000) /* Max. # of traces in cache. */ \
- _(\011, maxrecord, 4000) /* Max. # of recorded IR instructions. */ \
- _(\012, maxirconst, 500) /* Max. # of IR constants of a trace. */ \
- _(\007, maxside, 100) /* Max. # of side traces of a root trace. */ \
- _(\007, maxsnap, 500) /* Max. # of snapshots for a trace. */ \
- \
- _(\007, hotloop, 56) /* # of iter. to detect a hot loop/call. */ \
- _(\007, hotexit, 10) /* # of taken exits to start a side trace. */ \
- _(\007, tryside, 4) /* # of attempts to compile a side trace. */ \
- \
- _(\012, instunroll, 4) /* Max. unroll for instable loops. */ \
- _(\012, loopunroll, 15) /* Max. unroll for loop ops in side traces. */ \
- _(\012, callunroll, 3) /* Max. unroll for recursive calls. */ \
- _(\011, recunroll, 2) /* Min. unroll for true recursion. */ \
- \
- /* Size of each machine code area (in KBytes). */ \
- _(\011, sizemcode, JIT_P_sizemcode_DEFAULT) \
- /* Max. total size of all machine code areas (in KBytes). */ \
- _(\010, maxmcode, 512) \
- /* End of list. */
-
-enum {
-#define JIT_PARAMENUM(len, name, value) JIT_P_##name,
-JIT_PARAMDEF(JIT_PARAMENUM)
-#undef JIT_PARAMENUM
- JIT_P__MAX
-};
-
-#define JIT_PARAMSTR(len, name, value) #len #name
-#define JIT_P_STRING JIT_PARAMDEF(JIT_PARAMSTR)
-
-/* Trace compiler state. */
-typedef enum {
- LJ_TRACE_IDLE, /* Trace compiler idle. */
- LJ_TRACE_ACTIVE = 0x10,
- LJ_TRACE_RECORD, /* Bytecode recording active. */
- LJ_TRACE_START, /* New trace started. */
- LJ_TRACE_END, /* End of trace. */
- LJ_TRACE_ASM, /* Assemble trace. */
- LJ_TRACE_ERR /* Trace aborted with error. */
-} TraceState;
-
-/* Post-processing action. */
-typedef enum {
- LJ_POST_NONE, /* No action. */
- LJ_POST_FIXCOMP, /* Fixup comparison and emit pending guard. */
- LJ_POST_FIXGUARD, /* Fixup and emit pending guard. */
- LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
- LJ_POST_FIXBOOL, /* Fixup boolean result. */
- LJ_POST_FIXCONST, /* Fixup constant results. */
- LJ_POST_FFRETRY /* Suppress recording of retried fast functions. */
-} PostProc;
-
-/* Machine code type. */
-#if LJ_TARGET_X86ORX64
-typedef uint8_t MCode;
-#else
-typedef uint32_t MCode;
-#endif
-
-/* Linked list of MCode areas. */
-typedef struct MCLink {
- MCode *next; /* Next area. */
- size_t size; /* Size of current area. */
-} MCLink;
-
-/* Stack snapshot header. */
-typedef struct SnapShot {
- uint16_t mapofs; /* Offset into snapshot map. */
- IRRef1 ref; /* First IR ref for this snapshot. */
- uint8_t nslots; /* Number of valid slots. */
- uint8_t topslot; /* Maximum frame extent. */
- uint8_t nent; /* Number of compressed entries. */
- uint8_t count; /* Count of taken exits for this snapshot. */
-} SnapShot;
-
-#define SNAPCOUNT_DONE 255 /* Already compiled and linked a side trace. */
-
-/* Compressed snapshot entry. */
-typedef uint32_t SnapEntry;
-
-#define SNAP_FRAME 0x010000 /* Frame slot. */
-#define SNAP_CONT 0x020000 /* Continuation slot. */
-#define SNAP_NORESTORE 0x040000 /* No need to restore slot. */
-#define SNAP_SOFTFPNUM 0x080000 /* Soft-float number. */
-LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
-LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
-
-#define SNAP(slot, flags, ref) (((SnapEntry)(slot) << 24) + (flags) + (ref))
-#define SNAP_TR(slot, tr) \
- (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
-#define SNAP_MKPC(pc) ((SnapEntry)u32ptr(pc))
-#define SNAP_MKFTSZ(ftsz) ((SnapEntry)(ftsz))
-#define snap_ref(sn) ((sn) & 0xffff)
-#define snap_slot(sn) ((BCReg)((sn) >> 24))
-#define snap_isframe(sn) ((sn) & SNAP_FRAME)
-#define snap_pc(sn) ((const BCIns *)(uintptr_t)(sn))
-#define snap_setref(sn, ref) (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
-
-/* Snapshot and exit numbers. */
-typedef uint32_t SnapNo;
-typedef uint32_t ExitNo;
-
-/* Trace number. */
-typedef uint32_t TraceNo; /* Used to pass around trace numbers. */
-typedef uint16_t TraceNo1; /* Stored trace number. */
-
-/* Type of link. ORDER LJ_TRLINK */
-typedef enum {
- LJ_TRLINK_NONE, /* Incomplete trace. No link, yet. */
- LJ_TRLINK_ROOT, /* Link to other root trace. */
- LJ_TRLINK_LOOP, /* Loop to same trace. */
- LJ_TRLINK_TAILREC, /* Tail-recursion. */
- LJ_TRLINK_UPREC, /* Up-recursion. */
- LJ_TRLINK_DOWNREC, /* Down-recursion. */
- LJ_TRLINK_INTERP, /* Fallback to interpreter. */
- LJ_TRLINK_RETURN /* Return to interpreter. */
-} TraceLink;
-
-/* Trace object. */
-typedef struct GCtrace {
- GCHeader;
- uint8_t topslot; /* Top stack slot already checked to be allocated. */
- uint8_t linktype; /* Type of link. */
- IRRef nins; /* Next IR instruction. Biased with REF_BIAS. */
- GCRef gclist;
- IRIns *ir; /* IR instructions/constants. Biased with REF_BIAS. */
- IRRef nk; /* Lowest IR constant. Biased with REF_BIAS. */
- uint16_t nsnap; /* Number of snapshots. */
- uint16_t nsnapmap; /* Number of snapshot map elements. */
- SnapShot *snap; /* Snapshot array. */
- SnapEntry *snapmap; /* Snapshot map. */
- GCRef startpt; /* Starting prototype. */
- MRef startpc; /* Bytecode PC of starting instruction. */
- BCIns startins; /* Original bytecode of starting instruction. */
- MSize szmcode; /* Size of machine code. */
- MCode *mcode; /* Start of machine code. */
- MSize mcloop; /* Offset of loop start in machine code. */
- uint16_t nchild; /* Number of child traces (root trace only). */
- uint16_t spadjust; /* Stack pointer adjustment (offset in bytes). */
- TraceNo1 traceno; /* Trace number. */
- TraceNo1 link; /* Linked trace (or self for loops). */
- TraceNo1 root; /* Root trace of side trace (or 0 for root traces). */
- TraceNo1 nextroot; /* Next root trace for same prototype. */
- TraceNo1 nextside; /* Next side trace of same root trace. */
- uint8_t sinktags; /* Trace has SINK tags. */
- uint8_t unused1;
-#ifdef LUAJIT_USE_GDBJIT
- void *gdbjit_entry; /* GDB JIT entry. */
-#endif
-} GCtrace;
-
-#define gco2trace(o) check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
-#define traceref(J, n) \
- check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
-
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
-
-static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
-{
- if (snap+1 == &T->snap[T->nsnap])
- return T->nsnapmap;
- else
- return (snap+1)->mapofs;
-}
-
-/* Round-robin penalty cache for bytecodes leading to aborted traces. */
-typedef struct HotPenalty {
- MRef pc; /* Starting bytecode PC. */
- uint16_t val; /* Penalty value, i.e. hotcount start. */
- uint16_t reason; /* Abort reason (really TraceErr). */
-} HotPenalty;
-
-#define PENALTY_SLOTS 64 /* Penalty cache slot. Must be a power of 2. */
-#define PENALTY_MIN (36*2) /* Minimum penalty value. */
-#define PENALTY_MAX 60000 /* Maximum penalty value. */
-#define PENALTY_RNDBITS 4 /* # of random bits to add to penalty value. */
-
-/* Round-robin backpropagation cache for narrowing conversions. */
-typedef struct BPropEntry {
- IRRef1 key; /* Key: original reference. */
- IRRef1 val; /* Value: reference after conversion. */
- IRRef mode; /* Mode for this entry (currently IRCONV_*). */
-} BPropEntry;
-
-/* Number of slots for the backpropagation cache. Must be a power of 2. */
-#define BPROP_SLOTS 16
-
-/* Scalar evolution analysis cache. */
-typedef struct ScEvEntry {
- MRef pc; /* Bytecode PC of FORI. */
- IRRef1 idx; /* Index reference. */
- IRRef1 start; /* Constant start reference. */
- IRRef1 stop; /* Constant stop reference. */
- IRRef1 step; /* Constant step reference. */
- IRType1 t; /* Scalar type. */
- uint8_t dir; /* Direction. 1: +, 0: -. */
-} ScEvEntry;
-
-/* 128 bit SIMD constants. */
-enum {
- LJ_KSIMD_ABS,
- LJ_KSIMD_NEG,
- LJ_KSIMD__MAX
-};
-
-/* Get 16 byte aligned pointer to SIMD constant. */
-#define LJ_KSIMD(J, n) \
- ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
-
-/* Set/reset flag to activate the SPLIT pass for the current trace. */
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
-#define lj_needsplit(J) (J->needsplit = 1)
-#define lj_resetsplit(J) (J->needsplit = 0)
-#else
-#define lj_needsplit(J) UNUSED(J)
-#define lj_resetsplit(J) UNUSED(J)
-#endif
-
-/* Fold state is used to fold instructions on-the-fly. */
-typedef struct FoldState {
- IRIns ins; /* Currently emitted instruction. */
- IRIns left; /* Instruction referenced by left operand. */
- IRIns right; /* Instruction referenced by right operand. */
-} FoldState;
-
-/* JIT compiler state. */
-typedef struct jit_State {
- GCtrace cur; /* Current trace. */
-
- lua_State *L; /* Current Lua state. */
- const BCIns *pc; /* Current PC. */
- GCfunc *fn; /* Current function. */
- GCproto *pt; /* Current prototype. */
- TRef *base; /* Current frame base, points into J->slots. */
-
- uint32_t flags; /* JIT engine flags. */
- BCReg maxslot; /* Relative to baseslot. */
- BCReg baseslot; /* Current frame base, offset into J->slots. */
-
- uint8_t mergesnap; /* Allowed to merge with next snapshot. */
- uint8_t needsnap; /* Need snapshot before recording next bytecode. */
- IRType1 guardemit; /* Accumulated IRT_GUARD for emitted instructions. */
- uint8_t bcskip; /* Number of bytecode instructions to skip. */
-
- FoldState fold; /* Fold state. */
-
- const BCIns *bc_min; /* Start of allowed bytecode range for root trace. */
- MSize bc_extent; /* Extent of the range. */
-
- TraceState state; /* Trace compiler state. */
-
- int32_t instunroll; /* Unroll counter for instable loops. */
- int32_t loopunroll; /* Unroll counter for loop ops in side traces. */
- int32_t tailcalled; /* Number of successive tailcalls. */
- int32_t framedepth; /* Current frame depth. */
- int32_t retdepth; /* Return frame depth (count of RETF). */
-
- MRef k64; /* Pointer to chained array of 64 bit constants. */
- TValue ksimd[LJ_KSIMD__MAX*2+1]; /* 16 byte aligned SIMD constants. */
-
- IRIns *irbuf; /* Temp. IR instruction buffer. Biased with REF_BIAS. */
- IRRef irtoplim; /* Upper limit of instruction buffer (biased). */
- IRRef irbotlim; /* Lower limit of instruction buffer (biased). */
- IRRef loopref; /* Last loop reference or ref of final LOOP (or 0). */
-
- MSize sizesnap; /* Size of temp. snapshot buffer. */
- SnapShot *snapbuf; /* Temp. snapshot buffer. */
- SnapEntry *snapmapbuf; /* Temp. snapshot map buffer. */
- MSize sizesnapmap; /* Size of temp. snapshot map buffer. */
-
- PostProc postproc; /* Required post-processing after execution. */
-#if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
- int needsplit; /* Need SPLIT pass. */
-#endif
-
- GCRef *trace; /* Array of traces. */
- TraceNo freetrace; /* Start of scan for next free trace. */
- MSize sizetrace; /* Size of trace array. */
-
- IRRef1 chain[IR__MAX]; /* IR instruction skip-list chain anchors. */
- TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA]; /* Stack slot map. */
-
- int32_t param[JIT_P__MAX]; /* JIT engine parameters. */
-
- MCode *exitstubgroup[LJ_MAX_EXITSTUBGR]; /* Exit stub group addresses. */
-
- HotPenalty penalty[PENALTY_SLOTS]; /* Penalty slots. */
- uint32_t penaltyslot; /* Round-robin index into penalty slots. */
- uint32_t prngstate; /* PRNG state. */
-
- BPropEntry bpropcache[BPROP_SLOTS]; /* Backpropagation cache slots. */
- uint32_t bpropslot; /* Round-robin index into bpropcache slots. */
-
- ScEvEntry scev; /* Scalar evolution analysis cache slots. */
-
- const BCIns *startpc; /* Bytecode PC of starting instruction. */
- TraceNo parent; /* Parent of current side trace (0 for root traces). */
- ExitNo exitno; /* Exit number in parent of current side trace. */
-
- BCIns *patchpc; /* PC for pending re-patch. */
- BCIns patchins; /* Instruction for pending re-patch. */
-
- int mcprot; /* Protection of current mcode area. */
- MCode *mcarea; /* Base of current mcode area. */
- MCode *mctop; /* Top of current mcode area. */
- MCode *mcbot; /* Bottom of current mcode area. */
- size_t szmcarea; /* Size of current mcode area. */
- size_t szallmcarea; /* Total size of all allocated mcode areas. */
-
- TValue errinfo; /* Additional info element for trace errors. */
-}
-#if LJ_TARGET_ARM
-LJ_ALIGN(16) /* For DISPATCH-relative addresses in assembler part. */
-#endif
-jit_State;
-
-/* Trivial PRNG e.g. used for penalty randomization. */
-static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
-{
- /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
- J->prngstate = J->prngstate * 1103515245 + 12345;
- return J->prngstate >> (32-bits);
-}
-
-#endif
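The JIT_PARAMDEF comment above warns that "Length is a char in octal!": every name in JIT_F_CPUSTRING, JIT_F_OPTSTRING and JIT_P_STRING is stored as a single length byte followed by that many characters, so the whole table packs into one string literal with no separators. The standalone decoder below is not part of the original header; the walking loop is hypothetical (the real consumers live outside this header), but the string literal is copied verbatim from JIT_F_OPTSTRING.

#include <stdio.h>

/* Standalone sketch (not from the deleted header): walk a length-prefixed
** name string of the kind defined above. The literal is JIT_F_OPTSTRING;
** the decoding loop is an illustrative assumption. */
#define DEMO_OPTSTRING \
  "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"

int main(void)
{
  const char *p = DEMO_OPTSTRING;
  int idx = 0;
  while (*p) {
    int len = *p++;                     /* First byte is the name length. */
    printf("opt bit %2d: %.*s\n", idx++, len, p);
    p += len;
  }
  return 0;
}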
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lex.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lex.h
deleted file mode 100644
index 41c03f9..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lex.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
-** Lexical analyzer.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_LEX_H
-#define _LJ_LEX_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-#include "lj_err.h"
-
-/* Lua lexer tokens. */
-#define TKDEF(_, __) \
- _(and) _(break) _(do) _(else) _(elseif) _(end) _(false) \
- _(for) _(function) _(goto) _(if) _(in) _(local) _(nil) _(not) _(or) \
- _(repeat) _(return) _(then) _(true) _(until) _(while) \
- __(concat, ..) __(dots, ...) __(eq, ==) __(ge, >=) __(le, <=) __(ne, ~=) \
- __(label, ::) __(number, <number>) __(name, <name>) __(string, <string>) \
- __(eof, <eof>)
-
-enum {
- TK_OFS = 256,
-#define TKENUM1(name) TK_##name,
-#define TKENUM2(name, sym) TK_##name,
-TKDEF(TKENUM1, TKENUM2)
-#undef TKENUM1
-#undef TKENUM2
- TK_RESERVED = TK_while - TK_OFS
-};
-
-typedef int LexToken;
-
-/* Combined bytecode ins/line. Only used during bytecode generation. */
-typedef struct BCInsLine {
- BCIns ins; /* Bytecode instruction. */
- BCLine line; /* Line number for this bytecode. */
-} BCInsLine;
-
-/* Info for local variables. Only used during bytecode generation. */
-typedef struct VarInfo {
- GCRef name; /* Local variable name or goto/label name. */
- BCPos startpc; /* First point where the local variable is active. */
- BCPos endpc; /* First point where the local variable is dead. */
- uint8_t slot; /* Variable slot. */
- uint8_t info; /* Variable/goto/label info. */
-} VarInfo;
-
-/* Lua lexer state. */
-typedef struct LexState {
- struct FuncState *fs; /* Current FuncState. Defined in lj_parse.c. */
- struct lua_State *L; /* Lua state. */
- TValue tokenval; /* Current token value. */
- TValue lookaheadval; /* Lookahead token value. */
- int current; /* Current character (charint). */
- LexToken token; /* Current token. */
- LexToken lookahead; /* Lookahead token. */
- MSize n; /* Bytes left in input buffer. */
- const char *p; /* Current position in input buffer. */
- SBuf sb; /* String buffer for tokens. */
- lua_Reader rfunc; /* Reader callback. */
- void *rdata; /* Reader callback data. */
- BCLine linenumber; /* Input line counter. */
- BCLine lastline; /* Line of last token. */
- GCstr *chunkname; /* Current chunk name (interned string). */
- const char *chunkarg; /* Chunk name argument. */
- const char *mode; /* Allow loading bytecode (b) and/or source text (t). */
- VarInfo *vstack; /* Stack for names and extents of local variables. */
- MSize sizevstack; /* Size of variable stack. */
- MSize vtop; /* Top of variable stack. */
- BCInsLine *bcstack; /* Stack for bytecode instructions/line numbers. */
- MSize sizebcstack; /* Size of bytecode stack. */
- uint32_t level; /* Syntactical nesting level. */
-} LexState;
-
-LJ_FUNC int lj_lex_setup(lua_State *L, LexState *ls);
-LJ_FUNC void lj_lex_cleanup(lua_State *L, LexState *ls);
-LJ_FUNC void lj_lex_next(LexState *ls);
-LJ_FUNC LexToken lj_lex_lookahead(LexState *ls);
-LJ_FUNC const char *lj_lex_token2str(LexState *ls, LexToken token);
-LJ_FUNC_NORET void lj_lex_error(LexState *ls, LexToken token, ErrMsg em, ...);
-LJ_FUNC void lj_lex_init(lua_State *L);
-
-#endif
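TKDEF above takes two callbacks, one for word tokens and one for symbolic tokens with a separate spelling, so a single list can be expanded into the TK_* enum (as shown) and, with a different pair of callbacks, into printable names such as the ones lj_lex_token2str() returns. The reduced sketch below is not part of the original header; DEMO_TKDEF, its token list and the dtk_name table are hypothetical, and the name-table expansion is only one plausible way to back a token2str-style lookup.

#include <stdio.h>

/* Standalone sketch (not from the deleted header): a two-callback token
** list expanded once into an enum and once into a name table, mirroring
** the TKDEF/TKENUM1/TKENUM2 pattern above. */
#define DEMO_TKDEF(_, __) \
  _(and) _(or) _(not) \
  __(eq, ==) __(ne, ~=) __(eof, <eof>)

enum {
  DTK_OFS = 256,
#define DTKENUM1(name)      DTK_##name,
#define DTKENUM2(name, sym) DTK_##name,
DEMO_TKDEF(DTKENUM1, DTKENUM2)
#undef DTKENUM1
#undef DTKENUM2
  DTK__MAX
};

static const char *const dtk_name[] = {
#define DTKSTR1(name)      #name,
#define DTKSTR2(name, sym) #sym,
DEMO_TKDEF(DTKSTR1, DTKSTR2)
#undef DTKSTR1
#undef DTKSTR2
};

int main(void)
{
  int tok;
  for (tok = DTK_OFS + 1; tok < DTK__MAX; tok++)
    printf("token %d: %s\n", tok, dtk_name[tok - DTK_OFS - 1]);
  return 0;
}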
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lib.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lib.h
deleted file mode 100644
index aa87407..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_lib.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
-** Library function support.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_LIB_H
-#define _LJ_LIB_H
-
-#include "lj_obj.h"
-
-/*
-** A fallback handler is called by the assembler VM if the fast path fails:
-**
-** - too few arguments: unrecoverable.
-** - wrong argument type: recoverable, if coercion succeeds.
-** - bad argument value: unrecoverable.
-** - stack overflow: recoverable, if stack reallocation succeeds.
-** - extra handling: recoverable.
-**
-** The unrecoverable cases throw an error with lj_err_arg(), lj_err_argtype(),
-** lj_err_caller() or lj_err_callermsg().
-** The recoverable cases return 0 or the number of results + 1.
-** The assembler VM retries the fast path only if 0 is returned.
-** This time the fallback must not be called again or it gets stuck in a loop.
-*/
-
-/* Return values from fallback handler. */
-#define FFH_RETRY 0
-#define FFH_UNREACHABLE FFH_RETRY
-#define FFH_RES(n) ((n)+1)
-#define FFH_TAILCALL (-1)
-
-LJ_FUNC TValue *lj_lib_checkany(lua_State *L, int narg);
-LJ_FUNC GCstr *lj_lib_checkstr(lua_State *L, int narg);
-LJ_FUNC GCstr *lj_lib_optstr(lua_State *L, int narg);
-#if LJ_DUALNUM
-LJ_FUNC void lj_lib_checknumber(lua_State *L, int narg);
-#else
-#define lj_lib_checknumber(L, narg) lj_lib_checknum((L), (narg))
-#endif
-LJ_FUNC lua_Number lj_lib_checknum(lua_State *L, int narg);
-LJ_FUNC int32_t lj_lib_checkint(lua_State *L, int narg);
-LJ_FUNC int32_t lj_lib_optint(lua_State *L, int narg, int32_t def);
-LJ_FUNC int32_t lj_lib_checkbit(lua_State *L, int narg);
-LJ_FUNC GCfunc *lj_lib_checkfunc(lua_State *L, int narg);
-LJ_FUNC GCtab *lj_lib_checktab(lua_State *L, int narg);
-LJ_FUNC GCtab *lj_lib_checktabornil(lua_State *L, int narg);
-LJ_FUNC int lj_lib_checkopt(lua_State *L, int narg, int def, const char *lst);
-
-/* Avoid including lj_frame.h. */
-#define lj_lib_upvalue(L, n) \
- (&gcref((L->base-1)->fr.func)->fn.c.upvalue[(n)-1])
-
-#if LJ_TARGET_WINDOWS
-#define lj_lib_checkfpu(L) \
- do { setnumV(L->top++, (lua_Number)1437217655); \
- if (lua_tointeger(L, -1) != 1437217655) lj_err_caller(L, LJ_ERR_BADFPU); \
- L->top--; } while (0)
-#else
-#define lj_lib_checkfpu(L) UNUSED(L)
-#endif
-
-/* Push internal function on the stack. */
-static LJ_AINLINE void lj_lib_pushcc(lua_State *L, lua_CFunction f,
- int id, int n)
-{
- GCfunc *fn;
- lua_pushcclosure(L, f, n);
- fn = funcV(L->top-1);
- fn->c.ffid = (uint8_t)id;
- setmref(fn->c.pc, &G(L)->bc_cfunc_int);
-}
-
-#define lj_lib_pushcf(L, fn, id) (lj_lib_pushcc(L, (fn), (id), 0))
-
-/* Library function declarations. Scanned by buildvm. */
-#define LJLIB_CF(name) static int lj_cf_##name(lua_State *L)
-#define LJLIB_ASM(name) static int lj_ffh_##name(lua_State *L)
-#define LJLIB_ASM_(name)
-#define LJLIB_SET(name)
-#define LJLIB_PUSH(arg)
-#define LJLIB_REC(handler)
-#define LJLIB_NOREGUV
-#define LJLIB_NOREG
-
-#define LJ_LIB_REG(L, regname, name) \
- lj_lib_register(L, regname, lj_lib_init_##name, lj_lib_cf_##name)
-
-LJ_FUNC void lj_lib_register(lua_State *L, const char *libname,
- const uint8_t *init, const lua_CFunction *cf);
-
-/* Library init data tags. */
-#define LIBINIT_LENMASK 0x3f
-#define LIBINIT_TAGMASK 0xc0
-#define LIBINIT_CF 0x00
-#define LIBINIT_ASM 0x40
-#define LIBINIT_ASM_ 0x80
-#define LIBINIT_STRING 0xc0
-#define LIBINIT_MAXSTR 0x39
-#define LIBINIT_SET 0xfa
-#define LIBINIT_NUMBER 0xfb
-#define LIBINIT_COPY 0xfc
-#define LIBINIT_LASTCL 0xfd
-#define LIBINIT_FFID 0xfe
-#define LIBINIT_END 0xff
-
-/* Exported library functions. */
-
-typedef struct RandomState RandomState;
-LJ_FUNC uint64_t LJ_FASTCALL lj_math_random_step(RandomState *rs);
-
-#endif
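The comment block at the top of lj_lib.h spells out the fallback contract: the assembler VM calls the handler when the fast path fails, and the handler either throws, returns FFH_RETRY (0) after fixing things up so the retried fast path can succeed, or returns the number of results plus one. The self-contained model below is not part of the original header and exercises only that return-value discipline; DemoArg, demo_tonumber_fallback() and the string-to-number coercion are hypothetical and do not reflect any particular LuaJIT fast function.

#include <stdio.h>
#include <stdlib.h>

/* Standalone sketch (not from the deleted header): model of the fallback
** return contract -- FFH_RETRY asks the caller to retry the fast path,
** FFH_RES(n) reports n results. Everything else here is made up. */
#define FFH_RETRY  0
#define FFH_RES(n) ((n)+1)

typedef struct { const char *strarg; double numarg; int isnum; } DemoArg;

/* The "fast path" wants a number; the fallback coerces a string in place
** so that a retry can succeed, mirroring the recoverable cases above. */
static int demo_tonumber_fallback(DemoArg *a)
{
  if (!a->isnum && a->strarg != NULL) {
    a->numarg = strtod(a->strarg, NULL);
    a->isnum = 1;
    return FFH_RETRY;        /* Coercion done: let the fast path retry. */
  }
  return FFH_RES(1);         /* Handled here: one result produced. */
}

int main(void)
{
  DemoArg a = { "42.5", 0.0, 0 };
  int st = demo_tonumber_fallback(&a);
  printf("first call:  %s (arg coerced to %g)\n",
         st == FFH_RETRY ? "retry fast path" : "results returned", a.numarg);
  st = demo_tonumber_fallback(&a);
  printf("second call: %s\n",
         st == FFH_RETRY ? "retry fast path" : "results returned");
  return 0;
}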
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_mcode.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_mcode.h
deleted file mode 100644
index f0847e9..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_mcode.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-** Machine code management.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_MCODE_H
-#define _LJ_MCODE_H
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT || LJ_HASFFI
-LJ_FUNC void lj_mcode_sync(void *start, void *end);
-#endif
-
-#if LJ_HASJIT
-
-#include "lj_jit.h"
-
-LJ_FUNC void lj_mcode_free(jit_State *J);
-LJ_FUNC MCode *lj_mcode_reserve(jit_State *J, MCode **lim);
-LJ_FUNC void lj_mcode_commit(jit_State *J, MCode *m);
-LJ_FUNC void lj_mcode_abort(jit_State *J);
-LJ_FUNC MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish);
-LJ_FUNC_NORET void lj_mcode_limiterr(jit_State *J, size_t need);
-
-#define lj_mcode_commitbot(J, m) (J->mcbot = (m))
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_meta.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_meta.h
deleted file mode 100644
index e061d99..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_meta.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-** Metamethod handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_META_H
-#define _LJ_META_H
-
-#include "lj_obj.h"
-
-/* Metamethod handling */
-LJ_FUNC void lj_meta_init(lua_State *L);
-LJ_FUNC cTValue *lj_meta_cache(GCtab *mt, MMS mm, GCstr *name);
-LJ_FUNC cTValue *lj_meta_lookup(lua_State *L, cTValue *o, MMS mm);
-#if LJ_HASFFI
-LJ_FUNC int lj_meta_tailcall(lua_State *L, cTValue *tv);
-#endif
-
-#define lj_meta_fastg(g, mt, mm) \
- ((mt) == NULL ? NULL : ((mt)->nomm & (1u<<(mm))) ? NULL : \
- lj_meta_cache(mt, mm, mmname_str(g, mm)))
-#define lj_meta_fast(L, mt, mm) lj_meta_fastg(G(L), mt, mm)
-
-/* C helpers for some instructions, called from assembler VM. */
-LJ_FUNCA cTValue *lj_meta_tget(lua_State *L, cTValue *o, cTValue *k);
-LJ_FUNCA TValue *lj_meta_tset(lua_State *L, cTValue *o, cTValue *k);
-LJ_FUNCA TValue *lj_meta_arith(lua_State *L, TValue *ra, cTValue *rb,
- cTValue *rc, BCReg op);
-LJ_FUNCA TValue *lj_meta_cat(lua_State *L, TValue *top, int left);
-LJ_FUNCA TValue * LJ_FASTCALL lj_meta_len(lua_State *L, cTValue *o);
-LJ_FUNCA TValue *lj_meta_equal(lua_State *L, GCobj *o1, GCobj *o2, int ne);
-LJ_FUNCA TValue * LJ_FASTCALL lj_meta_equal_cd(lua_State *L, BCIns ins);
-LJ_FUNCA TValue *lj_meta_comp(lua_State *L, cTValue *o1, cTValue *o2, int op);
-LJ_FUNCA void lj_meta_call(lua_State *L, TValue *func, TValue *top);
-LJ_FUNCA void LJ_FASTCALL lj_meta_for(lua_State *L, TValue *o);
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_obj.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_obj.h
deleted file mode 100644
index 2ee526c..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_obj.h
+++ /dev/null
@@ -1,864 +0,0 @@
-/*
-** LuaJIT VM tags, values and objects.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-**
-** Portions taken verbatim or adapted from the Lua interpreter.
-** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
-*/
-
-#ifndef _LJ_OBJ_H
-#define _LJ_OBJ_H
-
-#include "lua.h"
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* -- Memory references (32 bit address space) ---------------------------- */
-
-/* Memory size. */
-typedef uint32_t MSize;
-
-/* Memory reference */
-typedef struct MRef {
- uint32_t ptr32; /* Pseudo 32 bit pointer. */
-} MRef;
-
-#define mref(r, t) ((t *)(void *)(uintptr_t)(r).ptr32)
-
-#define setmref(r, p) ((r).ptr32 = (uint32_t)(uintptr_t)(void *)(p))
-#define setmrefr(r, v) ((r).ptr32 = (v).ptr32)
-
-/* -- GC object references (32 bit address space) ------------------------- */
-
-/* GCobj reference */
-typedef struct GCRef {
- uint32_t gcptr32; /* Pseudo 32 bit pointer. */
-} GCRef;
-
-/* Common GC header for all collectable objects. */
-#define GCHeader GCRef nextgc; uint8_t marked; uint8_t gct
-/* This occupies 6 bytes, so use the next 2 bytes for non-32 bit fields. */
-
-#define gcref(r) ((GCobj *)(uintptr_t)(r).gcptr32)
-#define gcrefp(r, t) ((t *)(void *)(uintptr_t)(r).gcptr32)
-#define gcrefu(r) ((r).gcptr32)
-#define gcrefi(r) ((int32_t)(r).gcptr32)
-#define gcrefeq(r1, r2) ((r1).gcptr32 == (r2).gcptr32)
-#define gcnext(gc) (gcref((gc)->gch.nextgc))
-
-#define setgcref(r, gc) ((r).gcptr32 = (uint32_t)(uintptr_t)&(gc)->gch)
-#define setgcrefi(r, i) ((r).gcptr32 = (uint32_t)(i))
-#define setgcrefp(r, p) ((r).gcptr32 = (uint32_t)(uintptr_t)(p))
-#define setgcrefnull(r) ((r).gcptr32 = 0)
-#define setgcrefr(r, v) ((r).gcptr32 = (v).gcptr32)
-
-/* IMPORTANT NOTE:
-**
-** All uses of the setgcref* macros MUST be accompanied with a write barrier.
-**
-** This is to ensure the integrity of the incremental GC. The invariant
-** to preserve is that a black object never points to a white object.
-** I.e. never store a white object into a field of a black object.
-**
-** It's ok to LEAVE OUT the write barrier ONLY in the following cases:
-** - The source is not a GC object (NULL).
-** - The target is a GC root. I.e. everything in global_State.
-** - The target is a lua_State field (threads are never black).
-** - The target is a stack slot, see setgcV et al.
-** - The target is an open upvalue, i.e. pointing to a stack slot.
-** - The target is a newly created object (i.e. marked white). But make
-**   sure nothing invokes the GC in between.
-** - The target and the source are the same object (self-reference).
-** - The target already contains the object (e.g. moving elements around).
-**
-** The most common case is a store to a stack slot. All other cases where
-** a barrier has been omitted are annotated with a NOBARRIER comment.
-**
-** The same logic applies for stores to table slots (array part or hash
-** part). ALL uses of lj_tab_set* require a barrier for the stored value
-** *and* the stored key, based on the above rules. In practice this means
-** a barrier is needed if *either* of the key or value are a GC object.
-**
-** It's ok to LEAVE OUT the write barrier in the following special cases:
-** - The stored value is nil. The key doesn't matter because it's either
-** not resurrected or lj_tab_newkey() will take care of the key barrier.
-** - The key doesn't matter if the *previously* stored value is guaranteed
-** to be non-nil (because the key is kept alive in the table).
-** - The key doesn't matter if it's guaranteed not to be part of the table,
-** since lj_tab_newkey() takes care of the key barrier. This applies
-** trivially to new tables, but watch out for resurrected keys. Storing
-** a nil value leaves the key in the table!
-**
-** In case of doubt use lj_gc_anybarriert() as it's rather cheap. It's used
-** by the interpreter for all table stores.
-**
-** Note: In contrast to Lua's GC, LuaJIT's GC does *not* specially mark
-** dead keys in tables. The reference is left in, but it's guaranteed to
-** be never dereferenced as long as the value is nil. It's ok if the key is
-** freed or if any object subsequently gets the same address.
-**
-** Not destroying dead keys helps to keep key hash slots stable. This avoids
-** specialization back-off for HREFK when a value flips between nil and
-** non-nil and the GC gets in the way. It also allows safely hoisting
-** HREF/HREFK across GC steps. Dead keys are only removed if a table is
-** resized (i.e. by NEWREF) and xREF must not be CSEd across a resize.
-**
-** The trade-off is that a write barrier for tables must take the key into
-** account, too. Implicitly resurrecting the key by storing a non-nil value
-** may invalidate the incremental GC invariant.
-*/
-
-/* -- Common type definitions --------------------------------------------- */
-
-/* Types for handling bytecodes. Need this here, details in lj_bc.h. */
-typedef uint32_t BCIns; /* Bytecode instruction. */
-typedef uint32_t BCPos; /* Bytecode position. */
-typedef uint32_t BCReg; /* Bytecode register. */
-typedef int32_t BCLine; /* Bytecode line number. */
-
-/* Internal assembler functions. Never call these directly from C. */
-typedef void (*ASMFunction)(void);
-
-/* Resizable string buffer. Need this here, details in lj_str.h. */
-typedef struct SBuf {
- char *buf; /* String buffer base. */
- MSize n; /* String buffer length. */
- MSize sz; /* String buffer size. */
-} SBuf;
-
-/* -- Tags and values ----------------------------------------------------- */
-
-/* Frame link. */
-typedef union {
- int32_t ftsz; /* Frame type and size of previous frame. */
- MRef pcr; /* Overlaps PC for Lua frames. */
-} FrameLink;
-
-/* Tagged value. */
-typedef LJ_ALIGN(8) union TValue {
- uint64_t u64; /* 64 bit pattern overlaps number. */
- lua_Number n; /* Number object overlaps split tag/value object. */
- struct {
- LJ_ENDIAN_LOHI(
- union {
- GCRef gcr; /* GCobj reference (if any). */
- int32_t i; /* Integer value. */
- };
- , uint32_t it; /* Internal object tag. Must overlap MSW of number. */
- )
- };
- struct {
- LJ_ENDIAN_LOHI(
- GCRef func; /* Function for next frame (or dummy L). */
- , FrameLink tp; /* Link to previous frame. */
- )
- } fr;
- struct {
- LJ_ENDIAN_LOHI(
- uint32_t lo; /* Lower 32 bits of number. */
- , uint32_t hi; /* Upper 32 bits of number. */
- )
- } u32;
-} TValue;
-
-typedef const TValue cTValue;
-
-#define tvref(r) (mref(r, TValue))
-
-/* More external and GCobj tags for internal objects. */
-#define LAST_TT LUA_TTHREAD
-#define LUA_TPROTO (LAST_TT+1)
-#define LUA_TCDATA (LAST_TT+2)
-
-/* Internal object tags.
-**
-** Internal tags overlap the MSW of a number object (must be a double).
-** Interpreted as a double these are special NaNs. The FPU only generates
-** one type of NaN (0xfff8_0000_0000_0000). So MSWs > 0xfff80000 are available
-** for use as internal tags. Small negative numbers are used to shorten the
-** encoding of type comparisons (reg/mem against sign-ext. 8 bit immediate).
-**
-** ---MSW---.---LSW---
-** primitive types | itype | |
-** lightuserdata | itype | void * | (32 bit platforms)
-** lightuserdata |ffff| void * | (64 bit platforms, 47 bit pointers)
-** GC objects | itype | GCRef |
-** int (LJ_DUALNUM)| itype | int |
-** number -------double------
-**
-** ORDER LJ_T
-** Primitive types nil/false/true must be first, lightuserdata next.
-** GC objects are at the end, table/userdata must be lowest.
-** Also check lj_ir.h for similar ordering constraints.
-*/
-#define LJ_TNIL (~0u)
-#define LJ_TFALSE (~1u)
-#define LJ_TTRUE (~2u)
-#define LJ_TLIGHTUD (~3u)
-#define LJ_TSTR (~4u)
-#define LJ_TUPVAL (~5u)
-#define LJ_TTHREAD (~6u)
-#define LJ_TPROTO (~7u)
-#define LJ_TFUNC (~8u)
-#define LJ_TTRACE (~9u)
-#define LJ_TCDATA (~10u)
-#define LJ_TTAB (~11u)
-#define LJ_TUDATA (~12u)
-/* This is just the canonical number type used in some places. */
-#define LJ_TNUMX (~13u)
-
-/* Integers have itype == LJ_TISNUM, doubles have itype < LJ_TISNUM */
-#if LJ_64
-#define LJ_TISNUM 0xfffeffffu
-#else
-#define LJ_TISNUM LJ_TNUMX
-#endif
-#define LJ_TISTRUECOND LJ_TFALSE
-#define LJ_TISPRI LJ_TTRUE
-#define LJ_TISGCV (LJ_TSTR+1)
-#define LJ_TISTABUD LJ_TTAB
-
-/* -- String object ------------------------------------------------------- */
-
-/* String object header. String payload follows. */
-typedef struct GCstr {
- GCHeader;
- uint8_t reserved; /* Used by lexer for fast lookup of reserved words. */
- uint8_t unused;
- MSize hash; /* Hash of string. */
- MSize len; /* Size of string. */
-} GCstr;
-
-#define strref(r) (&gcref((r))->str)
-#define strdata(s) ((const char *)((s)+1))
-#define strdatawr(s) ((char *)((s)+1))
-#define strVdata(o) strdata(strV(o))
-#define sizestring(s) (sizeof(struct GCstr)+(s)->len+1)
-
-/* -- Userdata object ----------------------------------------------------- */
-
-/* Userdata object. Payload follows. */
-typedef struct GCudata {
- GCHeader;
- uint8_t udtype; /* Userdata type. */
- uint8_t unused2;
- GCRef env; /* Should be at same offset in GCfunc. */
- MSize len; /* Size of payload. */
- GCRef metatable; /* Must be at same offset in GCtab. */
- uint32_t align1; /* To force 8 byte alignment of the payload. */
-} GCudata;
-
-/* Userdata types. */
-enum {
- UDTYPE_USERDATA, /* Regular userdata. */
- UDTYPE_IO_FILE, /* I/O library FILE. */
- UDTYPE_FFI_CLIB, /* FFI C library namespace. */
- UDTYPE__MAX
-};
-
-#define uddata(u) ((void *)((u)+1))
-#define sizeudata(u) (sizeof(struct GCudata)+(u)->len)
-
-/* -- C data object ------------------------------------------------------- */
-
-/* C data object. Payload follows. */
-typedef struct GCcdata {
- GCHeader;
- uint16_t ctypeid; /* C type ID. */
-} GCcdata;
-
-/* Prepended to variable-sized or realigned C data objects. */
-typedef struct GCcdataVar {
- uint16_t offset; /* Offset to allocated memory (relative to GCcdata). */
- uint16_t extra; /* Extra space allocated (incl. GCcdata + GCcdatav). */
- MSize len; /* Size of payload. */
-} GCcdataVar;
-
-#define cdataptr(cd) ((void *)((cd)+1))
-#define cdataisv(cd) ((cd)->marked & 0x80)
-#define cdatav(cd) ((GCcdataVar *)((char *)(cd) - sizeof(GCcdataVar)))
-#define cdatavlen(cd) check_exp(cdataisv(cd), cdatav(cd)->len)
-#define sizecdatav(cd) (cdatavlen(cd) + cdatav(cd)->extra)
-#define memcdatav(cd) ((void *)((char *)(cd) - cdatav(cd)->offset))
-
-/* -- Prototype object ---------------------------------------------------- */
-
-#define SCALE_NUM_GCO ((int32_t)sizeof(lua_Number)/sizeof(GCRef))
-#define round_nkgc(n) (((n) + SCALE_NUM_GCO-1) & ~(SCALE_NUM_GCO-1))
-
-typedef struct GCproto {
- GCHeader;
- uint8_t numparams; /* Number of parameters. */
- uint8_t framesize; /* Fixed frame size. */
- MSize sizebc; /* Number of bytecode instructions. */
- GCRef gclist;
- MRef k; /* Split constant array (points to the middle). */
- MRef uv; /* Upvalue list. local slot|0x8000 or parent uv idx. */
- MSize sizekgc; /* Number of collectable constants. */
- MSize sizekn; /* Number of lua_Number constants. */
- MSize sizept; /* Total size including colocated arrays. */
- uint8_t sizeuv; /* Number of upvalues. */
- uint8_t flags; /* Miscellaneous flags (see below). */
- uint16_t trace; /* Anchor for chain of root traces. */
- /* ------ The following fields are for debugging/tracebacks only ------ */
- GCRef chunkname; /* Name of the chunk this function was defined in. */
- BCLine firstline; /* First line of the function definition. */
- BCLine numline; /* Number of lines for the function definition. */
- MRef lineinfo; /* Compressed map from bytecode ins. to source line. */
- MRef uvinfo; /* Upvalue names. */
- MRef varinfo; /* Names and compressed extents of local variables. */
-} GCproto;
-
-/* Flags for prototype. */
-#define PROTO_CHILD 0x01 /* Has child prototypes. */
-#define PROTO_VARARG 0x02 /* Vararg function. */
-#define PROTO_FFI 0x04 /* Uses BC_KCDATA for FFI datatypes. */
-#define PROTO_NOJIT 0x08 /* JIT disabled for this function. */
-#define PROTO_ILOOP 0x10 /* Patched bytecode with ILOOP etc. */
-/* Only used during parsing. */
-#define PROTO_HAS_RETURN 0x20 /* Already emitted a return. */
-#define PROTO_FIXUP_RETURN 0x40 /* Need to fixup emitted returns. */
-/* Top bits used for counting created closures. */
-#define PROTO_CLCOUNT 0x20 /* Base of saturating 3 bit counter. */
-#define PROTO_CLC_BITS 3
-#define PROTO_CLC_POLY (3*PROTO_CLCOUNT) /* Polymorphic threshold. */
-
-#define PROTO_UV_LOCAL 0x8000 /* Upvalue for local slot. */
-#define PROTO_UV_IMMUTABLE 0x4000 /* Immutable upvalue. */
-
-#define proto_kgc(pt, idx) \
- check_exp((uintptr_t)(intptr_t)(idx) >= (uintptr_t)-(intptr_t)(pt)->sizekgc, \
- gcref(mref((pt)->k, GCRef)[(idx)]))
-#define proto_knumtv(pt, idx) \
- check_exp((uintptr_t)(idx) < (pt)->sizekn, &mref((pt)->k, TValue)[(idx)])
-#define proto_bc(pt) ((BCIns *)((char *)(pt) + sizeof(GCproto)))
-#define proto_bcpos(pt, pc) ((BCPos)((pc) - proto_bc(pt)))
-#define proto_uv(pt) (mref((pt)->uv, uint16_t))
-
-#define proto_chunkname(pt) (strref((pt)->chunkname))
-#define proto_chunknamestr(pt) (strdata(proto_chunkname((pt))))
-#define proto_lineinfo(pt) (mref((pt)->lineinfo, const void))
-#define proto_uvinfo(pt) (mref((pt)->uvinfo, const uint8_t))
-#define proto_varinfo(pt) (mref((pt)->varinfo, const uint8_t))
-
-/* -- Upvalue object ------------------------------------------------------ */
-
-typedef struct GCupval {
- GCHeader;
- uint8_t closed; /* Set if closed (i.e. uv->v == &uv->u.value). */
- uint8_t immutable; /* Immutable value. */
- union {
- TValue tv; /* If closed: the value itself. */
- struct { /* If open: double linked list, anchored at thread. */
- GCRef prev;
- GCRef next;
- };
- };
- MRef v; /* Points to stack slot (open) or above (closed). */
- uint32_t dhash; /* Disambiguation hash: dh1 != dh2 => cannot alias. */
-} GCupval;
-
-#define uvprev(uv_) (&gcref((uv_)->prev)->uv)
-#define uvnext(uv_) (&gcref((uv_)->next)->uv)
-#define uvval(uv_) (mref((uv_)->v, TValue))
-
-/* -- Function object (closures) ------------------------------------------ */
-
-/* Common header for functions. env should be at same offset in GCudata. */
-#define GCfuncHeader \
- GCHeader; uint8_t ffid; uint8_t nupvalues; \
- GCRef env; GCRef gclist; MRef pc
-
-typedef struct GCfuncC {
- GCfuncHeader;
- lua_CFunction f; /* C function to be called. */
- TValue upvalue[1]; /* Array of upvalues (TValue). */
-} GCfuncC;
-
-typedef struct GCfuncL {
- GCfuncHeader;
- GCRef uvptr[1]; /* Array of _pointers_ to upvalue objects (GCupval). */
-} GCfuncL;
-
-typedef union GCfunc {
- GCfuncC c;
- GCfuncL l;
-} GCfunc;
-
-#define FF_LUA 0
-#define FF_C 1
-#define isluafunc(fn) ((fn)->c.ffid == FF_LUA)
-#define iscfunc(fn) ((fn)->c.ffid == FF_C)
-#define isffunc(fn) ((fn)->c.ffid > FF_C)
-#define funcproto(fn) \
- check_exp(isluafunc(fn), (GCproto *)(mref((fn)->l.pc, char)-sizeof(GCproto)))
-#define sizeCfunc(n) (sizeof(GCfuncC)-sizeof(TValue)+sizeof(TValue)*(n))
-#define sizeLfunc(n) (sizeof(GCfuncL)-sizeof(GCRef)+sizeof(GCRef)*(n))
-
-/* -- Table object -------------------------------------------------------- */
-
-/* Hash node. */
-typedef struct Node {
- TValue val; /* Value object. Must be first field. */
- TValue key; /* Key object. */
- MRef next; /* Hash chain. */
- MRef freetop; /* Top of free elements (stored in t->node[0]). */
-} Node;
-
-LJ_STATIC_ASSERT(offsetof(Node, val) == 0);
-
-typedef struct GCtab {
- GCHeader;
- uint8_t nomm; /* Negative cache for fast metamethods. */
- int8_t colo; /* Array colocation. */
- MRef array; /* Array part. */
- GCRef gclist;
- GCRef metatable; /* Must be at same offset in GCudata. */
- MRef node; /* Hash part. */
- uint32_t asize; /* Size of array part (keys [0, asize-1]). */
- uint32_t hmask; /* Hash part mask (size of hash part - 1). */
-} GCtab;
-
-#define sizetabcolo(n) ((n)*sizeof(TValue) + sizeof(GCtab))
-#define tabref(r) (&gcref((r))->tab)
-#define noderef(r) (mref((r), Node))
-#define nextnode(n) (mref((n)->next, Node))
-
-/* -- State objects ------------------------------------------------------- */
-
-/* VM states. */
-enum {
- LJ_VMST_INTERP, /* Interpreter. */
- LJ_VMST_C, /* C function. */
- LJ_VMST_GC, /* Garbage collector. */
- LJ_VMST_EXIT, /* Trace exit handler. */
- LJ_VMST_RECORD, /* Trace recorder. */
- LJ_VMST_OPT, /* Optimizer. */
- LJ_VMST_ASM, /* Assembler. */
- LJ_VMST__MAX
-};
-
-#define setvmstate(g, st) ((g)->vmstate = ~LJ_VMST_##st)
-
-/* Metamethods. ORDER MM */
-#ifdef LJ_HASFFI
-#define MMDEF_FFI(_) _(new)
-#else
-#define MMDEF_FFI(_)
-#endif
-
-#if LJ_52 || LJ_HASFFI
-#define MMDEF_PAIRS(_) _(pairs) _(ipairs)
-#else
-#define MMDEF_PAIRS(_)
-#define MM_pairs 255
-#define MM_ipairs 255
-#endif
-
-#define MMDEF(_) \
- _(index) _(newindex) _(gc) _(mode) _(eq) _(len) \
- /* Only the above (fast) metamethods are negative cached (max. 8). */ \
- _(lt) _(le) _(concat) _(call) \
- /* The following must be in ORDER ARITH. */ \
- _(add) _(sub) _(mul) _(div) _(mod) _(pow) _(unm) \
- /* The following are used in the standard libraries. */ \
- _(metatable) _(tostring) MMDEF_FFI(_) MMDEF_PAIRS(_)
-
-typedef enum {
-#define MMENUM(name) MM_##name,
-MMDEF(MMENUM)
-#undef MMENUM
- MM__MAX,
- MM____ = MM__MAX,
- MM_FAST = MM_len
-} MMS;
-
-/* GC root IDs. */
-typedef enum {
- GCROOT_MMNAME, /* Metamethod names. */
- GCROOT_MMNAME_LAST = GCROOT_MMNAME + MM__MAX-1,
- GCROOT_BASEMT, /* Metatables for base types. */
- GCROOT_BASEMT_NUM = GCROOT_BASEMT + ~LJ_TNUMX,
- GCROOT_IO_INPUT, /* Userdata for default I/O input file. */
- GCROOT_IO_OUTPUT, /* Userdata for default I/O output file. */
- GCROOT_MAX
-} GCRootID;
-
-#define basemt_it(g, it) ((g)->gcroot[GCROOT_BASEMT+~(it)])
-#define basemt_obj(g, o) ((g)->gcroot[GCROOT_BASEMT+itypemap(o)])
-#define mmname_str(g, mm) (strref((g)->gcroot[GCROOT_MMNAME+(mm)]))
-
-typedef struct GCState {
- MSize total; /* Memory currently allocated. */
- MSize threshold; /* Memory threshold. */
- uint8_t currentwhite; /* Current white color. */
- uint8_t state; /* GC state. */
- uint8_t nocdatafin; /* No cdata finalizer called. */
- uint8_t unused2;
- MSize sweepstr; /* Sweep position in string table. */
- GCRef root; /* List of all collectable objects. */
- MRef sweep; /* Sweep position in root list. */
- GCRef gray; /* List of gray objects. */
- GCRef grayagain; /* List of objects for atomic traversal. */
- GCRef weak; /* List of weak tables (to be cleared). */
- GCRef mmudata; /* List of userdata (to be finalized). */
- MSize stepmul; /* Incremental GC step granularity. */
- MSize debt; /* Debt (how much GC is behind schedule). */
- MSize estimate; /* Estimate of memory actually in use. */
- MSize pause; /* Pause between successive GC cycles. */
-} GCState;
-
-/* Global state, shared by all threads of a Lua universe. */
-typedef struct global_State {
- GCRef *strhash; /* String hash table (hash chain anchors). */
- MSize strmask; /* String hash mask (size of hash table - 1). */
- MSize strnum; /* Number of strings in hash table. */
- lua_Alloc allocf; /* Memory allocator. */
- void *allocd; /* Memory allocator data. */
- GCState gc; /* Garbage collector. */
- SBuf tmpbuf; /* Temporary buffer for string concatenation. */
- Node nilnode; /* Fallback 1-element hash part (nil key and value). */
- GCstr strempty; /* Empty string. */
- uint8_t stremptyz; /* Zero terminator of empty string. */
- uint8_t hookmask; /* Hook mask. */
- uint8_t dispatchmode; /* Dispatch mode. */
- uint8_t vmevmask; /* VM event mask. */
- GCRef mainthref; /* Link to main thread. */
- TValue registrytv; /* Anchor for registry. */
- TValue tmptv, tmptv2; /* Temporary TValues. */
- GCupval uvhead; /* Head of double-linked list of all open upvalues. */
- int32_t hookcount; /* Instruction hook countdown. */
- int32_t hookcstart; /* Start count for instruction hook counter. */
- lua_Hook hookf; /* Hook function. */
- lua_CFunction wrapf; /* Wrapper for C function calls. */
- lua_CFunction panic; /* Called as a last resort for errors. */
- volatile int32_t vmstate; /* VM state or current JIT code trace number. */
- BCIns bc_cfunc_int; /* Bytecode for internal C function calls. */
- BCIns bc_cfunc_ext; /* Bytecode for external C function calls. */
- GCRef jit_L; /* Current JIT code lua_State or NULL. */
- MRef jit_base; /* Current JIT code L->base. */
- MRef ctype_state; /* Pointer to C type state. */
- GCRef gcroot[GCROOT_MAX]; /* GC roots. */
-} global_State;
-
-#define mainthread(g) (&gcref(g->mainthref)->th)
-#define niltv(L) \
- check_exp(tvisnil(&G(L)->nilnode.val), &G(L)->nilnode.val)
-#define niltvg(g) \
- check_exp(tvisnil(&(g)->nilnode.val), &(g)->nilnode.val)
-
-/* Hook management. Hook event masks are defined in lua.h. */
-#define HOOK_EVENTMASK 0x0f
-#define HOOK_ACTIVE 0x10
-#define HOOK_ACTIVE_SHIFT 4
-#define HOOK_VMEVENT 0x20
-#define HOOK_GC 0x40
-#define hook_active(g) ((g)->hookmask & HOOK_ACTIVE)
-#define hook_enter(g) ((g)->hookmask |= HOOK_ACTIVE)
-#define hook_entergc(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_GC))
-#define hook_vmevent(g) ((g)->hookmask |= (HOOK_ACTIVE|HOOK_VMEVENT))
-#define hook_leave(g) ((g)->hookmask &= ~HOOK_ACTIVE)
-#define hook_save(g) ((g)->hookmask & ~HOOK_EVENTMASK)
-#define hook_restore(g, h) \
- ((g)->hookmask = ((g)->hookmask & HOOK_EVENTMASK) | (h))
-
-/* Per-thread state object. */
-struct lua_State {
- GCHeader;
- uint8_t dummy_ffid; /* Fake FF_C for curr_funcisL() on dummy frames. */
- uint8_t status; /* Thread status. */
- MRef glref; /* Link to global state. */
- GCRef gclist; /* GC chain. */
- TValue *base; /* Base of currently executing function. */
- TValue *top; /* First free slot in the stack. */
- MRef maxstack; /* Last free slot in the stack. */
- MRef stack; /* Stack base. */
- GCRef openupval; /* List of open upvalues in the stack. */
- GCRef env; /* Thread environment (table of globals). */
- void *cframe; /* End of C stack frame chain. */
- MSize stacksize; /* True stack size (incl. LJ_STACK_EXTRA). */
-};
-
-#define G(L) (mref(L->glref, global_State))
-#define registry(L) (&G(L)->registrytv)
-
-/* Macros to access the currently executing (Lua) function. */
-#define curr_func(L) (&gcref((L->base-1)->fr.func)->fn)
-#define curr_funcisL(L) (isluafunc(curr_func(L)))
-#define curr_proto(L) (funcproto(curr_func(L)))
-#define curr_topL(L) (L->base + curr_proto(L)->framesize)
-#define curr_top(L) (curr_funcisL(L) ? curr_topL(L) : L->top)
-
-/* -- GC object definition and conversions -------------------------------- */
-
-/* GC header for generic access to common fields of GC objects. */
-typedef struct GChead {
- GCHeader;
- uint8_t unused1;
- uint8_t unused2;
- GCRef env;
- GCRef gclist;
- GCRef metatable;
-} GChead;
-
-/* The env field SHOULD be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCfuncL, env));
-LJ_STATIC_ASSERT(offsetof(GChead, env) == offsetof(GCudata, env));
-
-/* The metatable field MUST be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCtab, metatable));
-LJ_STATIC_ASSERT(offsetof(GChead, metatable) == offsetof(GCudata, metatable));
-
-/* The gclist field MUST be at the same offset for all GC objects. */
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(lua_State, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCproto, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCfuncL, gclist));
-LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtab, gclist));
-
-typedef union GCobj {
- GChead gch;
- GCstr str;
- GCupval uv;
- lua_State th;
- GCproto pt;
- GCfunc fn;
- GCcdata cd;
- GCtab tab;
- GCudata ud;
-} GCobj;
-
-/* Macros to convert a GCobj pointer into a specific value. */
-#define gco2str(o) check_exp((o)->gch.gct == ~LJ_TSTR, &(o)->str)
-#define gco2uv(o) check_exp((o)->gch.gct == ~LJ_TUPVAL, &(o)->uv)
-#define gco2th(o) check_exp((o)->gch.gct == ~LJ_TTHREAD, &(o)->th)
-#define gco2pt(o) check_exp((o)->gch.gct == ~LJ_TPROTO, &(o)->pt)
-#define gco2func(o) check_exp((o)->gch.gct == ~LJ_TFUNC, &(o)->fn)
-#define gco2cd(o) check_exp((o)->gch.gct == ~LJ_TCDATA, &(o)->cd)
-#define gco2tab(o) check_exp((o)->gch.gct == ~LJ_TTAB, &(o)->tab)
-#define gco2ud(o) check_exp((o)->gch.gct == ~LJ_TUDATA, &(o)->ud)
-
-/* Macro to convert any collectable object into a GCobj pointer. */
-#define obj2gco(v) ((GCobj *)(v))
-
-/* -- TValue getters/setters ---------------------------------------------- */
-
-#ifdef LUA_USE_ASSERT
-#include "lj_gc.h"
-#endif
-
-/* Macros to test types. */
-#define itype(o) ((o)->it)
-#define tvisnil(o) (itype(o) == LJ_TNIL)
-#define tvisfalse(o) (itype(o) == LJ_TFALSE)
-#define tvistrue(o) (itype(o) == LJ_TTRUE)
-#define tvisbool(o) (tvisfalse(o) || tvistrue(o))
-#if LJ_64
-#define tvislightud(o) (((int32_t)itype(o) >> 15) == -2)
-#else
-#define tvislightud(o) (itype(o) == LJ_TLIGHTUD)
-#endif
-#define tvisstr(o) (itype(o) == LJ_TSTR)
-#define tvisfunc(o) (itype(o) == LJ_TFUNC)
-#define tvisthread(o) (itype(o) == LJ_TTHREAD)
-#define tvisproto(o) (itype(o) == LJ_TPROTO)
-#define tviscdata(o) (itype(o) == LJ_TCDATA)
-#define tvistab(o) (itype(o) == LJ_TTAB)
-#define tvisudata(o) (itype(o) == LJ_TUDATA)
-#define tvisnumber(o) (itype(o) <= LJ_TISNUM)
-#define tvisint(o) (LJ_DUALNUM && itype(o) == LJ_TISNUM)
-#define tvisnum(o) (itype(o) < LJ_TISNUM)
-
-#define tvistruecond(o) (itype(o) < LJ_TISTRUECOND)
-#define tvispri(o) (itype(o) >= LJ_TISPRI)
-#define tvistabud(o) (itype(o) <= LJ_TISTABUD) /* && !tvisnum() */
-#define tvisgcv(o) ((itype(o) - LJ_TISGCV) > (LJ_TNUMX - LJ_TISGCV))
-
-/* Special macros to test numbers for NaN, +0, -0, +1 and raw equality. */
-#define tvisnan(o) ((o)->n != (o)->n)
-#if LJ_64
-#define tviszero(o) (((o)->u64 << 1) == 0)
-#else
-#define tviszero(o) (((o)->u32.lo | ((o)->u32.hi << 1)) == 0)
-#endif
-#define tvispzero(o) ((o)->u64 == 0)
-#define tvismzero(o) ((o)->u64 == U64x(80000000,00000000))
-#define tvispone(o) ((o)->u64 == U64x(3ff00000,00000000))
-#define rawnumequal(o1, o2) ((o1)->u64 == (o2)->u64)
-
-/* Macros to convert type ids. */
-#if LJ_64
-#define itypemap(o) \
- (tvisnumber(o) ? ~LJ_TNUMX : tvislightud(o) ? ~LJ_TLIGHTUD : ~itype(o))
-#else
-#define itypemap(o) (tvisnumber(o) ? ~LJ_TNUMX : ~itype(o))
-#endif
-
-/* Macros to get tagged values. */
-#define gcval(o) (gcref((o)->gcr))
-#define boolV(o) check_exp(tvisbool(o), (LJ_TFALSE - (o)->it))
-#if LJ_64
-#define lightudV(o) \
- check_exp(tvislightud(o), (void *)((o)->u64 & U64x(00007fff,ffffffff)))
-#else
-#define lightudV(o) check_exp(tvislightud(o), gcrefp((o)->gcr, void))
-#endif
-#define gcV(o) check_exp(tvisgcv(o), gcval(o))
-#define strV(o) check_exp(tvisstr(o), &gcval(o)->str)
-#define funcV(o) check_exp(tvisfunc(o), &gcval(o)->fn)
-#define threadV(o) check_exp(tvisthread(o), &gcval(o)->th)
-#define protoV(o) check_exp(tvisproto(o), &gcval(o)->pt)
-#define cdataV(o) check_exp(tviscdata(o), &gcval(o)->cd)
-#define tabV(o) check_exp(tvistab(o), &gcval(o)->tab)
-#define udataV(o) check_exp(tvisudata(o), &gcval(o)->ud)
-#define numV(o) check_exp(tvisnum(o), (o)->n)
-#define intV(o) check_exp(tvisint(o), (int32_t)(o)->i)
-
-/* Macros to set tagged values. */
-#define setitype(o, i) ((o)->it = (i))
-#define setnilV(o) ((o)->it = LJ_TNIL)
-#define setboolV(o, x) ((o)->it = LJ_TFALSE-(uint32_t)(x))
-
-static LJ_AINLINE void setlightudV(TValue *o, void *p)
-{
-#if LJ_64
- o->u64 = (uint64_t)p | (((uint64_t)0xffff) << 48);
-#else
- setgcrefp(o->gcr, p); setitype(o, LJ_TLIGHTUD);
-#endif
-}
-
-#if LJ_64
-#define checklightudptr(L, p) \
- (((uint64_t)(p) >> 47) ? (lj_err_msg(L, LJ_ERR_BADLU), NULL) : (p))
-#define setcont(o, f) \
- ((o)->u64 = (uint64_t)(void *)(f) - (uint64_t)lj_vm_asm_begin)
-#else
-#define checklightudptr(L, p) (p)
-#define setcont(o, f) setlightudV((o), (void *)(f))
-#endif
-
-#define tvchecklive(L, o) \
- UNUSED(L), lua_assert(!tvisgcv(o) || \
- ((~itype(o) == gcval(o)->gch.gct) && !isdead(G(L), gcval(o))))
-
-static LJ_AINLINE void setgcV(lua_State *L, TValue *o, GCobj *v, uint32_t itype)
-{
- setgcref(o->gcr, v); setitype(o, itype); tvchecklive(L, o);
-}
-
-#define define_setV(name, type, tag) \
-static LJ_AINLINE void name(lua_State *L, TValue *o, type *v) \
-{ \
- setgcV(L, o, obj2gco(v), tag); \
-}
-define_setV(setstrV, GCstr, LJ_TSTR)
-define_setV(setthreadV, lua_State, LJ_TTHREAD)
-define_setV(setprotoV, GCproto, LJ_TPROTO)
-define_setV(setfuncV, GCfunc, LJ_TFUNC)
-define_setV(setcdataV, GCcdata, LJ_TCDATA)
-define_setV(settabV, GCtab, LJ_TTAB)
-define_setV(setudataV, GCudata, LJ_TUDATA)
-
-#define setnumV(o, x) ((o)->n = (x))
-#define setnanV(o) ((o)->u64 = U64x(fff80000,00000000))
-#define setpinfV(o) ((o)->u64 = U64x(7ff00000,00000000))
-#define setminfV(o) ((o)->u64 = U64x(fff00000,00000000))
-
-static LJ_AINLINE void setintV(TValue *o, int32_t i)
-{
-#if LJ_DUALNUM
- o->i = (uint32_t)i; setitype(o, LJ_TISNUM);
-#else
- o->n = (lua_Number)i;
-#endif
-}
-
-static LJ_AINLINE void setint64V(TValue *o, int64_t i)
-{
- if (LJ_DUALNUM && LJ_LIKELY(i == (int64_t)(int32_t)i))
- setintV(o, (int32_t)i);
- else
- setnumV(o, (lua_Number)i);
-}
-
-#if LJ_64
-#define setintptrV(o, i) setint64V((o), (i))
-#else
-#define setintptrV(o, i) setintV((o), (i))
-#endif
-
-/* Copy tagged values. */
-static LJ_AINLINE void copyTV(lua_State *L, TValue *o1, const TValue *o2)
-{
- *o1 = *o2; tvchecklive(L, o1);
-}
-
-/* -- Number to integer conversion ---------------------------------------- */
-
-#if LJ_SOFTFP
-LJ_ASMF int32_t lj_vm_tobit(double x);
-#endif
-
-static LJ_AINLINE int32_t lj_num2bit(lua_Number n)
-{
-#if LJ_SOFTFP
- return lj_vm_tobit(n);
-#else
- TValue o;
- o.n = n + 6755399441055744.0; /* 2^52 + 2^51 */
- return (int32_t)o.u32.lo;
-#endif
-}
-
-#if LJ_TARGET_X86 && !defined(__SSE2__)
-#define lj_num2int(n) lj_num2bit((n))
-#else
-#define lj_num2int(n) ((int32_t)(n))
-#endif
-
-/*
-** This must match the JIT backend behavior. In particular for archs
-** that don't have a common hardware instruction for this conversion.
-** Note that signed FP to unsigned int conversions have an undefined
-** result and should never be relied upon in portable FFI code.
-** See also: C99 or C11 standard, 6.3.1.4, footnote of (1).
-*/
-static LJ_AINLINE uint64_t lj_num2u64(lua_Number n)
-{
-#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS
- int64_t i = (int64_t)n;
- if (i < 0) i = (int64_t)(n - 18446744073709551616.0);
- return (uint64_t)i;
-#else
- return (uint64_t)n;
-#endif
-}
-
-static LJ_AINLINE int32_t numberVint(cTValue *o)
-{
- if (LJ_LIKELY(tvisint(o)))
- return intV(o);
- else
- return lj_num2int(numV(o));
-}
-
-static LJ_AINLINE lua_Number numberVnum(cTValue *o)
-{
- if (LJ_UNLIKELY(tvisint(o)))
- return (lua_Number)intV(o);
- else
- return numV(o);
-}
-
-/* -- Miscellaneous object handling --------------------------------------- */
-
-/* Names and maps for internal and external object tags. */
-LJ_DATA const char *const lj_obj_typename[1+LUA_TCDATA+1];
-LJ_DATA const char *const lj_obj_itypename[~LJ_TNUMX+1];
-
-#define lj_typename(o) (lj_obj_itypename[itypemap(o)])
-
-/* Compare two objects without calling metamethods. */
-LJ_FUNC int lj_obj_equal(cTValue *o1, cTValue *o2);
-
-#endif
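
The internal-tag comment above describes the NaN-tagging trick behind TValue: the tag occupies the most significant 32 bit word of a slot, and because all tags lie above the canonical NaN pattern 0xfff80000, a tagged non-number slot still reads as a NaN when viewed as a double. Here is a short standalone sketch of that overlay, assuming a little-endian host; the SketchTV union and SK_TNIL constant are illustrative stand-ins for TValue and LJ_TNIL, not code from this header.

/* Little-endian sketch of the NaN-tagging overlay (illustration only). */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

typedef union {
  double n;                            /* Number object. */
  uint64_t u64;                        /* Raw 64 bit pattern. */
  struct { uint32_t lo, hi; } u32;     /* hi overlaps the tag on LE hosts. */
} SketchTV;

#define SK_TNIL (~0u)                  /* Stand-in for LJ_TNIL. */

int main(void)
{
  SketchTV o;
  o.n = 3.5;                           /* A plain number... */
  printf("number: hi word = 0x%08x (< 0xfff80000)\n", o.u32.hi);
  o.u32.hi = SK_TNIL;                  /* ...now tag the slot as nil. */
  o.u32.lo = 0;
  printf("nil: reads as NaN? %s (bits 0x%016llx)\n",
         isnan(o.n) ? "yes" : "no", (unsigned long long)o.u64);
  return 0;
}
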
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_parse.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_parse.h
deleted file mode 100644
index ceeab69..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_parse.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-** Lua parser (source code -> bytecode).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_PARSE_H
-#define _LJ_PARSE_H
-
-#include "lj_obj.h"
-#include "lj_lex.h"
-
-LJ_FUNC GCproto *lj_parse(LexState *ls);
-LJ_FUNC GCstr *lj_parse_keepstr(LexState *ls, const char *str, size_t l);
-#if LJ_HASFFI
-LJ_FUNC void lj_parse_keepcdata(LexState *ls, TValue *tv, GCcdata *cd);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_record.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_record.h
deleted file mode 100644
index 2bbbde5..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_record.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-** Trace recorder (bytecode -> SSA IR).
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_RECORD_H
-#define _LJ_RECORD_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-/* Context for recording an indexed load/store. */
-typedef struct RecordIndex {
- TValue tabv; /* Runtime value of table (or indexed object). */
- TValue keyv; /* Runtime value of key. */
- TValue valv; /* Runtime value of stored value. */
- TValue mobjv; /* Runtime value of metamethod object. */
- GCtab *mtv; /* Runtime value of metatable object. */
- cTValue *oldv; /* Runtime value of previously stored value. */
- TRef tab; /* Table (or indexed object) reference. */
- TRef key; /* Key reference. */
- TRef val; /* Value reference for a store or 0 for a load. */
- TRef mt; /* Metatable reference. */
- TRef mobj; /* Metamethod object reference. */
- int idxchain; /* Index indirections left or 0 for raw lookup. */
-} RecordIndex;
-
-LJ_FUNC int lj_record_objcmp(jit_State *J, TRef a, TRef b,
- cTValue *av, cTValue *bv);
-LJ_FUNC TRef lj_record_constify(jit_State *J, cTValue *o);
-
-LJ_FUNC void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs);
-LJ_FUNC void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs);
-LJ_FUNC void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults);
-
-LJ_FUNC int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm);
-LJ_FUNC TRef lj_record_idx(jit_State *J, RecordIndex *ix);
-
-LJ_FUNC void lj_record_ins(jit_State *J);
-LJ_FUNC void lj_record_setup(jit_State *J);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_snap.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_snap.h
deleted file mode 100644
index 2c9ae3d..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_snap.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-** Snapshot handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_SNAP_H
-#define _LJ_SNAP_H
-
-#include "lj_obj.h"
-#include "lj_jit.h"
-
-#if LJ_HASJIT
-LJ_FUNC void lj_snap_add(jit_State *J);
-LJ_FUNC void lj_snap_purge(jit_State *J);
-LJ_FUNC void lj_snap_shrink(jit_State *J);
-LJ_FUNC IRIns *lj_snap_regspmap(GCtrace *T, SnapNo snapno, IRIns *ir);
-LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
-LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
-LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
-LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
-
-static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
-{
- if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
-}
-
-static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
-{
- if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
-}
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_state.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_state.h
deleted file mode 100644
index d5b476b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_state.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-** State and stack handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STATE_H
-#define _LJ_STATE_H
-
-#include "lj_obj.h"
-
-#define incr_top(L) \
- (++L->top >= tvref(L->maxstack) && (lj_state_growstack1(L), 0))
-
-#define savestack(L, p) ((char *)(p) - mref(L->stack, char))
-#define restorestack(L, n) ((TValue *)(mref(L->stack, char) + (n)))
-
-LJ_FUNC void lj_state_relimitstack(lua_State *L);
-LJ_FUNC void lj_state_shrinkstack(lua_State *L, MSize used);
-LJ_FUNCA void LJ_FASTCALL lj_state_growstack(lua_State *L, MSize need);
-LJ_FUNC void LJ_FASTCALL lj_state_growstack1(lua_State *L);
-
-static LJ_AINLINE void lj_state_checkstack(lua_State *L, MSize need)
-{
- if ((mref(L->maxstack, char) - (char *)L->top) <=
- (ptrdiff_t)need*(ptrdiff_t)sizeof(TValue))
- lj_state_growstack(L, need);
-}
-
-LJ_FUNC lua_State *lj_state_new(lua_State *L);
-LJ_FUNC void LJ_FASTCALL lj_state_free(global_State *g, lua_State *L);
-#if LJ_64
-LJ_FUNC lua_State *lj_state_newstate(lua_Alloc f, void *ud);
-#endif
-
-#endif
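
The savestack()/restorestack() macros above exist because lj_state_growstack() may reallocate the stack: a raw TValue pointer into it would dangle, whereas a byte offset can be re-applied to the new base. The following standalone sketch shows the same save-as-offset pattern on a plain malloc'd array; the macros here take an explicit base pointer instead of a lua_State, so they are adapted for illustration rather than copied from the header.

/* Offset-based pointer survival across reallocation (illustration only). */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

typedef struct { double n; } TV;           /* Stand-in for TValue. */

#define savestack(base, p)    ((char *)(p) - (char *)(base))
#define restorestack(base, o) ((TV *)((char *)(base) + (o)))

int main(void)
{
  size_t size = 8;
  TV *stack = malloc(size * sizeof(TV));
  if (!stack) return 1;
  TV *slot = &stack[5];                    /* Pointer into the stack. */
  ptrdiff_t ofs = savestack(stack, slot);  /* Save it as a byte offset. */

  TV *grown = realloc(stack, (size *= 2) * sizeof(TV));  /* May move. */
  if (!grown) { free(stack); return 1; }
  stack = grown;
  slot = restorestack(stack, ofs);         /* Re-derive against the new base. */
  slot->n = 42.0;
  printf("slot %td holds %g after growth\n",
         ofs / (ptrdiff_t)sizeof(TV), slot->n);
  free(stack);
  return 0;
}
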
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_str.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_str.h
deleted file mode 100644
index be04a97..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_str.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-** String handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STR_H
-#define _LJ_STR_H
-
-#include <stdarg.h>
-
-#include "lj_obj.h"
-
-/* String interning. */
-LJ_FUNC int32_t LJ_FASTCALL lj_str_cmp(GCstr *a, GCstr *b);
-LJ_FUNC void lj_str_resize(lua_State *L, MSize newmask);
-LJ_FUNCA GCstr *lj_str_new(lua_State *L, const char *str, size_t len);
-LJ_FUNC void LJ_FASTCALL lj_str_free(global_State *g, GCstr *s);
-
-#define lj_str_newz(L, s) (lj_str_new(L, s, strlen(s)))
-#define lj_str_newlit(L, s) (lj_str_new(L, "" s, sizeof(s)-1))
-
-/* Type conversions. */
-LJ_FUNC size_t LJ_FASTCALL lj_str_bufnum(char *s, cTValue *o);
-LJ_FUNC char * LJ_FASTCALL lj_str_bufint(char *p, int32_t k);
-LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnum(lua_State *L, const lua_Number *np);
-LJ_FUNC GCstr * LJ_FASTCALL lj_str_fromint(lua_State *L, int32_t k);
-LJ_FUNCA GCstr * LJ_FASTCALL lj_str_fromnumber(lua_State *L, cTValue *o);
-
-#define LJ_STR_INTBUF (1+10)
-#define LJ_STR_NUMBUF LUAI_MAXNUMBER2STR
-
-/* String formatting. */
-LJ_FUNC const char *lj_str_pushvf(lua_State *L, const char *fmt, va_list argp);
-LJ_FUNC const char *lj_str_pushf(lua_State *L, const char *fmt, ...)
-#if defined(__GNUC__)
- __attribute__ ((format (printf, 2, 3)))
-#endif
- ;
-
-/* Resizable string buffers. Struct definition in lj_obj.h. */
-LJ_FUNC char *lj_str_needbuf(lua_State *L, SBuf *sb, MSize sz);
-
-#define lj_str_initbuf(sb) ((sb)->buf = NULL, (sb)->sz = 0)
-#define lj_str_resetbuf(sb) ((sb)->n = 0)
-#define lj_str_resizebuf(L, sb, size) \
- ((sb)->buf = (char *)lj_mem_realloc(L, (sb)->buf, (sb)->sz, (size)), \
- (sb)->sz = (size))
-#define lj_str_freebuf(g, sb) lj_mem_free(g, (void *)(sb)->buf, (sb)->sz)
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_strscan.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_strscan.h
deleted file mode 100644
index 6fb0dda..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_strscan.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-** String scanning.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_STRSCAN_H
-#define _LJ_STRSCAN_H
-
-#include "lj_obj.h"
-
-/* Options for accepted/returned formats. */
-#define STRSCAN_OPT_TOINT 0x01 /* Convert to int32_t, if possible. */
-#define STRSCAN_OPT_TONUM 0x02 /* Always convert to double. */
-#define STRSCAN_OPT_IMAG 0x04
-#define STRSCAN_OPT_LL 0x08
-#define STRSCAN_OPT_C 0x10
-
-/* Returned format. */
-typedef enum {
- STRSCAN_ERROR,
- STRSCAN_NUM, STRSCAN_IMAG,
- STRSCAN_INT, STRSCAN_U32, STRSCAN_I64, STRSCAN_U64,
-} StrScanFmt;
-
-LJ_FUNC StrScanFmt lj_strscan_scan(const uint8_t *p, TValue *o, uint32_t opt);
-LJ_FUNC int LJ_FASTCALL lj_strscan_num(GCstr *str, TValue *o);
-#if LJ_DUALNUM
-LJ_FUNC int LJ_FASTCALL lj_strscan_number(GCstr *str, TValue *o);
-#else
-#define lj_strscan_number(s, o) lj_strscan_num((s), (o))
-#endif
-
-/* Check for number or convert string to number/int in-place (!). */
-static LJ_AINLINE int lj_strscan_numberobj(TValue *o)
-{
- return tvisnumber(o) || (tvisstr(o) && lj_strscan_number(strV(o), o));
-}
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_tab.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_tab.h
deleted file mode 100644
index 36ce7cd..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_tab.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-** Table handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TAB_H
-#define _LJ_TAB_H
-
-#include "lj_obj.h"
-
-/* Hash constants. Tuned using a brute force search. */
-#define HASH_BIAS (-0x04c11db7)
-#define HASH_ROT1 14
-#define HASH_ROT2 5
-#define HASH_ROT3 13
-
-/* Scramble the bits of numbers and pointers. */
-static LJ_AINLINE uint32_t hashrot(uint32_t lo, uint32_t hi)
-{
-#if LJ_TARGET_X86ORX64
- /* Prefer variant that compiles well for a 2-operand CPU. */
- lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
- lo -= hi; hi = lj_rol(hi, HASH_ROT2);
- hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
-#else
- lo ^= hi;
- lo = lo - lj_rol(hi, HASH_ROT1);
- hi = lo ^ lj_rol(hi, HASH_ROT1 + HASH_ROT2);
- hi = hi - lj_rol(lo, HASH_ROT3);
-#endif
- return hi;
-}
-
-#define hsize2hbits(s) ((s) ? ((s)==1 ? 1 : 1+lj_fls((uint32_t)((s)-1))) : 0)
-
-LJ_FUNCA GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits);
-#if LJ_HASJIT
-LJ_FUNC GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize);
-#endif
-LJ_FUNCA GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt);
-LJ_FUNC void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t);
-#if LJ_HASFFI
-LJ_FUNC void lj_tab_rehash(lua_State *L, GCtab *t);
-#endif
-LJ_FUNCA void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize);
-
-/* Caveat: all getters except lj_tab_get() can return NULL! */
-
-LJ_FUNCA cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key);
-LJ_FUNC cTValue *lj_tab_getstr(GCtab *t, GCstr *key);
-LJ_FUNCA cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key);
-
-/* Caveat: all setters require a write barrier for the stored value. */
-
-LJ_FUNCA TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key);
-LJ_FUNC TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key);
-LJ_FUNC TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key);
-LJ_FUNC TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key);
-
-#define inarray(t, key) ((MSize)(key) < (MSize)(t)->asize)
-#define arrayslot(t, i) (&tvref((t)->array)[(i)])
-#define lj_tab_getint(t, key) \
- (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_getinth((t), (key)))
-#define lj_tab_setint(L, t, key) \
- (inarray((t), (key)) ? arrayslot((t), (key)) : lj_tab_setinth(L, (t), (key)))
-
-LJ_FUNCA int lj_tab_next(lua_State *L, GCtab *t, TValue *key);
-LJ_FUNCA MSize LJ_FASTCALL lj_tab_len(GCtab *t);
-
-#endif
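
hashrot() above is the scrambling step the table code applies to number and pointer keys; the three rotation constants were tuned by brute force, as the header notes. The snippet below is a standalone copy of the x86/x64 variant with a plain rotate-left standing in for lj_rol() from lj_def.h, just to make the mixing runnable in isolation; the bias and masking done by the surrounding table code are omitted.

/* Standalone copy of the x86/x64 hashrot() mixing step (illustration only). */
#include <stdio.h>
#include <stdint.h>

#define HASH_ROT1 14
#define HASH_ROT2 5
#define HASH_ROT3 13

static uint32_t rotl(uint32_t x, int n)  /* Stand-in for lj_rol(); 0 < n < 32. */
{
  return (x << n) | (x >> (32 - n));
}

static uint32_t hashrot(uint32_t lo, uint32_t hi)
{
  lo ^= hi; hi = rotl(hi, HASH_ROT1);
  lo -= hi; hi = rotl(hi, HASH_ROT2);
  hi ^= lo; hi -= rotl(lo, HASH_ROT3);
  return hi;
}

int main(void)
{
  /* Mix the two 32 bit halves of a double key, e.g. 42.0. */
  union { double d; struct { uint32_t lo, hi; } u; } k = { 42.0 };
  printf("hashrot(42.0) = 0x%08x\n", hashrot(k.u.lo, k.u.hi));
  return 0;
}
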
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target.h
deleted file mode 100644
index 53bfa6b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-** Definitions for target CPU.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_H
-#define _LJ_TARGET_H
-
-#include "lj_def.h"
-#include "lj_arch.h"
-
-/* -- Registers and spill slots ------------------------------------------- */
-
-/* Register type (uint8_t in ir->r). */
-typedef uint32_t Reg;
-
-/* The hi-bit is NOT set for an allocated register. This means the value
-** can be directly used without masking. The hi-bit is set for a register
-** allocation hint or for RID_INIT, RID_SINK or RID_SUNK.
-*/
-#define RID_NONE 0x80
-#define RID_MASK 0x7f
-#define RID_INIT (RID_NONE|RID_MASK)
-#define RID_SINK (RID_INIT-1)
-#define RID_SUNK (RID_INIT-2)
-
-#define ra_noreg(r) ((r) & RID_NONE)
-#define ra_hasreg(r) (!((r) & RID_NONE))
-
-/* The ra_hashint() macro assumes a previous test for ra_noreg(). */
-#define ra_hashint(r) ((r) < RID_SUNK)
-#define ra_gethint(r) ((Reg)((r) & RID_MASK))
-#define ra_sethint(rr, r) rr = (uint8_t)((r)|RID_NONE)
-#define ra_samehint(r1, r2) (ra_gethint((r1)^(r2)) == 0)
-
-/* Spill slot 0 means no spill slot has been allocated. */
-#define SPS_NONE 0
-
-#define ra_hasspill(s) ((s) != SPS_NONE)
-
-/* Combined register and spill slot (uint16_t in ir->prev). */
-typedef uint32_t RegSP;
-
-#define REGSP(r, s) ((r) + ((s) << 8))
-#define REGSP_HINT(r) ((r)|RID_NONE)
-#define REGSP_INIT REGSP(RID_INIT, 0)
-
-#define regsp_reg(rs) ((rs) & 255)
-#define regsp_spill(rs) ((rs) >> 8)
-#define regsp_used(rs) \
- (((rs) & ~REGSP(RID_MASK, 0)) != REGSP(RID_NONE, 0))
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Bitset for registers. 32 registers suffice for most architectures.
-** Note that one set holds bits for both GPRs and FPRs.
-*/
-#if LJ_TARGET_PPC || LJ_TARGET_MIPS
-typedef uint64_t RegSet;
-#else
-typedef uint32_t RegSet;
-#endif
-
-#define RID2RSET(r) (((RegSet)1) << (r))
-#define RSET_EMPTY ((RegSet)0)
-#define RSET_RANGE(lo, hi) ((RID2RSET((hi)-(lo))-1) << (lo))
-
-#define rset_test(rs, r) ((int)((rs) >> (r)) & 1)
-#define rset_set(rs, r) (rs |= RID2RSET(r))
-#define rset_clear(rs, r) (rs &= ~RID2RSET(r))
-#define rset_exclude(rs, r) (rs & ~RID2RSET(r))
-#if LJ_TARGET_PPC || LJ_TARGET_MIPS
-#define rset_picktop(rs) ((Reg)(__builtin_clzll(rs)^63))
-#define rset_pickbot(rs) ((Reg)__builtin_ctzll(rs))
-#else
-#define rset_picktop(rs) ((Reg)lj_fls(rs))
-#define rset_pickbot(rs) ((Reg)lj_ffs(rs))
-#endif
-
-/* -- Register allocation cost -------------------------------------------- */
-
-/* The register allocation heuristic keeps track of the cost for allocating
-** a specific register:
-**
-** A free register (obviously) has a cost of 0 and a 1-bit in the free mask.
-**
-** An already allocated register has the (non-zero) IR reference in the lowest
-** bits and the result of a blended cost-model in the higher bits.
-**
-** The allocator first checks the free mask for a hit. Otherwise an (unrolled)
-** linear search for the minimum cost is used. The search doesn't need to
-** keep track of the position of the minimum, which makes it very fast.
-** The lowest bits of the minimum cost show the desired IR reference whose
-** register is the one to evict.
-**
-** Without the cost-model this degenerates to the standard heuristics for
-** (reverse) linear-scan register allocation. Since code generation is done
-** in reverse, a live interval extends from the last use to the first def.
-** For an SSA IR the IR reference is the first (and only) def and thus
-** trivially marks the end of the interval. The LSRA heuristics says to pick
-** the register whose live interval has the furthest extent, i.e. the lowest
-** IR reference in our case.
-**
-** A cost-model should take into account other factors, like spill-cost and
-** restore- or rematerialization-cost, which depend on the kind of instruction.
-** E.g. constants have zero spill costs, variant instructions have higher
-** costs than invariants and PHIs should preferably never be spilled.
-**
-** Here's a first cut at a simple, but effective blended cost-model for R-LSRA:
-** - Due to careful design of the IR, constants already have lower IR
-** references than invariants and invariants have lower IR references
-** than variants.
-** - The cost in the upper 16 bits is the sum of the IR reference and a
-** weighted score. The score currently only takes into account whether
-** the IRT_ISPHI bit is set in the instruction type.
-** - The PHI weight is the minimum distance (in IR instructions) a PHI
-** reference has to be further apart from a non-PHI reference to be spilled.
-** - It should be a power of two (for speed) and must be between 2 and 32768.
-** Good values for the PHI weight seem to be between 40 and 150.
-** - Further study is required.
-*/
-#define REGCOST_PHI_WEIGHT 64
-
-/* Cost for allocating a specific register. */
-typedef uint32_t RegCost;
-
-/* Note: assumes 16 bit IRRef1. */
-#define REGCOST(cost, ref) ((RegCost)(ref) + ((RegCost)(cost) << 16))
-#define regcost_ref(rc) ((IRRef1)(rc))
-
-#define REGCOST_T(t) \
- ((RegCost)((t)&IRT_ISPHI) * (((RegCost)(REGCOST_PHI_WEIGHT)<<16)/IRT_ISPHI))
-#define REGCOST_REF_T(ref, t) (REGCOST((ref), (ref)) + REGCOST_T((t)))
-
-/* -- Target-specific definitions ----------------------------------------- */
-
-#if LJ_TARGET_X86ORX64
-#include "lj_target_x86.h"
-#elif LJ_TARGET_ARM
-#include "lj_target_arm.h"
-#elif LJ_TARGET_PPC
-#include "lj_target_ppc.h"
-#elif LJ_TARGET_MIPS
-#include "lj_target_mips.h"
-#else
-#error "Missing include for target CPU"
-#endif
-
-#ifdef EXITSTUBS_PER_GROUP
-/* Return the address of an exit stub. */
-static LJ_AINLINE char *exitstub_addr_(char **group, uint32_t exitno)
-{
- lua_assert(group[exitno / EXITSTUBS_PER_GROUP] != NULL);
- return (char *)group[exitno / EXITSTUBS_PER_GROUP] +
- EXITSTUB_SPACING*(exitno % EXITSTUBS_PER_GROUP);
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_addr(J, exitno) \
- ((MCode *)exitstub_addr_((char **)((J)->exitstubgroup), (exitno)))
-#endif
-
-#endif
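
The cost-model comment above packs each allocated register's state into a single 32 bit RegCost: the IR reference in the low 16 bits and the blended cost (reference plus an optional PHI weight) in the upper 16 bits, so a plain minimum search over the packed words directly yields the reference to evict. The sketch below demonstrates that packing with made-up references; it mirrors the REGCOST()/regcost_ref() macros but is not the allocator's actual search loop.

/* REGCOST packing and minimum search (illustration with made-up refs). */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t RegCost;
typedef uint16_t IRRef1;               /* "Note: assumes 16 bit IRRef1." */

#define REGCOST_PHI_WEIGHT 64
#define REGCOST(cost, ref)  ((RegCost)(ref) + ((RegCost)(cost) << 16))
#define regcost_ref(rc)     ((IRRef1)(rc))

int main(void)
{
  RegCost cost[3];
  cost[0] = REGCOST(10, 10);                         /* Low ref: constant-like. */
  cost[1] = REGCOST(200, 200);                       /* Higher ref: variant. */
  cost[2] = REGCOST(150 + REGCOST_PHI_WEIGHT, 150);  /* PHI gets extra weight. */

  RegCost min = cost[0];
  for (int i = 1; i < 3; i++)
    if (cost[i] < min) min = cost[i];

  printf("evict the register holding IR ref %u (packed 0x%08x)\n",
         (unsigned)regcost_ref(min), (unsigned)min);
  return 0;
}
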
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_arm.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_arm.h
deleted file mode 100644
index d02cbf8..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_arm.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
-** Definitions for ARM CPUs.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_ARM_H
-#define _LJ_TARGET_ARM_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(SP) _(LR) _(PC)
-#if LJ_SOFTFP
-#define FPRDEF(_)
-#else
-#define FPRDEF(_) \
- _(D0) _(D1) _(D2) _(D3) _(D4) _(D5) _(D6) _(D7) \
- _(D8) _(D9) _(D10) _(D11) _(D12) _(D13) _(D14) _(D15)
-#endif
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_TMP = RID_LR,
-
- /* Calling conventions. */
- RID_RET = RID_R0,
- RID_RETLO = RID_R0,
- RID_RETHI = RID_R1,
-#if LJ_SOFTFP
- RID_FPRET = RID_R0,
-#else
- RID_FPRET = RID_D0,
-#endif
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R9, /* Interpreter BASE. */
- RID_LPC = RID_R6, /* Interpreter PC. */
- RID_DISPATCH = RID_R7, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R8, /* Interpreter L. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_PC+1,
- RID_MIN_FPR = RID_MAX_GPR,
-#if LJ_SOFTFP
- RID_MAX_FPR = RID_MIN_FPR,
-#else
- RID_MAX_FPR = RID_D15+1,
-#endif
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except sp, lr and pc. */
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_R12+1))
-#define RSET_GPREVEN \
- (RID2RSET(RID_R0)|RID2RSET(RID_R2)|RID2RSET(RID_R4)|RID2RSET(RID_R6)| \
- RID2RSET(RID_R8)|RID2RSET(RID_R10))
-#define RSET_GPRODD \
- (RID2RSET(RID_R1)|RID2RSET(RID_R3)|RID2RSET(RID_R5)|RID2RSET(RID_R7)| \
- RID2RSET(RID_R9)|RID2RSET(RID_R11))
-#if LJ_SOFTFP
-#define RSET_FPR 0
-#else
-#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
-#endif
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-/* ABI-specific register sets. lr is an implicit scratch register. */
-#define RSET_SCRATCH_GPR_ (RSET_RANGE(RID_R0, RID_R3+1)|RID2RSET(RID_R12))
-#ifdef __APPLE__
-#define RSET_SCRATCH_GPR (RSET_SCRATCH_GPR_|RID2RSET(RID_R9))
-#else
-#define RSET_SCRATCH_GPR RSET_SCRATCH_GPR_
-#endif
-#if LJ_SOFTFP
-#define RSET_SCRATCH_FPR 0
-#else
-#define RSET_SCRATCH_FPR (RSET_RANGE(RID_D0, RID_D7+1))
-#endif
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R0
-#define REGARG_LASTGPR RID_R3
-#define REGARG_NUMGPR 4
-#if LJ_ABI_SOFTFP
-#define REGARG_FIRSTFPR 0
-#define REGARG_LASTFPR 0
-#define REGARG_NUMFPR 0
-#else
-#define REGARG_FIRSTFPR RID_D0
-#define REGARG_LASTFPR RID_D7
-#define REGARG_NUMFPR 8
-#endif
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
-*/
-#define SPS_FIXED 2
-#define SPS_FIRST 2
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
-#if !LJ_SOFTFP
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
-#endif
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* PC after instruction that caused an exit. Used to find the trace number. */
-#define EXITSTATE_PCREG RID_PC
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-#define EXITSTUB_SPACING 4
-#define EXITSTUBS_PER_GROUP 32
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define ARMF_CC(ai, cc) (((ai) ^ ARMI_CCAL) | ((cc) << 28))
-#define ARMF_N(r) ((r) << 16)
-#define ARMF_D(r) ((r) << 12)
-#define ARMF_S(r) ((r) << 8)
-#define ARMF_M(r) (r)
-#define ARMF_SH(sh, n) (((sh) << 5) | ((n) << 7))
-#define ARMF_RSH(sh, r) (0x10 | ((sh) << 5) | ARMF_S(r))
-
-typedef enum ARMIns {
- ARMI_CCAL = 0xe0000000,
- ARMI_S = 0x000100000,
- ARMI_K12 = 0x02000000,
- ARMI_KNEG = 0x00200000,
- ARMI_LS_W = 0x00200000,
- ARMI_LS_U = 0x00800000,
- ARMI_LS_P = 0x01000000,
- ARMI_LS_R = 0x02000000,
- ARMI_LSX_I = 0x00400000,
-
- ARMI_AND = 0xe0000000,
- ARMI_EOR = 0xe0200000,
- ARMI_SUB = 0xe0400000,
- ARMI_RSB = 0xe0600000,
- ARMI_ADD = 0xe0800000,
- ARMI_ADC = 0xe0a00000,
- ARMI_SBC = 0xe0c00000,
- ARMI_RSC = 0xe0e00000,
- ARMI_TST = 0xe1100000,
- ARMI_TEQ = 0xe1300000,
- ARMI_CMP = 0xe1500000,
- ARMI_CMN = 0xe1700000,
- ARMI_ORR = 0xe1800000,
- ARMI_MOV = 0xe1a00000,
- ARMI_BIC = 0xe1c00000,
- ARMI_MVN = 0xe1e00000,
-
- ARMI_NOP = 0xe1a00000,
-
- ARMI_MUL = 0xe0000090,
- ARMI_SMULL = 0xe0c00090,
-
- ARMI_LDR = 0xe4100000,
- ARMI_LDRB = 0xe4500000,
- ARMI_LDRH = 0xe01000b0,
- ARMI_LDRSB = 0xe01000d0,
- ARMI_LDRSH = 0xe01000f0,
- ARMI_LDRD = 0xe00000d0,
- ARMI_STR = 0xe4000000,
- ARMI_STRB = 0xe4400000,
- ARMI_STRH = 0xe00000b0,
- ARMI_STRD = 0xe00000f0,
- ARMI_PUSH = 0xe92d0000,
-
- ARMI_B = 0xea000000,
- ARMI_BL = 0xeb000000,
- ARMI_BLX = 0xfa000000,
- ARMI_BLXr = 0xe12fff30,
-
- /* ARMv6 */
- ARMI_REV = 0xe6bf0f30,
- ARMI_SXTB = 0xe6af0070,
- ARMI_SXTH = 0xe6bf0070,
- ARMI_UXTB = 0xe6ef0070,
- ARMI_UXTH = 0xe6ff0070,
-
- /* ARMv6T2 */
- ARMI_MOVW = 0xe3000000,
- ARMI_MOVT = 0xe3400000,
-
- /* VFP */
- ARMI_VMOV_D = 0xeeb00b40,
- ARMI_VMOV_S = 0xeeb00a40,
- ARMI_VMOVI_D = 0xeeb00b00,
-
- ARMI_VMOV_R_S = 0xee100a10,
- ARMI_VMOV_S_R = 0xee000a10,
- ARMI_VMOV_RR_D = 0xec500b10,
- ARMI_VMOV_D_RR = 0xec400b10,
-
- ARMI_VADD_D = 0xee300b00,
- ARMI_VSUB_D = 0xee300b40,
- ARMI_VMUL_D = 0xee200b00,
- ARMI_VMLA_D = 0xee000b00,
- ARMI_VMLS_D = 0xee000b40,
- ARMI_VNMLS_D = 0xee100b00,
- ARMI_VDIV_D = 0xee800b00,
-
- ARMI_VABS_D = 0xeeb00bc0,
- ARMI_VNEG_D = 0xeeb10b40,
- ARMI_VSQRT_D = 0xeeb10bc0,
-
- ARMI_VCMP_D = 0xeeb40b40,
- ARMI_VCMPZ_D = 0xeeb50b40,
-
- ARMI_VMRS = 0xeef1fa10,
-
- ARMI_VCVT_S32_F32 = 0xeebd0ac0,
- ARMI_VCVT_S32_F64 = 0xeebd0bc0,
- ARMI_VCVT_U32_F32 = 0xeebc0ac0,
- ARMI_VCVT_U32_F64 = 0xeebc0bc0,
- ARMI_VCVTR_S32_F32 = 0xeebd0a40,
- ARMI_VCVTR_S32_F64 = 0xeebd0b40,
- ARMI_VCVTR_U32_F32 = 0xeebc0a40,
- ARMI_VCVTR_U32_F64 = 0xeebc0b40,
- ARMI_VCVT_F32_S32 = 0xeeb80ac0,
- ARMI_VCVT_F64_S32 = 0xeeb80bc0,
- ARMI_VCVT_F32_U32 = 0xeeb80a40,
- ARMI_VCVT_F64_U32 = 0xeeb80b40,
- ARMI_VCVT_F32_F64 = 0xeeb70bc0,
- ARMI_VCVT_F64_F32 = 0xeeb70ac0,
-
- ARMI_VLDR_S = 0xed100a00,
- ARMI_VLDR_D = 0xed100b00,
- ARMI_VSTR_S = 0xed000a00,
- ARMI_VSTR_D = 0xed000b00,
-} ARMIns;
-
-typedef enum ARMShift {
- ARMSH_LSL, ARMSH_LSR, ARMSH_ASR, ARMSH_ROR
-} ARMShift;
-
-/* ARM condition codes. */
-typedef enum ARMCC {
- CC_EQ, CC_NE, CC_CS, CC_CC, CC_MI, CC_PL, CC_VS, CC_VC,
- CC_HI, CC_LS, CC_GE, CC_LT, CC_GT, CC_LE, CC_AL,
- CC_HS = CC_CS, CC_LO = CC_CC
-} ARMCC;
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_mips.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_mips.h
deleted file mode 100644
index bed174b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_mips.h
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
-** Definitions for MIPS CPUs.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_MIPS_H
-#define _LJ_TARGET_MIPS_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(R1) _(R2) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(R13) _(R14) _(R15) \
- _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
- _(R24) _(R25) _(SYS1) _(SYS2) _(R28) _(SP) _(R30) _(RA)
-#define FPRDEF(_) \
- _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
- _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
- _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
- _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_ZERO = RID_R0,
- RID_TMP = RID_RA,
- RID_GP = RID_R28,
-
- /* Calling conventions. */
- RID_RET = RID_R2,
-#if LJ_LE
- RID_RETHI = RID_R3,
- RID_RETLO = RID_R2,
-#else
- RID_RETHI = RID_R2,
- RID_RETLO = RID_R3,
-#endif
- RID_FPRET = RID_F0,
- RID_CFUNCADDR = RID_R25,
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R16, /* Interpreter BASE. */
- RID_LPC = RID_R18, /* Interpreter PC. */
- RID_DISPATCH = RID_R19, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R20, /* Interpreter L. */
- RID_JGL = RID_R30, /* On-trace: global_State + 32768. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_RA+1,
- RID_MIN_FPR = RID_F0,
- RID_MAX_FPR = RID_F31+1,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR /* Only even regs are used. */
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except ZERO, TMP, SP, SYS1, SYS2, JGL and GP. */
-#define RSET_FIXED \
- (RID2RSET(RID_ZERO)|RID2RSET(RID_TMP)|RID2RSET(RID_SP)|\
- RID2RSET(RID_SYS1)|RID2RSET(RID_SYS2)|RID2RSET(RID_JGL)|RID2RSET(RID_GP))
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
-#define RSET_FPR \
- (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
- RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
- RID2RSET(RID_F16)|RID2RSET(RID_F18)|RID2RSET(RID_F20)|RID2RSET(RID_F22)|\
- RID2RSET(RID_F24)|RID2RSET(RID_F26)|RID2RSET(RID_F28)|RID2RSET(RID_F30))
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#define RSET_SCRATCH_GPR \
- (RSET_RANGE(RID_R1, RID_R15+1)|\
- RID2RSET(RID_R24)|RID2RSET(RID_R25))
-#define RSET_SCRATCH_FPR \
- (RID2RSET(RID_F0)|RID2RSET(RID_F2)|RID2RSET(RID_F4)|RID2RSET(RID_F6)|\
- RID2RSET(RID_F8)|RID2RSET(RID_F10)|RID2RSET(RID_F12)|RID2RSET(RID_F14)|\
- RID2RSET(RID_F16)|RID2RSET(RID_F18))
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R4
-#define REGARG_LASTGPR RID_R7
-#define REGARG_NUMGPR 4
-#define REGARG_FIRSTFPR RID_F12
-#define REGARG_LASTFPR RID_F14
-#define REGARG_NUMFPR 2
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use.
-*/
-#define SPS_FIXED 5
-#define SPS_FIRST 4
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 1) & ~1)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-/* Return the address of a per-trace exit stub. */
-static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p)
-{
- while (*p == 0x00000000) p++; /* Skip MIPSI_NOP. */
- return p;
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_trace_addr(T, exitno) \
- exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode))
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define MIPSF_S(r) ((r) << 21)
-#define MIPSF_T(r) ((r) << 16)
-#define MIPSF_D(r) ((r) << 11)
-#define MIPSF_R(r) ((r) << 21)
-#define MIPSF_H(r) ((r) << 16)
-#define MIPSF_G(r) ((r) << 11)
-#define MIPSF_F(r) ((r) << 6)
-#define MIPSF_A(n) ((n) << 6)
-#define MIPSF_M(n) ((n) << 11)
-
-typedef enum MIPSIns {
- /* Integer instructions. */
- MIPSI_MOVE = 0x00000021,
- MIPSI_NOP = 0x00000000,
-
- MIPSI_LI = 0x24000000,
- MIPSI_LU = 0x34000000,
- MIPSI_LUI = 0x3c000000,
-
- MIPSI_ADDIU = 0x24000000,
- MIPSI_ANDI = 0x30000000,
- MIPSI_ORI = 0x34000000,
- MIPSI_XORI = 0x38000000,
- MIPSI_SLTI = 0x28000000,
- MIPSI_SLTIU = 0x2c000000,
-
- MIPSI_ADDU = 0x00000021,
- MIPSI_SUBU = 0x00000023,
- MIPSI_MUL = 0x70000002,
- MIPSI_AND = 0x00000024,
- MIPSI_OR = 0x00000025,
- MIPSI_XOR = 0x00000026,
- MIPSI_NOR = 0x00000027,
- MIPSI_SLT = 0x0000002a,
- MIPSI_SLTU = 0x0000002b,
- MIPSI_MOVZ = 0x0000000a,
- MIPSI_MOVN = 0x0000000b,
-
- MIPSI_SLL = 0x00000000,
- MIPSI_SRL = 0x00000002,
- MIPSI_SRA = 0x00000003,
- MIPSI_ROTR = 0x00200002, /* MIPS32R2 */
- MIPSI_SLLV = 0x00000004,
- MIPSI_SRLV = 0x00000006,
- MIPSI_SRAV = 0x00000007,
- MIPSI_ROTRV = 0x00000046, /* MIPS32R2 */
-
- MIPSI_SEB = 0x7c000420, /* MIPS32R2 */
- MIPSI_SEH = 0x7c000620, /* MIPS32R2 */
- MIPSI_WSBH = 0x7c0000a0, /* MIPS32R2 */
-
- MIPSI_B = 0x10000000,
- MIPSI_J = 0x08000000,
- MIPSI_JAL = 0x0c000000,
- MIPSI_JR = 0x00000008,
- MIPSI_JALR = 0x0000f809,
-
- MIPSI_BEQ = 0x10000000,
- MIPSI_BNE = 0x14000000,
- MIPSI_BLEZ = 0x18000000,
- MIPSI_BGTZ = 0x1c000000,
- MIPSI_BLTZ = 0x04000000,
- MIPSI_BGEZ = 0x04010000,
-
- /* Load/store instructions. */
- MIPSI_LW = 0x8c000000,
- MIPSI_SW = 0xac000000,
- MIPSI_LB = 0x80000000,
- MIPSI_SB = 0xa0000000,
- MIPSI_LH = 0x84000000,
- MIPSI_SH = 0xa4000000,
- MIPSI_LBU = 0x90000000,
- MIPSI_LHU = 0x94000000,
- MIPSI_LWC1 = 0xc4000000,
- MIPSI_SWC1 = 0xe4000000,
- MIPSI_LDC1 = 0xd4000000,
- MIPSI_SDC1 = 0xf4000000,
-
- /* FP instructions. */
- MIPSI_MOV_S = 0x46000006,
- MIPSI_MOV_D = 0x46200006,
- MIPSI_MOVT_D = 0x46210011,
- MIPSI_MOVF_D = 0x46200011,
-
- MIPSI_ABS_D = 0x46200005,
- MIPSI_NEG_D = 0x46200007,
-
- MIPSI_ADD_D = 0x46200000,
- MIPSI_SUB_D = 0x46200001,
- MIPSI_MUL_D = 0x46200002,
- MIPSI_DIV_D = 0x46200003,
- MIPSI_SQRT_D = 0x46200004,
-
- MIPSI_ADD_S = 0x46000000,
- MIPSI_SUB_S = 0x46000001,
-
- MIPSI_CVT_D_S = 0x46000021,
- MIPSI_CVT_W_S = 0x46000024,
- MIPSI_CVT_S_D = 0x46200020,
- MIPSI_CVT_W_D = 0x46200024,
- MIPSI_CVT_S_W = 0x46800020,
- MIPSI_CVT_D_W = 0x46800021,
-
- MIPSI_TRUNC_W_S = 0x4600000d,
- MIPSI_TRUNC_W_D = 0x4620000d,
- MIPSI_FLOOR_W_S = 0x4600000f,
- MIPSI_FLOOR_W_D = 0x4620000f,
-
- MIPSI_MFC1 = 0x44000000,
- MIPSI_MTC1 = 0x44800000,
-
- MIPSI_BC1F = 0x45000000,
- MIPSI_BC1T = 0x45010000,
-
- MIPSI_C_EQ_D = 0x46200032,
- MIPSI_C_OLT_D = 0x46200034,
- MIPSI_C_ULT_D = 0x46200035,
- MIPSI_C_OLE_D = 0x46200036,
- MIPSI_C_ULE_D = 0x46200037,
-
-} MIPSIns;
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_ppc.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_ppc.h
deleted file mode 100644
index e57e27d..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_ppc.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
-** Definitions for PPC CPUs.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_PPC_H
-#define _LJ_TARGET_PPC_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#define GPRDEF(_) \
- _(R0) _(SP) _(SYS1) _(R3) _(R4) _(R5) _(R6) _(R7) \
- _(R8) _(R9) _(R10) _(R11) _(R12) _(SYS2) _(R14) _(R15) \
- _(R16) _(R17) _(R18) _(R19) _(R20) _(R21) _(R22) _(R23) \
- _(R24) _(R25) _(R26) _(R27) _(R28) _(R29) _(R30) _(R31)
-#define FPRDEF(_) \
- _(F0) _(F1) _(F2) _(F3) _(F4) _(F5) _(F6) _(F7) \
- _(F8) _(F9) _(F10) _(F11) _(F12) _(F13) _(F14) _(F15) \
- _(F16) _(F17) _(F18) _(F19) _(F20) _(F21) _(F22) _(F23) \
- _(F24) _(F25) _(F26) _(F27) _(F28) _(F29) _(F30) _(F31)
-#define VRIDDEF(_)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_TMP = RID_R0,
-
- /* Calling conventions. */
- RID_RET = RID_R3,
- RID_RETHI = RID_R3,
- RID_RETLO = RID_R4,
- RID_FPRET = RID_F1,
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_R14, /* Interpreter BASE. */
- RID_LPC = RID_R16, /* Interpreter PC. */
- RID_DISPATCH = RID_R17, /* Interpreter DISPATCH table. */
- RID_LREG = RID_R18, /* Interpreter L. */
- RID_JGL = RID_R31, /* On-trace: global_State + 32768. */
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_R0,
- RID_MAX_GPR = RID_R31+1,
- RID_MIN_FPR = RID_F0,
- RID_MAX_FPR = RID_F31+1,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR
-};
-
-#define RID_NUM_KREF RID_NUM_GPR
-#define RID_MIN_KREF RID_R0
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except TMP, SP, SYS1, SYS2 and JGL. */
-#define RSET_FIXED \
- (RID2RSET(RID_TMP)|RID2RSET(RID_SP)|RID2RSET(RID_SYS1)|\
- RID2RSET(RID_SYS2)|RID2RSET(RID_JGL))
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR) - RSET_FIXED)
-#define RSET_FPR RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR)
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#define RSET_SCRATCH_GPR (RSET_RANGE(RID_R3, RID_R12+1))
-#define RSET_SCRATCH_FPR (RSET_RANGE(RID_F0, RID_F13+1))
-#define RSET_SCRATCH (RSET_SCRATCH_GPR|RSET_SCRATCH_FPR)
-#define REGARG_FIRSTGPR RID_R3
-#define REGARG_LASTGPR RID_R10
-#define REGARG_NUMGPR 8
-#define REGARG_FIRSTFPR RID_F1
-#define REGARG_LASTFPR RID_F8
-#define REGARG_NUMFPR 8
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use.
-** [sp+12] tmplo word \
-** [sp+ 8] tmphi word / tmp dword, parameter area for callee
-** [sp+ 4] tmpw, LR of callee
-** [sp+ 0] stack chain
-*/
-#define SPS_FIXED 7
-#define SPS_FIRST 4
-
-/* Stack offsets for temporary slots. Used for FP<->int conversions etc. */
-#define SPOFS_TMPW 4
-#define SPOFS_TMP 8
-#define SPOFS_TMPHI 8
-#define SPOFS_TMPLO 12
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- int32_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Highest exit + 1 indicates stack check. */
-#define EXITSTATE_CHECKEXIT 1
-
-/* Return the address of a per-trace exit stub. */
-static LJ_AINLINE uint32_t *exitstub_trace_addr_(uint32_t *p, uint32_t exitno)
-{
- while (*p == 0x60000000) p++; /* Skip PPCI_NOP. */
- return p + 3 + exitno;
-}
-/* Avoid dependence on lj_jit.h if only including lj_target.h. */
-#define exitstub_trace_addr(T, exitno) \
- exitstub_trace_addr_((MCode *)((char *)(T)->mcode + (T)->szmcode), (exitno))
-
-/* -- Instructions -------------------------------------------------------- */
-
-/* Instruction fields. */
-#define PPCF_CC(cc) ((((cc) & 3) << 16) | (((cc) & 4) << 22))
-#define PPCF_T(r) ((r) << 21)
-#define PPCF_A(r) ((r) << 16)
-#define PPCF_B(r) ((r) << 11)
-#define PPCF_C(r) ((r) << 6)
-#define PPCF_MB(n) ((n) << 6)
-#define PPCF_ME(n) ((n) << 1)
-#define PPCF_Y 0x00200000
-#define PPCF_DOT 0x00000001
-
-typedef enum PPCIns {
- /* Integer instructions. */
- PPCI_MR = 0x7c000378,
- PPCI_NOP = 0x60000000,
-
- PPCI_LI = 0x38000000,
- PPCI_LIS = 0x3c000000,
-
- PPCI_ADD = 0x7c000214,
- PPCI_ADDC = 0x7c000014,
- PPCI_ADDO = 0x7c000614,
- PPCI_ADDE = 0x7c000114,
- PPCI_ADDZE = 0x7c000194,
- PPCI_ADDME = 0x7c0001d4,
- PPCI_ADDI = 0x38000000,
- PPCI_ADDIS = 0x3c000000,
- PPCI_ADDIC = 0x30000000,
- PPCI_ADDICDOT = 0x34000000,
-
- PPCI_SUBF = 0x7c000050,
- PPCI_SUBFC = 0x7c000010,
- PPCI_SUBFO = 0x7c000450,
- PPCI_SUBFE = 0x7c000110,
- PPCI_SUBFZE = 0x7c000190,
- PPCI_SUBFME = 0x7c0001d0,
- PPCI_SUBFIC = 0x20000000,
-
- PPCI_NEG = 0x7c0000d0,
-
- PPCI_AND = 0x7c000038,
- PPCI_ANDC = 0x7c000078,
- PPCI_NAND = 0x7c0003b8,
- PPCI_ANDIDOT = 0x70000000,
- PPCI_ANDISDOT = 0x74000000,
-
- PPCI_OR = 0x7c000378,
- PPCI_NOR = 0x7c0000f8,
- PPCI_ORI = 0x60000000,
- PPCI_ORIS = 0x64000000,
-
- PPCI_XOR = 0x7c000278,
- PPCI_EQV = 0x7c000238,
- PPCI_XORI = 0x68000000,
- PPCI_XORIS = 0x6c000000,
-
- PPCI_CMPW = 0x7c000000,
- PPCI_CMPLW = 0x7c000040,
- PPCI_CMPWI = 0x2c000000,
- PPCI_CMPLWI = 0x28000000,
-
- PPCI_MULLW = 0x7c0001d6,
- PPCI_MULLI = 0x1c000000,
- PPCI_MULLWO = 0x7c0005d6,
-
- PPCI_EXTSB = 0x7c000774,
- PPCI_EXTSH = 0x7c000734,
-
- PPCI_SLW = 0x7c000030,
- PPCI_SRW = 0x7c000430,
- PPCI_SRAW = 0x7c000630,
- PPCI_SRAWI = 0x7c000670,
-
- PPCI_RLWNM = 0x5c000000,
- PPCI_RLWINM = 0x54000000,
- PPCI_RLWIMI = 0x50000000,
-
- PPCI_B = 0x48000000,
- PPCI_BL = 0x48000001,
- PPCI_BC = 0x40800000,
- PPCI_BCL = 0x40800001,
- PPCI_BCTR = 0x4e800420,
- PPCI_BCTRL = 0x4e800421,
-
- PPCI_CRANDC = 0x4c000102,
- PPCI_CRXOR = 0x4c000182,
- PPCI_CRAND = 0x4c000202,
- PPCI_CREQV = 0x4c000242,
- PPCI_CRORC = 0x4c000342,
- PPCI_CROR = 0x4c000382,
-
- PPCI_MFLR = 0x7c0802a6,
- PPCI_MTCTR = 0x7c0903a6,
-
- PPCI_MCRXR = 0x7c000400,
-
- /* Load/store instructions. */
- PPCI_LWZ = 0x80000000,
- PPCI_LBZ = 0x88000000,
- PPCI_STW = 0x90000000,
- PPCI_STB = 0x98000000,
- PPCI_LHZ = 0xa0000000,
- PPCI_LHA = 0xa8000000,
- PPCI_STH = 0xb0000000,
-
- PPCI_STWU = 0x94000000,
-
- PPCI_LFS = 0xc0000000,
- PPCI_LFD = 0xc8000000,
- PPCI_STFS = 0xd0000000,
- PPCI_STFD = 0xd8000000,
-
- PPCI_LWZX = 0x7c00002e,
- PPCI_LBZX = 0x7c0000ae,
- PPCI_STWX = 0x7c00012e,
- PPCI_STBX = 0x7c0001ae,
- PPCI_LHZX = 0x7c00022e,
- PPCI_LHAX = 0x7c0002ae,
- PPCI_STHX = 0x7c00032e,
-
- PPCI_LWBRX = 0x7c00042c,
- PPCI_STWBRX = 0x7c00052c,
-
- PPCI_LFSX = 0x7c00042e,
- PPCI_LFDX = 0x7c0004ae,
- PPCI_STFSX = 0x7c00052e,
- PPCI_STFDX = 0x7c0005ae,
-
- /* FP instructions. */
- PPCI_FMR = 0xfc000090,
- PPCI_FNEG = 0xfc000050,
- PPCI_FABS = 0xfc000210,
-
- PPCI_FRSP = 0xfc000018,
- PPCI_FCTIWZ = 0xfc00001e,
-
- PPCI_FADD = 0xfc00002a,
- PPCI_FSUB = 0xfc000028,
- PPCI_FMUL = 0xfc000032,
- PPCI_FDIV = 0xfc000024,
- PPCI_FSQRT = 0xfc00002c,
-
- PPCI_FMADD = 0xfc00003a,
- PPCI_FMSUB = 0xfc000038,
- PPCI_FNMSUB = 0xfc00003c,
-
- PPCI_FCMPU = 0xfc000000,
- PPCI_FSEL = 0xfc00002e,
-} PPCIns;
-
-typedef enum PPCCC {
- CC_GE, CC_LE, CC_NE, CC_NS, CC_LT, CC_GT, CC_EQ, CC_SO
-} PPCCC;
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_x86.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_x86.h
deleted file mode 100644
index 4426cc8..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_target_x86.h
+++ /dev/null
@@ -1,342 +0,0 @@
-/*
-** Definitions for x86 and x64 CPUs.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TARGET_X86_H
-#define _LJ_TARGET_X86_H
-
-/* -- Registers IDs ------------------------------------------------------- */
-
-#if LJ_64
-#define GPRDEF(_) \
- _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI) \
- _(R8D) _(R9D) _(R10D) _(R11D) _(R12D) _(R13D) _(R14D) _(R15D)
-#define FPRDEF(_) \
- _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7) \
- _(XMM8) _(XMM9) _(XMM10) _(XMM11) _(XMM12) _(XMM13) _(XMM14) _(XMM15)
-#else
-#define GPRDEF(_) \
- _(EAX) _(ECX) _(EDX) _(EBX) _(ESP) _(EBP) _(ESI) _(EDI)
-#define FPRDEF(_) \
- _(XMM0) _(XMM1) _(XMM2) _(XMM3) _(XMM4) _(XMM5) _(XMM6) _(XMM7)
-#endif
-#define VRIDDEF(_) \
- _(MRM)
-
-#define RIDENUM(name) RID_##name,
-
-enum {
- GPRDEF(RIDENUM) /* General-purpose registers (GPRs). */
- FPRDEF(RIDENUM) /* Floating-point registers (FPRs). */
- RID_MAX,
- RID_MRM = RID_MAX, /* Pseudo-id for ModRM operand. */
-
- /* Calling conventions. */
- RID_RET = RID_EAX,
-#if LJ_64
- RID_FPRET = RID_XMM0,
-#else
- RID_RETLO = RID_EAX,
- RID_RETHI = RID_EDX,
-#endif
-
- /* These definitions must match with the *.dasc file(s): */
- RID_BASE = RID_EDX, /* Interpreter BASE. */
-#if LJ_64 && !LJ_ABI_WIN
- RID_LPC = RID_EBX, /* Interpreter PC. */
- RID_DISPATCH = RID_R14D, /* Interpreter DISPATCH table. */
-#else
- RID_LPC = RID_ESI, /* Interpreter PC. */
- RID_DISPATCH = RID_EBX, /* Interpreter DISPATCH table. */
-#endif
-
- /* Register ranges [min, max) and number of registers. */
- RID_MIN_GPR = RID_EAX,
- RID_MIN_FPR = RID_XMM0,
- RID_MAX_GPR = RID_MIN_FPR,
- RID_MAX_FPR = RID_MAX,
- RID_NUM_GPR = RID_MAX_GPR - RID_MIN_GPR,
- RID_NUM_FPR = RID_MAX_FPR - RID_MIN_FPR,
-};
-
-/* -- Register sets ------------------------------------------------------- */
-
-/* Make use of all registers, except the stack pointer. */
-#define RSET_GPR (RSET_RANGE(RID_MIN_GPR, RID_MAX_GPR)-RID2RSET(RID_ESP))
-#define RSET_FPR (RSET_RANGE(RID_MIN_FPR, RID_MAX_FPR))
-#define RSET_ALL (RSET_GPR|RSET_FPR)
-#define RSET_INIT RSET_ALL
-
-#if LJ_64
-/* Note: this requires the use of FORCE_REX! */
-#define RSET_GPR8 RSET_GPR
-#else
-#define RSET_GPR8 (RSET_RANGE(RID_EAX, RID_EBX+1))
-#endif
-
-/* ABI-specific register sets. */
-#define RSET_ACD (RID2RSET(RID_EAX)|RID2RSET(RID_ECX)|RID2RSET(RID_EDX))
-#if LJ_64
-#if LJ_ABI_WIN
-/* Windows x64 ABI. */
-#define RSET_SCRATCH \
- (RSET_ACD|RSET_RANGE(RID_R8D, RID_R11D+1)|RSET_RANGE(RID_XMM0, RID_XMM5+1))
-#define REGARG_GPRS \
- (RID_ECX|((RID_EDX|((RID_R8D|(RID_R9D<<5))<<5))<<5))
-#define REGARG_NUMGPR 4
-#define REGARG_NUMFPR 4
-#define REGARG_FIRSTFPR RID_XMM0
-#define REGARG_LASTFPR RID_XMM3
-#define STACKARG_OFS (4*8)
-#else
-/* The rest of the civilized x64 world has a common ABI. */
-#define RSET_SCRATCH \
- (RSET_ACD|RSET_RANGE(RID_ESI, RID_R11D+1)|RSET_FPR)
-#define REGARG_GPRS \
- (RID_EDI|((RID_ESI|((RID_EDX|((RID_ECX|((RID_R8D|(RID_R9D \
- <<5))<<5))<<5))<<5))<<5))
-#define REGARG_NUMGPR 6
-#define REGARG_NUMFPR 8
-#define REGARG_FIRSTFPR RID_XMM0
-#define REGARG_LASTFPR RID_XMM7
-#define STACKARG_OFS 0
-#endif
-#else
-/* Common x86 ABI. */
-#define RSET_SCRATCH (RSET_ACD|RSET_FPR)
-#define REGARG_GPRS (RID_ECX|(RID_EDX<<5)) /* Fastcall only. */
-#define REGARG_NUMGPR 2 /* Fastcall only. */
-#define REGARG_NUMFPR 0
-#define STACKARG_OFS 0
-#endif
-
-#if LJ_64
-/* Prefer the low 8 regs of each type to reduce REX prefixes. */
-#undef rset_picktop
-#define rset_picktop(rs) (lj_fls(lj_bswap(rs)) ^ 0x18)
-#endif
-
-/* -- Spill slots --------------------------------------------------------- */
-
-/* Spill slots are 32 bit wide. An even/odd pair is used for FPRs.
-**
-** SPS_FIXED: Available fixed spill slots in interpreter frame.
-** This definition must match with the *.dasc file(s).
-**
-** SPS_FIRST: First spill slot for general use. Reserve min. two 32 bit slots.
-*/
-#if LJ_64
-#if LJ_ABI_WIN
-#define SPS_FIXED (4*2)
-#define SPS_FIRST (4*2) /* Don't use callee register save area. */
-#else
-#define SPS_FIXED 4
-#define SPS_FIRST 2
-#endif
-#else
-#define SPS_FIXED 6
-#define SPS_FIRST 2
-#endif
-
-#define SPOFS_TMP 0
-
-#define sps_scale(slot) (4 * (int32_t)(slot))
-#define sps_align(slot) (((slot) - SPS_FIXED + 3) & ~3)
-
-/* -- Exit state ---------------------------------------------------------- */
-
-/* This definition must match with the *.dasc file(s). */
-typedef struct {
- lua_Number fpr[RID_NUM_FPR]; /* Floating-point registers. */
- intptr_t gpr[RID_NUM_GPR]; /* General-purpose registers. */
- int32_t spill[256]; /* Spill slots. */
-} ExitState;
-
-/* Limited by the range of a short fwd jump (127): (2+2)*(32-1)-2 = 122. */
-#define EXITSTUB_SPACING (2+2)
-#define EXITSTUBS_PER_GROUP 32
-
-/* -- x86 ModRM operand encoding ------------------------------------------ */
-
-typedef enum {
- XM_OFS0 = 0x00, XM_OFS8 = 0x40, XM_OFS32 = 0x80, XM_REG = 0xc0,
- XM_SCALE1 = 0x00, XM_SCALE2 = 0x40, XM_SCALE4 = 0x80, XM_SCALE8 = 0xc0,
- XM_MASK = 0xc0
-} x86Mode;
-
-/* Structure to hold variable ModRM operand. */
-typedef struct {
- int32_t ofs; /* Offset. */
- uint8_t base; /* Base register or RID_NONE. */
- uint8_t idx; /* Index register or RID_NONE. */
- uint8_t scale; /* Index scale (XM_SCALE1 .. XM_SCALE8). */
-} x86ModRM;
-
-/* -- Opcodes ------------------------------------------------------------- */
-
-/* Macros to construct variable-length x86 opcodes. -(len+1) is in LSB. */
-#define XO_(o) ((uint32_t)(0x0000fe + (0x##o<<24)))
-#define XO_FPU(a,b) ((uint32_t)(0x00fd + (0x##a<<16)+(0x##b<<24)))
-#define XO_0f(o) ((uint32_t)(0x0f00fd + (0x##o<<24)))
-#define XO_66(o) ((uint32_t)(0x6600fd + (0x##o<<24)))
-#define XO_660f(o) ((uint32_t)(0x0f66fc + (0x##o<<24)))
-#define XO_f20f(o) ((uint32_t)(0x0ff2fc + (0x##o<<24)))
-#define XO_f30f(o) ((uint32_t)(0x0ff3fc + (0x##o<<24)))
-
-/* This list of x86 opcodes is not intended to be complete. Opcodes are only
-** included when needed. Take a look at DynASM or jit.dis_x86 to see the
-** whole mess.
-*/
-typedef enum {
- /* Fixed length opcodes. XI_* prefix. */
- XI_NOP = 0x90,
- XI_XCHGa = 0x90,
- XI_CALL = 0xe8,
- XI_JMP = 0xe9,
- XI_JMPs = 0xeb,
- XI_PUSH = 0x50, /* Really 50+r. */
- XI_JCCs = 0x70, /* Really 7x. */
- XI_JCCn = 0x80, /* Really 0f8x. */
- XI_LEA = 0x8d,
- XI_MOVrib = 0xb0, /* Really b0+r. */
- XI_MOVri = 0xb8, /* Really b8+r. */
- XI_ARITHib = 0x80,
- XI_ARITHi = 0x81,
- XI_ARITHi8 = 0x83,
- XI_PUSHi8 = 0x6a,
- XI_TESTb = 0x84,
- XI_TEST = 0x85,
- XI_MOVmi = 0xc7,
- XI_GROUP5 = 0xff,
-
- /* Note: little-endian byte-order! */
- XI_FLDZ = 0xeed9,
- XI_FLD1 = 0xe8d9,
- XI_FLDLG2 = 0xecd9,
- XI_FLDLN2 = 0xedd9,
- XI_FDUP = 0xc0d9, /* Really fld st0. */
- XI_FPOP = 0xd8dd, /* Really fstp st0. */
- XI_FPOP1 = 0xd9dd, /* Really fstp st1. */
- XI_FRNDINT = 0xfcd9,
- XI_FSIN = 0xfed9,
- XI_FCOS = 0xffd9,
- XI_FPTAN = 0xf2d9,
- XI_FPATAN = 0xf3d9,
- XI_FSCALE = 0xfdd9,
- XI_FYL2X = 0xf1d9,
-
- /* Variable-length opcodes. XO_* prefix. */
- XO_MOV = XO_(8b),
- XO_MOVto = XO_(89),
- XO_MOVtow = XO_66(89),
- XO_MOVtob = XO_(88),
- XO_MOVmi = XO_(c7),
- XO_MOVmib = XO_(c6),
- XO_LEA = XO_(8d),
- XO_ARITHib = XO_(80),
- XO_ARITHi = XO_(81),
- XO_ARITHi8 = XO_(83),
- XO_ARITHiw8 = XO_66(83),
- XO_SHIFTi = XO_(c1),
- XO_SHIFT1 = XO_(d1),
- XO_SHIFTcl = XO_(d3),
- XO_IMUL = XO_0f(af),
- XO_IMULi = XO_(69),
- XO_IMULi8 = XO_(6b),
- XO_CMP = XO_(3b),
- XO_TESTb = XO_(84),
- XO_TEST = XO_(85),
- XO_GROUP3b = XO_(f6),
- XO_GROUP3 = XO_(f7),
- XO_GROUP5b = XO_(fe),
- XO_GROUP5 = XO_(ff),
- XO_MOVZXb = XO_0f(b6),
- XO_MOVZXw = XO_0f(b7),
- XO_MOVSXb = XO_0f(be),
- XO_MOVSXw = XO_0f(bf),
- XO_MOVSXd = XO_(63),
- XO_BSWAP = XO_0f(c8),
- XO_CMOV = XO_0f(40),
-
- XO_MOVSD = XO_f20f(10),
- XO_MOVSDto = XO_f20f(11),
- XO_MOVSS = XO_f30f(10),
- XO_MOVSSto = XO_f30f(11),
- XO_MOVLPD = XO_660f(12),
- XO_MOVAPS = XO_0f(28),
- XO_XORPS = XO_0f(57),
- XO_ANDPS = XO_0f(54),
- XO_ADDSD = XO_f20f(58),
- XO_SUBSD = XO_f20f(5c),
- XO_MULSD = XO_f20f(59),
- XO_DIVSD = XO_f20f(5e),
- XO_SQRTSD = XO_f20f(51),
- XO_MINSD = XO_f20f(5d),
- XO_MAXSD = XO_f20f(5f),
- XO_ROUNDSD = 0x0b3a0ffc, /* Really 66 0f 3a 0b. See asm_fpmath. */
- XO_UCOMISD = XO_660f(2e),
- XO_CVTSI2SD = XO_f20f(2a),
- XO_CVTSD2SI = XO_f20f(2d),
- XO_CVTTSD2SI= XO_f20f(2c),
- XO_CVTSI2SS = XO_f30f(2a),
- XO_CVTSS2SI = XO_f30f(2d),
- XO_CVTTSS2SI= XO_f30f(2c),
- XO_CVTSS2SD = XO_f30f(5a),
- XO_CVTSD2SS = XO_f20f(5a),
- XO_ADDSS = XO_f30f(58),
- XO_MOVD = XO_660f(6e),
- XO_MOVDto = XO_660f(7e),
-
- XO_FLDd = XO_(d9), XOg_FLDd = 0,
- XO_FLDq = XO_(dd), XOg_FLDq = 0,
- XO_FILDd = XO_(db), XOg_FILDd = 0,
- XO_FILDq = XO_(df), XOg_FILDq = 5,
- XO_FSTPd = XO_(d9), XOg_FSTPd = 3,
- XO_FSTPq = XO_(dd), XOg_FSTPq = 3,
- XO_FISTPq = XO_(df), XOg_FISTPq = 7,
- XO_FISTTPq = XO_(dd), XOg_FISTTPq = 1,
- XO_FADDq = XO_(dc), XOg_FADDq = 0,
- XO_FLDCW = XO_(d9), XOg_FLDCW = 5,
- XO_FNSTCW = XO_(d9), XOg_FNSTCW = 7
-} x86Op;
-
-/* x86 opcode groups. */
-typedef uint32_t x86Group;
-
-#define XG_(i8, i, g) ((x86Group)(((i8) << 16) + ((i) << 8) + (g)))
-#define XG_ARITHi(g) XG_(XI_ARITHi8, XI_ARITHi, g)
-#define XG_TOXOi(xg) ((x86Op)(0x000000fe + (((xg)<<16) & 0xff000000)))
-#define XG_TOXOi8(xg) ((x86Op)(0x000000fe + (((xg)<<8) & 0xff000000)))
-
-#define XO_ARITH(a) ((x86Op)(0x030000fe + ((a)<<27)))
-#define XO_ARITHw(a) ((x86Op)(0x036600fd + ((a)<<27)))
-
-typedef enum {
- XOg_ADD, XOg_OR, XOg_ADC, XOg_SBB, XOg_AND, XOg_SUB, XOg_XOR, XOg_CMP,
- XOg_X_IMUL
-} x86Arith;
-
-typedef enum {
- XOg_ROL, XOg_ROR, XOg_RCL, XOg_RCR, XOg_SHL, XOg_SHR, XOg_SAL, XOg_SAR
-} x86Shift;
-
-typedef enum {
- XOg_TEST, XOg_TEST_, XOg_NOT, XOg_NEG, XOg_MUL, XOg_IMUL, XOg_DIV, XOg_IDIV
-} x86Group3;
-
-typedef enum {
- XOg_INC, XOg_DEC, XOg_CALL, XOg_CALLfar, XOg_JMP, XOg_JMPfar, XOg_PUSH
-} x86Group5;
-
-/* x86 condition codes. */
-typedef enum {
- CC_O, CC_NO, CC_B, CC_NB, CC_E, CC_NE, CC_BE, CC_NBE,
- CC_S, CC_NS, CC_P, CC_NP, CC_L, CC_NL, CC_LE, CC_NLE,
- CC_C = CC_B, CC_NAE = CC_C, CC_NC = CC_NB, CC_AE = CC_NB,
- CC_Z = CC_E, CC_NZ = CC_NE, CC_NA = CC_BE, CC_A = CC_NBE,
- CC_PE = CC_P, CC_PO = CC_NP, CC_NGE = CC_L, CC_GE = CC_NL,
- CC_NG = CC_LE, CC_G = CC_NLE
-} x86CC;
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_trace.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_trace.h
deleted file mode 100644
index edc7af0..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_trace.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-** Trace management.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_TRACE_H
-#define _LJ_TRACE_H
-
-#include "lj_obj.h"
-
-#if LJ_HASJIT
-#include "lj_jit.h"
-#include "lj_dispatch.h"
-
-/* Trace errors. */
-typedef enum {
-#define TREDEF(name, msg) LJ_TRERR_##name,
-#include "lj_traceerr.h"
- LJ_TRERR__MAX
-} TraceError;
-
-LJ_FUNC_NORET void lj_trace_err(jit_State *J, TraceError e);
-LJ_FUNC_NORET void lj_trace_err_info(jit_State *J, TraceError e);
-
-/* Trace management. */
-LJ_FUNC void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T);
-LJ_FUNC void lj_trace_reenableproto(GCproto *pt);
-LJ_FUNC void lj_trace_flushproto(global_State *g, GCproto *pt);
-LJ_FUNC void lj_trace_flush(jit_State *J, TraceNo traceno);
-LJ_FUNC int lj_trace_flushall(lua_State *L);
-LJ_FUNC void lj_trace_initstate(global_State *g);
-LJ_FUNC void lj_trace_freestate(global_State *g);
-
-/* Event handling. */
-LJ_FUNC void lj_trace_ins(jit_State *J, const BCIns *pc);
-LJ_FUNCA void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc);
-LJ_FUNCA int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr);
-
-/* Signal asynchronous abort of trace or end of trace. */
-#define lj_trace_abort(g) (G2J(g)->state &= ~LJ_TRACE_ACTIVE)
-#define lj_trace_end(J) (J->state = LJ_TRACE_END)
-
-#else
-
-#define lj_trace_flushall(L) (UNUSED(L), 0)
-#define lj_trace_initstate(g) UNUSED(g)
-#define lj_trace_freestate(g) UNUSED(g)
-#define lj_trace_abort(g) UNUSED(g)
-#define lj_trace_end(J) UNUSED(J)
-
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_traceerr.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_traceerr.h
deleted file mode 100644
index f920e5e..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_traceerr.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-** Trace compiler error messages.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-/* This file may be included multiple times with different TREDEF macros. */
-
-/* Recording. */
-TREDEF(RECERR, "error thrown or hook called during recording")
-TREDEF(TRACEOV, "trace too long")
-TREDEF(STACKOV, "trace too deep")
-TREDEF(SNAPOV, "too many snapshots")
-TREDEF(BLACKL, "blacklisted")
-TREDEF(NYIBC, "NYI: bytecode %d")
-
-/* Recording loop ops. */
-TREDEF(LLEAVE, "leaving loop in root trace")
-TREDEF(LINNER, "inner loop in root trace")
-TREDEF(LUNROLL, "loop unroll limit reached")
-
-/* Recording calls/returns. */
-TREDEF(BADTYPE, "bad argument type")
-TREDEF(CJITOFF, "JIT compilation disabled for function")
-TREDEF(CUNROLL, "call unroll limit reached")
-TREDEF(DOWNREC, "down-recursion, restarting")
-TREDEF(NYICF, "NYI: C function %s")
-TREDEF(NYIFF, "NYI: FastFunc %s")
-TREDEF(NYIFFU, "NYI: unsupported variant of FastFunc %s")
-TREDEF(NYIRETL, "NYI: return to lower frame")
-
-/* Recording indexed load/store. */
-TREDEF(STORENN, "store with nil or NaN key")
-TREDEF(NOMM, "missing metamethod")
-TREDEF(IDXLOOP, "looping index lookup")
-TREDEF(NYITMIX, "NYI: mixed sparse/dense table")
-
-/* Recording C data operations. */
-TREDEF(NOCACHE, "symbol not in cache")
-TREDEF(NYICONV, "NYI: unsupported C type conversion")
-TREDEF(NYICALL, "NYI: unsupported C function type")
-
-/* Optimizations. */
-TREDEF(GFAIL, "guard would always fail")
-TREDEF(PHIOV, "too many PHIs")
-TREDEF(TYPEINS, "persistent type instability")
-
-/* Assembler. */
-TREDEF(MCODEAL, "failed to allocate mcode memory")
-TREDEF(MCODEOV, "machine code too long")
-TREDEF(MCODELM, "hit mcode limit (retrying)")
-TREDEF(SPILLOV, "too many spill slots")
-TREDEF(BADRA, "inconsistent register allocation")
-TREDEF(NYIIR, "NYI: cannot assemble IR instruction %d")
-TREDEF(NYIPHI, "NYI: PHI shuffling too complex")
-TREDEF(NYICOAL, "NYI: register coalescing too complex")
-
-#undef TREDEF
-
-/* Detecting unused error messages:
- awk -F, '/^TREDEF/ { gsub(/TREDEF./, ""); printf "grep -q LJ_TRERR_%s *.[ch] || echo %s\n", $1, $1}' lj_traceerr.h | sh
-*/
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_udata.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_udata.h
deleted file mode 100644
index f271a42..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_udata.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
-** Userdata handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_UDATA_H
-#define _LJ_UDATA_H
-
-#include "lj_obj.h"
-
-LJ_FUNC GCudata *lj_udata_new(lua_State *L, MSize sz, GCtab *env);
-LJ_FUNC void LJ_FASTCALL lj_udata_free(global_State *g, GCudata *ud);
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vm.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vm.h
deleted file mode 100644
index 3ffa76b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vm.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
-** Assembler VM interface definitions.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_VM_H
-#define _LJ_VM_H
-
-#include "lj_obj.h"
-
-/* Entry points for ASM parts of VM. */
-LJ_ASMF void lj_vm_call(lua_State *L, TValue *base, int nres1);
-LJ_ASMF int lj_vm_pcall(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
-typedef TValue *(*lua_CPFunction)(lua_State *L, lua_CFunction func, void *ud);
-LJ_ASMF int lj_vm_cpcall(lua_State *L, lua_CFunction func, void *ud,
- lua_CPFunction cp);
-LJ_ASMF int lj_vm_resume(lua_State *L, TValue *base, int nres1, ptrdiff_t ef);
-LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_c(void *cframe, int errcode);
-LJ_ASMF_NORET void LJ_FASTCALL lj_vm_unwind_ff(void *cframe);
-LJ_ASMF void lj_vm_unwind_c_eh(void);
-LJ_ASMF void lj_vm_unwind_ff_eh(void);
-#if LJ_TARGET_X86ORX64
-LJ_ASMF void lj_vm_unwind_rethrow(void);
-#endif
-
-/* Miscellaneous functions. */
-#if LJ_TARGET_X86ORX64
-LJ_ASMF int lj_vm_cpuid(uint32_t f, uint32_t res[4]);
-#endif
-#if LJ_TARGET_PPC
-void lj_vm_cachesync(void *start, void *end);
-#endif
-LJ_ASMF double lj_vm_foldarith(double x, double y, int op);
-#if LJ_HASJIT
-LJ_ASMF double lj_vm_foldfpm(double x, int op);
-#endif
-#if !LJ_ARCH_HASFPU
-/* Declared in lj_obj.h: LJ_ASMF int32_t lj_vm_tobit(double x); */
-#endif
-
-/* Dispatch targets for recording and hooks. */
-LJ_ASMF void lj_vm_record(void);
-LJ_ASMF void lj_vm_inshook(void);
-LJ_ASMF void lj_vm_rethook(void);
-LJ_ASMF void lj_vm_callhook(void);
-
-/* Trace exit handling. */
-LJ_ASMF void lj_vm_exit_handler(void);
-LJ_ASMF void lj_vm_exit_interp(void);
-
-/* Internal math helper functions. */
-#if LJ_TARGET_X86ORX64 || LJ_TARGET_PPC
-#define lj_vm_floor floor
-#define lj_vm_ceil ceil
-#else
-LJ_ASMF double lj_vm_floor(double);
-LJ_ASMF double lj_vm_ceil(double);
-#if LJ_TARGET_ARM
-LJ_ASMF double lj_vm_floor_sf(double);
-LJ_ASMF double lj_vm_ceil_sf(double);
-#endif
-#endif
-#if defined(LUAJIT_NO_LOG2) || LJ_TARGET_X86ORX64
-LJ_ASMF double lj_vm_log2(double);
-#else
-#define lj_vm_log2 log2
-#endif
-
-#if LJ_HASJIT
-#if LJ_TARGET_X86ORX64
-LJ_ASMF void lj_vm_floor_sse(void);
-LJ_ASMF void lj_vm_ceil_sse(void);
-LJ_ASMF void lj_vm_trunc_sse(void);
-LJ_ASMF void lj_vm_exp_x87(void);
-LJ_ASMF void lj_vm_exp2_x87(void);
-LJ_ASMF void lj_vm_pow_sse(void);
-LJ_ASMF void lj_vm_powi_sse(void);
-#else
-#if LJ_TARGET_PPC
-#define lj_vm_trunc trunc
-#else
-LJ_ASMF double lj_vm_trunc(double);
-#if LJ_TARGET_ARM
-LJ_ASMF double lj_vm_trunc_sf(double);
-#endif
-#endif
-LJ_ASMF double lj_vm_powi(double, int32_t);
-#ifdef LUAJIT_NO_EXP2
-LJ_ASMF double lj_vm_exp2(double);
-#else
-#define lj_vm_exp2 exp2
-#endif
-#endif
-LJ_ASMF int32_t LJ_FASTCALL lj_vm_modi(int32_t, int32_t);
-#if LJ_HASFFI
-LJ_ASMF int lj_vm_errno(void);
-#endif
-#endif
-
-/* Continuations for metamethods. */
-LJ_ASMF void lj_cont_cat(void); /* Continue with concatenation. */
-LJ_ASMF void lj_cont_ra(void); /* Store result in RA from instruction. */
-LJ_ASMF void lj_cont_nop(void); /* Do nothing, just continue execution. */
-LJ_ASMF void lj_cont_condt(void); /* Branch if result is true. */
-LJ_ASMF void lj_cont_condf(void); /* Branch if result is false. */
-LJ_ASMF void lj_cont_hook(void); /* Continue from hook yield. */
-
-enum { LJ_CONT_TAILCALL, LJ_CONT_FFI_CALLBACK }; /* Special continuations. */
-
-/* Start of the ASM code. */
-LJ_ASMF char lj_vm_asm_begin[];
-
-/* Bytecode offsets are relative to lj_vm_asm_begin. */
-#define makeasmfunc(ofs) ((ASMFunction)(lj_vm_asm_begin + (ofs)))
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vmevent.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vmevent.h
deleted file mode 100644
index 050fb4d..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lj_vmevent.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-** VM event handling.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LJ_VMEVENT_H
-#define _LJ_VMEVENT_H
-
-#include "lj_obj.h"
-
-/* Registry key for VM event handler table. */
-#define LJ_VMEVENTS_REGKEY "_VMEVENTS"
-#define LJ_VMEVENTS_HSIZE 4
-
-#define VMEVENT_MASK(ev) ((uint8_t)1 << ((int)(ev) & 7))
-#define VMEVENT_HASH(ev) ((int)(ev) & ~7)
-#define VMEVENT_HASHIDX(h) ((int)(h) << 3)
-#define VMEVENT_NOCACHE 255
-
-#define VMEVENT_DEF(name, hash) \
- LJ_VMEVENT_##name##_, \
- LJ_VMEVENT_##name = ((LJ_VMEVENT_##name##_) & 7)|((hash) << 3)
-
-/* VM event IDs. */
-typedef enum {
- VMEVENT_DEF(BC, 0x00003883),
- VMEVENT_DEF(TRACE, 0xb2d91467),
- VMEVENT_DEF(RECORD, 0x9284bf4f),
- VMEVENT_DEF(TEXIT, 0xb29df2b0),
- LJ_VMEVENT__MAX
-} VMEvent;
-
-#ifdef LUAJIT_DISABLE_VMEVENT
-#define lj_vmevent_send(L, ev, args) UNUSED(L)
-#define lj_vmevent_send_(L, ev, args, post) UNUSED(L)
-#else
-#define lj_vmevent_send(L, ev, args) \
- if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
- ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
- if (argbase) { \
- args \
- lj_vmevent_call(L, argbase); \
- } \
- }
-#define lj_vmevent_send_(L, ev, args, post) \
- if (G(L)->vmevmask & VMEVENT_MASK(LJ_VMEVENT_##ev)) { \
- ptrdiff_t argbase = lj_vmevent_prepare(L, LJ_VMEVENT_##ev); \
- if (argbase) { \
- args \
- lj_vmevent_call(L, argbase); \
- post \
- } \
- }
-
-LJ_FUNC ptrdiff_t lj_vmevent_prepare(lua_State *L, VMEvent ev);
-LJ_FUNC void lj_vmevent_call(lua_State *L, ptrdiff_t argbase);
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.h
deleted file mode 100644
index c83fd3b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.h
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
-** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $
-** Lua - An Extensible Extension Language
-** Lua.org, PUC-Rio, Brazil (http://www.lua.org)
-** See Copyright Notice at the end of this file
-*/
-
-
-#ifndef lua_h
-#define lua_h
-
-#include <stdarg.h>
-#include <stddef.h>
-
-
-#include "luaconf.h"
-
-
-#define LUA_VERSION "Lua 5.1"
-#define LUA_RELEASE "Lua 5.1.4"
-#define LUA_VERSION_NUM 501
-#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio"
-#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. Celes"
-
-
-/* mark for precompiled code (`<esc>Lua') */
-#define LUA_SIGNATURE "\033Lua"
-
-/* option for multiple returns in `lua_pcall' and `lua_call' */
-#define LUA_MULTRET (-1)
-
-
-/*
-** pseudo-indices
-*/
-#define LUA_REGISTRYINDEX (-10000)
-#define LUA_ENVIRONINDEX (-10001)
-#define LUA_GLOBALSINDEX (-10002)
-#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i))
-
-
-/* thread status; 0 is OK */
-#define LUA_YIELD 1
-#define LUA_ERRRUN 2
-#define LUA_ERRSYNTAX 3
-#define LUA_ERRMEM 4
-#define LUA_ERRERR 5
-
-
-typedef struct lua_State lua_State;
-
-typedef int (*lua_CFunction) (lua_State *L);
-
-
-/*
-** functions that read/write blocks when loading/dumping Lua chunks
-*/
-typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz);
-
-typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud);
-
-
-/*
-** prototype for memory-allocation functions
-*/
-typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize);
-
-
-/*
-** basic types
-*/
-#define LUA_TNONE (-1)
-
-#define LUA_TNIL 0
-#define LUA_TBOOLEAN 1
-#define LUA_TLIGHTUSERDATA 2
-#define LUA_TNUMBER 3
-#define LUA_TSTRING 4
-#define LUA_TTABLE 5
-#define LUA_TFUNCTION 6
-#define LUA_TUSERDATA 7
-#define LUA_TTHREAD 8
-
-
-
-/* minimum Lua stack available to a C function */
-#define LUA_MINSTACK 20
-
-
-/*
-** generic extra include file
-*/
-#if defined(LUA_USER_H)
-#include LUA_USER_H
-#endif
-
-
-/* type of numbers in Lua */
-typedef LUA_NUMBER lua_Number;
-
-
-/* type for integer functions */
-typedef LUA_INTEGER lua_Integer;
-
-
-
-/*
-** state manipulation
-*/
-LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud);
-LUA_API void (lua_close) (lua_State *L);
-LUA_API lua_State *(lua_newthread) (lua_State *L);
-
-LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf);
-
-
-/*
-** basic stack manipulation
-*/
-LUA_API int (lua_gettop) (lua_State *L);
-LUA_API void (lua_settop) (lua_State *L, int idx);
-LUA_API void (lua_pushvalue) (lua_State *L, int idx);
-LUA_API void (lua_remove) (lua_State *L, int idx);
-LUA_API void (lua_insert) (lua_State *L, int idx);
-LUA_API void (lua_replace) (lua_State *L, int idx);
-LUA_API int (lua_checkstack) (lua_State *L, int sz);
-
-LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n);
-
-
-/*
-** access functions (stack -> C)
-*/
-
-LUA_API int (lua_isnumber) (lua_State *L, int idx);
-LUA_API int (lua_isstring) (lua_State *L, int idx);
-LUA_API int (lua_iscfunction) (lua_State *L, int idx);
-LUA_API int (lua_isuserdata) (lua_State *L, int idx);
-LUA_API int (lua_type) (lua_State *L, int idx);
-LUA_API const char *(lua_typename) (lua_State *L, int tp);
-
-LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2);
-LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2);
-LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2);
-
-LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx);
-LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx);
-LUA_API int (lua_toboolean) (lua_State *L, int idx);
-LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len);
-LUA_API size_t (lua_objlen) (lua_State *L, int idx);
-LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx);
-LUA_API void *(lua_touserdata) (lua_State *L, int idx);
-LUA_API lua_State *(lua_tothread) (lua_State *L, int idx);
-LUA_API const void *(lua_topointer) (lua_State *L, int idx);
-
-
-/*
-** push functions (C -> stack)
-*/
-LUA_API void (lua_pushnil) (lua_State *L);
-LUA_API void (lua_pushnumber) (lua_State *L, lua_Number n);
-LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n);
-LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l);
-LUA_API void (lua_pushstring) (lua_State *L, const char *s);
-LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt,
- va_list argp);
-LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...);
-LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n);
-LUA_API void (lua_pushboolean) (lua_State *L, int b);
-LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p);
-LUA_API int (lua_pushthread) (lua_State *L);
-
-
-/*
-** get functions (Lua -> stack)
-*/
-LUA_API void (lua_gettable) (lua_State *L, int idx);
-LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k);
-LUA_API void (lua_rawget) (lua_State *L, int idx);
-LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n);
-LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec);
-LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz);
-LUA_API int (lua_getmetatable) (lua_State *L, int objindex);
-LUA_API void (lua_getfenv) (lua_State *L, int idx);
-
-
-/*
-** set functions (stack -> Lua)
-*/
-LUA_API void (lua_settable) (lua_State *L, int idx);
-LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k);
-LUA_API void (lua_rawset) (lua_State *L, int idx);
-LUA_API void (lua_rawseti) (lua_State *L, int idx, int n);
-LUA_API int (lua_setmetatable) (lua_State *L, int objindex);
-LUA_API int (lua_setfenv) (lua_State *L, int idx);
-
-
-/*
-** `load' and `call' functions (load and run Lua code)
-*/
-LUA_API void (lua_call) (lua_State *L, int nargs, int nresults);
-LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc);
-LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud);
-LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt,
- const char *chunkname);
-
-LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data);
-
-
-/*
-** coroutine functions
-*/
-LUA_API int (lua_yield) (lua_State *L, int nresults);
-LUA_API int (lua_resume) (lua_State *L, int narg);
-LUA_API int (lua_status) (lua_State *L);
-
-/*
-** garbage-collection function and options
-*/
-
-#define LUA_GCSTOP 0
-#define LUA_GCRESTART 1
-#define LUA_GCCOLLECT 2
-#define LUA_GCCOUNT 3
-#define LUA_GCCOUNTB 4
-#define LUA_GCSTEP 5
-#define LUA_GCSETPAUSE 6
-#define LUA_GCSETSTEPMUL 7
-
-LUA_API int (lua_gc) (lua_State *L, int what, int data);
-
-
-/*
-** miscellaneous functions
-*/
-
-LUA_API int (lua_error) (lua_State *L);
-
-LUA_API int (lua_next) (lua_State *L, int idx);
-
-LUA_API void (lua_concat) (lua_State *L, int n);
-
-LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud);
-LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
-
-
-
-/*
-** ===============================================================
-** some useful macros
-** ===============================================================
-*/
-
-#define lua_pop(L,n) lua_settop(L, -(n)-1)
-
-#define lua_newtable(L) lua_createtable(L, 0, 0)
-
-#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n)))
-
-#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0)
-
-#define lua_strlen(L,i) lua_objlen(L, (i))
-
-#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION)
-#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE)
-#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA)
-#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL)
-#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN)
-#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD)
-#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE)
-#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0)
-
-#define lua_pushliteral(L, s) \
- lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1)
-
-#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s))
-#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s))
-
-#define lua_tostring(L,i) lua_tolstring(L, (i), NULL)
-
-
-
-/*
-** compatibility macros and functions
-*/
-
-#define lua_open() luaL_newstate()
-
-#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX)
-
-#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0)
-
-#define lua_Chunkreader lua_Reader
-#define lua_Chunkwriter lua_Writer
-
-
-/* hack */
-LUA_API void lua_setlevel (lua_State *from, lua_State *to);
-
-
-/*
-** {======================================================================
-** Debug API
-** =======================================================================
-*/
-
-
-/*
-** Event codes
-*/
-#define LUA_HOOKCALL 0
-#define LUA_HOOKRET 1
-#define LUA_HOOKLINE 2
-#define LUA_HOOKCOUNT 3
-#define LUA_HOOKTAILRET 4
-
-
-/*
-** Event masks
-*/
-#define LUA_MASKCALL (1 << LUA_HOOKCALL)
-#define LUA_MASKRET (1 << LUA_HOOKRET)
-#define LUA_MASKLINE (1 << LUA_HOOKLINE)
-#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT)
-
-typedef struct lua_Debug lua_Debug; /* activation record */
-
-
-/* Functions to be called by the debuger in specific events */
-typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
-
-
-LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar);
-LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
-LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
-LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
-LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n);
-LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n);
-LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
-LUA_API lua_Hook lua_gethook (lua_State *L);
-LUA_API int lua_gethookmask (lua_State *L);
-LUA_API int lua_gethookcount (lua_State *L);
-
-/* From Lua 5.2. */
-LUA_API void *lua_upvalueid (lua_State *L, int idx, int n);
-LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2);
-LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt,
- const char *chunkname, const char *mode);
-
-
-struct lua_Debug {
- int event;
- const char *name; /* (n) */
- const char *namewhat; /* (n) `global', `local', `field', `method' */
- const char *what; /* (S) `Lua', `C', `main', `tail' */
- const char *source; /* (S) */
- int currentline; /* (l) */
- int nups; /* (u) number of upvalues */
- int linedefined; /* (S) */
- int lastlinedefined; /* (S) */
- char short_src[LUA_IDSIZE]; /* (S) */
- /* private part */
- int i_ci; /* active function */
-};
-
-/* }====================================================================== */
-
-
-/******************************************************************************
-* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved.
-*
-* Permission is hereby granted, free of charge, to any person obtaining
-* a copy of this software and associated documentation files (the
-* "Software"), to deal in the Software without restriction, including
-* without limitation the rights to use, copy, modify, merge, publish,
-* distribute, sublicense, and/or sell copies of the Software, and to
-* permit persons to whom the Software is furnished to do so, subject to
-* the following conditions:
-*
-* The above copyright notice and this permission notice shall be
-* included in all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-******************************************************************************/
-
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.hpp b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.hpp
deleted file mode 100644
index 07e9002..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lua.hpp
+++ /dev/null
@@ -1,9 +0,0 @@
-// C++ wrapper for LuaJIT header files.
-
-extern "C" {
-#include "lua.h"
-#include "lauxlib.h"
-#include "lualib.h"
-#include "luajit.h"
-}
-
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luaconf.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luaconf.h
deleted file mode 100644
index b33e91b..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luaconf.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-** Configuration header.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef luaconf_h
-#define luaconf_h
-
-#ifndef WINVER
-#define WINVER 0x0501
-#endif
-#include <limits.h>
-#include <stddef.h>
-
-/* Default path for loading Lua and C modules with require(). */
-#if defined(_WIN32)
-/*
-** In Windows, any exclamation mark ('!') in the path is replaced by the
-** path of the directory of the executable file of the current process.
-*/
-#define LUA_LDIR "!\\lua\\"
-#define LUA_CDIR "!\\"
-#define LUA_PATH_DEFAULT \
- ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;"
-#define LUA_CPATH_DEFAULT \
- ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll"
-#else
-/*
-** Note to distribution maintainers: do NOT patch the following lines!
-** Please read ../doc/install.html#distro and pass PREFIX=/usr instead.
-*/
-#ifndef LUA_MULTILIB
-#define LUA_MULTILIB "lib"
-#endif
-#ifndef LUA_LMULTILIB
-#define LUA_LMULTILIB "lib"
-#endif
-#define LUA_LROOT "/usr/local"
-#define LUA_LUADIR "/lua/5.1/"
-#define LUA_LJDIR "/luajit-2.0.5/"
-
-#ifdef LUA_ROOT
-#define LUA_JROOT LUA_ROOT
-#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR
-#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR
-#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua"
-#define LUA_RCPATH ";" LUA_RCDIR "?.so"
-#else
-#define LUA_JROOT LUA_LROOT
-#define LUA_RLPATH
-#define LUA_RCPATH
-#endif
-
-#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua"
-#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR
-#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR
-#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua"
-#define LUA_LCPATH1 ";" LUA_LCDIR "?.so"
-#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so"
-
-#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH
-#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2
-#endif
-
-/* Environment variable names for path overrides and initialization code. */
-#define LUA_PATH "LUA_PATH"
-#define LUA_CPATH "LUA_CPATH"
-#define LUA_INIT "LUA_INIT"
-
-/* Special file system characters. */
-#if defined(_WIN32)
-#define LUA_DIRSEP "\\"
-#else
-#define LUA_DIRSEP "/"
-#endif
-#define LUA_PATHSEP ";"
-#define LUA_PATH_MARK "?"
-#define LUA_EXECDIR "!"
-#define LUA_IGMARK "-"
-#define LUA_PATH_CONFIG \
- LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \
- LUA_EXECDIR "\n" LUA_IGMARK
-
-/* Quoting in error messages. */
-#define LUA_QL(x) "'" x "'"
-#define LUA_QS LUA_QL("%s")
-
-/* Various tunables. */
-#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */
-#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */
-#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */
-#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */
-#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */
-
-/* Compatibility with older library function names. */
-#define LUA_COMPAT_MOD /* OLD: math.mod, NEW: math.fmod */
-#define LUA_COMPAT_GFIND /* OLD: string.gfind, NEW: string.gmatch */
-
-/* Configuration for the frontend (the luajit executable). */
-#if defined(luajit_c)
-#define LUA_PROGNAME "luajit" /* Fallback frontend name. */
-#define LUA_PROMPT "> " /* Interactive prompt. */
-#define LUA_PROMPT2 ">> " /* Continuation prompt. */
-#define LUA_MAXINPUT 512 /* Max. input line length. */
-#endif
-
-/* Note: changing the following defines breaks the Lua 5.1 ABI. */
-#define LUA_INTEGER ptrdiff_t
-#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */
-/*
-** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using
-** unreasonable amounts of stack space, but still retain ABI compatibility.
-** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it.
-*/
-#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ)
-
-/* The following defines are here only for compatibility with luaconf.h
-** from the standard Lua distribution. They must not be changed for LuaJIT.
-*/
-#define LUA_NUMBER_DOUBLE
-#define LUA_NUMBER double
-#define LUAI_UACNUMBER double
-#define LUA_NUMBER_SCAN "%lf"
-#define LUA_NUMBER_FMT "%.14g"
-#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n))
-#define LUAI_MAXNUMBER2STR 32
-#define LUA_INTFRMLEN "l"
-#define LUA_INTFRM_T long
-
-/* Linkage of public API functions. */
-#if defined(LUA_BUILD_AS_DLL)
-#if defined(LUA_CORE) || defined(LUA_LIB)
-#define LUA_API __declspec(dllexport)
-#else
-#define LUA_API __declspec(dllimport)
-#endif
-#else
-#define LUA_API extern
-#endif
-
-#define LUALIB_API LUA_API
-
-/* Support for internal assertions. */
-#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK)
-#include <assert.h>
-#endif
-#ifdef LUA_USE_ASSERT
-#define lua_assert(x) assert(x)
-#endif
-#ifdef LUA_USE_APICHECK
-#define luai_apicheck(L, o) { (void)L; assert(o); }
-#else
-#define luai_apicheck(L, o) { (void)L; }
-#endif
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luajit.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luajit.h
deleted file mode 100644
index c5ff3ac..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/luajit.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/
-**
-** Copyright (C) 2005-2017 Mike Pall. All rights reserved.
-**
-** Permission is hereby granted, free of charge, to any person obtaining
-** a copy of this software and associated documentation files (the
-** "Software"), to deal in the Software without restriction, including
-** without limitation the rights to use, copy, modify, merge, publish,
-** distribute, sublicense, and/or sell copies of the Software, and to
-** permit persons to whom the Software is furnished to do so, subject to
-** the following conditions:
-**
-** The above copyright notice and this permission notice shall be
-** included in all copies or substantial portions of the Software.
-**
-** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-**
-** [ MIT license: http://www.opensource.org/licenses/mit-license.php ]
-*/
-
-#ifndef _LUAJIT_H
-#define _LUAJIT_H
-
-#include "lua.h"
-
-#define LUAJIT_VERSION "LuaJIT 2.0.5"
-#define LUAJIT_VERSION_NUM 20005 /* Version 2.0.5 = 02.00.05. */
-#define LUAJIT_VERSION_SYM luaJIT_version_2_0_5
-#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2017 Mike Pall"
-#define LUAJIT_URL "http://luajit.org/"
-
-/* Modes for luaJIT_setmode. */
-#define LUAJIT_MODE_MASK 0x00ff
-
-enum {
- LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */
- LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */
-
- LUAJIT_MODE_FUNC, /* Change mode for a function. */
- LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */
- LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */
-
- LUAJIT_MODE_TRACE, /* Flush a compiled trace. */
-
- LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */
-
- LUAJIT_MODE_MAX
-};
-
-/* Flags or'ed in to the mode. */
-#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */
-#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */
-#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */
-
-/* LuaJIT public C API. */
-
-/* Control the JIT engine. */
-LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode);
-
-/* Enforce (dynamic) linker error for version mismatches. Call from main. */
-LUA_API void LUAJIT_VERSION_SYM(void);
-
-#endif
diff --git a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lualib.h b/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lualib.h
deleted file mode 100644
index bfc130a..0000000
--- a/build/vc++/libs/LuaJIT-2.0.5/include/LuaJIT/lualib.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-** Standard library header.
-** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
-*/
-
-#ifndef _LUALIB_H
-#define _LUALIB_H
-
-#include "lua.h"
-
-#define LUA_FILEHANDLE "FILE*"
-
-#define LUA_COLIBNAME "coroutine"
-#define LUA_MATHLIBNAME "math"
-#define LUA_STRLIBNAME "string"
-#define LUA_TABLIBNAME "table"
-#define LUA_IOLIBNAME "io"
-#define LUA_OSLIBNAME "os"
-#define LUA_LOADLIBNAME "package"
-#define LUA_DBLIBNAME "debug"
-#define LUA_BITLIBNAME "bit"
-#define LUA_JITLIBNAME "jit"
-#define LUA_FFILIBNAME "ffi"
-
-LUALIB_API int luaopen_base(lua_State *L);
-LUALIB_API int luaopen_math(lua_State *L);
-LUALIB_API int luaopen_string(lua_State *L);
-LUALIB_API int luaopen_table(lua_State *L);
-LUALIB_API int luaopen_io(lua_State *L);
-LUALIB_API int luaopen_os(lua_State *L);
-LUALIB_API int luaopen_package(lua_State *L);
-LUALIB_API int luaopen_debug(lua_State *L);
-LUALIB_API int luaopen_bit(lua_State *L);
-LUALIB_API int luaopen_jit(lua_State *L);
-LUALIB_API int luaopen_ffi(lua_State *L);
-
-LUALIB_API void luaL_openlibs(lua_State *L);
-
-#ifndef lua_assert
-#define lua_assert(x) ((void)0)
-#endif
-
-#endif