Kaydet (Commit) 254359b9 authored tarafından Herbert Dürr's avatar Herbert Dürr

adapt the optimization fix to UNO bridge's callVirtualMethod() for FreeBSD and Linux

adapted from the fix to OSX 64bit's UNO bridge (in r1477588):
if the pCallStack variable is optimized out then any assumptions the
method's inlined assembler makes about the stack layout collapse. Adding a pseudo
dependency on the pCallStack variable solves that problem.
üst 2ff25b11
...@@ -98,11 +98,12 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -98,11 +98,12 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
pMethod = *((sal_uInt64 *)pMethod); pMethod = *((sal_uInt64 *)pMethod);
// Load parameters to stack, if necessary // Load parameters to stack, if necessary
sal_uInt64* pCallStack = NULL;
if ( nStack ) if ( nStack )
{ {
// 16-bytes aligned // 16-bytes aligned
sal_uInt32 nStackBytes = ( ( nStack + 1 ) >> 1 ) * 16; sal_uInt32 nStackBytes = ( ( nStack + 1 ) >> 1 ) * 16;
sal_uInt64 *pCallStack = (sal_uInt64 *) __builtin_alloca( nStackBytes ); pCallStack = (sal_uInt64*) __builtin_alloca( nStackBytes );
memcpy( pCallStack, pStack, nStackBytes ); memcpy( pCallStack, pStack, nStackBytes );
} }
...@@ -113,7 +114,6 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -113,7 +114,6 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
double xmm1; double xmm1;
asm volatile ( asm volatile (
// Fill the xmm registers // Fill the xmm registers
"movq %2, %%rax\n\t" "movq %2, %%rax\n\t"
...@@ -148,7 +148,8 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -148,7 +148,8 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
"movsd %%xmm1, %7\n\t" "movsd %%xmm1, %7\n\t"
: :
: "m" ( pMethod ), "m" ( pGPR ), "m" ( pFPR ), "m" ( nFPR ), : "m" ( pMethod ), "m" ( pGPR ), "m" ( pFPR ), "m" ( nFPR ),
"m" ( rax ), "m" ( rdx ), "m" ( xmm0 ), "m" ( xmm1 ) "m" ( rax ), "m" ( rdx ), "m" ( xmm0 ), "m" ( xmm1 ),
"m" (pCallStack) // dummy input to prevent the compiler from optimizing the alloca out
: "rax", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r11" : "rax", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r11"
); );
......
...@@ -98,11 +98,12 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -98,11 +98,12 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
pMethod = *((sal_uInt64 *)pMethod); pMethod = *((sal_uInt64 *)pMethod);
// Load parameters to stack, if necessary // Load parameters to stack, if necessary
sal_uInt64* pCallStack = NULL;
if ( nStack ) if ( nStack )
{ {
// 16-bytes aligned // 16-bytes aligned
sal_uInt32 nStackBytes = ( ( nStack + 1 ) >> 1 ) * 16; sal_uInt32 nStackBytes = ( ( nStack + 1 ) >> 1 ) * 16;
sal_uInt64 *pCallStack = (sal_uInt64 *) __builtin_alloca( nStackBytes ); pCallStack = (sal_uInt64*) __builtin_alloca( nStackBytes );
memcpy( pCallStack, pStack, nStackBytes ); memcpy( pCallStack, pStack, nStackBytes );
} }
...@@ -113,7 +114,6 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -113,7 +114,6 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
double xmm1; double xmm1;
asm volatile ( asm volatile (
// Fill the xmm registers // Fill the xmm registers
"movq %2, %%rax\n\t" "movq %2, %%rax\n\t"
...@@ -148,7 +148,8 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex, ...@@ -148,7 +148,8 @@ static void callVirtualMethod(void * pThis, sal_uInt32 nVtableIndex,
"movsd %%xmm1, %7\n\t" "movsd %%xmm1, %7\n\t"
: :
: "m" ( pMethod ), "m" ( pGPR ), "m" ( pFPR ), "m" ( nFPR ), : "m" ( pMethod ), "m" ( pGPR ), "m" ( pFPR ), "m" ( nFPR ),
"m" ( rax ), "m" ( rdx ), "m" ( xmm0 ), "m" ( xmm1 ) "m" ( rax ), "m" ( rdx ), "m" ( xmm0 ), "m" ( xmm1 ),
"m" (pCallStack) // dummy input to prevent the compiler from optimizing the alloca out
: "rax", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r11", : "rax", "rdi", "rsi", "rdx", "rcx", "r8", "r9", "r11",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7" "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
); );
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment