morestack.S

# x86/x86_64 support for -fsplit-stack.
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
# Contributed by Ian Lance Taylor <iant@google.com>.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3, or (at your option) any later
# version.

# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.

# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.

# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
# <http://www.gnu.org/licenses/>.

#include "auto-host.h"

# Support for allocating more stack space when using -fsplit-stack.

# When a function discovers that it needs more stack space, it will
# call __morestack with the size of the stack frame and the size of
# the parameters to copy from the old stack frame to the new one.

# The __morestack function preserves the parameter registers and
# calls __generic_morestack to actually allocate the stack space.
# When __morestack is called, stack space is very low, but we ensure
# that there is enough space to push the parameter registers and to
# call __generic_morestack.

# When calling __generic_morestack, FRAME_SIZE points to the size of
# the desired frame when the function is called, and the function
# sets it to the size of the allocated stack. OLD_STACK points to
# the parameters on the old stack and PARAM_SIZE is the number of
# bytes of parameters to copy to the new stack. These are the
# parameters of the function that called __morestack. The
# __generic_morestack function returns the new stack pointer,
# pointing to the address of the first copied parameter. The return
# value minus the returned *FRAME_SIZE will be the first address on
# the stack which we should not use.

# void *__generic_morestack (size_t *frame_size, void *old_stack,
#			     size_t param_size);

# The __morestack routine has to arrange for the caller to return to a
# stub on the new stack. The stub is responsible for restoring the
# old stack pointer and returning to the caller's caller. This calls
# __generic_releasestack to retrieve the old stack pointer and release
# the newly allocated stack.

# void *__generic_releasestack (size_t *available);

# We do a little dance so that the processor's call/return return
# address prediction works out. The compiler arranges for the caller
# to look like this:
#	call __morestack
#	ret
#  L:
#	// carry on with function
# After we allocate more stack, we call L, which is in our caller.
# When that returns (to the predicted instruction), we release the
# stack segment and reset the stack pointer. We then return to the
# predicted instruction, namely the ret instruction immediately after
# the call to __morestack. That then returns to the caller of the
# original caller.
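
# For illustration only (a sketch, not code assembled from this file):
# a split-stack prologue roughly as the compiler might emit it for a
# 32-bit function, assuming the %gs:0x30 guard slot used below. The
# exact sequence GCC generates varies (larger frames subtract the
# frame size from %esp before the compare); FRAME_SIZE and PARAM_SIZE
# stand for the function's own constants.
#	cmpl	%gs:0x30,%esp		# Enough stack left?
#	jae	L			# Yes: run the body directly.
#	pushl	$PARAM_SIZE		# Bytes of arguments to copy.
#	pushl	$FRAME_SIZE		# Stack the function needs.
#	call	__morestack		# Get a new stack segment.
#	ret				# Reached only via __morestack's ret $8.
#  L:
#	// carry on with function
# In 64-bit mode the shape is the same, but the frame size is passed
# in %r10 and the argument size in %r11 rather than on the stack.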

# The amount of extra space we ask for. In general this has to be
# enough for the dynamic loader to find a symbol and for a signal
# handler to run.

#ifndef __x86_64__
#define BACKOFF (1024)
#else
#define BACKOFF (3584)
#endif

# The amount of space we ask for when calling non-split-stack code.
#define NON_SPLIT_STACK 0x100000

# This entry point is for split-stack code which calls non-split-stack
# code. When the linker sees this case, it converts the call to
# __morestack to call __morestack_non_split instead. We just bump the
# requested stack space by NON_SPLIT_STACK plus a small cushion.

#include <cet.h>

	.global	__morestack_non_split
	.hidden	__morestack_non_split

#ifdef __ELF__
	.type	__morestack_non_split,@function
#endif

__morestack_non_split:
	.cfi_startproc

#ifndef __x86_64__

	# See below for an extended explanation of this.
	.cfi_def_cfa %esp,16

	pushl	%eax			# Save %eax in case it is a parameter.
	.cfi_adjust_cfa_offset 4	# Account for pushed register.

	movl	%esp,%eax		# Current stack,
	subl	8(%esp),%eax		# less required stack frame size,
	subl	$NON_SPLIT_STACK,%eax	# less space for non-split code.
	cmpl	%gs:0x30,%eax		# See if we have enough space.
	jb	2f			# Get more space if we need it.

	# Here the stack is
	#	%esp + 20:	stack pointer after two returns
	#	%esp + 16:	return address of morestack caller's caller
	#	%esp + 12:	size of parameters
	#	%esp + 8:	new stack frame size
	#	%esp + 4:	return address of this function
	#	%esp:		saved %eax
	#
	# Since we aren't doing a full split stack, we don't need to
	# do anything when our caller returns. So we return to our
	# caller rather than calling it, and let it return as usual.
	# To make that work we adjust the return address.

	# This breaks call/return address prediction for the call to
	# this function. I can't figure out a way to make it work
	# short of copying the parameters down the stack, which will
	# probably take more clock cycles than we will lose breaking
	# call/return address prediction. We will only break
	# prediction for this call, not for our caller.

	movl	4(%esp),%eax		# Increment the return address
	cmpb	$0xc3,(%eax)		# to skip the ret instruction;
	je	1f			# see above.
	addl	$2,%eax
1:	inc	%eax

	# If the instruction that we return to is
	#	leal	20(%ebp),{%eax,%ecx,%edx}
	# then we have been called by a varargs function that expects
	# %ebp to hold a real value. That can only work if we do the
	# full stack split routine. FIXME: This is fragile.
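
	# (Decoding the byte checks below: 0x8d is the lea opcode, a
	# ModRM byte of 0x45, 0x4d or 0x55 selects disp8(%ebp) with
	# destination %eax, %ecx or %edx, and 0x14 is the
	# displacement 20.)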
	cmpb	$0x8d,(%eax)
	jne	3f

	cmpb	$0x14,2(%eax)
	jne	3f

	cmpb	$0x45,1(%eax)
	je	2f
	cmpb	$0x4d,1(%eax)
	je	2f
	cmpb	$0x55,1(%eax)
	je	2f

3:
	movl	%eax,4(%esp)		# Update return address.

	popl	%eax			# Restore %eax and stack.
	.cfi_adjust_cfa_offset -4	# Account for popped register.

	ret	$8			# Return to caller, popping args.

2:
	.cfi_adjust_cfa_offset 4	# Back to where we were.

	popl	%eax			# Restore %eax and stack.
	.cfi_adjust_cfa_offset -4	# Account for popped register.

	# Increment space we request.
	addl	$NON_SPLIT_STACK+0x1000+BACKOFF,4(%esp)

	# Fall through into morestack.

#else

	# See below for an extended explanation of this.
	.cfi_def_cfa %rsp,16

	pushq	%rax			# Save %rax in case caller is using
					# it to preserve original %r10.
	.cfi_adjust_cfa_offset 8	# Adjust for pushed register.

	movq	%rsp,%rax		# Current stack,
	subq	%r10,%rax		# less required stack frame size,
	subq	$NON_SPLIT_STACK,%rax	# less space for non-split code.

#ifdef __LP64__
	cmpq	%fs:0x70,%rax		# See if we have enough space.
#else
	cmpl	%fs:0x40,%eax
#endif

	jb	2f			# Get more space if we need it.

	# If the instruction that we return to is
	#	leaq	24(%rbp),%r11
	# then we have been called by a varargs function that expects
	# %rbp to hold a real value. That can only work if we do the
	# full stack split routine. FIXME: This is fragile.
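
	# (0x185d8d4c below is the little-endian view of the bytes
	# 4c 8d 5d 18: the REX.WR prefix, the lea opcode, the ModRM
	# byte for disp8(%rbp) with destination %r11, and the
	# displacement 24.)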
	movq	8(%rsp),%rax
	incq	%rax			# Skip ret instruction in caller.
	cmpl	$0x185d8d4c,(%rax)
	je	2f

	# This breaks call/return prediction, as described above.
	incq	8(%rsp)			# Increment the return address.

	popq	%rax			# Restore register.
	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	ret				# Return to caller.

2:
	popq	%rax			# Restore register.
	.cfi_adjust_cfa_offset -8	# Adjust for popped register.

	# Increment space we request.
	addq	$NON_SPLIT_STACK+0x1000+BACKOFF,%r10

	# Fall through into morestack.

#endif

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_non_split, . - __morestack_non_split
#endif

# __morestack_non_split falls through into __morestack.

# The __morestack function.

	.global	__morestack
	.hidden	__morestack

#ifdef __ELF__
	.type	__morestack,@function
#endif

__morestack:
.LFB1:
	.cfi_startproc

#ifndef __x86_64__

	# The 32-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0,__gcc_personality_v0
	.cfi_lsda 0,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We return below with a ret $8. We will return to a single
	# return instruction, which will return to the caller of our
	# caller. We let the unwinder skip that single return
	# instruction, and just return to the real caller.

	# Here CFA points just past the return address on the stack,
	# e.g., on function entry it is %esp + 4. The stack looks
	# like this:
	#	CFA + 12:	stack pointer after two returns
	#	CFA + 8:	return address of morestack caller's caller
	#	CFA + 4:	size of parameters
	#	CFA:		new stack frame size
	#	CFA - 4:	return address of this function
	#	CFA - 8:	previous value of %ebp; %ebp points here
	# Setting the new CFA to be the current CFA + 12 (i.e., %esp +
	# 16) will make the unwinder pick up the right return address.

	.cfi_def_cfa %esp,16

	pushl	%ebp
	.cfi_adjust_cfa_offset 4
	.cfi_offset %ebp, -20
	movl	%esp,%ebp
	.cfi_def_cfa_register %ebp

	# In 32-bit mode the parameters are pushed on the stack. The
	# argument size is pushed then the new stack frame size is
	# pushed.

	# In the body of a non-leaf function, the stack pointer will
	# be aligned to a 16-byte boundary. That is CFA + 12 in the
	# stack picture above: (CFA + 12) % 16 == 0. At this point we
	# have %esp == CFA - 8, so %esp % 16 == 12. We need some
	# space for saving registers and passing parameters, and we
	# need to wind up with %esp % 16 == 0.
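	# (44 == 32 + 12: subtracting it takes %esp from %esp % 16 == 12
	# to a 16-byte boundary while leaving 44 bytes of scratch space.)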
	subl	$44,%esp

	# Because our cleanup code may need to clobber %ebx, we need
	# to save it here so the unwinder can restore the value used
	# by the caller. Note that we don't have to restore the
	# register, since we don't change it, we just have to save it
	# for the unwinder.
	movl	%ebx,-4(%ebp)
	.cfi_offset %ebx, -24

	# In 32-bit mode the registers %eax, %edx, and %ecx may be
	# used for parameters, depending on the regparm and fastcall
	# attributes.
	movl	%eax,-8(%ebp)
	movl	%edx,-12(%ebp)
	movl	%ecx,-16(%ebp)

	call	__morestack_block_signals

	movl	12(%ebp),%eax		# The size of the parameters.
	movl	%eax,8(%esp)
	leal	20(%ebp),%eax		# Address of caller's parameters.
	movl	%eax,4(%esp)
	addl	$BACKOFF,8(%ebp)	# Ask for backoff bytes.
	leal	8(%ebp),%eax		# The address of the new frame size.
	movl	%eax,(%esp)
	call	__generic_morestack

	movl	%eax,%esp		# Switch to the new stack.
	subl	8(%ebp),%eax		# The end of the stack space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux.h.
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	call	__morestack_unblock_signals

	movl	-12(%ebp),%edx		# Restore registers.
	movl	-16(%ebp),%ecx

	movl	4(%ebp),%eax		# Increment the return address
	cmpb	$0xc3,(%eax)		# to skip the ret instruction;
	je	1f			# see above.
	addl	$2,%eax
1:	inc	%eax

	movl	%eax,-12(%ebp)		# Store return address in an
					# unused slot.

	movl	-8(%ebp),%eax		# Restore the last register.

	call	*-12(%ebp)		# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value. We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushl	%eax
	pushl	%edx

	# Push the arguments to __generic_releasestack now so that the
	# stack is at a 16-byte boundary for
	# __morestack_block_signals.
	pushl	$0			# Where the available space is returned.
	leal	0(%esp),%eax		# Push its address.
	push	%eax

	call	__morestack_block_signals

	call	__generic_releasestack

	subl	4(%esp),%eax		# Subtract available space.
	addl	$BACKOFF,%eax		# Back off BACKOFF bytes.
.LEHE0:
	movl	%eax,%gs:0x30		# Save the new stack boundary.

	addl	$8,%esp			# Remove values from stack.

	# We need to restore the old stack pointer, which is in %ebp,
	# before we unblock signals. We also need to restore %eax and
	# %edx after we unblock signals but before we return. Do this
	# by moving %eax and %edx from the current stack to the old
	# stack.

	popl	%edx			# Pop return value from current stack.
	popl	%eax

	movl	%ebp,%esp		# Restore stack pointer.

	# As before, we now have %esp % 16 == 12.

	pushl	%eax			# Push return value on old stack.
	pushl	%edx
	subl	$4,%esp			# Align stack to 16-byte boundary.

	call	__morestack_unblock_signals

	addl	$4,%esp
	popl	%edx			# Restore return value.
	popl	%eax

	.cfi_remember_state

	# We never changed %ebx, so we don't have to actually restore it.
	.cfi_restore %ebx

	popl	%ebp
	.cfi_restore %ebp
	.cfi_def_cfa %esp, 16
	ret	$8			# Return to caller, which will
					# immediately return. Pop
					# arguments as we go.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state
	subl	$16,%esp		# Maintain 16 byte alignment.
	movl	%eax,4(%esp)		# Save exception header.
	movl	%ebp,(%esp)		# Stack pointer after resume.
	call	__generic_findstack
	movl	%ebp,%ecx		# Get the stack pointer.
	subl	%eax,%ecx		# Subtract available space.
	addl	$BACKOFF,%ecx		# Back off BACKOFF bytes.
	movl	%ecx,%gs:0x30		# Save new stack boundary.

	movl	4(%esp),%eax		# Function argument.
	movl	%eax,(%esp)
#ifdef __PIC__
	call	__x86.get_pc_thunk.bx	# %ebx may not be set up for us.
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume
#endif

#else /* defined(__x86_64__) */

	# The 64-bit __morestack function.

	# We use a cleanup to restore the stack guard if an exception
	# is thrown through this code.
#ifndef __PIC__
	.cfi_personality 0x3,__gcc_personality_v0
	.cfi_lsda 0x3,.LLSDA1
#else
	.cfi_personality 0x9b,DW.ref.__gcc_personality_v0
	.cfi_lsda 0x1b,.LLSDA1
#endif

	# We will return to a single return instruction, which will
	# return to the caller of our caller. Let the unwinder skip
	# that single return instruction, and just return to the real
	# caller.
	.cfi_def_cfa %rsp,16

	# Set up a normal backtrace.
	pushq	%rbp
	.cfi_adjust_cfa_offset 8
	.cfi_offset %rbp, -24
	movq	%rsp, %rbp
	.cfi_def_cfa_register %rbp

	# In 64-bit mode the new stack frame size is passed in r10
	# and the argument size is passed in r11.

	addq	$BACKOFF,%r10		# Ask for backoff bytes.
	pushq	%r10			# Save new frame size.

	# In 64-bit mode the registers %rdi, %rsi, %rdx, %rcx, %r8,
	# and %r9 may be used for parameters. We also preserve %rax
	# which the caller may use to hold %r10.

	pushq	%rax
	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%r8
	pushq	%r9
	pushq	%r11

	# We entered morestack with the stack pointer aligned to a
	# 16-byte boundary (the call to morestack's caller used 8
	# bytes, and the call to morestack used 8 bytes). We have now
	# pushed 10 registers, so we are still aligned to a 16-byte
	# boundary.
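	# (10 pushes of 8 bytes each is 80 bytes, a multiple of 16.)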

	call	__morestack_block_signals

	leaq	-8(%rbp),%rdi		# Address of new frame size.
	leaq	24(%rbp),%rsi		# The caller's parameters.
	popq	%rdx			# The size of the parameters.

	subq	$8,%rsp			# Align stack.

	call	__generic_morestack

	movq	-8(%rbp),%r10		# Reload modified frame size.
	movq	%rax,%rsp		# Switch to the new stack.
	subq	%r10,%rax		# The end of the stack space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.

.LEHB0:
	# FIXME: The offset must match
	# TARGET_THREAD_SPLIT_STACK_OFFSET in
	# gcc/config/i386/linux64.h.

	# Macro to save the new stack boundary.
#ifdef __LP64__
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movq %r##reg,%fs:0x70
#else
#define X86_64_SAVE_NEW_STACK_BOUNDARY(reg)	movl %e##reg,%fs:0x40
#endif
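
	# (For example, under __LP64__ the use just below,
	# X86_64_SAVE_NEW_STACK_BOUNDARY (ax), expands to
	# movq %rax,%fs:0x70; under x32 it expands to
	# movl %eax,%fs:0x40.)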

	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	call	__morestack_unblock_signals

	movq	-24(%rbp),%rdi		# Restore registers.
	movq	-32(%rbp),%rsi
	movq	-40(%rbp),%rdx
	movq	-48(%rbp),%rcx
	movq	-56(%rbp),%r8
	movq	-64(%rbp),%r9

	movq	8(%rbp),%r10		# Increment the return address
	incq	%r10			# to skip the ret instruction;
					# see above.

	movq	-16(%rbp),%rax		# Restore caller's %rax.

	call	*%r10			# Call our caller!

	# The caller will return here, as predicted.

	# Save the registers which may hold a return value. We
	# assume that __generic_releasestack does not touch any
	# floating point or vector registers.
	pushq	%rax
	pushq	%rdx

	call	__morestack_block_signals

	pushq	$0			# For alignment.
	pushq	$0			# Where the available space is returned.
	leaq	0(%rsp),%rdi		# Pass its address.

	call	__generic_releasestack

	subq	0(%rsp),%rax		# Subtract available space.
	addq	$BACKOFF,%rax		# Back off BACKOFF bytes.

.LEHE0:
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)

	addq	$16,%rsp		# Remove values from stack.

	# We need to restore the old stack pointer, which is in %rbp,
	# before we unblock signals. We also need to restore %rax and
	# %rdx after we unblock signals but before we return. Do this
	# by moving %rax and %rdx from the current stack to the old
	# stack.

	popq	%rdx			# Pop return value from current stack.
	popq	%rax
	movq	%rbp,%rsp		# Restore stack pointer.

	# Now %rsp % 16 == 8.

	subq	$8,%rsp			# For alignment.
	pushq	%rax			# Push return value on old stack.
	pushq	%rdx

	call	__morestack_unblock_signals

	popq	%rdx			# Restore return value.
	popq	%rax
	addq	$8,%rsp

	.cfi_remember_state
	popq	%rbp
	.cfi_restore %rbp
	.cfi_def_cfa %rsp, 16
	ret				# Return to caller, which will
					# immediately return.

# This is the cleanup code called by the stack unwinder when unwinding
# through the code between .LEHB0 and .LEHE0 above.

.L1:
	.cfi_restore_state

	subq	$16,%rsp		# Maintain 16 byte alignment.
	movq	%rax,(%rsp)		# Save exception header.
	movq	%rbp,%rdi		# Stack pointer after resume.
	call	__generic_findstack
	movq	%rbp,%rcx		# Get the stack pointer.
	subq	%rax,%rcx		# Subtract available space.
	addq	$BACKOFF,%rcx		# Back off BACKOFF bytes.
	X86_64_SAVE_NEW_STACK_BOUNDARY (cx)

	movq	(%rsp),%rdi		# Restore exception data for call.
#ifdef __PIC__
	call	_Unwind_Resume@PLT	# Resume unwinding.
#else
	call	_Unwind_Resume		# Resume unwinding.
#endif

#endif /* defined(__x86_64__) */

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack, . - __morestack
#endif

#if !defined(__x86_64__) && defined(__PIC__)
# Output the thunk to get PC into bx, since we use it above.
	.section	.text.__x86.get_pc_thunk.bx,"axG",@progbits,__x86.get_pc_thunk.bx,comdat
	.globl	__x86.get_pc_thunk.bx
	.hidden	__x86.get_pc_thunk.bx
#ifdef __ELF__
	.type	__x86.get_pc_thunk.bx, @function
#endif
__x86.get_pc_thunk.bx:
	.cfi_startproc
	movl	(%esp), %ebx
	ret
	.cfi_endproc
#ifdef __ELF__
	.size	__x86.get_pc_thunk.bx, . - __x86.get_pc_thunk.bx
#endif
#endif

# The exception table. This tells the personality routine to execute
# the exception handler.

	.section	.gcc_except_table,"a",@progbits
	.align	4
.LLSDA1:
	.byte	0xff			# @LPStart format (omit)
	.byte	0xff			# @TType format (omit)
	.byte	0x1			# call-site format (uleb128)
	.uleb128 .LLSDACSE1-.LLSDACSB1	# Call-site table length
.LLSDACSB1:
	.uleb128 .LEHB0-.LFB1		# region 0 start
	.uleb128 .LEHE0-.LEHB0		# length
	.uleb128 .L1-.LFB1		# landing pad
	.uleb128 0			# action
.LLSDACSE1:

	.global	__gcc_personality_v0
#ifdef __PIC__
	# Build a position independent reference to the basic
	# personality function.
	.hidden	DW.ref.__gcc_personality_v0
	.weak	DW.ref.__gcc_personality_v0
	.section	.data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat
	.type	DW.ref.__gcc_personality_v0, @object
DW.ref.__gcc_personality_v0:
#ifndef __LP64__
	.align	4
	.size	DW.ref.__gcc_personality_v0, 4
	.long	__gcc_personality_v0
#else
	.align	8
	.size	DW.ref.__gcc_personality_v0, 8
	.quad	__gcc_personality_v0
#endif
#endif

#if defined __x86_64__ && defined __LP64__

# This entry point is used for the large model. With this entry point
# the upper 32 bits of %r10 hold the argument size and the lower 32
# bits hold the new stack frame size. There doesn't seem to be a way
# to know in the assembler code that we are assembling for the large
# model, and there doesn't seem to be a large model multilib anyhow.
# If one is developed, then the non-PIC code is probably OK since we
# will probably be close to the morestack code, but the PIC code
# almost certainly needs to be changed. FIXME.

	.text
	.global	__morestack_large_model
	.hidden	__morestack_large_model

#ifdef __ELF__
	.type	__morestack_large_model,@function
#endif

__morestack_large_model:
	.cfi_startproc
	_CET_ENDBR
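
	# Unpack %r10: the low 32 bits (the frame size) stay in %r10,
	# and the high 32 bits (the argument size) move to %r11,
	# matching the usual 64-bit __morestack convention above.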
	movq	%r10, %r11
	andl	$0xffffffff, %r10d
	sarq	$32, %r11
	jmp	__morestack

	.cfi_endproc
#ifdef __ELF__
	.size	__morestack_large_model, . - __morestack_large_model
#endif

#endif /* __x86_64__ && __LP64__ */

# Initialize the stack test value when the program starts or when a
# new thread starts. We don't know how large the main stack is, so we
# guess conservatively. We might be able to use getrlimit here.

	.text
	.global	__stack_split_initialize
	.hidden	__stack_split_initialize

#ifdef __ELF__
	.type	__stack_split_initialize, @function
#endif

__stack_split_initialize:
	_CET_ENDBR

#ifndef __x86_64__

	leal	-16000(%esp),%eax	# We should have at least 16K.
	movl	%eax,%gs:0x30
	subl	$4,%esp			# Align stack.
	pushl	$16000
	pushl	%esp
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	addl	$12,%esp
	ret

#else /* defined(__x86_64__) */

	leaq	-16000(%rsp),%rax	# We should have at least 16K.
	X86_64_SAVE_NEW_STACK_BOUNDARY (ax)
	subq	$8,%rsp			# Align stack.
	movq	%rsp,%rdi
	movq	$16000,%rsi
#ifdef __PIC__
	call	__generic_morestack_set_initial_sp@PLT
#else
	call	__generic_morestack_set_initial_sp
#endif
	addq	$8,%rsp
	ret

#endif /* defined(__x86_64__) */

#ifdef __ELF__
	.size	__stack_split_initialize, . - __stack_split_initialize
#endif

# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.

	.text
	.global	__morestack_get_guard
	.hidden	__morestack_get_guard

#ifdef __ELF__
	.type	__morestack_get_guard,@function
#endif

__morestack_get_guard:

#ifndef __x86_64__
	movl	%gs:0x30,%eax
#else
#ifdef __LP64__
	movq	%fs:0x70,%rax
#else
	movl	%fs:0x40,%eax
#endif
#endif
	ret

#ifdef __ELF__
	.size	__morestack_get_guard, . - __morestack_get_guard
#endif

# void __morestack_set_guard (void *) sets the stack guard.

	.global	__morestack_set_guard
	.hidden	__morestack_set_guard

#ifdef __ELF__
	.type	__morestack_set_guard,@function
#endif

__morestack_set_guard:

#ifndef __x86_64__
	movl	4(%esp),%eax
	movl	%eax,%gs:0x30
#else
	X86_64_SAVE_NEW_STACK_BOUNDARY (di)
#endif
	ret

#ifdef __ELF__
	.size	__morestack_set_guard, . - __morestack_set_guard
#endif

# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.

	.global	__morestack_make_guard
	.hidden	__morestack_make_guard

#ifdef __ELF__
	.type	__morestack_make_guard,@function
#endif

__morestack_make_guard:
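	# The guard is the stack address argument minus the size
	# argument, plus BACKOFF.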
#ifndef __x86_64__
	movl	4(%esp),%eax
	subl	8(%esp),%eax
	addl	$BACKOFF,%eax
#else
	subq	%rsi,%rdi
	addq	$BACKOFF,%rdi
	movq	%rdi,%rax
#endif
	ret

#ifdef __ELF__
	.size	__morestack_make_guard, . - __morestack_make_guard
#endif

# Make __stack_split_initialize a high priority constructor. FIXME:
# This is ELF specific.

#if HAVE_INITFINI_ARRAY_SUPPORT
	.section	.init_array.00000,"aw",@progbits
#else
	.section	.ctors.65535,"aw",@progbits
#endif

#ifndef __LP64__
	.align	4
	.long	__stack_split_initialize
	.long	__morestack_load_mmap
#else
	.align	8
	.quad	__stack_split_initialize
	.quad	__morestack_load_mmap
#endif

#ifdef __ELF__
	.section	.note.GNU-stack,"",@progbits
	.section	.note.GNU-split-stack,"",@progbits
	.section	.note.GNU-no-split-stack,"",@progbits
#endif