hppa-tdep.c

/* Target-dependent code for the HP PA-RISC architecture.

   Copyright (C) 1986-2022 Free Software Foundation, Inc.

   Contributed by the Center for Software Science at the
   University of Utah (pa-gdb-bugs@cs.utah.edu).

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "bfd.h"
#include "inferior.h"
#include "regcache.h"
#include "completer.h"
#include "osabi.h"
#include "arch-utils.h"
/* For argument passing to the inferior. */
#include "symtab.h"
#include "dis-asm.h"
#include "trad-frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "gdbtypes.h"
#include "objfiles.h"
#include "hppa-tdep.h"
#include <algorithm>

static bool hppa_debug = false;

/* Some local constants. */
static const int hppa32_num_regs = 128;
static const int hppa64_num_regs = 96;

/* We use the objfile->obj_private pointer for two things:
 * 1. An unwind table;
 *
 * 2. A pointer to any associated shared library object.
 *
 * #defines are used to help refer to these objects.
 */

/* Info about the unwind table associated with an object file.
 * This is hung off of the "objfile->obj_private" pointer, and
 * is allocated in the objfile's psymbol obstack. This allows
 * us to have unique unwind info for each executable and shared
 * library that we are debugging.
 */
struct hppa_unwind_info
{
  struct unwind_table_entry *table;  /* Pointer to unwind info */
  struct unwind_table_entry *cache;  /* Pointer to last entry we found */
  int last;                          /* Index of last entry */
};

struct hppa_objfile_private
{
  struct hppa_unwind_info *unwind_info = nullptr;  /* a pointer */
  struct so_list *so_info = nullptr;               /* a pointer */
  CORE_ADDR dp = 0;
  int dummy_call_sequence_reg = 0;
  CORE_ADDR dummy_call_sequence_addr = 0;
};

/* hppa-specific object data -- unwind and solib info.
   TODO/maybe: think about splitting this into two parts; the unwind data is
   common to all hppa targets, but is only used in this file; we can register
   that separately and make this static. The solib data is probably hpux-
   specific, so we can create a separate extern objfile_data that is registered
   by hppa-hpux-tdep.c and shared with pa64solib.c and somsolib.c. */
static const struct objfile_key<hppa_objfile_private> hppa_objfile_priv_data;

/* Get at various relevant fields of an instruction word. */
#define MASK_5 0x1f
#define MASK_11 0x7ff
#define MASK_14 0x3fff
#define MASK_21 0x1fffff

/* Sizes (in bytes) of the native unwind entries. */
#define UNWIND_ENTRY_SIZE 16
#define STUB_UNWIND_ENTRY_SIZE 8

/* Routines to extract various sized constants out of hppa
   instructions. */

/* This assumes that no garbage lies outside of the lower bits of
   value. */
static int
hppa_sign_extend (unsigned val, unsigned bits)
{
  return (int) (val >> (bits - 1) ? (-(1 << bits)) | val : val);
}

/* For many immediate values the sign bit is the low bit! */
static int
hppa_low_hppa_sign_extend (unsigned val, unsigned bits)
{
  return (int) ((val & 0x1 ? (-(1 << (bits - 1))) : 0) | val >> 1);
}

/* Extract the bits at positions between FROM and TO, using HP's numbering
   (MSB = 0). */
int
hppa_get_field (unsigned word, int from, int to)
{
  return ((word) >> (31 - (to)) & ((1 << ((to) - (from) + 1)) - 1));
}
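
/* Worked example (illustrative, not part of the original source): with HP's
   MSB-is-bit-0 numbering, hppa_get_field (0xdeadbeef, 0, 7) selects the
   most significant byte and returns 0xde, while hppa_get_field (0xdeadbeef,
   24, 31) returns the least significant byte, 0xef. */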

/* Extract the immediate field from a ld{bhw}s instruction. */
int
hppa_extract_5_load (unsigned word)
{
  return hppa_low_hppa_sign_extend (word >> 16 & MASK_5, 5);
}

/* Extract the immediate field from a break instruction. */
unsigned
hppa_extract_5r_store (unsigned word)
{
  return (word & MASK_5);
}

/* Extract the immediate field from a {sr}sm instruction. */
unsigned
hppa_extract_5R_store (unsigned word)
{
  return (word >> 16 & MASK_5);
}

/* Extract a 14 bit immediate field. */
int
hppa_extract_14 (unsigned word)
{
  return hppa_low_hppa_sign_extend (word & MASK_14, 14);
}
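
/* Worked example (illustrative, not part of the original source): the low
   14 bits use the low-sign-extended encoding, so assuming the standard
   "ldo disp(%sp),%sp" encoding with the displacement in the low 14 bits,
   hppa_extract_14 (0x37de0080) returns 64 and hppa_extract_14 (0x37de3f81)
   returns -64. */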

/* Extract a 21 bit constant. */
int
hppa_extract_21 (unsigned word)
{
  int val;
  word &= MASK_21;
  word <<= 11;
  val = hppa_get_field (word, 20, 20);
  val <<= 11;
  val |= hppa_get_field (word, 9, 19);
  val <<= 2;
  val |= hppa_get_field (word, 5, 6);
  val <<= 5;
  val |= hppa_get_field (word, 0, 4);
  val <<= 2;
  val |= hppa_get_field (word, 7, 8);
  return hppa_sign_extend (val, 21) << 11;
}

/* extract a 17 bit constant from branch instructions, returning the
   19 bit signed value. */
int
hppa_extract_17 (unsigned word)
{
  return hppa_sign_extend (hppa_get_field (word, 19, 28) |
                           hppa_get_field (word, 29, 29) << 10 |
                           hppa_get_field (word, 11, 15) << 11 |
                           (word & 0x1) << 16, 17) << 2;
}

CORE_ADDR
hppa_symbol_address(const char *sym)
{
  struct bound_minimal_symbol minsym;
  minsym = lookup_minimal_symbol (sym, NULL, NULL);
  if (minsym.minsym)
    return BMSYMBOL_VALUE_ADDRESS (minsym);
  else
    return (CORE_ADDR)-1;
}

/* Compare the start address for two unwind entries returning 1 if
   the first address is larger than the second, -1 if the second is
   larger than the first, and zero if they are equal. */
static int
compare_unwind_entries (const void *arg1, const void *arg2)
{
  const struct unwind_table_entry *a = (const struct unwind_table_entry *) arg1;
  const struct unwind_table_entry *b = (const struct unwind_table_entry *) arg2;
  if (a->region_start > b->region_start)
    return 1;
  else if (a->region_start < b->region_start)
    return -1;
  else
    return 0;
}

static void
record_text_segment_lowaddr (bfd *abfd, asection *section, void *data)
{
  if ((section->flags & (SEC_ALLOC | SEC_LOAD | SEC_READONLY))
      == (SEC_ALLOC | SEC_LOAD | SEC_READONLY))
    {
      bfd_vma value = section->vma - section->filepos;
      CORE_ADDR *low_text_segment_address = (CORE_ADDR *)data;
      if (value < *low_text_segment_address)
        *low_text_segment_address = value;
    }
}

static void
internalize_unwinds (struct objfile *objfile, struct unwind_table_entry *table,
                     asection *section, unsigned int entries,
                     size_t size, CORE_ADDR text_offset)
{
  /* We will read the unwind entries into temporary memory, then
     fill in the actual unwind table. */
  if (size > 0)
    {
      struct gdbarch *gdbarch = objfile->arch ();
      hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);
      unsigned long tmp;
      unsigned i;
      char *buf = (char *) alloca (size);
      CORE_ADDR low_text_segment_address;
      /* For ELF targets, the unwinds are supposed to
         be segment relative offsets instead of absolute addresses.
         Note that when loading a shared library (text_offset != 0) the
         unwinds are already relative to the text_offset that will be
         passed in. */
      if (tdep->is_elf && text_offset == 0)
        {
          low_text_segment_address = -1;
          bfd_map_over_sections (objfile->obfd,
                                 record_text_segment_lowaddr,
                                 &low_text_segment_address);
          text_offset = low_text_segment_address;
        }
      else if (tdep->solib_get_text_base)
        {
          text_offset = tdep->solib_get_text_base (objfile);
        }
      bfd_get_section_contents (objfile->obfd, section, buf, 0, size);
      /* Now internalize the information being careful to handle host/target
         endian issues. */
      for (i = 0; i < entries; i++)
        {
          table[i].region_start = bfd_get_32 (objfile->obfd,
                                              (bfd_byte *) buf);
          table[i].region_start += text_offset;
          buf += 4;
          table[i].region_end = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
          table[i].region_end += text_offset;
          buf += 4;
          tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
          buf += 4;
          table[i].Cannot_unwind = (tmp >> 31) & 0x1;
          table[i].Millicode = (tmp >> 30) & 0x1;
          table[i].Millicode_save_sr0 = (tmp >> 29) & 0x1;
          table[i].Region_description = (tmp >> 27) & 0x3;
          table[i].reserved = (tmp >> 26) & 0x1;
          table[i].Entry_SR = (tmp >> 25) & 0x1;
          table[i].Entry_FR = (tmp >> 21) & 0xf;
          table[i].Entry_GR = (tmp >> 16) & 0x1f;
          table[i].Args_stored = (tmp >> 15) & 0x1;
          table[i].Variable_Frame = (tmp >> 14) & 0x1;
          table[i].Separate_Package_Body = (tmp >> 13) & 0x1;
          table[i].Frame_Extension_Millicode = (tmp >> 12) & 0x1;
          table[i].Stack_Overflow_Check = (tmp >> 11) & 0x1;
          table[i].Two_Instruction_SP_Increment = (tmp >> 10) & 0x1;
          table[i].sr4export = (tmp >> 9) & 0x1;
          table[i].cxx_info = (tmp >> 8) & 0x1;
          table[i].cxx_try_catch = (tmp >> 7) & 0x1;
          table[i].sched_entry_seq = (tmp >> 6) & 0x1;
          table[i].reserved1 = (tmp >> 5) & 0x1;
          table[i].Save_SP = (tmp >> 4) & 0x1;
          table[i].Save_RP = (tmp >> 3) & 0x1;
          table[i].Save_MRP_in_frame = (tmp >> 2) & 0x1;
          table[i].save_r19 = (tmp >> 1) & 0x1;
          table[i].Cleanup_defined = tmp & 0x1;
          tmp = bfd_get_32 (objfile->obfd, (bfd_byte *) buf);
          buf += 4;
          table[i].MPE_XL_interrupt_marker = (tmp >> 31) & 0x1;
          table[i].HP_UX_interrupt_marker = (tmp >> 30) & 0x1;
          table[i].Large_frame = (tmp >> 29) & 0x1;
          table[i].alloca_frame = (tmp >> 28) & 0x1;
          table[i].reserved2 = (tmp >> 27) & 0x1;
          table[i].Total_frame_size = tmp & 0x7ffffff;
          /* Stub unwinds are handled elsewhere. */
          table[i].stub_unwind.stub_type = 0;
          table[i].stub_unwind.padding = 0;
        }
    }
}

/* Read in the backtrace information stored in the `$UNWIND_START$' section of
   the object file. This info is used mainly by find_unwind_entry() to find
   out the stack frame size and frame pointer used by procedures. We put
   everything on the psymbol obstack in the objfile so that it automatically
   gets freed when the objfile is destroyed. */
static void
read_unwind_info (struct objfile *objfile)
{
  asection *unwind_sec, *stub_unwind_sec;
  size_t unwind_size, stub_unwind_size, total_size;
  unsigned index, unwind_entries;
  unsigned stub_entries, total_entries;
  CORE_ADDR text_offset;
  struct hppa_unwind_info *ui;
  struct hppa_objfile_private *obj_private;
  text_offset = objfile->text_section_offset ();
  ui = (struct hppa_unwind_info *) obstack_alloc (&objfile->objfile_obstack,
                                                  sizeof (struct hppa_unwind_info));
  ui->table = NULL;
  ui->cache = NULL;
  ui->last = -1;
  /* For reasons unknown the HP PA64 tools generate multiple unwinder
     sections in a single executable. So we just iterate over every
     section in the BFD looking for unwinder sections instead of trying
     to do a lookup with bfd_get_section_by_name.
     First determine the total size of the unwind tables so that we
     can allocate memory in a nice big hunk. */
  total_entries = 0;
  for (unwind_sec = objfile->obfd->sections;
       unwind_sec;
       unwind_sec = unwind_sec->next)
    {
      if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
          || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
        {
          unwind_size = bfd_section_size (unwind_sec);
          unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;
          total_entries += unwind_entries;
        }
    }
  /* Now compute the size of the stub unwinds. Note the ELF tools do not
     use stub unwinds at the current time. */
  stub_unwind_sec = bfd_get_section_by_name (objfile->obfd, "$UNWIND_END$");
  if (stub_unwind_sec)
    {
      stub_unwind_size = bfd_section_size (stub_unwind_sec);
      stub_entries = stub_unwind_size / STUB_UNWIND_ENTRY_SIZE;
    }
  else
    {
      stub_unwind_size = 0;
      stub_entries = 0;
    }
  /* Compute total number of unwind entries and their total size. */
  total_entries += stub_entries;
  total_size = total_entries * sizeof (struct unwind_table_entry);
  /* Allocate memory for the unwind table. */
  ui->table = (struct unwind_table_entry *)
    obstack_alloc (&objfile->objfile_obstack, total_size);
  ui->last = total_entries - 1;
  /* Now read in each unwind section and internalize the standard unwind
     entries. */
  index = 0;
  for (unwind_sec = objfile->obfd->sections;
       unwind_sec;
       unwind_sec = unwind_sec->next)
    {
      if (strcmp (unwind_sec->name, "$UNWIND_START$") == 0
          || strcmp (unwind_sec->name, ".PARISC.unwind") == 0)
        {
          unwind_size = bfd_section_size (unwind_sec);
          unwind_entries = unwind_size / UNWIND_ENTRY_SIZE;
          internalize_unwinds (objfile, &ui->table[index], unwind_sec,
                               unwind_entries, unwind_size, text_offset);
          index += unwind_entries;
        }
    }
  /* Now read in and internalize the stub unwind entries. */
  if (stub_unwind_size > 0)
    {
      unsigned int i;
      char *buf = (char *) alloca (stub_unwind_size);
      /* Read in the stub unwind entries. */
      bfd_get_section_contents (objfile->obfd, stub_unwind_sec, buf,
                                0, stub_unwind_size);
      /* Now convert them into regular unwind entries. */
      for (i = 0; i < stub_entries; i++, index++)
        {
          /* Clear out the next unwind entry. */
          memset (&ui->table[index], 0, sizeof (struct unwind_table_entry));
          /* Convert offset & size into region_start and region_end.
             Stuff away the stub type into "reserved" fields. */
          ui->table[index].region_start = bfd_get_32 (objfile->obfd,
                                                      (bfd_byte *) buf);
          ui->table[index].region_start += text_offset;
          buf += 4;
          ui->table[index].stub_unwind.stub_type = bfd_get_8 (objfile->obfd,
                                                              (bfd_byte *) buf);
          buf += 2;
          ui->table[index].region_end
            = ui->table[index].region_start + 4 *
              (bfd_get_16 (objfile->obfd, (bfd_byte *) buf) - 1);
          buf += 2;
        }
    }
  /* Unwind table needs to be kept sorted. */
  qsort (ui->table, total_entries, sizeof (struct unwind_table_entry),
         compare_unwind_entries);
  /* Keep a pointer to the unwind information. */
  obj_private = hppa_objfile_priv_data.get (objfile);
  if (obj_private == NULL)
    obj_private = hppa_objfile_priv_data.emplace (objfile);
  obj_private->unwind_info = ui;
}

/* Lookup the unwind (stack backtrace) info for the given PC. We search all
   of the objfiles seeking the unwind table entry for this PC. Each objfile
   contains a sorted list of struct unwind_table_entry. Since we do a binary
   search of the unwind tables, we depend upon them to be sorted. */
struct unwind_table_entry *
find_unwind_entry (CORE_ADDR pc)
{
  int first, middle, last;
  if (hppa_debug)
    gdb_printf (gdb_stdlog, "{ find_unwind_entry %s -> ",
                hex_string (pc));
  /* A function at address 0? Not in HP-UX! */
  if (pc == (CORE_ADDR) 0)
    {
      if (hppa_debug)
        gdb_printf (gdb_stdlog, "NULL }\n");
      return NULL;
    }
  for (objfile *objfile : current_program_space->objfiles ())
    {
      struct hppa_unwind_info *ui;
      ui = NULL;
      struct hppa_objfile_private *priv = hppa_objfile_priv_data.get (objfile);
      if (priv)
        ui = priv->unwind_info;
      if (!ui)
        {
          read_unwind_info (objfile);
          priv = hppa_objfile_priv_data.get (objfile);
          if (priv == NULL)
            error (_("Internal error reading unwind information."));
          ui = priv->unwind_info;
        }
      /* First, check the cache. */
      if (ui->cache
          && pc >= ui->cache->region_start
          && pc <= ui->cache->region_end)
        {
          if (hppa_debug)
            gdb_printf (gdb_stdlog, "%s (cached) }\n",
                        hex_string ((uintptr_t) ui->cache));
          return ui->cache;
        }
      /* Not in the cache, do a binary search. */
      first = 0;
      last = ui->last;
      while (first <= last)
        {
          middle = (first + last) / 2;
          if (pc >= ui->table[middle].region_start
              && pc <= ui->table[middle].region_end)
            {
              ui->cache = &ui->table[middle];
              if (hppa_debug)
                gdb_printf (gdb_stdlog, "%s }\n",
                            hex_string ((uintptr_t) ui->cache));
              return &ui->table[middle];
            }
          if (pc < ui->table[middle].region_start)
            last = middle - 1;
          else
            first = middle + 1;
        }
    }
  if (hppa_debug)
    gdb_printf (gdb_stdlog, "NULL (not found) }\n");
  return NULL;
}

/* Implement the stack_frame_destroyed_p gdbarch method.
   The epilogue is defined here as either the `bv' instruction itself
   or an instruction which destroys the function's stack frame.
   We do not assume that the epilogue is at the end of a function as we can
   also have return sequences in the middle of a function. */
static int
hppa_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  unsigned long status;
  unsigned int inst;
  gdb_byte buf[4];
  status = target_read_memory (pc, buf, 4);
  if (status != 0)
    return 0;
  inst = extract_unsigned_integer (buf, 4, byte_order);
  /* The most common way to perform a stack adjustment is ldo X(sp),sp.
     We are destroying a stack frame if the offset is negative. */
  if ((inst & 0xffffc000) == 0x37de0000
      && hppa_extract_14 (inst) < 0)
    return 1;
  /* ldw,mb D(sp),X or ldd,mb D(sp),X */
  if (((inst & 0x0fc010e0) == 0x0fc010e0
       || (inst & 0x0fc010e0) == 0x0fc010e0)
      && hppa_extract_14 (inst) < 0)
    return 1;
  /* bv %r0(%rp) or bv,n %r0(%rp) */
  if (inst == 0xe840c000 || inst == 0xe840c002)
    return 1;
  return 0;
}
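
/* Illustrative note (not part of the original source): the first test above
   matches frame-destroying stack adjustments such as 0x37de3f81
   ("ldo -64(%sp),%sp", assuming the standard low-sign-extended displacement
   encoding), for which hppa_extract_14 returns -64. */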

constexpr gdb_byte hppa_break_insn[] = {0x00, 0x01, 0x00, 0x04};
typedef BP_MANIPULATION (hppa_break_insn) hppa_breakpoint;

/* Return the name of a register. */
static const char *
hppa32_register_name (struct gdbarch *gdbarch, int i)
{
  static const char *names[] = {
    "flags", "r1", "rp", "r3",
    "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11",
    "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19",
    "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "dp",
    "ret0", "ret1", "sp", "r31",
    "sar", "pcoqh", "pcsqh", "pcoqt",
    "pcsqt", "eiem", "iir", "isr",
    "ior", "ipsw", "goto", "sr4",
    "sr0", "sr1", "sr2", "sr3",
    "sr5", "sr6", "sr7", "cr0",
    "cr8", "cr9", "ccr", "cr12",
    "cr13", "cr24", "cr25", "cr26",
    "mpsfu_high", "mpsfu_low", "mpsfu_ovflo", "pad",
    "fpsr", "fpe1", "fpe2", "fpe3",
    "fpe4", "fpe5", "fpe6", "fpe7",
    "fr4", "fr4R", "fr5", "fr5R",
    "fr6", "fr6R", "fr7", "fr7R",
    "fr8", "fr8R", "fr9", "fr9R",
    "fr10", "fr10R", "fr11", "fr11R",
    "fr12", "fr12R", "fr13", "fr13R",
    "fr14", "fr14R", "fr15", "fr15R",
    "fr16", "fr16R", "fr17", "fr17R",
    "fr18", "fr18R", "fr19", "fr19R",
    "fr20", "fr20R", "fr21", "fr21R",
    "fr22", "fr22R", "fr23", "fr23R",
    "fr24", "fr24R", "fr25", "fr25R",
    "fr26", "fr26R", "fr27", "fr27R",
    "fr28", "fr28R", "fr29", "fr29R",
    "fr30", "fr30R", "fr31", "fr31R"
  };
  if (i < 0 || i >= (sizeof (names) / sizeof (*names)))
    return NULL;
  else
    return names[i];
}

static const char *
hppa64_register_name (struct gdbarch *gdbarch, int i)
{
  static const char *names[] = {
    "flags", "r1", "rp", "r3",
    "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11",
    "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19",
    "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "dp",
    "ret0", "ret1", "sp", "r31",
    "sar", "pcoqh", "pcsqh", "pcoqt",
    "pcsqt", "eiem", "iir", "isr",
    "ior", "ipsw", "goto", "sr4",
    "sr0", "sr1", "sr2", "sr3",
    "sr5", "sr6", "sr7", "cr0",
    "cr8", "cr9", "ccr", "cr12",
    "cr13", "cr24", "cr25", "cr26",
    "mpsfu_high", "mpsfu_low", "mpsfu_ovflo", "pad",
    "fpsr", "fpe1", "fpe2", "fpe3",
    "fr4", "fr5", "fr6", "fr7",
    "fr8", "fr9", "fr10", "fr11",
    "fr12", "fr13", "fr14", "fr15",
    "fr16", "fr17", "fr18", "fr19",
    "fr20", "fr21", "fr22", "fr23",
    "fr24", "fr25", "fr26", "fr27",
    "fr28", "fr29", "fr30", "fr31"
  };
  if (i < 0 || i >= (sizeof (names) / sizeof (*names)))
    return NULL;
  else
    return names[i];
}

/* Map dwarf DBX register numbers to GDB register numbers. */
static int
hppa64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* The general registers and the sar are the same in both sets. */
  if (reg >= 0 && reg <= 32)
    return reg;
  /* fr4-fr31 are mapped from 72 in steps of 2. */
  if (reg >= 72 && reg < 72 + 28 * 2 && !(reg & 1))
    return HPPA64_FP4_REGNUM + (reg - 72) / 2;
  return -1;
}
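
/* Worked example (illustrative, not part of the original source): DWARF
   register 72 maps to HPPA64_FP4_REGNUM (fr4), 74 to fr5, and 80 to
   HPPA64_FP4_REGNUM + 4 (fr8); odd numbers in that range and any other
   unmatched value yield -1. */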

/* This function pushes a stack frame with arguments as part of the
   inferior function calling mechanism.
   This is the version of the function for the 32-bit PA machines, in
   which later arguments appear at lower addresses. (The stack always
   grows towards higher addresses.)
   We simply allocate the appropriate amount of stack space and put
   arguments into their proper slots. */
static CORE_ADDR
hppa32_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                        struct regcache *regcache, CORE_ADDR bp_addr,
                        int nargs, struct value **args, CORE_ADDR sp,
                        function_call_return_method return_method,
                        CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* Stack base address at which any pass-by-reference parameters are
     stored. */
  CORE_ADDR struct_end = 0;
  /* Stack base address at which the first parameter is stored. */
  CORE_ADDR param_end = 0;
  /* Two passes. First pass computes the location of everything,
     second pass writes the bytes out. */
  int write_pass;
  /* Global pointer (r19) of the function we are trying to call. */
  CORE_ADDR gp;
  hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  for (write_pass = 0; write_pass < 2; write_pass++)
    {
      CORE_ADDR struct_ptr = 0;
      /* The first parameter goes into sp-36, each stack slot is 4 bytes.
         struct_ptr is adjusted for each argument below, so the first
         argument will end up at sp-36. */
      CORE_ADDR param_ptr = 32;
      int i;
      int small_struct = 0;
      for (i = 0; i < nargs; i++)
        {
          struct value *arg = args[i];
          struct type *type = check_typedef (value_type (arg));
          /* The corresponding parameter that is pushed onto the
             stack, and [possibly] passed in a register. */
          gdb_byte param_val[8];
          int param_len;
          memset (param_val, 0, sizeof param_val);
          if (TYPE_LENGTH (type) > 8)
            {
              /* Large parameter, pass by reference. Store the value
                 in "struct" area and then pass its address. */
              param_len = 4;
              struct_ptr += align_up (TYPE_LENGTH (type), 8);
              if (write_pass)
                write_memory (struct_end - struct_ptr,
                              value_contents (arg).data (), TYPE_LENGTH (type));
              store_unsigned_integer (param_val, 4, byte_order,
                                      struct_end - struct_ptr);
            }
          else if (type->code () == TYPE_CODE_INT
                   || type->code () == TYPE_CODE_ENUM)
            {
              /* Integer value store, right aligned. "unpack_long"
                 takes care of any sign-extension problems. */
              param_len = align_up (TYPE_LENGTH (type), 4);
              store_unsigned_integer
                (param_val, param_len, byte_order,
                 unpack_long (type, value_contents (arg).data ()));
            }
          else if (type->code () == TYPE_CODE_FLT)
            {
              /* Floating point value store, right aligned. */
              param_len = align_up (TYPE_LENGTH (type), 4);
              memcpy (param_val, value_contents (arg).data (), param_len);
            }
          else
            {
              param_len = align_up (TYPE_LENGTH (type), 4);
              /* Small struct values are stored right-aligned. */
              memcpy (param_val + param_len - TYPE_LENGTH (type),
                      value_contents (arg).data (), TYPE_LENGTH (type));
              /* Structures of size 5, 6 and 7 bytes are special in that
                 the higher-ordered word is stored in the lower-ordered
                 argument, and even though it is an 8-byte quantity the
                 registers need not be 8-byte aligned. */
              if (param_len > 4 && param_len < 8)
                small_struct = 1;
            }
          param_ptr += param_len;
          if (param_len == 8 && !small_struct)
            param_ptr = align_up (param_ptr, 8);
          /* First 4 non-FP arguments are passed in gr26-gr23.
             First 4 32-bit FP arguments are passed in fr4L-fr7L.
             First 2 64-bit FP arguments are passed in fr5 and fr7.
             The rest go on the stack, starting at sp-36, towards lower
             addresses. 8-byte arguments must be aligned to an 8-byte
             stack boundary. */
          if (write_pass)
            {
              write_memory (param_end - param_ptr, param_val, param_len);
              /* There are some cases when we don't know the type
                 expected by the callee (e.g. for variadic functions), so
                 pass the parameters in both general and fp regs. */
              if (param_ptr <= 48)
                {
                  int grreg = 26 - (param_ptr - 36) / 4;
                  int fpLreg = 72 + (param_ptr - 36) / 4 * 2;
                  int fpreg = 74 + (param_ptr - 32) / 8 * 4;
                  regcache->cooked_write (grreg, param_val);
                  regcache->cooked_write (fpLreg, param_val);
                  if (param_len > 4)
                    {
                      regcache->cooked_write (grreg + 1, param_val + 4);
                      regcache->cooked_write (fpreg, param_val);
                      regcache->cooked_write (fpreg + 1, param_val + 4);
                    }
                }
            }
        }
      /* Update the various stack pointers. */
      if (!write_pass)
        {
          struct_end = sp + align_up (struct_ptr, 64);
          /* PARAM_PTR already accounts for all the arguments passed
             by the user. However, the ABI mandates minimum stack
             space allocations for outgoing arguments. The ABI also
             mandates minimum stack alignments which we must
             preserve. */
          param_end = struct_end + align_up (param_ptr, 64);
        }
    }
  /* If a structure has to be returned, set up register 28 to hold its
     address. */
  if (return_method == return_method_struct)
    regcache_cooked_write_unsigned (regcache, 28, struct_addr);
  gp = tdep->find_global_pointer (gdbarch, function);
  if (gp != 0)
    regcache_cooked_write_unsigned (regcache, 19, gp);
  /* Set the return address. */
  if (!gdbarch_push_dummy_code_p (gdbarch))
    regcache_cooked_write_unsigned (regcache, HPPA_RP_REGNUM, bp_addr);
  /* Update the Stack Pointer. */
  regcache_cooked_write_unsigned (regcache, HPPA_SP_REGNUM, param_end);
  return param_end;
}
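
/* Worked example (illustrative, not part of the original source): for a
   single 4-byte integer argument, param_ptr advances from 32 to 36, so the
   value is written at param_end - 36 and, because param_ptr <= 48, it is
   also copied into GR 26 and into raw register 72 (the left half of fr4 in
   the 32-bit register layout above). */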

/* The 64-bit PA-RISC calling conventions are documented in "64-Bit
   Runtime Architecture for PA-RISC 2.0", which is distributed as part
   of the HP-UX Software Transition Kit (STK). This implementation
   is based on version 3.3, dated October 6, 1997. */

/* Check whether TYPE is an "Integral or Pointer Scalar Type". */
static int
hppa64_integral_or_pointer_p (const struct type *type)
{
  switch (type->code ())
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
      {
        int len = TYPE_LENGTH (type);
        return (len == 1 || len == 2 || len == 4 || len == 8);
      }
    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
      return (TYPE_LENGTH (type) == 8);
    default:
      break;
    }
  return 0;
}

/* Check whether TYPE is a "Floating Scalar Type". */
static int
hppa64_floating_p (const struct type *type)
{
  switch (type->code ())
    {
    case TYPE_CODE_FLT:
      {
        int len = TYPE_LENGTH (type);
        return (len == 4 || len == 8 || len == 16);
      }
    default:
      break;
    }
  return 0;
}

/* If CODE points to a function entry address, try to look up the corresponding
   function descriptor and return its address instead. If CODE is not a
   function entry address, then just return it unchanged. */
static CORE_ADDR
hppa64_convert_code_addr_to_fptr (struct gdbarch *gdbarch, CORE_ADDR code)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct obj_section *sec, *opd;
  sec = find_pc_section (code);
  if (!sec)
    return code;
  /* If CODE is in a data section, assume it's already a fptr. */
  if (!(sec->the_bfd_section->flags & SEC_CODE))
    return code;
  ALL_OBJFILE_OSECTIONS (sec->objfile, opd)
    {
      if (strcmp (opd->the_bfd_section->name, ".opd") == 0)
        break;
    }
  if (opd < sec->objfile->sections_end)
    {
      for (CORE_ADDR addr = opd->addr (); addr < opd->endaddr (); addr += 2 * 8)
        {
          ULONGEST opdaddr;
          gdb_byte tmp[8];
          if (target_read_memory (addr, tmp, sizeof (tmp)))
            break;
          opdaddr = extract_unsigned_integer (tmp, sizeof (tmp), byte_order);
          if (opdaddr == code)
            return addr - 16;
        }
    }
  return code;
}

static CORE_ADDR
hppa64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                        struct regcache *regcache, CORE_ADDR bp_addr,
                        int nargs, struct value **args, CORE_ADDR sp,
                        function_call_return_method return_method,
                        CORE_ADDR struct_addr)
{
  hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int i, offset = 0;
  CORE_ADDR gp;
  /* "The outgoing parameter area [...] must be aligned at a 16-byte
     boundary." */
  sp = align_up (sp, 16);
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = value_type (arg);
      int len = TYPE_LENGTH (type);
      const bfd_byte *valbuf;
      bfd_byte fptrbuf[8];
      int regnum;
      /* "Each parameter begins on a 64-bit (8-byte) boundary." */
      offset = align_up (offset, 8);
      if (hppa64_integral_or_pointer_p (type))
        {
          /* "Integral scalar parameters smaller than 64 bits are
             padded on the left (i.e., the value is in the
             least-significant bits of the 64-bit storage unit, and
             the high-order bits are undefined)." Therefore we can
             safely sign-extend them. */
          if (len < 8)
            {
              arg = value_cast (builtin_type (gdbarch)->builtin_int64, arg);
              len = 8;
            }
        }
      else if (hppa64_floating_p (type))
        {
          if (len > 8)
            {
              /* "Quad-precision (128-bit) floating-point scalar
                 parameters are aligned on a 16-byte boundary." */
              offset = align_up (offset, 16);
              /* "Double-extended- and quad-precision floating-point
                 parameters within the first 64 bytes of the parameter
                 list are always passed in general registers." */
            }
          else
            {
              if (len == 4)
                {
                  /* "Single-precision (32-bit) floating-point scalar
                     parameters are padded on the left with 32 bits of
                     garbage (i.e., the floating-point value is in the
                     least-significant 32 bits of a 64-bit storage
                     unit)." */
                  offset += 4;
                }
              /* "Single- and double-precision floating-point
                 parameters in this area are passed according to the
                 available formal parameter information in a function
                 prototype. [...] If no prototype is in scope,
                 floating-point parameters must be passed both in the
                 corresponding general registers and in the
                 corresponding floating-point registers." */
              regnum = HPPA64_FP4_REGNUM + offset / 8;
              if (regnum < HPPA64_FP4_REGNUM + 8)
                {
                  /* "Single-precision floating-point parameters, when
                     passed in floating-point registers, are passed in
                     the right halves of the floating point registers;
                     the left halves are unused." */
                  regcache->cooked_write_part (regnum, offset % 8, len,
                                               value_contents (arg).data ());
                }
            }
        }
      else
        {
          if (len > 8)
            {
              /* "Aggregates larger than 8 bytes are aligned on a
                 16-byte boundary, possibly leaving an unused argument
                 slot, which is filled with garbage. If necessary,
                 they are padded on the right (with garbage), to a
                 multiple of 8 bytes." */
              offset = align_up (offset, 16);
            }
        }
      /* If we are passing a function pointer, make sure we pass a function
         descriptor instead of the function entry address. */
      if (type->code () == TYPE_CODE_PTR
          && TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_FUNC)
        {
          ULONGEST codeptr, fptr;
          codeptr = unpack_long (type, value_contents (arg).data ());
          fptr = hppa64_convert_code_addr_to_fptr (gdbarch, codeptr);
          store_unsigned_integer (fptrbuf, TYPE_LENGTH (type), byte_order,
                                  fptr);
          valbuf = fptrbuf;
        }
      else
        {
          valbuf = value_contents (arg).data ();
        }
      /* Always store the argument in memory. */
      write_memory (sp + offset, valbuf, len);
      regnum = HPPA_ARG0_REGNUM - offset / 8;
      while (regnum > HPPA_ARG0_REGNUM - 8 && len > 0)
        {
          regcache->cooked_write_part (regnum, offset % 8, std::min (len, 8),
                                       valbuf);
          offset += std::min (len, 8);
          valbuf += std::min (len, 8);
          len -= std::min (len, 8);
          regnum--;
        }
      offset += len;
    }
  /* Set up GR29 (%ret1) to hold the argument pointer (ap). */
  regcache_cooked_write_unsigned (regcache, HPPA_RET1_REGNUM, sp + 64);
  /* Allocate the outgoing parameter area. Make sure the outgoing
     parameter area is a multiple of 16 bytes in length. */
  sp += std::max (align_up (offset, 16), (ULONGEST) 64);
  /* Allocate 32 bytes of scratch space. The documentation doesn't
     mention this, but it seems to be needed. */
  sp += 32;
  /* Allocate the frame marker area. */
  sp += 16;
  /* If a structure has to be returned, set up GR 28 (%ret0) to hold
     its address. */
  if (return_method == return_method_struct)
    regcache_cooked_write_unsigned (regcache, HPPA_RET0_REGNUM, struct_addr);
  /* Set up GR27 (%dp) to hold the global pointer (gp). */
  gp = tdep->find_global_pointer (gdbarch, function);
  if (gp != 0)
    regcache_cooked_write_unsigned (regcache, HPPA_DP_REGNUM, gp);
  /* Set up GR2 (%rp) to hold the return pointer (rp). */
  if (!gdbarch_push_dummy_code_p (gdbarch))
    regcache_cooked_write_unsigned (regcache, HPPA_RP_REGNUM, bp_addr);
  /* Set up GR30 to hold the stack pointer (sp). */
  regcache_cooked_write_unsigned (regcache, HPPA_SP_REGNUM, sp);
  return sp;
}

/* Handle 32/64-bit struct return conventions. */
static enum return_value_convention
hppa32_return_value (struct gdbarch *gdbarch, struct value *function,
                     struct type *type, struct regcache *regcache,
                     gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (TYPE_LENGTH (type) <= 2 * 4)
    {
      /* The value always lives in the right hand end of the register
         (or register pair)? */
      int b;
      int reg = type->code () == TYPE_CODE_FLT ? HPPA_FP4_REGNUM : 28;
      int part = TYPE_LENGTH (type) % 4;
      /* The left hand register contains only part of the value,
         transfer that first so that the rest can be xfered as entire
         4-byte registers. */
      if (part > 0)
        {
          if (readbuf != NULL)
            regcache->cooked_read_part (reg, 4 - part, part, readbuf);
          if (writebuf != NULL)
            regcache->cooked_write_part (reg, 4 - part, part, writebuf);
          reg++;
        }
      /* Now transfer the remaining register values. */
      for (b = part; b < TYPE_LENGTH (type); b += 4)
        {
          if (readbuf != NULL)
            regcache->cooked_read (reg, readbuf + b);
          if (writebuf != NULL)
            regcache->cooked_write (reg, writebuf + b);
          reg++;
        }
      return RETURN_VALUE_REGISTER_CONVENTION;
    }
  else
    return RETURN_VALUE_STRUCT_CONVENTION;
}

static enum return_value_convention
hppa64_return_value (struct gdbarch *gdbarch, struct value *function,
                     struct type *type, struct regcache *regcache,
                     gdb_byte *readbuf, const gdb_byte *writebuf)
{
  int len = TYPE_LENGTH (type);
  int regnum, offset;
  if (len > 16)
    {
      /* All return values larger than 128 bits must be aggregate
         return values. */
      gdb_assert (!hppa64_integral_or_pointer_p (type));
      gdb_assert (!hppa64_floating_p (type));
      /* "Aggregate return values larger than 128 bits are returned in
         a buffer allocated by the caller. The address of the buffer
         must be passed in GR 28." */
      return RETURN_VALUE_STRUCT_CONVENTION;
    }
  if (hppa64_integral_or_pointer_p (type))
    {
      /* "Integral return values are returned in GR 28. Values
         smaller than 64 bits are padded on the left (with garbage)." */
      regnum = HPPA_RET0_REGNUM;
      offset = 8 - len;
    }
  else if (hppa64_floating_p (type))
    {
      if (len > 8)
        {
          /* "Double-extended- and quad-precision floating-point
             values are returned in GRs 28 and 29. The sign,
             exponent, and most-significant bits of the mantissa are
             returned in GR 28; the least-significant bits of the
             mantissa are passed in GR 29. For double-extended
             precision values, GR 29 is padded on the right with 48
             bits of garbage." */
          regnum = HPPA_RET0_REGNUM;
          offset = 0;
        }
      else
        {
          /* "Single-precision and double-precision floating-point
             return values are returned in FR 4R (single precision) or
             FR 4 (double-precision)." */
          regnum = HPPA64_FP4_REGNUM;
          offset = 8 - len;
        }
    }
  else
    {
      /* "Aggregate return values up to 64 bits in size are returned
         in GR 28. Aggregates smaller than 64 bits are left aligned
         in the register; the pad bits on the right are undefined."
         "Aggregate return values between 65 and 128 bits are returned
         in GRs 28 and 29. The first 64 bits are placed in GR 28, and
         the remaining bits are placed, left aligned, in GR 29. The
         pad bits on the right of GR 29 (if any) are undefined." */
      regnum = HPPA_RET0_REGNUM;
      offset = 0;
    }
  if (readbuf)
    {
      while (len > 0)
        {
          regcache->cooked_read_part (regnum, offset, std::min (len, 8),
                                      readbuf);
          readbuf += std::min (len, 8);
          len -= std::min (len, 8);
          regnum++;
        }
    }
  if (writebuf)
    {
      while (len > 0)
        {
          regcache->cooked_write_part (regnum, offset, std::min (len, 8),
                                       writebuf);
          writebuf += std::min (len, 8);
          len -= std::min (len, 8);
          regnum++;
        }
    }
  return RETURN_VALUE_REGISTER_CONVENTION;
}
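
/* Worked example (illustrative, not part of the original source): a 4-byte
   int return value is integral, so regnum is HPPA_RET0_REGNUM and offset is
   8 - 4 = 4; the value is therefore transferred through the low-order four
   bytes of GR 28. */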

static CORE_ADDR
hppa32_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
                                   struct target_ops *targ)
{
  if (addr & 2)
    {
      struct type *func_ptr_type = builtin_type (gdbarch)->builtin_func_ptr;
      CORE_ADDR plabel = addr & ~3;
      return read_memory_typed_address (plabel, func_ptr_type);
    }
  return addr;
}
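
/* Illustrative note (not part of the original source): a function pointer
   with bit 1 set is treated as a plabel; for example, given addr 0x40001002
   the descriptor at 0x40001000 is read and the function address stored there
   is returned instead. */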

static CORE_ADDR
hppa32_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  /* HP frames are 64-byte (or cache line) aligned (yes that's _byte_
     and not _bit_)! */
  return align_up (addr, 64);
}

/* Force all frames to 16-byte alignment. Better safe than sorry. */
static CORE_ADDR
hppa64_frame_align (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  /* Just always 16-byte align. */
  return align_up (addr, 16);
}

static CORE_ADDR
hppa_read_pc (readable_regcache *regcache)
{
  ULONGEST ipsw;
  ULONGEST pc;
  regcache->cooked_read (HPPA_IPSW_REGNUM, &ipsw);
  regcache->cooked_read (HPPA_PCOQ_HEAD_REGNUM, &pc);
  /* If the current instruction is nullified, then we are effectively
     still executing the previous instruction. Pretend we are still
     there. This is needed when single stepping; if the nullified
     instruction is on a different line, we don't want GDB to think
     we've stepped onto that line. */
  if (ipsw & 0x00200000)
    pc -= 4;
  return pc & ~0x3;
}

void
hppa_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  regcache_cooked_write_unsigned (regcache, HPPA_PCOQ_HEAD_REGNUM, pc);
  regcache_cooked_write_unsigned (regcache, HPPA_PCOQ_TAIL_REGNUM, pc + 4);
}

/* For the given instruction (INST), return any adjustment it makes
   to the stack pointer or zero for no adjustment.
   This only handles instructions commonly found in prologues. */
static int
prologue_inst_adjust_sp (unsigned long inst)
{
  /* This must persist across calls. */
  static int save_high21;
  /* The most common way to perform a stack adjustment ldo X(sp),sp */
  if ((inst & 0xffffc000) == 0x37de0000)
    return hppa_extract_14 (inst);
  /* stwm X,D(sp) */
  if ((inst & 0xffe00000) == 0x6fc00000)
    return hppa_extract_14 (inst);
  /* std,ma X,D(sp) */
  if ((inst & 0xffe00008) == 0x73c00008)
    return (inst & 0x1 ? -(1 << 13) : 0) | (((inst >> 4) & 0x3ff) << 3);
  /* addil high21,%r30; ldo low11,(%r1),%r30
     save high bits in save_high21 for later use. */
  if ((inst & 0xffe00000) == 0x2bc00000)
    {
      save_high21 = hppa_extract_21 (inst);
      return 0;
    }
  if ((inst & 0xffff0000) == 0x343e0000)
    return save_high21 + hppa_extract_14 (inst);
  /* fstws as used by the HP compilers. */
  if ((inst & 0xffffffe0) == 0x2fd01220)
    return hppa_extract_5_load (inst);
  /* No adjustment. */
  return 0;
}
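
/* Worked example (illustrative, not part of the original source): the
   prologue instruction 0x37de0080 ("ldo 64(%sp),%sp", assuming the standard
   low-sign-extended displacement encoding) matches the first test and
   returns 64, the amount by which the prologue grows the stack frame. */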

/* Return nonzero if INST is a branch of some kind, else return zero. */
static int
is_branch (unsigned long inst)
{
  switch (inst >> 26)
    {
    case 0x20:
    case 0x21:
    case 0x22:
    case 0x23:
    case 0x27:
    case 0x28:
    case 0x29:
    case 0x2a:
    case 0x2b:
    case 0x2f:
    case 0x30:
    case 0x31:
    case 0x32:
    case 0x33:
    case 0x38:
    case 0x39:
    case 0x3a:
    case 0x3b:
      return 1;
    default:
      return 0;
    }
}
  1176. /* Return the register number for a GR which is saved by INST or
  1177. zero if INST does not save a GR.
  1178. Referenced from:
  1179. parisc 1.1:
  1180. https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
  1181. parisc 2.0:
  1182. https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
  1183. According to Table 6-5 of Chapter 6 (Memory Reference Instructions)
  1184. on page 106 in parisc 2.0, all instructions for storing values from
  1185. the general registers are:
1186. Store: stb, sth, stw, std (according to Chapter 7, they
1187. appear in both "inst >> 26" and "inst >> 6").
1188. Store Absolute: stwa, stda (according to Chapter 7, they are only
1189. in "inst >> 6").
1190. Store Bytes: stby, stdby (according to Chapter 7, they are
1191. only in "inst >> 6").
  1192. For (inst >> 26), according to Chapter 7:
  1193. The effective memory reference address is formed by the addition
  1194. of an immediate displacement to a base value.
  1195. - stb: 0x18, store a byte from a general register.
  1196. - sth: 0x19, store a halfword from a general register.
  1197. - stw: 0x1a, store a word from a general register.
  1198. - stwm: 0x1b, store a word from a general register and perform base
  1199. register modification (2.0 will still treat it as stw).
  1200. - std: 0x1c, store a doubleword from a general register (2.0 only).
  1201. - stw: 0x1f, store a word from a general register (2.0 only).
  1202. For (inst >> 6) when ((inst >> 26) == 0x03), according to Chapter 7:
  1203. The effective memory reference address is formed by the addition
  1204. of an index value to a base value specified in the instruction.
  1205. - stb: 0x08, store a byte from a general register (1.1 calls stbs).
  1206. - sth: 0x09, store a halfword from a general register (1.1 calls
  1207. sths).
  1208. - stw: 0x0a, store a word from a general register (1.1 calls stws).
1209. - std: 0x0b, store a doubleword from a general register (2.0 only).
  1210. Implement fast byte moves (stores) to unaligned word or doubleword
  1211. destination.
  1212. - stby: 0x0c, for unaligned word (1.1 calls stbys).
  1213. - stdby: 0x0d for unaligned doubleword (2.0 only).
  1214. Store a word or doubleword using an absolute memory address formed
  1215. using short or long displacement or indexed
  1216. - stwa: 0x0e, store a word from a general register to an absolute
  1217. address (1.0 calls stwas).
  1218. - stda: 0x0f, store a doubleword from a general register to an
  1219. absolute address (2.0 only). */
  1220. static int
  1221. inst_saves_gr (unsigned long inst)
  1222. {
  1223. switch ((inst >> 26) & 0x0f)
  1224. {
  1225. case 0x03:
  1226. switch ((inst >> 6) & 0x0f)
  1227. {
  1228. case 0x08:
  1229. case 0x09:
  1230. case 0x0a:
  1231. case 0x0b:
  1232. case 0x0c:
  1233. case 0x0d:
  1234. case 0x0e:
  1235. case 0x0f:
  1236. return hppa_extract_5R_store (inst);
  1237. default:
  1238. return 0;
  1239. }
  1240. case 0x18:
  1241. case 0x19:
  1242. case 0x1a:
  1243. case 0x1b:
  1244. case 0x1c:
  1245. /* no 0x1d or 0x1e -- according to parisc 2.0 document */
  1246. case 0x1f:
  1247. return hppa_extract_5R_store (inst);
  1248. default:
  1249. return 0;
  1250. }
  1251. }
  1252. /* Return the register number for a FR which is saved by INST or
1253. zero if INST does not save a FR.
1254. Note we only care about full 64-bit register stores (that's the only
1255. kind of store the prologue will use).
  1256. FIXME: What about argument stores with the HP compiler in ANSI mode? */
  1257. static int
  1258. inst_saves_fr (unsigned long inst)
  1259. {
  1260. /* Is this an FSTD? */
  1261. if ((inst & 0xfc00dfc0) == 0x2c001200)
  1262. return hppa_extract_5r_store (inst);
  1263. if ((inst & 0xfc000002) == 0x70000002)
  1264. return hppa_extract_5R_store (inst);
  1265. /* Is this an FSTW? */
  1266. if ((inst & 0xfc00df80) == 0x24001200)
  1267. return hppa_extract_5r_store (inst);
  1268. if ((inst & 0xfc000002) == 0x7c000000)
  1269. return hppa_extract_5R_store (inst);
  1270. return 0;
  1271. }
  1272. /* Advance PC across any function entry prologue instructions
  1273. to reach some "real" code.
  1274. Use information in the unwind table to determine what exactly should
  1275. be in the prologue. */
  1276. static CORE_ADDR
  1277. skip_prologue_hard_way (struct gdbarch *gdbarch, CORE_ADDR pc,
  1278. int stop_before_branch)
  1279. {
  1280. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  1281. gdb_byte buf[4];
  1282. CORE_ADDR orig_pc = pc;
  1283. unsigned long inst, stack_remaining, save_gr, save_fr, save_rp, save_sp;
  1284. unsigned long args_stored, status, i, restart_gr, restart_fr;
  1285. struct unwind_table_entry *u;
  1286. int final_iteration;
  1287. restart_gr = 0;
  1288. restart_fr = 0;
  1289. restart:
  1290. u = find_unwind_entry (pc);
  1291. if (!u)
  1292. return pc;
  1293. /* If we are not at the beginning of a function, then return now. */
  1294. if ((pc & ~0x3) != u->region_start)
  1295. return pc;
  1296. /* This is how much of a frame adjustment we need to account for. */
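/* The unwind descriptor records the frame size in 8-byte units, hence
the shift by 3.  */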
  1297. stack_remaining = u->Total_frame_size << 3;
  1298. /* Magic register saves we want to know about. */
  1299. save_rp = u->Save_RP;
  1300. save_sp = u->Save_SP;
  1301. /* An indication that args may be stored into the stack. Unfortunately
  1302. the HPUX compilers tend to set this in cases where no args were
1303. stored too! */
  1304. args_stored = 1;
  1305. /* Turn the Entry_GR field into a bitmask. */
  1306. save_gr = 0;
  1307. for (i = 3; i < u->Entry_GR + 3; i++)
  1308. {
  1309. /* Frame pointer gets saved into a special location. */
  1310. if (u->Save_SP && i == HPPA_FP_REGNUM)
  1311. continue;
  1312. save_gr |= (1 << i);
  1313. }
  1314. save_gr &= ~restart_gr;
  1315. /* Turn the Entry_FR field into a bitmask too. */
  1316. save_fr = 0;
  1317. for (i = 12; i < u->Entry_FR + 12; i++)
  1318. save_fr |= (1 << i);
  1319. save_fr &= ~restart_fr;
  1320. final_iteration = 0;
  1321. /* Loop until we find everything of interest or hit a branch.
  1322. For unoptimized GCC code and for any HP CC code this will never ever
  1323. examine any user instructions.
  1324. For optimized GCC code we're faced with problems. GCC will schedule
  1325. its prologue and make prologue instructions available for delay slot
  1326. filling. The end result is user code gets mixed in with the prologue
  1327. and a prologue instruction may be in the delay slot of the first branch
  1328. or call.
  1329. Some unexpected things are expected with debugging optimized code, so
  1330. we allow this routine to walk past user instructions in optimized
  1331. GCC code. */
  1332. while (save_gr || save_fr || save_rp || save_sp || stack_remaining > 0
  1333. || args_stored)
  1334. {
  1335. unsigned int reg_num;
  1336. unsigned long old_stack_remaining, old_save_gr, old_save_fr;
  1337. unsigned long old_save_rp, old_save_sp, next_inst;
  1338. /* Save copies of all the triggers so we can compare them later
  1339. (only for HPC). */
  1340. old_save_gr = save_gr;
  1341. old_save_fr = save_fr;
  1342. old_save_rp = save_rp;
  1343. old_save_sp = save_sp;
  1344. old_stack_remaining = stack_remaining;
  1345. status = target_read_memory (pc, buf, 4);
  1346. inst = extract_unsigned_integer (buf, 4, byte_order);
  1347. /* Yow! */
  1348. if (status != 0)
  1349. return pc;
  1350. /* Note the interesting effects of this instruction. */
  1351. stack_remaining -= prologue_inst_adjust_sp (inst);
  1352. /* There are limited ways to store the return pointer into the
  1353. stack. */
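/* For reference: 0x6bc23fd9 is stw %rp,-20(%sp) (major opcode 0x1a, base
%r30, source %r2, low-sign-extended 14-bit displacement -20); the other
two are the std forms that store %rp at -16(%sp).  See the annotated
copies of these constants in hppa_frame_cache below.  */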
  1354. if (inst == 0x6bc23fd9 || inst == 0x0fc212c1 || inst == 0x73c23fe1)
  1355. save_rp = 0;
  1356. /* These are the only ways we save SP into the stack. At this time
  1357. the HP compilers never bother to save SP into the stack. */
  1358. if ((inst & 0xffffc000) == 0x6fc10000
  1359. || (inst & 0xffffc00c) == 0x73c10008)
  1360. save_sp = 0;
  1361. /* Are we loading some register with an offset from the argument
  1362. pointer? */
  1363. if ((inst & 0xffe00000) == 0x37a00000
  1364. || (inst & 0xffffffe0) == 0x081d0240)
  1365. {
  1366. pc += 4;
  1367. continue;
  1368. }
  1369. /* Account for general and floating-point register saves. */
  1370. reg_num = inst_saves_gr (inst);
  1371. save_gr &= ~(1 << reg_num);
  1372. /* Ugh. Also account for argument stores into the stack.
  1373. Unfortunately args_stored only tells us that some arguments
1374. were stored into the stack, not how many or what kind!
1375. This is a kludge, as only the HP compiler sets this bit and it
1376. never does prologue scheduling. So once we see one, skip past
  1377. all of them. We have similar code for the fp arg stores below.
  1378. FIXME. Can still die if we have a mix of GR and FR argument
  1379. stores! */
  1380. if (reg_num >= (gdbarch_ptr_bit (gdbarch) == 64 ? 19 : 23)
  1381. && reg_num <= 26)
  1382. {
  1383. while (reg_num >= (gdbarch_ptr_bit (gdbarch) == 64 ? 19 : 23)
  1384. && reg_num <= 26)
  1385. {
  1386. pc += 4;
  1387. status = target_read_memory (pc, buf, 4);
  1388. inst = extract_unsigned_integer (buf, 4, byte_order);
  1389. if (status != 0)
  1390. return pc;
  1391. reg_num = inst_saves_gr (inst);
  1392. }
  1393. args_stored = 0;
  1394. continue;
  1395. }
  1396. reg_num = inst_saves_fr (inst);
  1397. save_fr &= ~(1 << reg_num);
  1398. status = target_read_memory (pc + 4, buf, 4);
  1399. next_inst = extract_unsigned_integer (buf, 4, byte_order);
  1400. /* Yow! */
  1401. if (status != 0)
  1402. return pc;
1403. /* We've got to be ready to handle the ldo before the fp register
  1404. save. */
  1405. if ((inst & 0xfc000000) == 0x34000000
  1406. && inst_saves_fr (next_inst) >= 4
  1407. && inst_saves_fr (next_inst)
  1408. <= (gdbarch_ptr_bit (gdbarch) == 64 ? 11 : 7))
  1409. {
  1410. /* So we drop into the code below in a reasonable state. */
  1411. reg_num = inst_saves_fr (next_inst);
  1412. pc -= 4;
  1413. }
  1414. /* Ugh. Also account for argument stores into the stack.
1415. This is a kludge, as only the HP compiler sets this bit and it
  1416. never does prologue scheduling. So once we see one, skip past
  1417. all of them. */
  1418. if (reg_num >= 4
  1419. && reg_num <= (gdbarch_ptr_bit (gdbarch) == 64 ? 11 : 7))
  1420. {
  1421. while (reg_num >= 4
  1422. && reg_num
  1423. <= (gdbarch_ptr_bit (gdbarch) == 64 ? 11 : 7))
  1424. {
  1425. pc += 8;
  1426. status = target_read_memory (pc, buf, 4);
  1427. inst = extract_unsigned_integer (buf, 4, byte_order);
  1428. if (status != 0)
  1429. return pc;
  1430. if ((inst & 0xfc000000) != 0x34000000)
  1431. break;
  1432. status = target_read_memory (pc + 4, buf, 4);
  1433. next_inst = extract_unsigned_integer (buf, 4, byte_order);
  1434. if (status != 0)
  1435. return pc;
  1436. reg_num = inst_saves_fr (next_inst);
  1437. }
  1438. args_stored = 0;
  1439. continue;
  1440. }
  1441. /* Quit if we hit any kind of branch. This can happen if a prologue
  1442. instruction is in the delay slot of the first call/branch. */
  1443. if (is_branch (inst) && stop_before_branch)
  1444. break;
  1445. /* What a crock. The HP compilers set args_stored even if no
  1446. arguments were stored into the stack (boo hiss). This could
  1447. cause this code to then skip a bunch of user insns (up to the
  1448. first branch).
  1449. To combat this we try to identify when args_stored was bogusly
  1450. set and clear it. We only do this when args_stored is nonzero,
  1451. all other resources are accounted for, and nothing changed on
  1452. this pass. */
  1453. if (args_stored
  1454. && !(save_gr || save_fr || save_rp || save_sp || stack_remaining > 0)
  1455. && old_save_gr == save_gr && old_save_fr == save_fr
  1456. && old_save_rp == save_rp && old_save_sp == save_sp
  1457. && old_stack_remaining == stack_remaining)
  1458. break;
  1459. /* Bump the PC. */
  1460. pc += 4;
  1461. /* !stop_before_branch, so also look at the insn in the delay slot
  1462. of the branch. */
  1463. if (final_iteration)
  1464. break;
  1465. if (is_branch (inst))
  1466. final_iteration = 1;
  1467. }
  1468. /* We've got a tentative location for the end of the prologue. However
  1469. because of limitations in the unwind descriptor mechanism we may
1470. have gone too far into user code looking for the save of a register
1471. that does not exist. So, if there are registers we expected to be saved
  1472. but never were, mask them out and restart.
  1473. This should only happen in optimized code, and should be very rare. */
  1474. if (save_gr || (save_fr && !(restart_fr || restart_gr)))
  1475. {
  1476. pc = orig_pc;
  1477. restart_gr = save_gr;
  1478. restart_fr = save_fr;
  1479. goto restart;
  1480. }
  1481. return pc;
  1482. }
  1483. /* Return the address of the PC after the last prologue instruction if
  1484. we can determine it from the debug symbols. Else return zero. */
  1485. static CORE_ADDR
  1486. after_prologue (CORE_ADDR pc)
  1487. {
  1488. struct symtab_and_line sal;
  1489. CORE_ADDR func_addr, func_end;
  1490. /* If we can not find the symbol in the partial symbol table, then
  1491. there is no hope we can determine the function's start address
  1492. with this code. */
  1493. if (!find_pc_partial_function (pc, NULL, &func_addr, &func_end))
  1494. return 0;
  1495. /* Get the line associated with FUNC_ADDR. */
  1496. sal = find_pc_line (func_addr, 0);
  1497. /* There are only two cases to consider. First, the end of the source line
  1498. is within the function bounds. In that case we return the end of the
1499. source line. Second, the end of the source line extends beyond the
  1500. bounds of the current function. We need to use the slow code to
  1501. examine instructions in that case.
  1502. Anything else is simply a bug elsewhere. Fixing it here is absolutely
  1503. the wrong thing to do. In fact, it should be entirely possible for this
  1504. function to always return zero since the slow instruction scanning code
  1505. is supposed to *always* work. If it does not, then it is a bug. */
  1506. if (sal.end < func_end)
  1507. return sal.end;
  1508. else
  1509. return 0;
  1510. }
  1511. /* To skip prologues, I use this predicate. Returns either PC itself
  1512. if the code at PC does not look like a function prologue; otherwise
  1513. returns an address that (if we're lucky) follows the prologue.
  1514. hppa_skip_prologue is called by gdb to place a breakpoint in a function.
1515. It doesn't necessarily skip all the insns in the prologue. In fact
  1516. we might not want to skip all the insns because a prologue insn may
  1517. appear in the delay slot of the first branch, and we don't want to
  1518. skip over the branch in that case. */
  1519. static CORE_ADDR
  1520. hppa_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  1521. {
  1522. CORE_ADDR post_prologue_pc;
  1523. /* See if we can determine the end of the prologue via the symbol table.
  1524. If so, then return either PC, or the PC after the prologue, whichever
  1525. is greater. */
  1526. post_prologue_pc = after_prologue (pc);
  1527. /* If after_prologue returned a useful address, then use it. Else
  1528. fall back on the instruction skipping code.
  1529. Some folks have claimed this causes problems because the breakpoint
  1530. may be the first instruction of the prologue. If that happens, then
  1531. the instruction skipping code has a bug that needs to be fixed. */
  1532. if (post_prologue_pc != 0)
  1533. return std::max (pc, post_prologue_pc);
  1534. else
  1535. return (skip_prologue_hard_way (gdbarch, pc, 1));
  1536. }
  1537. /* Return an unwind entry that falls within the frame's code block. */
  1538. static struct unwind_table_entry *
  1539. hppa_find_unwind_entry_in_block (struct frame_info *this_frame)
  1540. {
  1541. CORE_ADDR pc = get_frame_address_in_block (this_frame);
  1542. /* FIXME drow/20070101: Calling gdbarch_addr_bits_remove on the
  1543. result of get_frame_address_in_block implies a problem.
  1544. The bits should have been removed earlier, before the return
  1545. value of gdbarch_unwind_pc. That might be happening already;
  1546. if it isn't, it should be fixed. Then this call can be
  1547. removed. */
  1548. pc = gdbarch_addr_bits_remove (get_frame_arch (this_frame), pc);
  1549. return find_unwind_entry (pc);
  1550. }
  1551. struct hppa_frame_cache
  1552. {
  1553. CORE_ADDR base;
  1554. trad_frame_saved_reg *saved_regs;
  1555. };
  1556. static struct hppa_frame_cache *
  1557. hppa_frame_cache (struct frame_info *this_frame, void **this_cache)
  1558. {
  1559. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  1560. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  1561. int word_size = gdbarch_ptr_bit (gdbarch) / 8;
  1562. struct hppa_frame_cache *cache;
  1563. long saved_gr_mask;
  1564. long saved_fr_mask;
  1565. long frame_size;
  1566. struct unwind_table_entry *u;
  1567. CORE_ADDR prologue_end;
  1568. int fp_in_r1 = 0;
  1569. int i;
  1570. if (hppa_debug)
  1571. gdb_printf (gdb_stdlog, "{ hppa_frame_cache (frame=%d) -> ",
  1572. frame_relative_level(this_frame));
  1573. if ((*this_cache) != NULL)
  1574. {
  1575. if (hppa_debug)
  1576. gdb_printf (gdb_stdlog, "base=%s (cached) }",
  1577. paddress (gdbarch, ((struct hppa_frame_cache *)*this_cache)->base));
  1578. return (struct hppa_frame_cache *) (*this_cache);
  1579. }
  1580. cache = FRAME_OBSTACK_ZALLOC (struct hppa_frame_cache);
  1581. (*this_cache) = cache;
  1582. cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  1583. /* Yow! */
  1584. u = hppa_find_unwind_entry_in_block (this_frame);
  1585. if (!u)
  1586. {
  1587. if (hppa_debug)
  1588. gdb_printf (gdb_stdlog, "base=NULL (no unwind entry) }");
  1589. return (struct hppa_frame_cache *) (*this_cache);
  1590. }
  1591. /* Turn the Entry_GR field into a bitmask. */
  1592. saved_gr_mask = 0;
  1593. for (i = 3; i < u->Entry_GR + 3; i++)
  1594. {
  1595. /* Frame pointer gets saved into a special location. */
  1596. if (u->Save_SP && i == HPPA_FP_REGNUM)
  1597. continue;
  1598. saved_gr_mask |= (1 << i);
  1599. }
  1600. /* Turn the Entry_FR field into a bitmask too. */
  1601. saved_fr_mask = 0;
  1602. for (i = 12; i < u->Entry_FR + 12; i++)
  1603. saved_fr_mask |= (1 << i);
  1604. /* Loop until we find everything of interest or hit a branch.
  1605. For unoptimized GCC code and for any HP CC code this will never ever
  1606. examine any user instructions.
  1607. For optimized GCC code we're faced with problems. GCC will schedule
  1608. its prologue and make prologue instructions available for delay slot
  1609. filling. The end result is user code gets mixed in with the prologue
  1610. and a prologue instruction may be in the delay slot of the first branch
  1611. or call.
  1612. Some unexpected things are expected with debugging optimized code, so
  1613. we allow this routine to walk past user instructions in optimized
  1614. GCC code. */
  1615. {
  1616. int final_iteration = 0;
  1617. CORE_ADDR pc, start_pc, end_pc;
  1618. int looking_for_sp = u->Save_SP;
  1619. int looking_for_rp = u->Save_RP;
  1620. int fp_loc = -1;
  1621. /* We have to use skip_prologue_hard_way instead of just
  1622. skip_prologue_using_sal, in case we stepped into a function without
  1623. symbol information. hppa_skip_prologue also bounds the returned
  1624. pc by the passed in pc, so it will not return a pc in the next
  1625. function.
  1626. We used to call hppa_skip_prologue to find the end of the prologue,
  1627. but if some non-prologue instructions get scheduled into the prologue,
  1628. and the program is compiled with debug information, the "easy" way
  1629. in hppa_skip_prologue will return a prologue end that is too early
  1630. for us to notice any potential frame adjustments. */
  1631. /* We used to use get_frame_func to locate the beginning of the
  1632. function to pass to skip_prologue. However, when objects are
  1633. compiled without debug symbols, get_frame_func can return the wrong
  1634. function (or 0). We can do better than that by using unwind records.
  1635. This only works if the Region_description of the unwind record
  1636. indicates that it includes the entry point of the function.
  1637. HP compilers sometimes generate unwind records for regions that
  1638. do not include the entry or exit point of a function. GNU tools
  1639. do not do this. */
  1640. if ((u->Region_description & 0x2) == 0)
  1641. start_pc = u->region_start;
  1642. else
  1643. start_pc = get_frame_func (this_frame);
  1644. prologue_end = skip_prologue_hard_way (gdbarch, start_pc, 0);
  1645. end_pc = get_frame_pc (this_frame);
  1646. if (prologue_end != 0 && end_pc > prologue_end)
  1647. end_pc = prologue_end;
  1648. frame_size = 0;
  1649. for (pc = start_pc;
  1650. ((saved_gr_mask || saved_fr_mask
  1651. || looking_for_sp || looking_for_rp
  1652. || frame_size < (u->Total_frame_size << 3))
  1653. && pc < end_pc);
  1654. pc += 4)
  1655. {
  1656. int reg;
  1657. gdb_byte buf4[4];
  1658. long inst;
  1659. if (!safe_frame_unwind_memory (this_frame, pc, buf4))
  1660. {
  1661. error (_("Cannot read instruction at %s."),
  1662. paddress (gdbarch, pc));
  1663. return (struct hppa_frame_cache *) (*this_cache);
  1664. }
  1665. inst = extract_unsigned_integer (buf4, sizeof buf4, byte_order);
  1666. /* Note the interesting effects of this instruction. */
  1667. frame_size += prologue_inst_adjust_sp (inst);
  1668. /* There are limited ways to store the return pointer into the
  1669. stack. */
  1670. if (inst == 0x6bc23fd9) /* stw rp,-0x14(sr0,sp) */
  1671. {
  1672. looking_for_rp = 0;
  1673. cache->saved_regs[HPPA_RP_REGNUM].set_addr (-20);
  1674. }
  1675. else if (inst == 0x6bc23fd1) /* stw rp,-0x18(sr0,sp) */
  1676. {
  1677. looking_for_rp = 0;
  1678. cache->saved_regs[HPPA_RP_REGNUM].set_addr (-24);
  1679. }
  1680. else if (inst == 0x0fc212c1
  1681. || inst == 0x73c23fe1) /* std rp,-0x10(sr0,sp) */
  1682. {
  1683. looking_for_rp = 0;
  1684. cache->saved_regs[HPPA_RP_REGNUM].set_addr (-16);
  1685. }
  1686. /* Check to see if we saved SP into the stack. This also
  1687. happens to indicate the location of the saved frame
  1688. pointer. */
  1689. if ((inst & 0xffffc000) == 0x6fc10000 /* stw,ma r1,N(sr0,sp) */
  1690. || (inst & 0xffffc00c) == 0x73c10008) /* std,ma r1,N(sr0,sp) */
  1691. {
  1692. looking_for_sp = 0;
  1693. cache->saved_regs[HPPA_FP_REGNUM].set_addr (0);
  1694. }
  1695. else if (inst == 0x08030241) /* copy %r3, %r1 */
  1696. {
  1697. fp_in_r1 = 1;
  1698. }
  1699. /* Account for general and floating-point register saves. */
  1700. reg = inst_saves_gr (inst);
  1701. if (reg >= 3 && reg <= 18
  1702. && (!u->Save_SP || reg != HPPA_FP_REGNUM))
  1703. {
  1704. saved_gr_mask &= ~(1 << reg);
  1705. if ((inst >> 26) == 0x1b && hppa_extract_14 (inst) >= 0)
  1706. /* stwm with a positive displacement is a _post_
  1707. _modify_. */
  1708. cache->saved_regs[reg].set_addr (0);
  1709. else if ((inst & 0xfc00000c) == 0x70000008)
  1710. /* A std has explicit post_modify forms. */
  1711. cache->saved_regs[reg].set_addr (0);
  1712. else
  1713. {
  1714. CORE_ADDR offset;
  1715. if ((inst >> 26) == 0x1c)
  1716. offset = (inst & 0x1 ? -(1 << 13) : 0)
  1717. | (((inst >> 4) & 0x3ff) << 3);
  1718. else if ((inst >> 26) == 0x03)
  1719. offset = hppa_low_hppa_sign_extend (inst & 0x1f, 5);
  1720. else
  1721. offset = hppa_extract_14 (inst);
  1722. /* Handle code with and without frame pointers. */
  1723. if (u->Save_SP)
  1724. cache->saved_regs[reg].set_addr (offset);
  1725. else
  1726. cache->saved_regs[reg].set_addr ((u->Total_frame_size << 3)
  1727. + offset);
  1728. }
  1729. }
  1730. /* GCC handles callee saved FP regs a little differently.
  1731. It emits an instruction to put the value of the start of
  1732. the FP store area into %r1. It then uses fstds,ma with a
  1733. basereg of %r1 for the stores.
  1734. HP CC emits them at the current stack pointer modifying the
  1735. stack pointer as it stores each register. */
  1736. /* ldo X(%r3),%r1 or ldo X(%r30),%r1. */
  1737. if ((inst & 0xffffc000) == 0x34610000
  1738. || (inst & 0xffffc000) == 0x37c10000)
  1739. fp_loc = hppa_extract_14 (inst);
  1740. reg = inst_saves_fr (inst);
  1741. if (reg >= 12 && reg <= 21)
  1742. {
  1743. /* Note +4 braindamage below is necessary because the FP
  1744. status registers are internally 8 registers rather than
  1745. the expected 4 registers. */
  1746. saved_fr_mask &= ~(1 << reg);
  1747. if (fp_loc == -1)
  1748. {
  1749. /* 1st HP CC FP register store. After this
  1750. instruction we've set enough state that the GCC and
  1751. HPCC code are both handled in the same manner. */
  1752. cache->saved_regs[reg + HPPA_FP4_REGNUM + 4].set_addr (0);
  1753. fp_loc = 8;
  1754. }
  1755. else
  1756. {
  1757. cache->saved_regs[reg + HPPA_FP0_REGNUM + 4].set_addr (fp_loc);
  1758. fp_loc += 8;
  1759. }
  1760. }
  1761. /* Quit if we hit any kind of branch the previous iteration. */
  1762. if (final_iteration)
  1763. break;
  1764. /* We want to look precisely one instruction beyond the branch
  1765. if we have not found everything yet. */
  1766. if (is_branch (inst))
  1767. final_iteration = 1;
  1768. }
  1769. }
  1770. {
  1771. /* The frame base always represents the value of %sp at entry to
  1772. the current function (and is thus equivalent to the "saved"
1773. stack pointer). */
  1774. CORE_ADDR this_sp = get_frame_register_unsigned (this_frame,
  1775. HPPA_SP_REGNUM);
  1776. CORE_ADDR fp;
  1777. if (hppa_debug)
  1778. gdb_printf (gdb_stdlog, " (this_sp=%s, pc=%s, "
  1779. "prologue_end=%s) ",
  1780. paddress (gdbarch, this_sp),
  1781. paddress (gdbarch, get_frame_pc (this_frame)),
  1782. paddress (gdbarch, prologue_end));
  1783. /* Check to see if a frame pointer is available, and use it for
  1784. frame unwinding if it is.
  1785. There are some situations where we need to rely on the frame
  1786. pointer to do stack unwinding. For example, if a function calls
  1787. alloca (), the stack pointer can get adjusted inside the body of
  1788. the function. In this case, the ABI requires that the compiler
  1789. maintain a frame pointer for the function.
  1790. The unwind record has a flag (alloca_frame) that indicates that
  1791. a function has a variable frame; unfortunately, gcc/binutils
  1792. does not set this flag. Instead, whenever a frame pointer is used
  1793. and saved on the stack, the Save_SP flag is set. We use this to
  1794. decide whether to use the frame pointer for unwinding.
  1795. TODO: For the HP compiler, maybe we should use the alloca_frame flag
  1796. instead of Save_SP. */
  1797. fp = get_frame_register_unsigned (this_frame, HPPA_FP_REGNUM);
  1798. if (u->alloca_frame)
  1799. fp -= u->Total_frame_size << 3;
  1800. if (get_frame_pc (this_frame) >= prologue_end
  1801. && (u->Save_SP || u->alloca_frame) && fp != 0)
  1802. {
  1803. cache->base = fp;
  1804. if (hppa_debug)
  1805. gdb_printf (gdb_stdlog, " (base=%s) [frame pointer]",
  1806. paddress (gdbarch, cache->base));
  1807. }
  1808. else if (u->Save_SP
  1809. && cache->saved_regs[HPPA_SP_REGNUM].is_addr ())
  1810. {
1811. /* We're both expecting the SP to be saved, and the SP has in fact
1812. been saved. The entry SP value is saved at this frame's SP
  1813. address. */
  1814. cache->base = read_memory_integer (this_sp, word_size, byte_order);
  1815. if (hppa_debug)
  1816. gdb_printf (gdb_stdlog, " (base=%s) [saved]",
  1817. paddress (gdbarch, cache->base));
  1818. }
  1819. else
  1820. {
  1821. /* The prologue has been slowly allocating stack space. Adjust
  1822. the SP back. */
  1823. cache->base = this_sp - frame_size;
  1824. if (hppa_debug)
  1825. gdb_printf (gdb_stdlog, " (base=%s) [unwind adjust]",
  1826. paddress (gdbarch, cache->base));
  1827. }
  1828. cache->saved_regs[HPPA_SP_REGNUM].set_value (cache->base);
  1829. }
1830. /* The PC is found in the "return register"; "Millicode" uses "r31"
  1831. as the return register while normal code uses "rp". */
  1832. if (u->Millicode)
  1833. {
  1834. if (cache->saved_regs[31].is_addr ())
  1835. {
  1836. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM] = cache->saved_regs[31];
  1837. if (hppa_debug)
  1838. gdb_printf (gdb_stdlog, " (pc=r31) [stack] } ");
  1839. }
  1840. else
  1841. {
  1842. ULONGEST r31 = get_frame_register_unsigned (this_frame, 31);
  1843. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM].set_value (r31);
  1844. if (hppa_debug)
  1845. gdb_printf (gdb_stdlog, " (pc=r31) [frame] } ");
  1846. }
  1847. }
  1848. else
  1849. {
  1850. if (cache->saved_regs[HPPA_RP_REGNUM].is_addr ())
  1851. {
  1852. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM] =
  1853. cache->saved_regs[HPPA_RP_REGNUM];
  1854. if (hppa_debug)
  1855. gdb_printf (gdb_stdlog, " (pc=rp) [stack] } ");
  1856. }
  1857. else
  1858. {
  1859. ULONGEST rp = get_frame_register_unsigned (this_frame,
  1860. HPPA_RP_REGNUM);
  1861. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM].set_value (rp);
  1862. if (hppa_debug)
  1863. gdb_printf (gdb_stdlog, " (pc=rp) [frame] } ");
  1864. }
  1865. }
  1866. /* If Save_SP is set, then we expect the frame pointer to be saved in the
  1867. frame. However, there is a one-insn window where we haven't saved it
  1868. yet, but we've already clobbered it. Detect this case and fix it up.
  1869. The prologue sequence for frame-pointer functions is:
  1870. 0: stw %rp, -20(%sp)
  1871. 4: copy %r3, %r1
  1872. 8: copy %sp, %r3
  1873. c: stw,ma %r1, XX(%sp)
  1874. So if we are at offset c, the r3 value that we want is not yet saved
  1875. on the stack, but it's been overwritten. The prologue analyzer will
  1876. set fp_in_r1 when it sees the copy insn so we know to get the value
  1877. from r1 instead. */
  1878. if (u->Save_SP && !cache->saved_regs[HPPA_FP_REGNUM].is_addr ()
  1879. && fp_in_r1)
  1880. {
  1881. ULONGEST r1 = get_frame_register_unsigned (this_frame, 1);
  1882. cache->saved_regs[HPPA_FP_REGNUM].set_value (r1);
  1883. }
  1884. {
  1885. /* Convert all the offsets into addresses. */
  1886. int reg;
  1887. for (reg = 0; reg < gdbarch_num_regs (gdbarch); reg++)
  1888. {
  1889. if (cache->saved_regs[reg].is_addr ())
  1890. cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
  1891. + cache->base);
  1892. }
  1893. }
  1894. {
  1895. hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  1896. if (tdep->unwind_adjust_stub)
  1897. tdep->unwind_adjust_stub (this_frame, cache->base, cache->saved_regs);
  1898. }
  1899. if (hppa_debug)
  1900. gdb_printf (gdb_stdlog, "base=%s }",
  1901. paddress (gdbarch, ((struct hppa_frame_cache *)*this_cache)->base));
  1902. return (struct hppa_frame_cache *) (*this_cache);
  1903. }
  1904. static void
  1905. hppa_frame_this_id (struct frame_info *this_frame, void **this_cache,
  1906. struct frame_id *this_id)
  1907. {
  1908. struct hppa_frame_cache *info;
  1909. struct unwind_table_entry *u;
  1910. info = hppa_frame_cache (this_frame, this_cache);
  1911. u = hppa_find_unwind_entry_in_block (this_frame);
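/* Use the start of the unwind region as the code address identifying
this frame.  */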
  1912. (*this_id) = frame_id_build (info->base, u->region_start);
  1913. }
  1914. static struct value *
  1915. hppa_frame_prev_register (struct frame_info *this_frame,
  1916. void **this_cache, int regnum)
  1917. {
  1918. struct hppa_frame_cache *info = hppa_frame_cache (this_frame, this_cache);
  1919. return hppa_frame_prev_register_helper (this_frame,
  1920. info->saved_regs, regnum);
  1921. }
  1922. static int
  1923. hppa_frame_unwind_sniffer (const struct frame_unwind *self,
  1924. struct frame_info *this_frame, void **this_cache)
  1925. {
  1926. if (hppa_find_unwind_entry_in_block (this_frame))
  1927. return 1;
  1928. return 0;
  1929. }
  1930. static const struct frame_unwind hppa_frame_unwind =
  1931. {
  1932. "hppa unwind table",
  1933. NORMAL_FRAME,
  1934. default_frame_unwind_stop_reason,
  1935. hppa_frame_this_id,
  1936. hppa_frame_prev_register,
  1937. NULL,
  1938. hppa_frame_unwind_sniffer
  1939. };
  1940. /* This is a generic fallback frame unwinder that kicks in if we fail all
  1941. the other ones. Normally we would expect the stub and regular unwinder
  1942. to work, but in some cases we might hit a function that just doesn't
  1943. have any unwind information available. In this case we try to do
  1944. unwinding solely based on code reading. This is obviously going to be
  1945. slow, so only use this as a last resort. Currently this will only
  1946. identify the stack and pc for the frame. */
  1947. static struct hppa_frame_cache *
  1948. hppa_fallback_frame_cache (struct frame_info *this_frame, void **this_cache)
  1949. {
  1950. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  1951. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  1952. struct hppa_frame_cache *cache;
  1953. unsigned int frame_size = 0;
  1954. int found_rp = 0;
  1955. CORE_ADDR start_pc;
  1956. if (hppa_debug)
  1957. gdb_printf (gdb_stdlog,
  1958. "{ hppa_fallback_frame_cache (frame=%d) -> ",
  1959. frame_relative_level (this_frame));
  1960. cache = FRAME_OBSTACK_ZALLOC (struct hppa_frame_cache);
  1961. (*this_cache) = cache;
  1962. cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  1963. start_pc = get_frame_func (this_frame);
  1964. if (start_pc)
  1965. {
  1966. CORE_ADDR cur_pc = get_frame_pc (this_frame);
  1967. CORE_ADDR pc;
  1968. for (pc = start_pc; pc < cur_pc; pc += 4)
  1969. {
  1970. unsigned int insn;
  1971. insn = read_memory_unsigned_integer (pc, 4, byte_order);
  1972. frame_size += prologue_inst_adjust_sp (insn);
  1973. /* There are limited ways to store the return pointer into the
  1974. stack. */
  1975. if (insn == 0x6bc23fd9) /* stw rp,-0x14(sr0,sp) */
  1976. {
  1977. cache->saved_regs[HPPA_RP_REGNUM].set_addr (-20);
  1978. found_rp = 1;
  1979. }
  1980. else if (insn == 0x0fc212c1
  1981. || insn == 0x73c23fe1) /* std rp,-0x10(sr0,sp) */
  1982. {
  1983. cache->saved_regs[HPPA_RP_REGNUM].set_addr (-16);
  1984. found_rp = 1;
  1985. }
  1986. }
  1987. }
  1988. if (hppa_debug)
  1989. gdb_printf (gdb_stdlog, " frame_size=%d, found_rp=%d }\n",
  1990. frame_size, found_rp);
  1991. cache->base = get_frame_register_unsigned (this_frame, HPPA_SP_REGNUM);
  1992. cache->base -= frame_size;
  1993. cache->saved_regs[HPPA_SP_REGNUM].set_value (cache->base);
  1994. if (cache->saved_regs[HPPA_RP_REGNUM].is_addr ())
  1995. {
  1996. cache->saved_regs[HPPA_RP_REGNUM].set_addr (cache->saved_regs[HPPA_RP_REGNUM].addr ()
  1997. + cache->base);
  1998. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM] =
  1999. cache->saved_regs[HPPA_RP_REGNUM];
  2000. }
  2001. else
  2002. {
  2003. ULONGEST rp;
  2004. rp = get_frame_register_unsigned (this_frame, HPPA_RP_REGNUM);
  2005. cache->saved_regs[HPPA_PCOQ_HEAD_REGNUM].set_value (rp);
  2006. }
  2007. return cache;
  2008. }
  2009. static void
  2010. hppa_fallback_frame_this_id (struct frame_info *this_frame, void **this_cache,
  2011. struct frame_id *this_id)
  2012. {
  2013. struct hppa_frame_cache *info =
  2014. hppa_fallback_frame_cache (this_frame, this_cache);
  2015. (*this_id) = frame_id_build (info->base, get_frame_func (this_frame));
  2016. }
  2017. static struct value *
  2018. hppa_fallback_frame_prev_register (struct frame_info *this_frame,
  2019. void **this_cache, int regnum)
  2020. {
  2021. struct hppa_frame_cache *info
  2022. = hppa_fallback_frame_cache (this_frame, this_cache);
  2023. return hppa_frame_prev_register_helper (this_frame,
  2024. info->saved_regs, regnum);
  2025. }
  2026. static const struct frame_unwind hppa_fallback_frame_unwind =
  2027. {
  2028. "hppa prologue",
  2029. NORMAL_FRAME,
  2030. default_frame_unwind_stop_reason,
  2031. hppa_fallback_frame_this_id,
  2032. hppa_fallback_frame_prev_register,
  2033. NULL,
  2034. default_frame_sniffer
  2035. };
  2036. /* Stub frames, used for all kinds of call stubs. */
  2037. struct hppa_stub_unwind_cache
  2038. {
  2039. CORE_ADDR base;
  2040. trad_frame_saved_reg *saved_regs;
  2041. };
  2042. static struct hppa_stub_unwind_cache *
  2043. hppa_stub_frame_unwind_cache (struct frame_info *this_frame,
  2044. void **this_cache)
  2045. {
  2046. struct hppa_stub_unwind_cache *info;
  2047. if (*this_cache)
  2048. return (struct hppa_stub_unwind_cache *) *this_cache;
  2049. info = FRAME_OBSTACK_ZALLOC (struct hppa_stub_unwind_cache);
  2050. *this_cache = info;
  2051. info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  2052. info->base = get_frame_register_unsigned (this_frame, HPPA_SP_REGNUM);
  2053. /* By default we assume that stubs do not change the rp. */
  2054. info->saved_regs[HPPA_PCOQ_HEAD_REGNUM].set_realreg (HPPA_RP_REGNUM);
  2055. return info;
  2056. }
  2057. static void
  2058. hppa_stub_frame_this_id (struct frame_info *this_frame,
  2059. void **this_prologue_cache,
  2060. struct frame_id *this_id)
  2061. {
  2062. struct hppa_stub_unwind_cache *info
  2063. = hppa_stub_frame_unwind_cache (this_frame, this_prologue_cache);
  2064. if (info)
  2065. *this_id = frame_id_build (info->base, get_frame_func (this_frame));
  2066. }
  2067. static struct value *
  2068. hppa_stub_frame_prev_register (struct frame_info *this_frame,
  2069. void **this_prologue_cache, int regnum)
  2070. {
  2071. struct hppa_stub_unwind_cache *info
  2072. = hppa_stub_frame_unwind_cache (this_frame, this_prologue_cache);
  2073. if (info == NULL)
  2074. error (_("Requesting registers from null frame."));
  2075. return hppa_frame_prev_register_helper (this_frame,
  2076. info->saved_regs, regnum);
  2077. }
  2078. static int
  2079. hppa_stub_unwind_sniffer (const struct frame_unwind *self,
  2080. struct frame_info *this_frame,
  2081. void **this_cache)
  2082. {
  2083. CORE_ADDR pc = get_frame_address_in_block (this_frame);
  2084. struct gdbarch *gdbarch = get_frame_arch (this_frame);
  2085. hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);
  2086. if (pc == 0
  2087. || (tdep->in_solib_call_trampoline != NULL
  2088. && tdep->in_solib_call_trampoline (gdbarch, pc))
  2089. || gdbarch_in_solib_return_trampoline (gdbarch, pc, NULL))
  2090. return 1;
  2091. return 0;
  2092. }
  2093. static const struct frame_unwind hppa_stub_frame_unwind = {
  2094. "hppa stub",
  2095. NORMAL_FRAME,
  2096. default_frame_unwind_stop_reason,
  2097. hppa_stub_frame_this_id,
  2098. hppa_stub_frame_prev_register,
  2099. NULL,
  2100. hppa_stub_unwind_sniffer
  2101. };
  2102. CORE_ADDR
  2103. hppa_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
  2104. {
  2105. ULONGEST ipsw;
  2106. CORE_ADDR pc;
  2107. ipsw = frame_unwind_register_unsigned (next_frame, HPPA_IPSW_REGNUM);
  2108. pc = frame_unwind_register_unsigned (next_frame, HPPA_PCOQ_HEAD_REGNUM);
  2109. /* If the current instruction is nullified, then we are effectively
  2110. still executing the previous instruction. Pretend we are still
  2111. there. This is needed when single stepping; if the nullified
  2112. instruction is on a different line, we don't want GDB to think
  2113. we've stepped onto that line. */
  2114. if (ipsw & 0x00200000)
  2115. pc -= 4;
  2116. return pc & ~0x3;
  2117. }
  2118. /* Return the minimal symbol whose name is NAME and stub type is STUB_TYPE.
2119. Return an empty bound minimal symbol if no such symbol was found. */
  2120. struct bound_minimal_symbol
  2121. hppa_lookup_stub_minimal_symbol (const char *name,
  2122. enum unwind_stub_types stub_type)
  2123. {
  2124. struct bound_minimal_symbol result;
  2125. for (objfile *objfile : current_program_space->objfiles ())
  2126. {
  2127. for (minimal_symbol *msym : objfile->msymbols ())
  2128. {
  2129. if (strcmp (msym->linkage_name (), name) == 0)
  2130. {
  2131. struct unwind_table_entry *u;
  2132. u = find_unwind_entry (MSYMBOL_VALUE (msym));
  2133. if (u != NULL && u->stub_unwind.stub_type == stub_type)
  2134. {
  2135. result.objfile = objfile;
  2136. result.minsym = msym;
  2137. return result;
  2138. }
  2139. }
  2140. }
  2141. }
  2142. return result;
  2143. }
  2144. static void
  2145. unwind_command (const char *exp, int from_tty)
  2146. {
  2147. CORE_ADDR address;
  2148. struct unwind_table_entry *u;
  2149. /* If we have an expression, evaluate it and use it as the address. */
  2150. if (exp != 0 && *exp != 0)
  2151. address = parse_and_eval_address (exp);
  2152. else
  2153. return;
  2154. u = find_unwind_entry (address);
  2155. if (!u)
  2156. {
  2157. gdb_printf ("Can't find unwind table entry for %s\n", exp);
  2158. return;
  2159. }
  2160. gdb_printf ("unwind_table_entry (%s):\n", host_address_to_string (u));
  2161. gdb_printf ("\tregion_start = %s\n", hex_string (u->region_start));
  2162. gdb_printf ("\tregion_end = %s\n", hex_string (u->region_end));
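/* Print the name of each flag that is set in the unwind entry.  */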
  2163. #define pif(FLD) if (u->FLD) gdb_printf (" "#FLD);
  2164. gdb_printf ("\n\tflags =");
  2165. pif (Cannot_unwind);
  2166. pif (Millicode);
  2167. pif (Millicode_save_sr0);
  2168. pif (Entry_SR);
  2169. pif (Args_stored);
  2170. pif (Variable_Frame);
  2171. pif (Separate_Package_Body);
  2172. pif (Frame_Extension_Millicode);
  2173. pif (Stack_Overflow_Check);
  2174. pif (Two_Instruction_SP_Increment);
  2175. pif (sr4export);
  2176. pif (cxx_info);
  2177. pif (cxx_try_catch);
  2178. pif (sched_entry_seq);
  2179. pif (Save_SP);
  2180. pif (Save_RP);
  2181. pif (Save_MRP_in_frame);
  2182. pif (save_r19);
  2183. pif (Cleanup_defined);
  2184. pif (MPE_XL_interrupt_marker);
  2185. pif (HP_UX_interrupt_marker);
  2186. pif (Large_frame);
  2187. pif (alloca_frame);
  2188. gdb_putc ('\n');
  2189. #define pin(FLD) gdb_printf ("\t"#FLD" = 0x%x\n", u->FLD);
  2190. pin (Region_description);
  2191. pin (Entry_FR);
  2192. pin (Entry_GR);
  2193. pin (Total_frame_size);
  2194. if (u->stub_unwind.stub_type)
  2195. {
  2196. gdb_printf ("\tstub type = ");
  2197. switch (u->stub_unwind.stub_type)
  2198. {
  2199. case LONG_BRANCH:
  2200. gdb_printf ("long branch\n");
  2201. break;
  2202. case PARAMETER_RELOCATION:
  2203. gdb_printf ("parameter relocation\n");
  2204. break;
  2205. case EXPORT:
  2206. gdb_printf ("export\n");
  2207. break;
  2208. case IMPORT:
  2209. gdb_printf ("import\n");
  2210. break;
  2211. case IMPORT_SHLIB:
  2212. gdb_printf ("import shlib\n");
  2213. break;
  2214. default:
  2215. gdb_printf ("unknown (%d)\n", u->stub_unwind.stub_type);
  2216. }
  2217. }
  2218. }
  2219. /* Return the GDB type object for the "standard" data type of data in
  2220. register REGNUM. */
  2221. static struct type *
  2222. hppa32_register_type (struct gdbarch *gdbarch, int regnum)
  2223. {
  2224. if (regnum < HPPA_FP4_REGNUM)
  2225. return builtin_type (gdbarch)->builtin_uint32;
  2226. else
  2227. return builtin_type (gdbarch)->builtin_float;
  2228. }
  2229. static struct type *
  2230. hppa64_register_type (struct gdbarch *gdbarch, int regnum)
  2231. {
  2232. if (regnum < HPPA64_FP4_REGNUM)
  2233. return builtin_type (gdbarch)->builtin_uint64;
  2234. else
  2235. return builtin_type (gdbarch)->builtin_double;
  2236. }
  2237. /* Return non-zero if REGNUM is not a register available to the user
  2238. through ptrace/ttrace. */
  2239. static int
  2240. hppa32_cannot_store_register (struct gdbarch *gdbarch, int regnum)
  2241. {
  2242. return (regnum == 0
  2243. || regnum == HPPA_PCSQ_HEAD_REGNUM
  2244. || (regnum >= HPPA_PCSQ_TAIL_REGNUM && regnum < HPPA_IPSW_REGNUM)
  2245. || (regnum > HPPA_IPSW_REGNUM && regnum < HPPA_FP4_REGNUM));
  2246. }
  2247. static int
  2248. hppa32_cannot_fetch_register (struct gdbarch *gdbarch, int regnum)
  2249. {
  2250. /* cr26 and cr27 are readable (but not writable) from userspace. */
  2251. if (regnum == HPPA_CR26_REGNUM || regnum == HPPA_CR27_REGNUM)
  2252. return 0;
  2253. else
  2254. return hppa32_cannot_store_register (gdbarch, regnum);
  2255. }
  2256. static int
  2257. hppa64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
  2258. {
  2259. return (regnum == 0
  2260. || regnum == HPPA_PCSQ_HEAD_REGNUM
  2261. || (regnum >= HPPA_PCSQ_TAIL_REGNUM && regnum < HPPA_IPSW_REGNUM)
  2262. || (regnum > HPPA_IPSW_REGNUM && regnum < HPPA64_FP4_REGNUM));
  2263. }
  2264. static int
  2265. hppa64_cannot_fetch_register (struct gdbarch *gdbarch, int regnum)
  2266. {
  2267. /* cr26 and cr27 are readable (but not writable) from userspace. */
  2268. if (regnum == HPPA_CR26_REGNUM || regnum == HPPA_CR27_REGNUM)
  2269. return 0;
  2270. else
  2271. return hppa64_cannot_store_register (gdbarch, regnum);
  2272. }
  2273. static CORE_ADDR
  2274. hppa_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR addr)
  2275. {
  2276. /* The low two bits of the PC on the PA contain the privilege level.
  2277. Some genius implementing a (non-GCC) compiler apparently decided
  2278. this means that "addresses" in a text section therefore include a
  2279. privilege level, and thus symbol tables should contain these bits.
  2280. This seems like a bonehead thing to do--anyway, it seems to work
  2281. for our purposes to just ignore those bits. */
  2282. return (addr &= ~0x3);
  2283. }
  2284. /* Get the ARGIth function argument for the current function. */
  2285. static CORE_ADDR
  2286. hppa_fetch_pointer_argument (struct frame_info *frame, int argi,
  2287. struct type *type)
  2288. {
  2289. return get_frame_register_unsigned (frame, HPPA_R0_REGNUM + 26 - argi);
  2290. }
  2291. static enum register_status
  2292. hppa_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
  2293. int regnum, gdb_byte *buf)
  2294. {
  2295. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  2296. ULONGEST tmp;
  2297. enum register_status status;
  2298. status = regcache->raw_read (regnum, &tmp);
  2299. if (status == REG_VALID)
  2300. {
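/* Mask out the low (privilege level) bits when reading the PC queue
registers.  */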
  2301. if (regnum == HPPA_PCOQ_HEAD_REGNUM || regnum == HPPA_PCOQ_TAIL_REGNUM)
  2302. tmp &= ~0x3;
  2303. store_unsigned_integer (buf, sizeof tmp, byte_order, tmp);
  2304. }
  2305. return status;
  2306. }
  2307. static CORE_ADDR
  2308. hppa_find_global_pointer (struct gdbarch *gdbarch, struct value *function)
  2309. {
  2310. return 0;
  2311. }
  2312. struct value *
  2313. hppa_frame_prev_register_helper (struct frame_info *this_frame,
  2314. trad_frame_saved_reg saved_regs[],
  2315. int regnum)
  2316. {
  2317. struct gdbarch *arch = get_frame_arch (this_frame);
  2318. enum bfd_endian byte_order = gdbarch_byte_order (arch);
  2319. if (regnum == HPPA_PCOQ_TAIL_REGNUM)
  2320. {
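/* No separate copy of the PC queue tail is kept; reconstruct it as the
head of the queue plus one instruction.  */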
  2321. int size = register_size (arch, HPPA_PCOQ_HEAD_REGNUM);
  2322. CORE_ADDR pc;
  2323. struct value *pcoq_val =
  2324. trad_frame_get_prev_register (this_frame, saved_regs,
  2325. HPPA_PCOQ_HEAD_REGNUM);
  2326. pc = extract_unsigned_integer (value_contents_all (pcoq_val).data (),
  2327. size, byte_order);
  2328. return frame_unwind_got_constant (this_frame, regnum, pc + 4);
  2329. }
  2330. return trad_frame_get_prev_register (this_frame, saved_regs, regnum);
  2331. }
  2332. /* An instruction to match. */
  2333. struct insn_pattern
  2334. {
  2335. unsigned int data; /* See if it matches this.... */
  2336. unsigned int mask; /* ... with this mask. */
  2337. };
  2338. /* See bfd/elf32-hppa.c */
  2339. static struct insn_pattern hppa_long_branch_stub[] = {
  2340. /* ldil LR'xxx,%r1 */
  2341. { 0x20200000, 0xffe00000 },
  2342. /* be,n RR'xxx(%sr4,%r1) */
  2343. { 0xe0202002, 0xffe02002 },
  2344. { 0, 0 }
  2345. };
  2346. static struct insn_pattern hppa_long_branch_pic_stub[] = {
  2347. /* b,l .+8, %r1 */
  2348. { 0xe8200000, 0xffe00000 },
  2349. /* addil LR'xxx - ($PIC_pcrel$0 - 4), %r1 */
  2350. { 0x28200000, 0xffe00000 },
  2351. /* be,n RR'xxxx - ($PIC_pcrel$0 - 8)(%sr4, %r1) */
  2352. { 0xe0202002, 0xffe02002 },
  2353. { 0, 0 }
  2354. };
  2355. static struct insn_pattern hppa_import_stub[] = {
  2356. /* addil LR'xxx, %dp */
  2357. { 0x2b600000, 0xffe00000 },
  2358. /* ldw RR'xxx(%r1), %r21 */
  2359. { 0x48350000, 0xffffb000 },
  2360. /* bv %r0(%r21) */
  2361. { 0xeaa0c000, 0xffffffff },
  2362. /* ldw RR'xxx+4(%r1), %r19 */
  2363. { 0x48330000, 0xffffb000 },
  2364. { 0, 0 }
  2365. };
  2366. static struct insn_pattern hppa_import_pic_stub[] = {
  2367. /* addil LR'xxx,%r19 */
  2368. { 0x2a600000, 0xffe00000 },
  2369. /* ldw RR'xxx(%r1),%r21 */
  2370. { 0x48350000, 0xffffb000 },
  2371. /* bv %r0(%r21) */
  2372. { 0xeaa0c000, 0xffffffff },
  2373. /* ldw RR'xxx+4(%r1),%r19 */
  2374. { 0x48330000, 0xffffb000 },
  2375. { 0, 0 },
  2376. };
  2377. static struct insn_pattern hppa_plt_stub[] = {
  2378. /* b,l 1b, %r20 - 1b is 3 insns before here */
  2379. { 0xea9f1fdd, 0xffffffff },
  2380. /* depi 0,31,2,%r20 */
  2381. { 0xd6801c1e, 0xffffffff },
  2382. { 0, 0 }
  2383. };
2384. /* Maximum number of instructions in the patterns above. */
  2385. #define HPPA_MAX_INSN_PATTERN_LEN 4
  2386. /* Return non-zero if the instructions at PC match the series
  2387. described in PATTERN, or zero otherwise. PATTERN is an array of
  2388. 'struct insn_pattern' objects, terminated by an entry whose mask is
  2389. zero.
  2390. When the match is successful, fill INSN[i] with what PATTERN[i]
  2391. matched. */
  2392. static int
  2393. hppa_match_insns (struct gdbarch *gdbarch, CORE_ADDR pc,
  2394. struct insn_pattern *pattern, unsigned int *insn)
  2395. {
  2396. enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  2397. CORE_ADDR npc = pc;
  2398. int i;
  2399. for (i = 0; pattern[i].mask; i++)
  2400. {
  2401. gdb_byte buf[HPPA_INSN_SIZE];
  2402. target_read_memory (npc, buf, HPPA_INSN_SIZE);
  2403. insn[i] = extract_unsigned_integer (buf, HPPA_INSN_SIZE, byte_order);
  2404. if ((insn[i] & pattern[i].mask) == pattern[i].data)
  2405. npc += 4;
  2406. else
  2407. return 0;
  2408. }
  2409. return 1;
  2410. }
  2411. /* This relaxed version of the instruction matcher allows us to match
  2412. from somewhere inside the pattern, by looking backwards in the
2413. instruction stream. */
  2414. static int
  2415. hppa_match_insns_relaxed (struct gdbarch *gdbarch, CORE_ADDR pc,
  2416. struct insn_pattern *pattern, unsigned int *insn)
  2417. {
  2418. int offset, len = 0;
  2419. while (pattern[len].mask)
  2420. len++;
  2421. for (offset = 0; offset < len; offset++)
  2422. if (hppa_match_insns (gdbarch, pc - offset * HPPA_INSN_SIZE,
  2423. pattern, insn))
  2424. return 1;
  2425. return 0;
  2426. }
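/* Return nonzero if PC lies within the $$dyncall millicode routine.  */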
  2427. static int
  2428. hppa_in_dyncall (CORE_ADDR pc)
  2429. {
  2430. struct unwind_table_entry *u;
  2431. u = find_unwind_entry (hppa_symbol_address ("$$dyncall"));
  2432. if (!u)
  2433. return 0;
  2434. return (pc >= u->region_start && pc <= u->region_end);
  2435. }
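/* Return nonzero if PC appears to be in a shared library call
trampoline: the PLT, $$dyncall, or one of the linker stubs matched by
the patterns above.  */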
  2436. int
  2437. hppa_in_solib_call_trampoline (struct gdbarch *gdbarch, CORE_ADDR pc)
  2438. {
  2439. unsigned int insn[HPPA_MAX_INSN_PATTERN_LEN];
  2440. struct unwind_table_entry *u;
  2441. if (in_plt_section (pc) || hppa_in_dyncall (pc))
  2442. return 1;
  2443. /* The GNU toolchain produces linker stubs without unwind
  2444. information. Since the pattern matching for linker stubs can be
2445. quite slow, bail out if we do have an unwind entry. */
  2446. u = find_unwind_entry (pc);
  2447. if (u != NULL)
  2448. return 0;
  2449. return
  2450. (hppa_match_insns_relaxed (gdbarch, pc, hppa_import_stub, insn)
  2451. || hppa_match_insns_relaxed (gdbarch, pc, hppa_import_pic_stub, insn)
  2452. || hppa_match_insns_relaxed (gdbarch, pc, hppa_long_branch_stub, insn)
  2453. || hppa_match_insns_relaxed (gdbarch, pc,
  2454. hppa_long_branch_pic_stub, insn));
  2455. }
2456. /* This code skips several kinds of "trampolines" used on PA-RISC
  2457. systems: $$dyncall, import stubs and PLT stubs. */
  2458. CORE_ADDR
  2459. hppa_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
  2460. {
  2461. struct gdbarch *gdbarch = get_frame_arch (frame);
  2462. struct type *func_ptr_type = builtin_type (gdbarch)->builtin_func_ptr;
  2463. unsigned int insn[HPPA_MAX_INSN_PATTERN_LEN];
  2464. int dp_rel;
  2465. /* $$dyncall handles both PLABELs and direct addresses. */
  2466. if (hppa_in_dyncall (pc))
  2467. {
  2468. pc = get_frame_register_unsigned (frame, HPPA_R0_REGNUM + 22);
  2469. /* PLABELs have bit 30 set; if it's a PLABEL, then dereference it. */
  2470. if (pc & 0x2)
  2471. pc = read_memory_typed_address (pc & ~0x3, func_ptr_type);
  2472. return pc;
  2473. }
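/* Import stubs compute the target with an addil/ldw pair that is either
%dp-relative (plain stubs) or %r19-relative (PIC stubs); recover the
address the stub will branch to.  */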
  2474. dp_rel = hppa_match_insns (gdbarch, pc, hppa_import_stub, insn);
  2475. if (dp_rel || hppa_match_insns (gdbarch, pc, hppa_import_pic_stub, insn))
  2476. {
  2477. /* Extract the target address from the addil/ldw sequence. */
  2478. pc = hppa_extract_21 (insn[0]) + hppa_extract_14 (insn[1]);
  2479. if (dp_rel)
  2480. pc += get_frame_register_unsigned (frame, HPPA_DP_REGNUM);
  2481. else
  2482. pc += get_frame_register_unsigned (frame, HPPA_R0_REGNUM + 19);
  2483. /* fallthrough */
  2484. }
  2485. if (in_plt_section (pc))
  2486. {
  2487. pc = read_memory_typed_address (pc, func_ptr_type);
  2488. /* If the PLT slot has not yet been resolved, the target will be
  2489. the PLT stub. */
  2490. if (in_plt_section (pc))
  2491. {
  2492. /* Sanity check: are we pointing to the PLT stub? */
  2493. if (!hppa_match_insns (gdbarch, pc, hppa_plt_stub, insn))
  2494. {
  2495. warning (_("Cannot resolve PLT stub at %s."),
  2496. paddress (gdbarch, pc));
  2497. return 0;
  2498. }
  2499. /* This should point to the fixup routine. */
  2500. pc = read_memory_typed_address (pc + 8, func_ptr_type);
  2501. }
  2502. }
  2503. return pc;
  2504. }
2505. /* Here is a table of C type sizes on hppa with various compilers
  2506. and options. I measured this on PA 9000/800 with HP-UX 11.11
  2507. and these compilers:
  2508. /usr/ccs/bin/cc HP92453-01 A.11.01.21
  2509. /opt/ansic/bin/cc HP92453-01 B.11.11.28706.GP
  2510. /opt/aCC/bin/aCC B3910B A.03.45
  2511. gcc gcc 3.3.2 native hppa2.0w-hp-hpux11.11
  2512. cc : 1 2 4 4 8 : 4 8 -- : 4 4
  2513. ansic +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4
  2514. ansic +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4
  2515. ansic +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8
  2516. acc +DA1.1 : 1 2 4 4 8 : 4 8 16 : 4 4
  2517. acc +DA2.0 : 1 2 4 4 8 : 4 8 16 : 4 4
  2518. acc +DA2.0W : 1 2 4 8 8 : 4 8 16 : 8 8
  2519. gcc : 1 2 4 4 8 : 4 8 16 : 4 4
  2520. Each line is:
  2521. compiler and options
  2522. char, short, int, long, long long
  2523. float, double, long double
  2524. char *, void (*)()
  2525. So all these compilers use either ILP32 or LP64 model.
  2526. TODO: gcc has more options so it needs more investigation.
  2527. For floating point types, see:
  2528. http://docs.hp.com/hpux/pdf/B3906-90006.pdf
  2529. HP-UX floating-point guide, hpux 11.00
  2530. -- chastain 2003-12-18 */
  2531. static struct gdbarch *
  2532. hppa_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
  2533. {
  2534. struct gdbarch *gdbarch;
  2535. /* find a candidate among the list of pre-declared architectures. */
  2536. arches = gdbarch_list_lookup_by_info (arches, &info);
  2537. if (arches != NULL)
  2538. return (arches->gdbarch);
  2539. /* If none found, then allocate and initialize one. */
  2540. hppa_gdbarch_tdep *tdep = new hppa_gdbarch_tdep;
  2541. gdbarch = gdbarch_alloc (&info, tdep);
  2542. /* Determine from the bfd_arch_info structure if we are dealing with
  2543. a 32 or 64 bits architecture. If the bfd_arch_info is not available,
  2544. then default to a 32bit machine. */
  2545. if (info.bfd_arch_info != NULL)
  2546. tdep->bytes_per_address =
  2547. info.bfd_arch_info->bits_per_address / info.bfd_arch_info->bits_per_byte;
  2548. else
  2549. tdep->bytes_per_address = 4;

  tdep->find_global_pointer = hppa_find_global_pointer;

  /* Some parts of the gdbarch vector depend on whether we are running
     on a 32-bit or a 64-bit target.  */
  switch (tdep->bytes_per_address)
    {
    case 4:
      set_gdbarch_num_regs (gdbarch, hppa32_num_regs);
      set_gdbarch_register_name (gdbarch, hppa32_register_name);
      set_gdbarch_register_type (gdbarch, hppa32_register_type);
      set_gdbarch_cannot_store_register (gdbarch,
                                         hppa32_cannot_store_register);
      set_gdbarch_cannot_fetch_register (gdbarch,
                                         hppa32_cannot_fetch_register);
      break;
    case 8:
      set_gdbarch_num_regs (gdbarch, hppa64_num_regs);
      set_gdbarch_register_name (gdbarch, hppa64_register_name);
      set_gdbarch_register_type (gdbarch, hppa64_register_type);
      set_gdbarch_dwarf2_reg_to_regnum (gdbarch, hppa64_dwarf_reg_to_regnum);
      set_gdbarch_cannot_store_register (gdbarch,
                                         hppa64_cannot_store_register);
      set_gdbarch_cannot_fetch_register (gdbarch,
                                         hppa64_cannot_fetch_register);
      break;
    default:
      internal_error (__FILE__, __LINE__, _("Unsupported address size: %d"),
                      tdep->bytes_per_address);
    }

  set_gdbarch_long_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT);
  set_gdbarch_ptr_bit (gdbarch, tdep->bytes_per_address * TARGET_CHAR_BIT);

  /* The following gdbarch vector elements are the same in both ILP32
     and LP64, but might show differences some day.  */
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_quad);
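
  /* These widths line up with the type-size table above: with
     TARGET_CHAR_BIT being 8, a 4-byte address size yields 32-bit "long"
     and pointer types and an 8-byte address size yields 64-bit ones,
     while "long long" is 8 bytes and "long double" is 16 bytes for every
     compiler in the table that supports it.  */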

  /* The following gdbarch vector elements do not depend on the address
     size, nor on any other gdbarch element previously set.  */
  set_gdbarch_skip_prologue (gdbarch, hppa_skip_prologue);
  set_gdbarch_stack_frame_destroyed_p (gdbarch,
                                       hppa_stack_frame_destroyed_p);
  set_gdbarch_inner_than (gdbarch, core_addr_greaterthan);
  set_gdbarch_sp_regnum (gdbarch, HPPA_SP_REGNUM);
  set_gdbarch_fp0_regnum (gdbarch, HPPA_FP0_REGNUM);
  set_gdbarch_addr_bits_remove (gdbarch, hppa_addr_bits_remove);
  set_gdbarch_believe_pcc_promotion (gdbarch, 1);
  set_gdbarch_read_pc (gdbarch, hppa_read_pc);
  set_gdbarch_write_pc (gdbarch, hppa_write_pc);

  /* Helper for function argument information.  */
  set_gdbarch_fetch_pointer_argument (gdbarch, hppa_fetch_pointer_argument);

  /* When a hardware watchpoint triggers, we'll move the inferior past
     it by removing all eventpoints; stepping past the instruction
     that caused the trigger; reinserting eventpoints; and checking
     whether any watched location changed.  */
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);

  /* Inferior function call methods.  */
  switch (tdep->bytes_per_address)
    {
    case 4:
      set_gdbarch_push_dummy_call (gdbarch, hppa32_push_dummy_call);
      set_gdbarch_frame_align (gdbarch, hppa32_frame_align);
      set_gdbarch_convert_from_func_ptr_addr
        (gdbarch, hppa32_convert_from_func_ptr_addr);
      break;
    case 8:
      set_gdbarch_push_dummy_call (gdbarch, hppa64_push_dummy_call);
      set_gdbarch_frame_align (gdbarch, hppa64_frame_align);
      break;
    default:
      internal_error (__FILE__, __LINE__, _("bad switch"));
    }

  /* Struct return methods.  */
  switch (tdep->bytes_per_address)
    {
    case 4:
      set_gdbarch_return_value (gdbarch, hppa32_return_value);
      break;
    case 8:
      set_gdbarch_return_value (gdbarch, hppa64_return_value);
      break;
    default:
      internal_error (__FILE__, __LINE__, _("bad switch"));
    }

  set_gdbarch_breakpoint_kind_from_pc (gdbarch, hppa_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch, hppa_breakpoint::bp_from_kind);
  set_gdbarch_pseudo_register_read (gdbarch, hppa_pseudo_register_read);

  /* Frame unwind methods.  */
  set_gdbarch_unwind_pc (gdbarch, hppa_unwind_pc);

  /* Hook in ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  /* Hook in the default unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &hppa_stub_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &hppa_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &hppa_fallback_frame_unwind);
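
  /* The unwinders are consulted in list order, so any ABI-specific
     unwinders hooked in by gdbarch_init_osabi above are tried before
     these defaults, and hppa_fallback_frame_unwind is the last resort.  */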

  return gdbarch;
}

static void
hppa_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  hppa_gdbarch_tdep *tdep = (hppa_gdbarch_tdep *) gdbarch_tdep (gdbarch);

  gdb_printf (file, "bytes_per_address = %d\n",
              tdep->bytes_per_address);
  gdb_printf (file, "elf = %s\n", tdep->is_elf ? "yes" : "no");
}

void _initialize_hppa_tdep ();
void
_initialize_hppa_tdep ()
{
  gdbarch_register (bfd_arch_hppa, hppa_gdbarch_init, hppa_dump_tdep);

  add_cmd ("unwind", class_maintenance, unwind_command,
           _("Print unwind table entry at given address."),
           &maintenanceprintlist);
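
  /* Usage sketch (the address is made up):

       (gdb) maintenance print unwind 0x10001234

     which prints the unwind table entry covering that address.  */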

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("hppa", class_maintenance, &hppa_debug, _("\
Set whether hppa target specific debugging information should be displayed."),
                           _("\
Show whether hppa target specific debugging information is displayed."), _("\
This flag controls whether hppa target specific debugging information is\n\
displayed.  This information is particularly useful for debugging frame\n\
unwinding problems."),
                           NULL,
                           NULL, /* FIXME: i18n: hppa debug flag is %s.  */
                           &setdebuglist, &showdebuglist);
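
  /* From the GDB prompt this is reached as "set debug hppa on" and
     "show debug hppa", since the command is registered in the
     "set debug" and "show debug" command lists.  */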
}