coff-sh.c 93 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236
  1. /* BFD back-end for Renesas Super-H COFF binaries.
  2. Copyright (C) 1993-2022 Free Software Foundation, Inc.
  3. Contributed by Cygnus Support.
  4. Written by Steve Chamberlain, <sac@cygnus.com>.
  5. Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
  6. This file is part of BFD, the Binary File Descriptor library.
  7. This program is free software; you can redistribute it and/or modify
  8. it under the terms of the GNU General Public License as published by
  9. the Free Software Foundation; either version 3 of the License, or
  10. (at your option) any later version.
  11. This program is distributed in the hope that it will be useful,
  12. but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. GNU General Public License for more details.
  15. You should have received a copy of the GNU General Public License
  16. along with this program; if not, write to the Free Software
  17. Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
  18. MA 02110-1301, USA. */
  19. #include "sysdep.h"
  20. #include "bfd.h"
  21. #include "libiberty.h"
  22. #include "libbfd.h"
  23. #include "bfdlink.h"
  24. #include "coff/sh.h"
  25. #include "coff/internal.h"
  26. #undef bfd_pe_print_pdata
  27. #ifdef COFF_WITH_PE
  28. #include "coff/pe.h"
  29. #ifndef COFF_IMAGE_WITH_PE
  30. static bool sh_align_load_span
  31. (bfd *, asection *, bfd_byte *,
  32. bool (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
  33. void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bool *);
  34. #define _bfd_sh_align_load_span sh_align_load_span
  35. #endif
  36. #define bfd_pe_print_pdata _bfd_pe_print_ce_compressed_pdata
  37. #else
  38. #define bfd_pe_print_pdata NULL
  39. #endif /* COFF_WITH_PE. */
  40. #include "libcoff.h"
  41. /* Internal functions. */
  42. #ifdef COFF_WITH_PE
  43. /* Can't build import tables with 2**4 alignment. */
  44. #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 2
  45. #else
  46. /* Default section alignment to 2**4. */
  47. #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER 4
  48. #endif
  49. #ifdef COFF_IMAGE_WITH_PE
  50. /* Align PE executables. */
  51. #define COFF_PAGE_SIZE 0x1000
  52. #endif
  53. /* Generate long file names. */
  54. #define COFF_LONG_FILENAMES
  55. #ifdef COFF_WITH_PE
  56. /* Return TRUE if this relocation should
  57. appear in the output .reloc section. */
  58. static bool
  59. in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
  60. reloc_howto_type * howto)
  61. {
  62. return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
  63. }
  64. #endif
  65. static bfd_reloc_status_type
  66. sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
  67. static bool
  68. sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
  69. bfd_byte *, struct internal_reloc *,
  70. struct internal_syment *, asection **);
  71. static bool
  72. sh_align_loads (bfd *, asection *, struct internal_reloc *,
  73. bfd_byte *, bool *);
/* The supported relocations.  There are a lot of relocations defined
   in coff/internal.h which we do not expect to ever see.

   Indexed directly by the COFF r_type value (see RTYPE2HOWTO below),
   so EMPTY_HOWTO placeholders keep unsupported types at their
   numeric positions.  */

static reloc_howto_type sh_coff_howtos[] =
{
  EMPTY_HOWTO (0),
  EMPTY_HOWTO (1),
#ifdef COFF_WITH_PE
  /* Windows CE */
  HOWTO (R_SH_IMM32CE,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm32ce",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
#else
  EMPTY_HOWTO (2),
#endif
  EMPTY_HOWTO (3),		/* R_SH_PCREL8 */
  EMPTY_HOWTO (4),		/* R_SH_PCREL16 */
  EMPTY_HOWTO (5),		/* R_SH_HIGH8 */
  EMPTY_HOWTO (6),		/* R_SH_IMM24 */
  EMPTY_HOWTO (7),		/* R_SH_LOW16 */
  EMPTY_HOWTO (8),
  EMPTY_HOWTO (9),		/* R_SH_PCDISP8BY4 */

  /* 8-bit PC-relative branch displacement, in 2-byte units.  */
  HOWTO (R_SH_PCDISP8BY2,	/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcdisp8by2",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  EMPTY_HOWTO (11),		/* R_SH_PCDISP8 */

  /* 12-bit PC-relative branch displacement, in 2-byte units.  */
  HOWTO (R_SH_PCDISP,		/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 12,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcdisp12by2",	/* name */
	 true,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 true),			/* pcrel_offset */

  EMPTY_HOWTO (13),

  /* 32-bit absolute address.  */
  HOWTO (R_SH_IMM32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  EMPTY_HOWTO (15),
#ifdef COFF_WITH_PE
  /* 32-bit address relative to the PE image base (RVA).  */
  HOWTO (R_SH_IMAGEBASE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "rva32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
#else
  EMPTY_HOWTO (16),		/* R_SH_IMM8 */
#endif
  EMPTY_HOWTO (17),		/* R_SH_IMM8BY2 */
  EMPTY_HOWTO (18),		/* R_SH_IMM8BY4 */
  EMPTY_HOWTO (19),		/* R_SH_IMM4 */
  EMPTY_HOWTO (20),		/* R_SH_IMM4BY2 */
  EMPTY_HOWTO (21),		/* R_SH_IMM4BY4 */

  /* 8-bit PC-relative immediate, in 2-byte units.  */
  HOWTO (R_SH_PCRELIMM8BY2,	/* type */
	 1,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcrelimm8by2",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  /* 8-bit PC-relative immediate, in 4-byte units.  */
  HOWTO (R_SH_PCRELIMM8BY4,	/* type */
	 2,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 true,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_unsigned, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_pcrelimm8by4",	/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 true),			/* pcrel_offset */

  /* 16-bit absolute immediate.  */
  HOWTO (R_SH_IMM16,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_imm16",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* 16-bit switch-table entry (relax-time bookkeeping).  */
  HOWTO (R_SH_SWITCH16,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch16",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* 32-bit switch-table entry (relax-time bookkeeping).  */
  HOWTO (R_SH_SWITCH32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch32",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Marks a function-call register load (relax-time bookkeeping).  */
  HOWTO (R_SH_USES,		/* type */
	 0,			/* rightshift */
	 1,			/* size (0 = byte, 1 = short, 2 = long) */
	 16,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_uses",		/* name */
	 true,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Reference count for a relaxable constant (relax-time bookkeeping).  */
  HOWTO (R_SH_COUNT,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_count",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Alignment requirement marker (relax-time bookkeeping).  */
  HOWTO (R_SH_ALIGN,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_align",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Start-of-code marker (relax-time bookkeeping).  */
  HOWTO (R_SH_CODE,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_code",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Start-of-data marker (relax-time bookkeeping).  */
  HOWTO (R_SH_DATA,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_data",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* Label marker (relax-time bookkeeping).  */
  HOWTO (R_SH_LABEL,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_label",		/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */

  /* 8-bit switch-table entry (relax-time bookkeeping).  */
  HOWTO (R_SH_SWITCH8,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 8,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield, /* complain_on_overflow */
	 sh_reloc,		/* special_function */
	 "r_switch8",		/* name */
	 true,			/* partial_inplace */
	 0xff,			/* src_mask */
	 0xff,			/* dst_mask */
	 false)			/* pcrel_offset */
};
  326. #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
  327. /* Check for a bad magic number. */
  328. #define BADMAG(x) SHBADMAG(x)
  329. /* Customize coffcode.h (this is not currently used). */
  330. #define SH 1
  331. /* FIXME: This should not be set here. */
  332. #define __A_MAGIC_SET__
  333. #ifndef COFF_WITH_PE
  334. /* Swap the r_offset field in and out. */
  335. #define SWAP_IN_RELOC_OFFSET H_GET_32
  336. #define SWAP_OUT_RELOC_OFFSET H_PUT_32
  337. /* Swap out extra information in the reloc structure. */
  338. #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst) \
  339. do \
  340. { \
  341. dst->r_stuff[0] = 'S'; \
  342. dst->r_stuff[1] = 'C'; \
  343. } \
  344. while (0)
  345. #endif
  346. /* Get the value of a symbol, when performing a relocation. */
  347. static long
  348. get_symbol_value (asymbol *symbol)
  349. {
  350. bfd_vma relocation;
  351. if (bfd_is_com_section (symbol->section))
  352. relocation = 0;
  353. else
  354. relocation = (symbol->value +
  355. symbol->section->output_section->vma +
  356. symbol->section->output_offset);
  357. return relocation;
  358. }
  359. #ifdef COFF_WITH_PE
  360. /* Convert an rtype to howto for the COFF backend linker.
  361. Copied from coff-i386. */
  362. #define coff_rtype_to_howto coff_sh_rtype_to_howto
  363. static reloc_howto_type *
  364. coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
  365. asection * sec,
  366. struct internal_reloc * rel,
  367. struct coff_link_hash_entry * h,
  368. struct internal_syment * sym,
  369. bfd_vma * addendp)
  370. {
  371. reloc_howto_type * howto;
  372. howto = sh_coff_howtos + rel->r_type;
  373. *addendp = 0;
  374. if (howto->pc_relative)
  375. *addendp += sec->vma;
  376. if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
  377. {
  378. /* This is a common symbol. The section contents include the
  379. size (sym->n_value) as an addend. The relocate_section
  380. function will be adding in the final value of the symbol. We
  381. need to subtract out the current size in order to get the
  382. correct result. */
  383. BFD_ASSERT (h != NULL);
  384. }
  385. if (howto->pc_relative)
  386. {
  387. *addendp -= 4;
  388. /* If the symbol is defined, then the generic code is going to
  389. add back the symbol value in order to cancel out an
  390. adjustment it made to the addend. However, we set the addend
  391. to 0 at the start of this function. We need to adjust here,
  392. to avoid the adjustment the generic code will make. FIXME:
  393. This is getting a bit hackish. */
  394. if (sym != NULL && sym->n_scnum != 0)
  395. *addendp -= sym->n_value;
  396. }
  397. if (rel->r_type == R_SH_IMAGEBASE)
  398. *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
  399. return howto;
  400. }
  401. #endif /* COFF_WITH_PE */
/* This structure is used to map BFD reloc codes to SH COFF/PE relocs.  */

struct shcoff_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char shcoff_reloc_val;		/* Index into sh_coff_howtos.  */
};

#ifdef COFF_WITH_PE
/* An array mapping BFD reloc codes to SH PE relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32CE },
  { BFD_RELOC_RVA, R_SH_IMAGEBASE },
  { BFD_RELOC_CTOR, R_SH_IMM32CE },
};
#else
/* An array mapping BFD reloc codes to plain SH COFF relocs.  */
static const struct shcoff_reloc_map sh_reloc_map[] =
{
  { BFD_RELOC_32, R_SH_IMM32 },
  { BFD_RELOC_CTOR, R_SH_IMM32 },
};
#endif
  424. /* Given a BFD reloc code, return the howto structure for the
  425. corresponding SH PE reloc. */
  426. #define coff_bfd_reloc_type_lookup sh_coff_reloc_type_lookup
  427. #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
  428. static reloc_howto_type *
  429. sh_coff_reloc_type_lookup (bfd *abfd,
  430. bfd_reloc_code_real_type code)
  431. {
  432. unsigned int i;
  433. for (i = ARRAY_SIZE (sh_reloc_map); i--;)
  434. if (sh_reloc_map[i].bfd_reloc_val == code)
  435. return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
  436. _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
  437. abfd, (unsigned int) code);
  438. return NULL;
  439. }
  440. static reloc_howto_type *
  441. sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
  442. const char *r_name)
  443. {
  444. unsigned int i;
  445. for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
  446. if (sh_coff_howtos[i].name != NULL
  447. && strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
  448. return &sh_coff_howtos[i];
  449. return NULL;
  450. }
/* This macro is used in coffcode.h to get the howto corresponding to
   an internal reloc.  Yields NULL for r_type values outside the
   sh_coff_howtos table.  */

#define RTYPE2HOWTO(relent, internal) \
  ((relent)->howto = \
   ((internal)->r_type < SH_COFF_HOWTO_COUNT \
    ? &sh_coff_howtos[(internal)->r_type] \
    : (reloc_howto_type *) NULL))

/* This is the same as the macro in coffcode.h, except that it copies
   r_offset into reloc_entry->addend for some relocs.

   For the relax-time bookkeeping relocs (SWITCH8/16/32, USES, COUNT,
   ALIGN) the interesting value lives in r_offset, not in the section
   contents, so the addend computed by the generic logic above is
   overridden with r_offset at the end.  */

#define CALC_ADDEND(abfd, ptr, reloc, cache_ptr) \
  { \
    coff_symbol_type *coffsym = (coff_symbol_type *) NULL; \
    if (ptr && bfd_asymbol_bfd (ptr) != abfd) \
      coffsym = (obj_symbols (abfd) \
		 + (cache_ptr->sym_ptr_ptr - symbols)); \
    else if (ptr) \
      coffsym = coff_symbol_from (ptr); \
    if (coffsym != (coff_symbol_type *) NULL \
	&& coffsym->native->u.syment.n_scnum == 0) \
      cache_ptr->addend = 0; \
    else if (ptr && bfd_asymbol_bfd (ptr) == abfd \
	     && ptr->section != (asection *) NULL) \
      cache_ptr->addend = - (ptr->section->vma + ptr->value); \
    else \
      cache_ptr->addend = 0; \
    if ((reloc).r_type == R_SH_SWITCH8 \
	|| (reloc).r_type == R_SH_SWITCH16 \
	|| (reloc).r_type == R_SH_SWITCH32 \
	|| (reloc).r_type == R_SH_USES \
	|| (reloc).r_type == R_SH_COUNT \
	|| (reloc).r_type == R_SH_ALIGN) \
      cache_ptr->addend = (reloc).r_offset; \
  }
/* This is the howto function for the SH relocations.

   Only the relocs that still need work at final link time are
   processed here: 32-bit absolute relocs (R_SH_IMM32, and under PE
   also R_SH_IMM32CE / R_SH_IMAGEBASE) and R_SH_PCDISP branches to
   non-local symbols.  Everything else is relax-time bookkeeping
   already handled in sh_relax_section and is passed through
   unchanged.  */

static bfd_reloc_status_type
sh_reloc (bfd * abfd,
	  arelent * reloc_entry,
	  asymbol * symbol_in,
	  void * data,
	  asection * input_section,
	  bfd * output_bfd,
	  char ** error_message ATTRIBUTE_UNUSED)
{
  bfd_vma insn;
  bfd_vma sym_value;
  unsigned short r_type;
  bfd_vma addr = reloc_entry->address;	/* Offset of the field in DATA.  */
  bfd_byte *hit_data = addr + (bfd_byte *) data;

  r_type = reloc_entry->howto->type;

  if (output_bfd != NULL)
    {
      /* Partial linking--do nothing.  */
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Almost all relocs have to do with relaxing.  If any work must be
     done for them, it has been done in sh_relax_section.  */
  if (r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
      && r_type != R_SH_IMM32CE
      && r_type != R_SH_IMAGEBASE
#endif
      && (r_type != R_SH_PCDISP
	  || (symbol_in->flags & BSF_LOCAL) != 0))
    return bfd_reloc_ok;

  if (symbol_in != NULL
      && bfd_is_und_section (symbol_in->section))
    return bfd_reloc_undefined;

  /* NOTE(review): this permits addr == size; presumably callers never
     hand in an address whose 2- or 4-byte field would straddle the
     section end -- confirm.  */
  if (addr > input_section->size)
    return bfd_reloc_outofrange;

  sym_value = get_symbol_value (symbol_in);

  switch (r_type)
    {
    case R_SH_IMM32:
#ifdef COFF_WITH_PE
    case R_SH_IMM32CE:
#endif
      /* 32-bit absolute: add symbol value plus addend into the field.  */
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      bfd_put_32 (abfd, insn, hit_data);
      break;
#ifdef COFF_WITH_PE
    case R_SH_IMAGEBASE:
      /* As above, but stored relative to the PE image base.  */
      insn = bfd_get_32 (abfd, hit_data);
      insn += sym_value + reloc_entry->addend;
      insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
      bfd_put_32 (abfd, insn, hit_data);
      break;
#endif
    case R_SH_PCDISP:
      /* 12-bit PC-relative branch; the displacement is counted in
	 2-byte units from four bytes past the branch instruction.  */
      insn = bfd_get_16 (abfd, hit_data);
      sym_value += reloc_entry->addend;
      sym_value -= (input_section->output_section->vma
		    + input_section->output_offset
		    + addr
		    + 4);
      /* Sign-extend the existing 12-bit displacement and fold it in.  */
      sym_value += (((insn & 0xfff) ^ 0x800) - 0x800) << 1;
      insn = (insn & 0xf000) | ((sym_value >> 1) & 0xfff);
      bfd_put_16 (abfd, insn, hit_data);
      /* The value is written first, then checked: out-of-range or
	 odd (misaligned) displacements report overflow.  */
      if (sym_value + 0x1000 >= 0x2000 || (sym_value & 1) != 0)
	return bfd_reloc_overflow;
      break;
    default:
      abort ();
      break;
    }

  return bfd_reloc_ok;
}
  559. #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
  560. /* We can do relaxing. */
  561. #define coff_bfd_relax_section sh_relax_section
  562. /* We use the special COFF backend linker. */
  563. #define coff_relocate_section sh_relocate_section
  564. /* When relaxing, we need to use special code to get the relocated
  565. section contents. */
  566. #define coff_bfd_get_relocated_section_contents \
  567. sh_coff_get_relocated_section_contents
  568. #include "coffcode.h"
  569. static bool
  570. sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
  571. /* This function handles relaxing on the SH.
  572. Function calls on the SH look like this:
  573. movl L1,r0
  574. ...
  575. jsr @r0
  576. ...
  577. L1:
  578. .long function
  579. The compiler and assembler will cooperate to create R_SH_USES
  580. relocs on the jsr instructions. The r_offset field of the
  581. R_SH_USES reloc is the PC relative offset to the instruction which
  582. loads the register (the r_offset field is computed as though it
  583. were a jump instruction, so the offset value is actually from four
  584. bytes past the instruction). The linker can use this reloc to
  585. determine just which function is being called, and thus decide
  586. whether it is possible to replace the jsr with a bsr.
  587. If multiple function calls are all based on a single register load
  588. (i.e., the same function is called multiple times), the compiler
  589. guarantees that each function call will have an R_SH_USES reloc.
  590. Therefore, if the linker is able to convert each R_SH_USES reloc
  591. which refers to that address, it can safely eliminate the register
  592. load.
  593. When the assembler creates an R_SH_USES reloc, it examines it to
  594. determine which address is being loaded (L1 in the above example).
  595. It then counts the number of references to that address, and
  596. creates an R_SH_COUNT reloc at that address. The r_offset field of
  597. the R_SH_COUNT reloc will be the number of references. If the
  598. linker is able to eliminate a register load, it can use the
  599. R_SH_COUNT reloc to see whether it can also eliminate the function
  600. address.
  601. SH relaxing also handles another, unrelated, matter. On the SH, if
  602. a load or store instruction is not aligned on a four byte boundary,
  603. the memory cycle interferes with the 32 bit instruction fetch,
  604. causing a one cycle bubble in the pipeline. Therefore, we try to
  605. align load and store instructions on four byte boundaries if we
  606. can, by swapping them with one of the adjacent instructions. */
/* Relax SEC of ABFD as described in the comment above: convert
   register-indirect jsr calls into direct bsr instructions where the
   callee is in range, deleting dead register loads and (when
   R_SH_COUNT drops to zero) the stored function addresses, then align
   loads/stores when R_SH_CODE relocs are present.  Sets *AGAIN when a
   deletion may have brought other calls into range, so the linker
   should run another pass.  Returns false on error.  */
static bool
sh_relax_section (bfd *abfd,
		  asection *sec,
		  struct bfd_link_info *link_info,
		  bool *again)
{
  struct internal_reloc *internal_relocs;
  bool have_code;
  struct internal_reloc *irel, *irelend;
  bfd_byte *contents = NULL;

  *again = false;

  /* Nothing to do for a relocatable link or a section without relocs.  */
  if (bfd_link_relocatable (link_info)
      || (sec->flags & SEC_RELOC) == 0
      || sec->reloc_count == 0)
    return true;

  /* Make sure the per-section COFF tdata exists; it caches relocs and
     contents across relaxation passes.  */
  if (coff_section_data (abfd, sec) == NULL)
    {
      size_t amt = sizeof (struct coff_section_tdata);

      sec->used_by_bfd = bfd_zalloc (abfd, amt);
      if (sec->used_by_bfd == NULL)
	return false;
    }

  internal_relocs = (_bfd_coff_read_internal_relocs
		     (abfd, sec, link_info->keep_memory,
		      (bfd_byte *) NULL, false,
		      (struct internal_reloc *) NULL));
  if (internal_relocs == NULL)
    goto error_return;

  have_code = false;

  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma laddr, paddr, symval;
      unsigned short insn;
      struct internal_reloc *irelfn, *irelscan, *irelcount;
      struct internal_syment sym;
      bfd_signed_vma foff;

      /* Remember for the alignment pass below.  */
      if (irel->r_type == R_SH_CODE)
	have_code = true;

      if (irel->r_type != R_SH_USES)
	continue;

      /* Get the section contents (possibly cached from an earlier pass).  */
      if (contents == NULL)
	{
	  if (coff_section_data (abfd, sec)->contents != NULL)
	    contents = coff_section_data (abfd, sec)->contents;
	  else
	    {
	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
		goto error_return;
	    }
	}

      /* The r_offset field of the R_SH_USES reloc will point us to
	 the register load.  The 4 is because the r_offset field is
	 computed as though it were a jump offset, which are based
	 from 4 bytes after the jump instruction.  */
      laddr = irel->r_vaddr - sec->vma + 4;
      /* Careful to sign extend the 32-bit offset.  */
      laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
      if (laddr >= sec->size)
	{
	  /* xgettext: c-format */
	  _bfd_error_handler
	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES offset"),
	     abfd, (uint64_t) irel->r_vaddr);
	  continue;
	}
      insn = bfd_get_16 (abfd, contents + laddr);

      /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
      if ((insn & 0xf000) != 0xd000)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: R_SH_USES points to unrecognized insn %#x"),
	     abfd, (uint64_t) irel->r_vaddr, insn);
	  continue;
	}

      /* Get the address from which the register is being loaded.  The
	 displacement in the mov.l instruction is quadrupled.  It is a
	 displacement from four bytes after the movl instruction, but,
	 before adding in the PC address, two least significant bits
	 of the PC are cleared.  We assume that the section is aligned
	 on a four byte boundary.  */
      paddr = insn & 0xff;
      paddr *= 4;
      paddr += (laddr + 4) &~ (bfd_vma) 3;
      if (paddr >= sec->size)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES load offset"),
	     abfd, (uint64_t) irel->r_vaddr);
	  continue;
	}

      /* Get the reloc for the address from which the register is
	 being loaded.  This reloc will tell us which function is
	 actually being called.  */
      paddr += sec->vma;
      for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
	if (irelfn->r_vaddr == paddr
#ifdef COFF_WITH_PE
	    && (irelfn->r_type == R_SH_IMM32
		|| irelfn->r_type == R_SH_IMM32CE
		|| irelfn->r_type == R_SH_IMAGEBASE)
#else
	    && irelfn->r_type == R_SH_IMM32
#endif
	    )
	  break;
      if (irelfn >= irelend)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: could not find expected reloc"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      /* Get the value of the symbol referred to by the reloc.  */
      if (! _bfd_coff_get_external_symbols (abfd))
	goto error_return;
      bfd_coff_swap_sym_in (abfd,
			    ((bfd_byte *) obj_coff_external_syms (abfd)
			     + (irelfn->r_symndx
				* bfd_coff_symesz (abfd))),
			    &sym);
      if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: symbol in unexpected section"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      if (sym.n_sclass != C_EXT)
	{
	  /* Section-local symbol: compute its output address directly.  */
	  symval = (sym.n_value
		    - sec->vma
		    + sec->output_section->vma
		    + sec->output_offset);
	}
      else
	{
	  struct coff_link_hash_entry *h;

	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
	  BFD_ASSERT (h != NULL);
	  if (h->root.type != bfd_link_hash_defined
	      && h->root.type != bfd_link_hash_defweak)
	    {
	      /* This appears to be a reference to an undefined
		 symbol.  Just ignore it--it will be caught by the
		 regular reloc processing.  */
	      continue;
	    }

	  symval = (h->root.u.def.value
		    + h->root.u.def.section->output_section->vma
		    + h->root.u.def.section->output_offset);
	}

      symval += bfd_get_32 (abfd, contents + paddr - sec->vma);

      /* See if this function call can be shortened.  */
      foff = (symval
	      - (irel->r_vaddr
		 - sec->vma
		 + sec->output_section->vma
		 + sec->output_offset
		 + 4));
      /* bsr carries a signed 12-bit displacement in 2-byte units (see
	 R_SH_PCDISP in sh_reloc), so the reachable byte range is
	 [-0x1000, 0xfff].  */
      if (foff < -0x1000 || foff >= 0x1000)
	{
	  /* After all that work, we can't shorten this function call.  */
	  continue;
	}

      /* Shorten the function call.  */

      /* For simplicity of coding, we are going to modify the section
	 contents, the section relocs, and the BFD symbol table.  We
	 must tell the rest of the code not to free up this
	 information.  It would be possible to instead create a table
	 of changes which have to be made, as is done in coff-mips.c;
	 that would be more work, but would require less memory when
	 the linker is run.  */

      coff_section_data (abfd, sec)->relocs = internal_relocs;
      coff_section_data (abfd, sec)->keep_relocs = true;

      coff_section_data (abfd, sec)->contents = contents;
      coff_section_data (abfd, sec)->keep_contents = true;

      obj_coff_keep_syms (abfd) = true;

      /* Replace the jsr with a bsr.  */

      /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
	 replace the jsr with a bsr.  */
      irel->r_type = R_SH_PCDISP;
      irel->r_symndx = irelfn->r_symndx;
      if (sym.n_sclass != C_EXT)
	{
	  /* If this needs to be changed because of future relaxing,
	     it will be handled here like other internal PCDISP
	     relocs.  */
	  bfd_put_16 (abfd,
		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
		      contents + irel->r_vaddr - sec->vma);
	}
      else
	{
	  /* We can't fully resolve this yet, because the external
	     symbol value may be changed by future relaxing.  We let
	     the final link phase handle it.  */
	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
		      contents + irel->r_vaddr - sec->vma);
	}

      /* See if there is another R_SH_USES reloc referring to the same
	 register load.  */
      for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
	if (irelscan->r_type == R_SH_USES
	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
	  break;
      if (irelscan < irelend)
	{
	  /* Some other function call depends upon this register load,
	     and we have not yet converted that function call.
	     Indeed, we may never be able to convert it.  There is
	     nothing else we can do at this point.  */
	  continue;
	}

      /* Look for a R_SH_COUNT reloc on the location where the
	 function address is stored.  Do this before deleting any
	 bytes, to avoid confusion about the address.  */
      for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
	if (irelcount->r_vaddr == paddr
	    && irelcount->r_type == R_SH_COUNT)
	  break;

      /* Delete the register load.  */
      if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
	goto error_return;

      /* That will change things, so, just in case it permits some
	 other function call to come within range, we should relax
	 again.  Note that this is not required, and it may be slow.  */
      *again = true;

      /* Now check whether we got a COUNT reloc.  */
      if (irelcount >= irelend)
	{
	  _bfd_error_handler
	    /* xgettext: c-format */
	    (_("%pB: %#" PRIx64 ": warning: could not find expected COUNT reloc"),
	     abfd, (uint64_t) paddr);
	  continue;
	}

      /* The number of uses is stored in the r_offset field.  We've
	 just deleted one.  */
      if (irelcount->r_offset == 0)
	{
	  /* xgettext: c-format */
	  _bfd_error_handler (_("%pB: %#" PRIx64 ": warning: bad count"),
			      abfd, (uint64_t) paddr);
	  continue;
	}

      --irelcount->r_offset;

      /* If there are no more uses, we can delete the address.  Reload
	 the address from irelfn, in case it was changed by the
	 previous call to sh_relax_delete_bytes.  */
      if (irelcount->r_offset == 0)
	{
	  if (! sh_relax_delete_bytes (abfd, sec,
				       irelfn->r_vaddr - sec->vma, 4))
	    goto error_return;
	}

      /* We've done all we can with that function call.  */
    }

  /* Look for load and store instructions that we can align on four
     byte boundaries.  */
  if (have_code)
    {
      bool swapped;

      /* Get the section contents.  */
      if (contents == NULL)
	{
	  if (coff_section_data (abfd, sec)->contents != NULL)
	    contents = coff_section_data (abfd, sec)->contents;
	  else
	    {
	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
		goto error_return;
	    }
	}

      if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
	goto error_return;

      /* If instructions were swapped, the cached relocs/contents were
	 modified in place and must be kept.  */
      if (swapped)
	{
	  coff_section_data (abfd, sec)->relocs = internal_relocs;
	  coff_section_data (abfd, sec)->keep_relocs = true;

	  coff_section_data (abfd, sec)->contents = contents;
	  coff_section_data (abfd, sec)->keep_contents = true;

	  obj_coff_keep_syms (abfd) = true;
	}
    }

  /* Free or cache the relocs and contents, depending on whether they
     were stashed in the section tdata above.  */
  if (internal_relocs != NULL
      && internal_relocs != coff_section_data (abfd, sec)->relocs)
    {
      if (! link_info->keep_memory)
	free (internal_relocs);
      else
	coff_section_data (abfd, sec)->relocs = internal_relocs;
    }

  if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
    {
      if (! link_info->keep_memory)
	free (contents);
      else
	/* Cache the section contents for coff_link_input_bfd.  */
	coff_section_data (abfd, sec)->contents = contents;
    }

  return true;

 error_return:
  if (internal_relocs != coff_section_data (abfd, sec)->relocs)
    free (internal_relocs);
  if (contents != coff_section_data (abfd, sec)->contents)
    free (contents);
  return false;
}
/* Delete COUNT bytes from SEC of ABFD at offset ADDR while relaxing.
   The deletion stops at the next R_SH_ALIGN reloc whose alignment
   power exceeds COUNT; in that case the freed bytes are replaced with
   NOPs and the section size is unchanged.  All relocs, reloc targets,
   addends of IMM32 relocs in other sections, and internal symbols
   that point past the gap are adjusted.  Returns false on error.  */
static bool
sh_relax_delete_bytes (bfd *abfd,
		       asection *sec,
		       bfd_vma addr,
		       int count)
{
  bfd_byte *contents;
  struct internal_reloc *irel, *irelend;
  struct internal_reloc *irelalign;
  bfd_vma toaddr;
  bfd_byte *esym, *esymend;
  bfd_size_type symesz;
  struct coff_link_hash_entry **sym_hash;
  asection *o;

  /* Contents are guaranteed cached by the caller (sh_relax_section).  */
  contents = coff_section_data (abfd, sec)->contents;

  /* The deletion must stop at the next ALIGN reloc for an alignment
     power larger than the number of bytes we are deleting.  */

  irelalign = NULL;
  toaddr = sec->size;

  irel = coff_section_data (abfd, sec)->relocs;
  irelend = irel + sec->reloc_count;
  for (; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_ALIGN
	  && irel->r_vaddr - sec->vma > addr
	  && count < (1 << irel->r_offset))
	{
	  irelalign = irel;
	  toaddr = irel->r_vaddr - sec->vma;
	  break;
	}
    }

  /* Actually delete the bytes.  */
  memmove (contents + addr, contents + addr + count,
	   (size_t) (toaddr - addr - count));
  if (irelalign == NULL)
    sec->size -= count;
  else
    {
      int i;

#define NOP_OPCODE (0x0009)

      /* Pad up to the ALIGN boundary with NOPs instead of shrinking.  */
      BFD_ASSERT ((count & 1) == 0);
      for (i = 0; i < count; i += 2)
	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
    }

  /* Adjust all the relocs.  */
  for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
    {
      bfd_vma nraddr, stop;
      bfd_vma start = 0;
      int insn = 0;
      struct internal_syment sym;
      int off, adjust, oinsn;
      bfd_signed_vma voff = 0;
      bool overflow;

      /* Get the new reloc address.  */
      nraddr = irel->r_vaddr - sec->vma;
      if ((irel->r_vaddr - sec->vma > addr
	   && irel->r_vaddr - sec->vma < toaddr)
	  || (irel->r_type == R_SH_ALIGN
	      && irel->r_vaddr - sec->vma == toaddr))
	nraddr -= count;

      /* See if this reloc was for the bytes we have deleted, in which
	 case we no longer care about it.  Don't delete relocs which
	 represent addresses, though.  */
      if (irel->r_vaddr - sec->vma >= addr
	  && irel->r_vaddr - sec->vma < addr + count
	  && irel->r_type != R_SH_ALIGN
	  && irel->r_type != R_SH_CODE
	  && irel->r_type != R_SH_DATA
	  && irel->r_type != R_SH_LABEL)
	irel->r_type = R_SH_UNUSED;

      /* If this is a PC relative reloc, see if the range it covers
	 includes the bytes we have deleted.  */
      switch (irel->r_type)
	{
	default:
	  break;

	case R_SH_PCDISP8BY2:
	case R_SH_PCDISP:
	case R_SH_PCRELIMM8BY2:
	case R_SH_PCRELIMM8BY4:
	  start = irel->r_vaddr - sec->vma;
	  insn = bfd_get_16 (abfd, contents + nraddr);
	  break;
	}

      /* Compute START and STOP, the endpoints of the span each reloc
	 covers; if exactly one endpoint lies inside (ADDR, TOADDR),
	 the encoded displacement must change by COUNT below.  */
      switch (irel->r_type)
	{
	default:
	  start = stop = addr;
	  break;

	case R_SH_IMM32:
#ifdef COFF_WITH_PE
	case R_SH_IMM32CE:
	case R_SH_IMAGEBASE:
#endif
	  /* If this reloc is against a symbol defined in this
	     section, and the symbol will not be adjusted below, we
	     must check the addend to see it will put the value in
	     range to be adjusted, and hence must be changed.  */
	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irel->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass != C_EXT
	      && sym.n_scnum == sec->target_index
	      && ((bfd_vma) sym.n_value <= addr
		  || (bfd_vma) sym.n_value >= toaddr))
	    {
	      bfd_vma val;

	      val = bfd_get_32 (abfd, contents + nraddr);
	      val += sym.n_value;
	      if (val > addr && val < toaddr)
		bfd_put_32 (abfd, val - count, contents + nraddr);
	    }
	  start = stop = addr;
	  break;

	case R_SH_PCDISP8BY2:
	  off = insn & 0xff;
	  if (off & 0x80)
	    off -= 0x100;
	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
	  break;

	case R_SH_PCDISP:
	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irel->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass == C_EXT)
	    start = stop = addr;
	  else
	    {
	      off = insn & 0xfff;
	      if (off & 0x800)
		off -= 0x1000;
	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
	    }
	  break;

	case R_SH_PCRELIMM8BY2:
	  off = insn & 0xff;
	  stop = start + 4 + off * 2;
	  break;

	case R_SH_PCRELIMM8BY4:
	  off = insn & 0xff;
	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
	  break;

	case R_SH_SWITCH8:
	case R_SH_SWITCH16:
	case R_SH_SWITCH32:
	  /* These relocs types represent
	       .word L2-L1
	     The r_offset field holds the difference between the reloc
	     address and L1.  That is the start of the reloc, and
	     adding in the contents gives us the top.  We must adjust
	     both the r_offset field and the section contents.  */

	  start = irel->r_vaddr - sec->vma;
	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);

	  if (start > addr
	      && start < toaddr
	      && (stop <= addr || stop >= toaddr))
	    irel->r_offset += count;
	  else if (stop > addr
		   && stop < toaddr
		   && (start <= addr || start >= toaddr))
	    irel->r_offset -= count;

	  start = stop;

	  if (irel->r_type == R_SH_SWITCH16)
	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
	  else if (irel->r_type == R_SH_SWITCH8)
	    voff = bfd_get_8 (abfd, contents + nraddr);
	  else
	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);

	  break;

	case R_SH_USES:
	  start = irel->r_vaddr - sec->vma;
	  stop = (bfd_vma) ((bfd_signed_vma) start
			    + (long) irel->r_offset
			    + 4);
	  break;
	}

      if (start > addr
	  && start < toaddr
	  && (stop <= addr || stop >= toaddr))
	adjust = count;
      else if (stop > addr
	       && stop < toaddr
	       && (start <= addr || start >= toaddr))
	adjust = - count;
      else
	adjust = 0;

      if (adjust != 0)
	{
	  oinsn = insn;
	  overflow = false;
	  switch (irel->r_type)
	    {
	    default:
	      abort ();
	      break;

	    case R_SH_PCDISP8BY2:
	    case R_SH_PCRELIMM8BY2:
	      insn += adjust / 2;
	      /* A carry out of the displacement field means overflow.  */
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_PCDISP:
	      insn += adjust / 2;
	      if ((oinsn & 0xf000) != (insn & 0xf000))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_PCRELIMM8BY4:
	      BFD_ASSERT (adjust == count || count >= 4);
	      if (count >= 4)
		insn += adjust / 4;
	      else
		{
		  if ((irel->r_vaddr & 3) == 0)
		    ++insn;
		}
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
	      break;

	    case R_SH_SWITCH8:
	      voff += adjust;
	      if (voff < 0 || voff >= 0xff)
		overflow = true;
	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_SWITCH16:
	      voff += adjust;
	      if (voff < - 0x8000 || voff >= 0x8000)
		overflow = true;
	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_SWITCH32:
	      voff += adjust;
	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
	      break;

	    case R_SH_USES:
	      irel->r_offset += adjust;
	      break;
	    }

	  if (overflow)
	    {
	      _bfd_error_handler
		/* xgettext: c-format */
		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
		 abfd, (uint64_t) irel->r_vaddr);
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	}

      irel->r_vaddr = nraddr + sec->vma;
    }

  /* Look through all the other sections.  If there contain any IMM32
     relocs against internal symbols which we are not going to adjust
     below, we may need to adjust the addends.  */
  for (o = abfd->sections; o != NULL; o = o->next)
    {
      struct internal_reloc *internal_relocs;
      struct internal_reloc *irelscan, *irelscanend;
      bfd_byte *ocontents;

      if (o == sec
	  || (o->flags & SEC_RELOC) == 0
	  || o->reloc_count == 0)
	continue;

      /* We always cache the relocs.  Perhaps, if info->keep_memory is
	 FALSE, we should free them, if we are permitted to, when we
	 leave sh_coff_relax_section.  */
      internal_relocs = (_bfd_coff_read_internal_relocs
			 (abfd, o, true, (bfd_byte *) NULL, false,
			  (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
	return false;

      ocontents = NULL;
      irelscanend = internal_relocs + o->reloc_count;
      for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
	{
	  struct internal_syment sym;

#ifdef COFF_WITH_PE
	  if (irelscan->r_type != R_SH_IMM32
	      && irelscan->r_type != R_SH_IMAGEBASE
	      && irelscan->r_type != R_SH_IMM32CE)
#else
	  if (irelscan->r_type != R_SH_IMM32)
#endif
	    continue;

	  bfd_coff_swap_sym_in (abfd,
				((bfd_byte *) obj_coff_external_syms (abfd)
				 + (irelscan->r_symndx
				    * bfd_coff_symesz (abfd))),
				&sym);
	  if (sym.n_sclass != C_EXT
	      && sym.n_scnum == sec->target_index
	      && ((bfd_vma) sym.n_value <= addr
		  || (bfd_vma) sym.n_value >= toaddr))
	    {
	      bfd_vma val;

	      if (ocontents == NULL)
		{
		  if (coff_section_data (abfd, o)->contents != NULL)
		    ocontents = coff_section_data (abfd, o)->contents;
		  else
		    {
		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
			return false;
		      /* We always cache the section contents.
			 Perhaps, if info->keep_memory is FALSE, we
			 should free them, if we are permitted to,
			 when we leave sh_coff_relax_section.  */
		      coff_section_data (abfd, o)->contents = ocontents;
		    }
		}

	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
	      val += sym.n_value;
	      if (val > addr && val < toaddr)
		bfd_put_32 (abfd, val - count,
			    ocontents + irelscan->r_vaddr - o->vma);

	      coff_section_data (abfd, o)->keep_contents = true;
	    }
	}
    }

  /* Adjusting the internal symbols will not work if something has
     already retrieved the generic symbols.  It would be possible to
     make this work by adjusting the generic symbols at the same time.
     However, this case should not arise in normal usage.  */
  if (obj_symbols (abfd) != NULL
      || obj_raw_syments (abfd) != NULL)
    {
      _bfd_error_handler
	(_("%pB: fatal: generic symbols retrieved before relaxing"), abfd);
      bfd_set_error (bfd_error_invalid_operation);
      return false;
    }

  /* Adjust all the symbols.  */
  sym_hash = obj_coff_sym_hashes (abfd);
  symesz = bfd_coff_symesz (abfd);
  esym = (bfd_byte *) obj_coff_external_syms (abfd);
  esymend = esym + obj_raw_syment_count (abfd) * symesz;
  while (esym < esymend)
    {
      struct internal_syment isym;

      bfd_coff_swap_sym_in (abfd, esym, &isym);

      if (isym.n_scnum == sec->target_index
	  && (bfd_vma) isym.n_value > addr
	  && (bfd_vma) isym.n_value < toaddr)
	{
	  isym.n_value -= count;
	  bfd_coff_swap_sym_out (abfd, &isym, esym);

	  /* Keep the linker hash table entry in sync.  */
	  if (*sym_hash != NULL)
	    {
	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
			  && (*sym_hash)->root.u.def.value < toaddr);
	      (*sym_hash)->root.u.def.value -= count;
	    }
	}

      /* Skip any auxiliary entries along with their hash slots.  */
      esym += (isym.n_numaux + 1) * symesz;
      sym_hash += isym.n_numaux + 1;
    }

  /* See if we can move the ALIGN reloc forward.  We have adjusted
     r_vaddr for it already.  */
  if (irelalign != NULL)
    {
      bfd_vma alignto, alignaddr;

      alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
      alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
			     1 << irelalign->r_offset);
      if (alignto != alignaddr)
	{
	  /* Tail recursion.  */
	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
					(int) (alignto - alignaddr));
	}
    }

  return true;
}
  1306. /* This is yet another version of the SH opcode table, used to rapidly
  1307. get information about a particular instruction. */
/* The opcode map is represented by an array of these structures.  The
   array is indexed by the high order four bits in the instruction.  */

struct sh_major_opcode
{
  /* A pointer to the instruction list.  This is an array which
     contains all the instructions with this major opcode.  */
  const struct sh_minor_opcode *minor_opcodes;
  /* The number of elements in minor_opcodes.  */
  unsigned short count;
};
/* This structure holds information for a set of SH opcodes.  The
   instruction code is anded with the mask value, and the resulting
   value is used to search the ordered opcode list.  */

struct sh_minor_opcode
{
  /* The sorted opcode list.  */
  const struct sh_opcode *opcodes;
  /* The number of elements in opcodes.  */
  unsigned short count;
  /* The mask value to use when searching the opcode list.  */
  unsigned short mask;
};
/* This structure holds information for an SH instruction.  An array
   of these structures is sorted in order by opcode.  */

struct sh_opcode
{
  /* The code for this instruction, after it has been anded with the
     mask value in the sh_major_opcode structure.  */
  unsigned short opcode;
  /* Flags for this instruction (LOAD, STORE, BRANCH, etc. below).  */
  unsigned long flags;
};
  1340. /* Flag which appear in the sh_opcode structure. */
  1341. /* This instruction loads a value from memory. */
  1342. #define LOAD (0x1)
  1343. /* This instruction stores a value to memory. */
  1344. #define STORE (0x2)
  1345. /* This instruction is a branch. */
  1346. #define BRANCH (0x4)
  1347. /* This instruction has a delay slot. */
  1348. #define DELAY (0x8)
  1349. /* This instruction uses the value in the register in the field at
  1350. mask 0x0f00 of the instruction. */
  1351. #define USES1 (0x10)
  1352. #define USES1_REG(x) ((x & 0x0f00) >> 8)
  1353. /* This instruction uses the value in the register in the field at
  1354. mask 0x00f0 of the instruction. */
  1355. #define USES2 (0x20)
  1356. #define USES2_REG(x) ((x & 0x00f0) >> 4)
  1357. /* This instruction uses the value in register 0. */
  1358. #define USESR0 (0x40)
  1359. /* This instruction sets the value in the register in the field at
  1360. mask 0x0f00 of the instruction. */
  1361. #define SETS1 (0x80)
  1362. #define SETS1_REG(x) ((x & 0x0f00) >> 8)
  1363. /* This instruction sets the value in the register in the field at
  1364. mask 0x00f0 of the instruction. */
  1365. #define SETS2 (0x100)
  1366. #define SETS2_REG(x) ((x & 0x00f0) >> 4)
  1367. /* This instruction sets register 0. */
  1368. #define SETSR0 (0x200)
  1369. /* This instruction sets a special register. */
  1370. #define SETSSP (0x400)
  1371. /* This instruction uses a special register. */
  1372. #define USESSP (0x800)
  1373. /* This instruction uses the floating point register in the field at
  1374. mask 0x0f00 of the instruction. */
  1375. #define USESF1 (0x1000)
  1376. #define USESF1_REG(x) ((x & 0x0f00) >> 8)
  1377. /* This instruction uses the floating point register in the field at
  1378. mask 0x00f0 of the instruction. */
  1379. #define USESF2 (0x2000)
  1380. #define USESF2_REG(x) ((x & 0x00f0) >> 4)
  1381. /* This instruction uses floating point register 0. */
  1382. #define USESF0 (0x4000)
  1383. /* This instruction sets the floating point register in the field at
  1384. mask 0x0f00 of the instruction. */
  1385. #define SETSF1 (0x8000)
  1386. #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
  1387. #define USESAS (0x10000)
  1388. #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
  1389. #define USESR8 (0x20000)
  1390. #define SETSAS (0x40000)
  1391. #define SETSAS_REG(x) USESAS_REG (x)
  1392. #define MAP(a) a, sizeof a / sizeof a[0]
  1393. #ifndef COFF_IMAGE_WITH_PE
  1394. /* The opcode maps. */
/* Instructions under major opcode 0x0 whose low 12 bits fully
   identify the operation (no register operand flags are set).  */
static const struct sh_opcode sh_opcode00[] =
{
  { 0x0008, SETSSP },			/* clrt */
  { 0x0009, 0 },			/* nop */
  { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
  { 0x0018, SETSSP },			/* sett */
  { 0x0019, SETSSP },			/* div0u */
  { 0x001b, 0 },			/* sleep */
  { 0x0028, SETSSP },			/* clrmac */
  { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
  { 0x0038, USESSP | SETSSP },		/* ldtlb */
  { 0x0048, SETSSP },			/* clrs */
  { 0x0058, SETSSP }			/* sets */
};
  1409. static const struct sh_opcode sh_opcode01[] =
  1410. {
  1411. { 0x0003, BRANCH | DELAY | USES1 | SETSSP }, /* bsrf rn */
  1412. { 0x000a, SETS1 | USESSP }, /* sts mach,rn */
  1413. { 0x001a, SETS1 | USESSP }, /* sts macl,rn */
  1414. { 0x0023, BRANCH | DELAY | USES1 }, /* braf rn */
  1415. { 0x0029, SETS1 | USESSP }, /* movt rn */
  1416. { 0x002a, SETS1 | USESSP }, /* sts pr,rn */
  1417. { 0x005a, SETS1 | USESSP }, /* sts fpul,rn */
  1418. { 0x006a, SETS1 | USESSP }, /* sts fpscr,rn / sts dsr,rn */
  1419. { 0x0083, LOAD | USES1 }, /* pref @rn */
  1420. { 0x007a, SETS1 | USESSP }, /* sts a0,rn */
  1421. { 0x008a, SETS1 | USESSP }, /* sts x0,rn */
  1422. { 0x009a, SETS1 | USESSP }, /* sts x1,rn */
  1423. { 0x00aa, SETS1 | USESSP }, /* sts y0,rn */
  1424. { 0x00ba, SETS1 | USESSP } /* sts y1,rn */
  1425. };
  1426. static const struct sh_opcode sh_opcode02[] =
  1427. {
  1428. { 0x0002, SETS1 | USESSP }, /* stc <special_reg>,rn */
  1429. { 0x0004, STORE | USES1 | USES2 | USESR0 }, /* mov.b rm,@(r0,rn) */
  1430. { 0x0005, STORE | USES1 | USES2 | USESR0 }, /* mov.w rm,@(r0,rn) */
  1431. { 0x0006, STORE | USES1 | USES2 | USESR0 }, /* mov.l rm,@(r0,rn) */
  1432. { 0x0007, SETSSP | USES1 | USES2 }, /* mul.l rm,rn */
  1433. { 0x000c, LOAD | SETS1 | USES2 | USESR0 }, /* mov.b @(r0,rm),rn */
  1434. { 0x000d, LOAD | SETS1 | USES2 | USESR0 }, /* mov.w @(r0,rm),rn */
  1435. { 0x000e, LOAD | SETS1 | USES2 | USESR0 }, /* mov.l @(r0,rm),rn */
  1436. { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
  1437. };
  1438. static const struct sh_minor_opcode sh_opcode0[] =
  1439. {
  1440. { MAP (sh_opcode00), 0xffff },
  1441. { MAP (sh_opcode01), 0xf0ff },
  1442. { MAP (sh_opcode02), 0xf00f }
  1443. };
  1444. static const struct sh_opcode sh_opcode10[] =
  1445. {
  1446. { 0x1000, STORE | USES1 | USES2 } /* mov.l rm,@(disp,rn) */
  1447. };
  1448. static const struct sh_minor_opcode sh_opcode1[] =
  1449. {
  1450. { MAP (sh_opcode10), 0xf000 }
  1451. };
  1452. static const struct sh_opcode sh_opcode20[] =
  1453. {
  1454. { 0x2000, STORE | USES1 | USES2 }, /* mov.b rm,@rn */
  1455. { 0x2001, STORE | USES1 | USES2 }, /* mov.w rm,@rn */
  1456. { 0x2002, STORE | USES1 | USES2 }, /* mov.l rm,@rn */
  1457. { 0x2004, STORE | SETS1 | USES1 | USES2 }, /* mov.b rm,@-rn */
  1458. { 0x2005, STORE | SETS1 | USES1 | USES2 }, /* mov.w rm,@-rn */
  1459. { 0x2006, STORE | SETS1 | USES1 | USES2 }, /* mov.l rm,@-rn */
  1460. { 0x2007, SETSSP | USES1 | USES2 | USESSP }, /* div0s */
  1461. { 0x2008, SETSSP | USES1 | USES2 }, /* tst rm,rn */
  1462. { 0x2009, SETS1 | USES1 | USES2 }, /* and rm,rn */
  1463. { 0x200a, SETS1 | USES1 | USES2 }, /* xor rm,rn */
  1464. { 0x200b, SETS1 | USES1 | USES2 }, /* or rm,rn */
  1465. { 0x200c, SETSSP | USES1 | USES2 }, /* cmp/str rm,rn */
  1466. { 0x200d, SETS1 | USES1 | USES2 }, /* xtrct rm,rn */
  1467. { 0x200e, SETSSP | USES1 | USES2 }, /* mulu.w rm,rn */
  1468. { 0x200f, SETSSP | USES1 | USES2 } /* muls.w rm,rn */
  1469. };
  1470. static const struct sh_minor_opcode sh_opcode2[] =
  1471. {
  1472. { MAP (sh_opcode20), 0xf00f }
  1473. };
  1474. static const struct sh_opcode sh_opcode30[] =
  1475. {
  1476. { 0x3000, SETSSP | USES1 | USES2 }, /* cmp/eq rm,rn */
  1477. { 0x3002, SETSSP | USES1 | USES2 }, /* cmp/hs rm,rn */
  1478. { 0x3003, SETSSP | USES1 | USES2 }, /* cmp/ge rm,rn */
  1479. { 0x3004, SETSSP | USESSP | USES1 | USES2 }, /* div1 rm,rn */
  1480. { 0x3005, SETSSP | USES1 | USES2 }, /* dmulu.l rm,rn */
  1481. { 0x3006, SETSSP | USES1 | USES2 }, /* cmp/hi rm,rn */
  1482. { 0x3007, SETSSP | USES1 | USES2 }, /* cmp/gt rm,rn */
  1483. { 0x3008, SETS1 | USES1 | USES2 }, /* sub rm,rn */
  1484. { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
  1485. { 0x300b, SETS1 | SETSSP | USES1 | USES2 }, /* subv rm,rn */
  1486. { 0x300c, SETS1 | USES1 | USES2 }, /* add rm,rn */
  1487. { 0x300d, SETSSP | USES1 | USES2 }, /* dmuls.l rm,rn */
  1488. { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
  1489. { 0x300f, SETS1 | SETSSP | USES1 | USES2 } /* addv rm,rn */
  1490. };
  1491. static const struct sh_minor_opcode sh_opcode3[] =
  1492. {
  1493. { MAP (sh_opcode30), 0xf00f }
  1494. };
/* Major opcode 4: shifts, register/special-register moves, jsr/jmp.  */

static const struct sh_opcode sh_opcode40[] =
{
  { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
  { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
  { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
  { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
  { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
  { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
  { 0x4008, SETS1 | USES1 },			/* shll2 rn */
  { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
  { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
  { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
  { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
  { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
  { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
  { 0x4014, SETSSP | USES1 },			/* setrc rm */
  { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
  { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
  { 0x4018, SETS1 | USES1 },			/* shll8 rn */
  { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
  { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
  { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
  { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
  { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
  { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
  { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
  { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
  { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
  { 0x4028, SETS1 | USES1 },			/* shll16 rn */
  { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
  { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
  { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
  { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
  { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
  { 0x405a, SETSSP | USES1 },			/* lds.l rm,fpul */
  { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
  { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
  { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
  { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
  { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
  { 0x407a, SETSSP | USES1 },			/* lds.l rm,a0 */
  { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
  { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
  { 0x408a, SETSSP | USES1 },			/* lds.l rm,x0 */
  { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
  { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
  { 0x409a, SETSSP | USES1 },			/* lds.l rm,x1 */
  { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
  { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
  { 0x40aa, SETSSP | USES1 },			/* lds.l rm,y0 */
  { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
  { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
  { 0x40ba, SETSSP | USES1 }			/* lds.l rm,y1 */
};

static const struct sh_opcode sh_opcode41[] =
{
  { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
  { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
  { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
  { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
  { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
  { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
};

static const struct sh_minor_opcode sh_opcode4[] =
{
  { MAP (sh_opcode40), 0xf0ff },
  { MAP (sh_opcode41), 0xf00f }
};

static const struct sh_opcode sh_opcode50[] =
{
  { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
};

static const struct sh_minor_opcode sh_opcode5[] =
{
  { MAP (sh_opcode50), 0xf000 }
};

static const struct sh_opcode sh_opcode60[] =
{
  { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
  { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
  { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
  { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
  { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
  { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
  { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
  { 0x6007, SETS1 | USES2 },			/* not rm,rn */
  { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
  { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
  { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
  { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
  { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
  { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
  { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
  { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
};

static const struct sh_minor_opcode sh_opcode6[] =
{
  { MAP (sh_opcode60), 0xf00f }
};

static const struct sh_opcode sh_opcode70[] =
{
  { 0x7000, SETS1 | USES1 }	/* add #imm,rn */
};

static const struct sh_minor_opcode sh_opcode7[] =
{
  { MAP (sh_opcode70), 0xf000 }
};

static const struct sh_opcode sh_opcode80[] =
{
  { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
  { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
  { 0x8200, SETSSP },			/* setrc #imm */
  { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
  { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rn),r0 */
  { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
  { 0x8900, BRANCH | USESSP },		/* bt label */
  { 0x8b00, BRANCH | USESSP },		/* bf label */
  { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
  { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
  { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
  { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
};

static const struct sh_minor_opcode sh_opcode8[] =
{
  { MAP (sh_opcode80), 0xff00 }
};

static const struct sh_opcode sh_opcode90[] =
{
  { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcode9[] =
{
  { MAP (sh_opcode90), 0xf000 }
};
static const struct sh_opcode sh_opcodea0[] =
{
  { 0xa000, BRANCH | DELAY }	/* bra label */
};

static const struct sh_minor_opcode sh_opcodea[] =
{
  { MAP (sh_opcodea0), 0xf000 }
};

static const struct sh_opcode sh_opcodeb0[] =
{
  { 0xb000, BRANCH | DELAY }	/* bsr label */
};

static const struct sh_minor_opcode sh_opcodeb[] =
{
  { MAP (sh_opcodeb0), 0xf000 }
};

static const struct sh_opcode sh_opcodec0[] =
{
  { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
  { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
  { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
  { 0xc300, BRANCH | USESSP },			/* trapa #imm */
  { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
  { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
  { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
  { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
  { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
  { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
  { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
  { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
  { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
  { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
  { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
  { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
};

static const struct sh_minor_opcode sh_opcodec[] =
{
  { MAP (sh_opcodec0), 0xff00 }
};

static const struct sh_opcode sh_opcoded0[] =
{
  { 0xd000, LOAD | SETS1 }	/* mov.l @(disp,pc),rn */
};

static const struct sh_minor_opcode sh_opcoded[] =
{
  { MAP (sh_opcoded0), 0xf000 }
};

static const struct sh_opcode sh_opcodee0[] =
{
  { 0xe000, SETS1 }	/* mov #imm,rn */
};

static const struct sh_minor_opcode sh_opcodee[] =
{
  { MAP (sh_opcodee0), 0xf000 }
};

static const struct sh_opcode sh_opcodef0[] =
{
  { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
  { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
  { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
  { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
  { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
  { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
  { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
  { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
  { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
  { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
  { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
  { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
  { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
  { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
};

static const struct sh_opcode sh_opcodef1[] =
{
  { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
  { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
  { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
  { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
  { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
  { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
  { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
  { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
  { 0xf08d, SETSF1 },		/* fldi0 fn */
  { 0xf09d, SETSF1 }		/* fldi1 fn */
};

static const struct sh_minor_opcode sh_opcodef[] =
{
  { MAP (sh_opcodef0), 0xf00f },
  { MAP (sh_opcodef1), 0xf0ff }
};

/* Top-level map, indexed by the instruction's high nibble.  Not const:
   _bfd_sh_align_load_span repoints entry 0xf at the DSP tables when
   linking sh[3]-dsp code.  */
static struct sh_major_opcode sh_opcodes[] =
{
  { MAP (sh_opcode0) },
  { MAP (sh_opcode1) },
  { MAP (sh_opcode2) },
  { MAP (sh_opcode3) },
  { MAP (sh_opcode4) },
  { MAP (sh_opcode5) },
  { MAP (sh_opcode6) },
  { MAP (sh_opcode7) },
  { MAP (sh_opcode8) },
  { MAP (sh_opcode9) },
  { MAP (sh_opcodea) },
  { MAP (sh_opcodeb) },
  { MAP (sh_opcodec) },
  { MAP (sh_opcoded) },
  { MAP (sh_opcodee) },
  { MAP (sh_opcodef) }
};

/* The double data transfer / parallel processing insns are not
   described here.  This will cause sh_align_load_span to leave them alone.  */

static const struct sh_opcode sh_dsp_opcodef0[] =
{
  { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
  { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
  { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
  { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
  { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
  { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
  { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
  { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
};

static const struct sh_minor_opcode sh_dsp_opcodef[] =
{
  { MAP (sh_dsp_opcodef0), 0xfc0d }
};
  1755. /* Given an instruction, return a pointer to the corresponding
  1756. sh_opcode structure. Return NULL if the instruction is not
  1757. recognized. */
  1758. static const struct sh_opcode *
  1759. sh_insn_info (unsigned int insn)
  1760. {
  1761. const struct sh_major_opcode *maj;
  1762. const struct sh_minor_opcode *min, *minend;
  1763. maj = &sh_opcodes[(insn & 0xf000) >> 12];
  1764. min = maj->minor_opcodes;
  1765. minend = min + maj->count;
  1766. for (; min < minend; min++)
  1767. {
  1768. unsigned int l;
  1769. const struct sh_opcode *op, *opend;
  1770. l = insn & min->mask;
  1771. op = min->opcodes;
  1772. opend = op + min->count;
  1773. /* Since the opcodes tables are sorted, we could use a binary
  1774. search here if the count were above some cutoff value. */
  1775. for (; op < opend; op++)
  1776. if (op->opcode == l)
  1777. return op;
  1778. }
  1779. return NULL;
  1780. }
  1781. /* See whether an instruction uses a general purpose register. */
  1782. static bool
  1783. sh_insn_uses_reg (unsigned int insn,
  1784. const struct sh_opcode *op,
  1785. unsigned int reg)
  1786. {
  1787. unsigned int f;
  1788. f = op->flags;
  1789. if ((f & USES1) != 0
  1790. && USES1_REG (insn) == reg)
  1791. return true;
  1792. if ((f & USES2) != 0
  1793. && USES2_REG (insn) == reg)
  1794. return true;
  1795. if ((f & USESR0) != 0
  1796. && reg == 0)
  1797. return true;
  1798. if ((f & USESAS) && reg == USESAS_REG (insn))
  1799. return true;
  1800. if ((f & USESR8) && reg == 8)
  1801. return true;
  1802. return false;
  1803. }
  1804. /* See whether an instruction sets a general purpose register. */
  1805. static bool
  1806. sh_insn_sets_reg (unsigned int insn,
  1807. const struct sh_opcode *op,
  1808. unsigned int reg)
  1809. {
  1810. unsigned int f;
  1811. f = op->flags;
  1812. if ((f & SETS1) != 0
  1813. && SETS1_REG (insn) == reg)
  1814. return true;
  1815. if ((f & SETS2) != 0
  1816. && SETS2_REG (insn) == reg)
  1817. return true;
  1818. if ((f & SETSR0) != 0
  1819. && reg == 0)
  1820. return true;
  1821. if ((f & SETSAS) && reg == SETSAS_REG (insn))
  1822. return true;
  1823. return false;
  1824. }
  1825. /* See whether an instruction uses or sets a general purpose register */
  1826. static bool
  1827. sh_insn_uses_or_sets_reg (unsigned int insn,
  1828. const struct sh_opcode *op,
  1829. unsigned int reg)
  1830. {
  1831. if (sh_insn_uses_reg (insn, op, reg))
  1832. return true;
  1833. return sh_insn_sets_reg (insn, op, reg);
  1834. }
  1835. /* See whether an instruction uses a floating point register. */
  1836. static bool
  1837. sh_insn_uses_freg (unsigned int insn,
  1838. const struct sh_opcode *op,
  1839. unsigned int freg)
  1840. {
  1841. unsigned int f;
  1842. f = op->flags;
  1843. /* We can't tell if this is a double-precision insn, so just play safe
  1844. and assume that it might be. So not only have we test FREG against
  1845. itself, but also even FREG against FREG+1 - if the using insn uses
  1846. just the low part of a double precision value - but also an odd
  1847. FREG against FREG-1 - if the setting insn sets just the low part
  1848. of a double precision value.
  1849. So what this all boils down to is that we have to ignore the lowest
  1850. bit of the register number. */
  1851. if ((f & USESF1) != 0
  1852. && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
  1853. return true;
  1854. if ((f & USESF2) != 0
  1855. && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
  1856. return true;
  1857. if ((f & USESF0) != 0
  1858. && freg == 0)
  1859. return true;
  1860. return false;
  1861. }
  1862. /* See whether an instruction sets a floating point register. */
  1863. static bool
  1864. sh_insn_sets_freg (unsigned int insn,
  1865. const struct sh_opcode *op,
  1866. unsigned int freg)
  1867. {
  1868. unsigned int f;
  1869. f = op->flags;
  1870. /* We can't tell if this is a double-precision insn, so just play safe
  1871. and assume that it might be. So not only have we test FREG against
  1872. itself, but also even FREG against FREG+1 - if the using insn uses
  1873. just the low part of a double precision value - but also an odd
  1874. FREG against FREG-1 - if the setting insn sets just the low part
  1875. of a double precision value.
  1876. So what this all boils down to is that we have to ignore the lowest
  1877. bit of the register number. */
  1878. if ((f & SETSF1) != 0
  1879. && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
  1880. return true;
  1881. return false;
  1882. }
  1883. /* See whether an instruction uses or sets a floating point register */
  1884. static bool
  1885. sh_insn_uses_or_sets_freg (unsigned int insn,
  1886. const struct sh_opcode *op,
  1887. unsigned int reg)
  1888. {
  1889. if (sh_insn_uses_freg (insn, op, reg))
  1890. return true;
  1891. return sh_insn_sets_freg (insn, op, reg);
  1892. }
/* See whether instructions I1 and I2 conflict, assuming I1 comes
   before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
   This should return TRUE if there is a conflict, or FALSE if the
   instructions can be swapped safely.  */

static bool
sh_insns_conflict (unsigned int i1,
		   const struct sh_opcode *op1,
		   unsigned int i2,
		   const struct sh_opcode *op2)
{
  unsigned int f1, f2;

  f1 = op1->flags;
  f2 = op2->flags;

  /* Load of fpscr conflicts with floating point operations.
     FIXME: shouldn't test raw opcodes here.  */
  if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
      || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
    return true;

  /* Never reorder around a branch or a delay slot.  */
  if ((f1 & (BRANCH | DELAY)) != 0
      || (f2 & (BRANCH | DELAY)) != 0)
    return true;

  /* If at least one insn writes a special register and both touch
     special registers, the order matters.  */
  if (((f1 | f2) & SETSSP)
      && (f1 & (SETSSP | USESSP))
      && (f2 & (SETSSP | USESSP)))
    return true;

  /* Any general or floating register written by I1 must not be read
     or written by I2 (write-after-write / read-after-write hazards).  */
  if ((f1 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
    return true;

  if ((f1 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
    return true;

  if ((f1 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i2, op2, 0))
    return true;

  if ((f1 & SETSAS)
      && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
    return true;

  if ((f1 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
    return true;

  /* Symmetrically, anything written by I2 must not be touched by I1
     (write-after-read hazards in the swapped order).  */
  if ((f2 & SETS1) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
    return true;

  if ((f2 & SETS2) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
    return true;

  if ((f2 & SETSR0) != 0
      && sh_insn_uses_or_sets_reg (i1, op1, 0))
    return true;

  if ((f2 & SETSAS)
      && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
    return true;

  if ((f2 & SETSF1) != 0
      && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
    return true;

  /* The instructions do not conflict.  */
  return false;
}
  1951. /* I1 is a load instruction, and I2 is some other instruction. Return
  1952. TRUE if I1 loads a register which I2 uses. */
  1953. static bool
  1954. sh_load_use (unsigned int i1,
  1955. const struct sh_opcode *op1,
  1956. unsigned int i2,
  1957. const struct sh_opcode *op2)
  1958. {
  1959. unsigned int f1;
  1960. f1 = op1->flags;
  1961. if ((f1 & LOAD) == 0)
  1962. return false;
  1963. /* If both SETS1 and SETSSP are set, that means a load to a special
  1964. register using postincrement addressing mode, which we don't care
  1965. about here. */
  1966. if ((f1 & SETS1) != 0
  1967. && (f1 & SETSSP) == 0
  1968. && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
  1969. return true;
  1970. if ((f1 & SETSR0) != 0
  1971. && sh_insn_uses_reg (i2, op2, 0))
  1972. return true;
  1973. if ((f1 & SETSF1) != 0
  1974. && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
  1975. return true;
  1976. return false;
  1977. }
/* Try to align loads and stores within a span of memory.  This is
   called by both the ELF and the COFF sh targets.  ABFD and SEC are
   the BFD and section we are examining.  CONTENTS is the contents of
   the section.  SWAP is the routine to call to swap two instructions.
   RELOCS is a pointer to the internal relocation information, to be
   passed to SWAP.  PLABEL is a pointer to the current label in a
   sorted list of labels; LABEL_END is the end of the list.  START and
   STOP are the range of memory to examine.  If a swap is made,
   *PSWAPPED is set to TRUE.  */

#ifdef COFF_WITH_PE
static
#endif
bool
_bfd_sh_align_load_span (bfd *abfd,
			 asection *sec,
			 bfd_byte *contents,
			 bool (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
			 void * relocs,
			 bfd_vma **plabel,
			 bfd_vma *label_end,
			 bfd_vma start,
			 bfd_vma stop,
			 bool *pswapped)
{
  int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
  bfd_vma i;

  /* The SH4 has a Harvard architecture, hence aligning loads is not
     desirable.  In fact, it is counter-productive, since it interferes
     with the schedules generated by the compiler.  */
  if (abfd->arch_info->mach == bfd_mach_sh4)
    return true;

  /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
     instructions.  */
  if (dsp)
    {
      sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
      sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
    }

  /* Instructions should be aligned on 2 byte boundaries.  */
  if ((start & 1) == 1)
    ++start;

  /* Now look through the unaligned addresses.  Only addresses with
     (i & 2) != 0 are misaligned with respect to a 4 byte boundary.  */
  i = start;
  if ((i & 2) == 0)
    i += 2;
  for (; i < stop; i += 4)
    {
      unsigned int insn;
      const struct sh_opcode *op;
      unsigned int prev_insn = 0;
      const struct sh_opcode *prev_op = NULL;

      insn = bfd_get_16 (abfd, contents + i);
      op = sh_insn_info (insn);
      if (op == NULL
	  || (op->flags & (LOAD | STORE)) == 0)
	continue;

      /* This is a load or store which is not on a four byte boundary.
	 Advance the label pointer past labels before I.  */
      while (*plabel < label_end && **plabel < i)
	++*plabel;

      if (i > start)
	{
	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
	  /* If INSN is the field b of a parallel processing insn, it is not
	     a load / store after all.  Note that the test here might mistake
	     the field_b of a pcopy insn for the starting code of a parallel
	     processing insn; this might miss a swapping opportunity, but at
	     least we're on the safe side.  */
	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
	    continue;

	  /* Check if prev_insn is actually the field b of a parallel
	     processing insn.  Again, this can give a spurious match
	     after a pcopy.  */
	  if (dsp && i - 2 > start)
	    {
	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);

	      if ((pprev_insn & 0xfc00) == 0xf800)
		prev_op = NULL;
	      else
		prev_op = sh_insn_info (prev_insn);
	    }
	  else
	    prev_op = sh_insn_info (prev_insn);

	  /* If the load/store instruction is in a delay slot, we
	     can't swap.  */
	  if (prev_op == NULL
	      || (prev_op->flags & DELAY) != 0)
	    continue;
	}

      /* First try to move INSN earlier, by swapping it with the
	 instruction before it.  */
      if (i > start
	  && (*plabel >= label_end || **plabel != i)
	  && prev_op != NULL
	  && (prev_op->flags & (LOAD | STORE)) == 0
	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
	{
	  bool ok;

	  /* The load/store instruction does not have a label, and
	     there is a previous instruction; PREV_INSN is not
	     itself a load/store instruction, and PREV_INSN and
	     INSN do not conflict.  */

	  ok = true;

	  if (i >= start + 4)
	    {
	      unsigned int prev2_insn;
	      const struct sh_opcode *prev2_op;

	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
	      prev2_op = sh_insn_info (prev2_insn);

	      /* If the instruction before PREV_INSN has a delay
		 slot--that is, PREV_INSN is in a delay slot--we
		 can not swap.  */
	      if (prev2_op == NULL
		  || (prev2_op->flags & DELAY) != 0)
		ok = false;

	      /* If the instruction before PREV_INSN is a load,
		 and it sets a register which INSN uses, then
		 putting INSN immediately after PREV_INSN will
		 cause a pipeline bubble, so there is no point to
		 making the swap.  */
	      if (ok
		  && (prev2_op->flags & LOAD) != 0
		  && sh_load_use (prev2_insn, prev2_op, insn, op))
		ok = false;
	    }

	  if (ok)
	    {
	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
		return false;
	      *pswapped = true;
	      continue;
	    }
	}

      /* Advance the label pointer past labels before the following
	 instruction.  */
      while (*plabel < label_end && **plabel < i + 2)
	++*plabel;

      /* Otherwise try to move INSN later, by swapping it with the
	 instruction after it.  */
      if (i + 2 < stop
	  && (*plabel >= label_end || **plabel != i + 2))
	{
	  unsigned int next_insn;
	  const struct sh_opcode *next_op;

	  /* There is an instruction after the load/store
	     instruction, and it does not have a label.  */
	  next_insn = bfd_get_16 (abfd, contents + i + 2);
	  next_op = sh_insn_info (next_insn);
	  if (next_op != NULL
	      && (next_op->flags & (LOAD | STORE)) == 0
	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
	    {
	      bool ok;

	      /* NEXT_INSN is not itself a load/store instruction,
		 and it does not conflict with INSN.  */

	      ok = true;

	      /* If PREV_INSN is a load, and it sets a register
		 which NEXT_INSN uses, then putting NEXT_INSN
		 immediately after PREV_INSN will cause a pipeline
		 bubble, so there is no reason to make this swap.  */
	      if (prev_op != NULL
		  && (prev_op->flags & LOAD) != 0
		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
		ok = false;

	      /* If INSN is a load, and it sets a register which
		 the insn after NEXT_INSN uses, then doing the
		 swap will cause a pipeline bubble, so there is no
		 reason to make the swap.  However, if the insn
		 after NEXT_INSN is itself a load or store
		 instruction, then it is misaligned, so
		 optimistically hope that it will be swapped
		 itself, and just live with the pipeline bubble if
		 it isn't.  */
	      if (ok
		  && i + 4 < stop
		  && (op->flags & LOAD) != 0)
		{
		  unsigned int next2_insn;
		  const struct sh_opcode *next2_op;

		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
		  next2_op = sh_insn_info (next2_insn);
		  if (next2_op == NULL
		      || ((next2_op->flags & (LOAD | STORE)) == 0
			  && sh_load_use (insn, op, next2_insn, next2_op)))
		    ok = false;
		}

	      if (ok)
		{
		  if (! (*swap) (abfd, sec, relocs, contents, i))
		    return false;
		  *pswapped = true;
		  continue;
		}
	    }
	}
    }

  return true;
}
  2170. #endif /* not COFF_IMAGE_WITH_PE */
/* Swap two SH instructions.  ADDR is the section offset of the first
   of two adjacent 16-bit instructions in CONTENTS; the pair is
   exchanged in place and every reloc in RELOCS that refers to either
   address is moved along with its instruction, with any PC-relative
   field inside the moved instruction corrected.  Returns FALSE (with
   bfd_error_bad_value set) if correcting a PC-relative field would
   overflow it.  */

static bool
sh_swap_insns (bfd * abfd,
	       asection * sec,
	       void * relocs,
	       bfd_byte * contents,
	       bfd_vma addr)
{
  struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
  unsigned short i1, i2;
  struct internal_reloc *irel, *irelend;

  /* Swap the instructions themselves.  */
  i1 = bfd_get_16 (abfd, contents + addr);
  i2 = bfd_get_16 (abfd, contents + addr + 2);
  bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
  bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);

  /* Adjust all reloc addresses.  */
  irelend = internal_relocs + sec->reloc_count;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      int type, add;

      /* There are a few special types of relocs that we don't want to
	 adjust.  These relocs do not apply to the instruction itself,
	 but are only associated with the address.  */
      type = irel->r_type;
      if (type == R_SH_ALIGN
	  || type == R_SH_CODE
	  || type == R_SH_DATA
	  || type == R_SH_LABEL)
	continue;

      /* If an R_SH_USES reloc points to one of the addresses being
	 swapped, we must adjust it.  It would be incorrect to do this
	 for a jump, though, since we want to execute both
	 instructions after the jump.  (We have avoided swapping
	 around a label, so the jump will not wind up executing an
	 instruction it shouldn't).  */
      if (type == R_SH_USES)
	{
	  bfd_vma off;

	  /* OFF is the section offset of the instruction this
	     R_SH_USES reloc refers to; keep r_offset pointing at the
	     same instruction after the swap.  */
	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
	  if (off == addr)
	    irel->r_offset += 2;
	  else if (off == addr + 2)
	    irel->r_offset -= 2;
	}

      /* Move the reloc along with its instruction.  ADD records, in
	 bytes, the opposite of how far the instruction moved, so that
	 the PC-relative adjustment below (ADD / 2, in instruction
	 units) keeps the displacement target unchanged.  */
      if (irel->r_vaddr - sec->vma == addr)
	{
	  irel->r_vaddr += 2;
	  add = -2;
	}
      else if (irel->r_vaddr - sec->vma == addr + 2)
	{
	  irel->r_vaddr -= 2;
	  add = 2;
	}
      else
	add = 0;

      if (add != 0)
	{
	  bfd_byte *loc;
	  unsigned short insn, oinsn;
	  bool overflow;

	  loc = contents + irel->r_vaddr - sec->vma;
	  overflow = false;
	  switch (type)
	    {
	    default:
	      break;

	    case R_SH_PCDISP8BY2:
	    case R_SH_PCRELIMM8BY2:
	      /* 8-bit displacement counted in 2-byte units; a 2-byte
		 move changes it by one.  Overflow shows up as a
		 carry/borrow into the opcode byte.  */
	      insn = bfd_get_16 (abfd, loc);
	      oinsn = insn;
	      insn += add / 2;
	      if ((oinsn & 0xff00) != (insn & 0xff00))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
	      break;

	    case R_SH_PCDISP:
	      /* 12-bit branch displacement, also in 2-byte units.  */
	      insn = bfd_get_16 (abfd, loc);
	      oinsn = insn;
	      insn += add / 2;
	      if ((oinsn & 0xf000) != (insn & 0xf000))
		overflow = true;
	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
	      break;

	    case R_SH_PCRELIMM8BY4:
	      /* This reloc ignores the least significant 3 bits of
		 the program counter before adding in the offset.
		 This means that if ADDR is at an even address, the
		 swap will not affect the offset.  If ADDR is an at an
		 odd address, then the instruction will be crossing a
		 four byte boundary, and must be adjusted.  */
	      if ((addr & 3) != 0)
		{
		  insn = bfd_get_16 (abfd, loc);
		  oinsn = insn;
		  insn += add / 2;
		  if ((oinsn & 0xff00) != (insn & 0xff00))
		    overflow = true;
		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
		}
	      break;
	    }

	  if (overflow)
	    {
	      _bfd_error_handler
		/* xgettext: c-format */
		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
		 abfd, (uint64_t) irel->r_vaddr);
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	}
    }

  return true;
}
/* Look for loads and stores which we can align to four byte
   boundaries.  See the longer comment above sh_relax_section for why
   this is desirable.  This sets *PSWAPPED if some instruction was
   swapped.

   The section is scanned one R_SH_CODE..R_SH_DATA span at a time;
   each span is handed to _bfd_sh_align_load_span along with the list
   of label addresses so that no swap ever crosses a label.  Returns
   FALSE on allocation failure or if a swap fails.  */

static bool
sh_align_loads (bfd *abfd,
		asection *sec,
		struct internal_reloc *internal_relocs,
		bfd_byte *contents,
		bool *pswapped)
{
  struct internal_reloc *irel, *irelend;
  bfd_vma *labels = NULL;
  bfd_vma *label, *label_end;
  bfd_size_type amt;

  *pswapped = false;

  irelend = internal_relocs + sec->reloc_count;

  /* Get all the addresses with labels on them.  */
  amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
  labels = (bfd_vma *) bfd_malloc (amt);
  if (labels == NULL)
    goto error_return;
  label_end = labels;
  for (irel = internal_relocs; irel < irelend; irel++)
    {
      if (irel->r_type == R_SH_LABEL)
	{
	  *label_end = irel->r_vaddr - sec->vma;
	  ++label_end;
	}
    }

  /* Note that the assembler currently always outputs relocs in
     address order.  If that ever changes, this code will need to sort
     the label values and the relocs.  */

  label = labels;

  for (irel = internal_relocs; irel < irelend; irel++)
    {
      bfd_vma start, stop;

      if (irel->r_type != R_SH_CODE)
	continue;

      start = irel->r_vaddr - sec->vma;

      /* Advance IREL to the matching R_SH_DATA reloc (or the end);
	 the outer loop continues the scan from there, so each
	 code/data span is visited exactly once.  */
      for (irel++; irel < irelend; irel++)
	if (irel->r_type == R_SH_DATA)
	  break;
      if (irel < irelend)
	stop = irel->r_vaddr - sec->vma;
      else
	stop = sec->size;

      if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
				     internal_relocs, &label,
				     label_end, start, stop, pswapped))
	goto error_return;
    }

  free (labels);

  return true;

 error_return:
  free (labels);
  return false;
}
/* This is a modification of _bfd_coff_generic_relocate_section, which
   will handle SH relaxing.  Only the reloc types that still need work
   after relaxation -- 32-bit absolute relocs (and the PE imagebase
   variants) plus external R_SH_PCDISP branches -- are applied here;
   all other types were handled by sh_relax_section and are skipped.
   Returns FALSE on a malformed symbol index or missing howto.  */

static bool
sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
		     struct bfd_link_info *info,
		     bfd *input_bfd,
		     asection *input_section,
		     bfd_byte *contents,
		     struct internal_reloc *relocs,
		     struct internal_syment *syms,
		     asection **sections)
{
  struct internal_reloc *rel;
  struct internal_reloc *relend;

  rel = relocs;
  relend = rel + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      long symndx;
      struct coff_link_hash_entry *h;
      struct internal_syment *sym;
      bfd_vma addend;
      bfd_vma val;
      reloc_howto_type *howto;
      bfd_reloc_status_type rstat;

      /* Almost all relocs have to do with relaxing.  If any work must
	 be done for them, it has been done in sh_relax_section.  */
      if (rel->r_type != R_SH_IMM32
#ifdef COFF_WITH_PE
	  && rel->r_type != R_SH_IMM32CE
	  && rel->r_type != R_SH_IMAGEBASE
#endif
	  && rel->r_type != R_SH_PCDISP)
	continue;

      symndx = rel->r_symndx;

      if (symndx == -1)
	{
	  /* An absolute reloc with no associated symbol.  */
	  h = NULL;
	  sym = NULL;
	}
      else
	{
	  if (symndx < 0
	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
	    {
	      _bfd_error_handler
		/* xgettext: c-format */
		(_("%pB: illegal symbol index %ld in relocs"),
		 input_bfd, symndx);
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	  h = obj_coff_sym_hashes (input_bfd)[symndx];
	  sym = syms + symndx;
	}

      /* For a symbol defined in a section, the addend stored in the
	 section contents already includes the symbol value, so back
	 it out here; VAL computed below adds it in again.  */
      if (sym != NULL && sym->n_scnum != 0)
	addend = - sym->n_value;
      else
	addend = 0;

      /* SH PC-relative displacements are taken from four bytes past
	 the instruction; compensate for that here.  */
      if (rel->r_type == R_SH_PCDISP)
	addend -= 4;

      if (rel->r_type >= SH_COFF_HOWTO_COUNT)
	howto = NULL;
      else
	howto = &sh_coff_howtos[rel->r_type];

      if (howto == NULL)
	{
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}

#ifdef COFF_WITH_PE
      /* Imagebase relocs hold an RVA; subtract the image base from
	 the full VMA computed below.  */
      if (rel->r_type == R_SH_IMAGEBASE)
	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
#endif

      val = 0;

      if (h == NULL)
	{
	  asection *sec;

	  /* There is nothing to do for an internal PCDISP reloc.  */
	  if (rel->r_type == R_SH_PCDISP)
	    continue;

	  if (symndx == -1)
	    {
	      sec = bfd_abs_section_ptr;
	      val = 0;
	    }
	  else
	    {
	      sec = sections[symndx];
	      val = (sec->output_section->vma
		     + sec->output_offset
		     + sym->n_value
		     - sec->vma);
	    }
	}
      else
	{
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      asection *sec;

	      sec = h->root.u.def.section;
	      val = (h->root.u.def.value
		     + sec->output_section->vma
		     + sec->output_offset);
	    }
	  else if (! bfd_link_relocatable (info))
	    /* Undefined symbol in a final link: report it and carry
	       on with VAL == 0.  */
	    (*info->callbacks->undefined_symbol)
	      (info, h->root.root.string, input_bfd, input_section,
	       rel->r_vaddr - input_section->vma, true);
	}

      rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
					contents,
					rel->r_vaddr - input_section->vma,
					val, addend);

      switch (rstat)
	{
	default:
	  abort ();
	case bfd_reloc_ok:
	  break;
	case bfd_reloc_overflow:
	  {
	    const char *name;
	    char buf[SYMNMLEN + 1];

	    /* Recover a printable symbol name for the overflow
	       callback: the string table for long names, or the
	       in-symbol name (not NUL-terminated) copied into BUF.  */
	    if (symndx == -1)
	      name = "*ABS*";
	    else if (h != NULL)
	      name = NULL;
	    else if (sym->_n._n_n._n_zeroes == 0
		     && sym->_n._n_n._n_offset != 0)
	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
	    else
	      {
		strncpy (buf, sym->_n._n_name, SYMNMLEN);
		buf[SYMNMLEN] = '\0';
		name = buf;
	      }

	    (*info->callbacks->reloc_overflow)
	      (info, (h ? &h->root : NULL), name, howto->name,
	       (bfd_vma) 0, input_bfd, input_section,
	       rel->r_vaddr - input_section->vma);
	  }
	}
    }

  return true;
}
/* This is a version of bfd_generic_get_relocated_section_contents
   which uses sh_relocate_section.  It only takes the special path
   when relaxation has left cached contents on the section; in every
   other case it defers to the generic routine.  Returns DATA on
   success, NULL on failure.  */

static bfd_byte *
sh_coff_get_relocated_section_contents (bfd *output_bfd,
					struct bfd_link_info *link_info,
					struct bfd_link_order *link_order,
					bfd_byte *data,
					bool relocatable,
					asymbol **symbols)
{
  asection *input_section = link_order->u.indirect.section;
  bfd *input_bfd = input_section->owner;
  asection **sections = NULL;
  struct internal_reloc *internal_relocs = NULL;
  struct internal_syment *internal_syms = NULL;

  /* We only need to handle the case of relaxing, or of having a
     particular set of section contents, specially.  */
  if (relocatable
      || coff_section_data (input_bfd, input_section) == NULL
      || coff_section_data (input_bfd, input_section)->contents == NULL)
    return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
						       link_order, data,
						       relocatable,
						       symbols);

  /* Start from the cached (relaxed) contents rather than re-reading
     the section from disk.  */
  memcpy (data, coff_section_data (input_bfd, input_section)->contents,
	  (size_t) input_section->size);

  if ((input_section->flags & SEC_RELOC) != 0
      && input_section->reloc_count > 0)
    {
      bfd_size_type symesz = bfd_coff_symesz (input_bfd);
      bfd_byte *esym, *esymend;
      struct internal_syment *isymp;
      asection **secpp;
      bfd_size_type amt;

      if (! _bfd_coff_get_external_symbols (input_bfd))
	goto error_return;

      internal_relocs = (_bfd_coff_read_internal_relocs
			 (input_bfd, input_section, false, (bfd_byte *) NULL,
			  false, (struct internal_reloc *) NULL));
      if (internal_relocs == NULL)
	goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (struct internal_syment);
      internal_syms = (struct internal_syment *) bfd_malloc (amt);
      if (internal_syms == NULL)
	goto error_return;

      amt = obj_raw_syment_count (input_bfd);
      amt *= sizeof (asection *);
      sections = (asection **) bfd_malloc (amt);
      if (sections == NULL)
	goto error_return;

      /* Swap in the full symbol table and record the section each
	 symbol belongs to, striding over aux entries so that the
	 arrays stay parallel to the raw symbol indices.  */
      isymp = internal_syms;
      secpp = sections;
      esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
      esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
      while (esym < esymend)
	{
	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);

	  if (isymp->n_scnum != 0)
	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
	  else
	    {
	      /* n_scnum of zero means undefined when the value is
		 zero, otherwise a common symbol.  */
	      if (isymp->n_value == 0)
		*secpp = bfd_und_section_ptr;
	      else
		*secpp = bfd_com_section_ptr;
	    }

	  esym += (isymp->n_numaux + 1) * symesz;
	  secpp += isymp->n_numaux + 1;
	  isymp += isymp->n_numaux + 1;
	}

      if (! sh_relocate_section (output_bfd, link_info, input_bfd,
				 input_section, data, internal_relocs,
				 internal_syms, sections))
	goto error_return;

      free (sections);
      sections = NULL;
      free (internal_syms);
      internal_syms = NULL;
      free (internal_relocs);
      internal_relocs = NULL;
    }

  return data;

 error_return:
  free (internal_relocs);
  free (internal_syms);
  free (sections);
  return NULL;
}
/* The target vectors.  */

#ifndef TARGET_SHL_SYM
/* Default big-endian SH COFF target.  Suppressed when this file is
   included with TARGET_SHL_SYM defined (little-endian-only builds).  */
CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
#endif

/* Pick the symbol name for the little-endian vector: either the one
   supplied by the including file, or the default sh_coff_le_vec.  */
#ifdef TARGET_SHL_SYM
#define TARGET_SYM TARGET_SHL_SYM
#else
#define TARGET_SYM sh_coff_le_vec
#endif

#ifndef TARGET_SHL_NAME
#define TARGET_SHL_NAME "coff-shl"
#endif

#ifdef COFF_WITH_PE
/* The PE variant applies SEC_CODE | SEC_DATA as extra section flags.  */
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
#else
CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
			       0, '_', NULL, COFF_SWAP_TABLE)
#endif
  2601. #ifndef TARGET_SHL_SYM
  2602. /* Some people want versions of the SH COFF target which do not align
  2603. to 16 byte boundaries. We implement that by adding a couple of new
  2604. target vectors. These are just like the ones above, but they
  2605. change the default section alignment. To generate them in the
  2606. assembler, use -small. To use them in the linker, use -b
  2607. coff-sh{l}-small and -oformat coff-sh{l}-small.
  2608. Yes, this is a horrible hack. A general solution for setting
  2609. section alignment in COFF is rather complex. ELF handles this
  2610. correctly. */
  2611. /* Only recognize the small versions if the target was not defaulted.
  2612. Otherwise we won't recognize the non default endianness. */
  2613. static bfd_cleanup
  2614. coff_small_object_p (bfd *abfd)
  2615. {
  2616. if (abfd->target_defaulted)
  2617. {
  2618. bfd_set_error (bfd_error_wrong_format);
  2619. return NULL;
  2620. }
  2621. return coff_object_p (abfd);
  2622. }
  2623. /* Set the section alignment for the small versions. */
  2624. static bool
  2625. coff_small_new_section_hook (bfd *abfd, asection *section)
  2626. {
  2627. if (! coff_new_section_hook (abfd, section))
  2628. return false;
  2629. /* We must align to at least a four byte boundary, because longword
  2630. accesses must be on a four byte boundary. */
  2631. if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
  2632. section->alignment_power = 2;
  2633. return true;
  2634. }
/* This is copied from bfd_coff_std_swap_table so that we can change
   the default section alignment power.  */

static bfd_coff_backend_data bfd_coff_small_swap_table =
{
  coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
  coff_swap_aux_out, coff_swap_sym_out,
  coff_swap_lineno_out, coff_swap_reloc_out,
  coff_swap_filehdr_out, coff_swap_aouthdr_out,
  coff_swap_scnhdr_out,
  FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
#ifdef COFF_LONG_FILENAMES
  true,
#else
  false,
#endif
  COFF_DEFAULT_LONG_SECTION_NAMES,
  /* Default section alignment power of 2 (four bytes) instead of
     COFF_DEFAULT_SECTION_ALIGNMENT_POWER -- the reason this copy of
     the standard table exists.  */
  2,
#ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
  true,
#else
  false,
#endif
#ifdef COFF_DEBUG_STRING_WIDE_PREFIX
  4,
#else
  2,
#endif
  /* NOTE(review): presumably the same threshold value used by
     bfd_coff_std_swap_table at this position -- confirm against
     coffcode.h when editing.  */
  32768,
  coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
  coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
  coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
  coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
  coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
  coff_classify_symbol, coff_compute_section_file_positions,
  coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
  coff_adjust_symndx, coff_link_add_one_symbol,
  coff_link_output_has_begun, coff_final_link_postscript,
  bfd_pe_print_pdata
};
/* The small vectors reuse the standard COFF implementations for these
   jump-table entries; only the object_p and new_section_hook entries
   above differ.  */
#define coff_small_close_and_cleanup \
  coff_close_and_cleanup
#define coff_small_bfd_free_cached_info \
  coff_bfd_free_cached_info
#define coff_small_get_section_contents \
  coff_get_section_contents
#define coff_small_get_section_contents_in_window \
  coff_get_section_contents_in_window

/* Forward declaration: the big- and little-endian small vectors name
   each other as alternative targets.  */
extern const bfd_target sh_coff_small_le_vec;
/* Big-endian SH COFF target with the small (4-byte) default section
   alignment, selected with -b coff-sh-small / -oformat coff-sh-small.  */
const bfd_target sh_coff_small_vec =
{
  "coff-sh-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_BIG,		/* data byte order is big */
  BFD_ENDIAN_BIG,		/* header byte order is big */

  (HAS_RELOC | EXEC_P		/* object flags */
   | HAS_LINENO | HAS_DEBUG
   | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  0,				/* match priority.  */
  TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
  bfd_getb64, bfd_getb_signed_64, bfd_putb64,
  bfd_getb32, bfd_getb_signed_32, bfd_putb32,
  bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */

  {				/* bfd_check_format */
    _bfd_dummy_target,
    coff_small_object_p,	/* refuses defaulted targets */
    bfd_generic_archive_p,
    _bfd_dummy_target
  },
  {				/* bfd_set_format */
    _bfd_bool_bfd_false_error,
    coff_mkobject,
    _bfd_generic_mkarchive,
    _bfd_bool_bfd_false_error
  },
  {				/* bfd_write_contents */
    _bfd_bool_bfd_false_error,
    coff_write_object_contents,
    _bfd_write_archive_contents,
    _bfd_bool_bfd_false_error
  },

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  &sh_coff_small_le_vec,	/* alternative (opposite endian) target */

  &bfd_coff_small_swap_table	/* backend data with alignment power 2 */
};
/* Little-endian counterpart of sh_coff_small_vec; identical except
   for byte order and the swapped alternative-target pointer.  */
const bfd_target sh_coff_small_le_vec =
{
  "coff-shl-small",		/* name */
  bfd_target_coff_flavour,
  BFD_ENDIAN_LITTLE,		/* data byte order is little */
  BFD_ENDIAN_LITTLE,		/* header byte order is little endian too*/

  (HAS_RELOC | EXEC_P		/* object flags */
   | HAS_LINENO | HAS_DEBUG
   | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),

  (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
  '_',				/* leading symbol underscore */
  '/',				/* ar_pad_char */
  15,				/* ar_max_namelen */
  0,				/* match priority.  */
  TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
  bfd_getl64, bfd_getl_signed_64, bfd_putl64,
  bfd_getl32, bfd_getl_signed_32, bfd_putl32,
  bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */

  {				/* bfd_check_format */
    _bfd_dummy_target,
    coff_small_object_p,	/* refuses defaulted targets */
    bfd_generic_archive_p,
    _bfd_dummy_target
  },
  {				/* bfd_set_format */
    _bfd_bool_bfd_false_error,
    coff_mkobject,
    _bfd_generic_mkarchive,
    _bfd_bool_bfd_false_error
  },
  {				/* bfd_write_contents */
    _bfd_bool_bfd_false_error,
    coff_write_object_contents,
    _bfd_write_archive_contents,
    _bfd_bool_bfd_false_error
  },

  BFD_JUMP_TABLE_GENERIC (coff_small),
  BFD_JUMP_TABLE_COPY (coff),
  BFD_JUMP_TABLE_CORE (_bfd_nocore),
  BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
  BFD_JUMP_TABLE_SYMBOLS (coff),
  BFD_JUMP_TABLE_RELOCS (coff),
  BFD_JUMP_TABLE_WRITE (coff),
  BFD_JUMP_TABLE_LINK (coff),
  BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),

  &sh_coff_small_vec,		/* alternative (opposite endian) target */

  &bfd_coff_small_swap_table	/* backend data with alignment power 2 */
};
  2785. #endif