/* Copyright (C) 2020-2022 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */
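
/* Illustrative example only (trait values chosen arbitrarily) of how the
   entry points defined below are typically used from application code:

     omp_alloctrait_t traits[2]
       = { { omp_atk_alignment, 64 },
	   { omp_atk_fallback, omp_atv_null_fb } };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 2, traits);
     void *p = omp_alloc (1024, a);
     omp_free (p, a);
     omp_destroy_allocator (a);  */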

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

#define omp_max_predefined_alloc omp_thread_mem_alloc
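
/* Handle values up to and including omp_max_predefined_alloc refer to the
   predefined allocators; any larger handle value is (a cast of) a pointer
   to a heap-allocated struct omp_allocator_data created by
   omp_init_allocator.  */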

struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};
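
/* Every successful allocation returns a pointer RET such that
   ((struct omp_mem_header *) RET)[-1] records the raw pointer obtained
   from malloc/calloc/realloc, the total size requested from the system
   allocator and the allocator handle the block was allocated with;
   omp_free and omp_realloc read it back from there.  */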

struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};

omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
                    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
        omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.sync_hint = omp_atv_contended;
            break;
          case omp_atv_contended:
          case omp_atv_uncontended:
          case omp_atv_serialized:
          case omp_atv_private:
            data.sync_hint = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_alignment:
        if (traits[i].value == omp_atv_default)
          {
            data.alignment = 1;
            break;
          }
        if ((traits[i].value & (traits[i].value - 1)) != 0
            || !traits[i].value)
          return omp_null_allocator;
        data.alignment = traits[i].value;
        break;
      case omp_atk_access:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.access = omp_atv_all;
            break;
          case omp_atv_all:
          case omp_atv_cgroup:
          case omp_atv_pteam:
          case omp_atv_thread:
            data.access = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_pool_size:
        if (traits[i].value == omp_atv_default)
          data.pool_size = ~(uintptr_t) 0;
        else
          data.pool_size = traits[i].value;
        break;
      case omp_atk_fallback:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.fallback = omp_atv_default_mem_fb;
            break;
          case omp_atv_default_mem_fb:
          case omp_atv_null_fb:
          case omp_atv_abort_fb:
          case omp_atv_allocator_fb:
            data.fallback = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_fb_data:
        data.fb_data = traits[i].value;
        break;
      case omp_atk_pinned:
        switch (traits[i].value)
          {
          case omp_atv_default:
          case omp_atv_false:
            data.pinned = omp_atv_false;
            break;
          case omp_atv_true:
            data.pinned = omp_atv_true;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_partition:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.partition = omp_atv_environment;
            break;
          case omp_atv_environment:
          case omp_atv_nearest:
          case omp_atv_blocked:
          case omp_atv_interleaved:
            data.partition = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      default:
        return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* No support for these so far (for hbw will use memkind).  */
  if (data.pinned || data.memspace == omp_high_bw_mem_space)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}

void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}

ialias (omp_init_allocator)
ialias (omp_destroy_allocator)

void *
omp_aligned_alloc (size_t alignment, size_t size,
                   omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
    }
  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
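      /* This user-defined allocator limits its pool size: reserve new_size
         bytes of used_pool_size first, either lock-free with a
         compare-exchange loop when HAVE_SYNC_BUILTINS is available or under
         the allocator's mutex otherwise, and release the reservation again
         if the underlying malloc fails.  */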
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = malloc (new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = malloc (new_size);
      if (ptr == NULL)
        goto fail;
    }
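
  /* Round the address just past the header up to a multiple of
     new_alignment; the padding skipped over is covered by the extra
     new_alignment - sizeof (void *) bytes added to new_size above.  */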
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;
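
  /* The allocation failed or would have exceeded the pool; apply the
     allocator's fallback trait: default_mem_fb retries with
     omp_default_mem_alloc, null_fb returns NULL, abort_fb terminates via
     gomp_fatal and allocator_fb retries with the allocator from fb_data.  */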
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_alloc)

void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_alloc) (1, size, allocator);
}

/* Like omp_aligned_alloc, but apply on top of that:
   "For allocations that arise from this ... the null_fb value of the
   fallback allocator trait behaves as if the abort_fb had been specified."  */

void *
GOMP_alloc (size_t alignment, size_t size, uintptr_t allocator)
{
  void *ret
    = ialias_call (omp_aligned_alloc) (alignment, size,
                                       (omp_allocator_handle_t) allocator);
  if (__builtin_expect (ret == NULL, 0) && size)
    gomp_fatal ("Out of memory allocating %lu bytes",
                (unsigned long) size);
  return ret;
}
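
/* omp_free ignores the allocator argument it is passed; the allocator
   recorded in the block's omp_mem_header is the one whose pool accounting
   is adjusted before the underlying storage is released with free.  */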
void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
        = (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= data->size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
        }
    }
  free (data->ptr);
}

ialias (omp_free)
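
/* GOMP_free is the compiler-facing counterpart of GOMP_alloc; it simply
   forwards to omp_free with the allocator handle the compiler passed.  */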
void
GOMP_free (void *ptr, uintptr_t allocator)
{
  return ialias_call (omp_free) (ptr, (omp_allocator_handle_t) allocator);
}

void *
omp_aligned_calloc (size_t alignment, size_t nmemb, size_t size,
                    omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t new_size, size_temp, new_alignment;
  void *ptr, *ret;

  if (__builtin_expect (size == 0 || nmemb == 0, 0))
    return NULL;

retry:
  new_alignment = alignment;
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      if (new_alignment < sizeof (void *))
        new_alignment = sizeof (void *);
    }
  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_mul_overflow (size, nmemb, &size_temp))
    goto fail;
  if (__builtin_add_overflow (size_temp, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = calloc (1, new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = calloc (1, new_size);
      if (ptr == NULL)
        goto fail;
    }
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if ((new_alignment > sizeof (void *) && new_alignment > alignment)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) (size * nmemb));
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}

ialias (omp_aligned_calloc)

void *
omp_calloc (size_t nmemb, size_t size, omp_allocator_handle_t allocator)
{
  return ialias_call (omp_aligned_calloc) (1, nmemb, size, allocator);
}

void *
omp_realloc (void *ptr, size_t size, omp_allocator_handle_t allocator,
             omp_allocator_handle_t free_allocator)
{
  struct omp_allocator_data *allocator_data, *free_allocator_data;
  size_t new_size, old_size, new_alignment, old_alignment;
  void *new_ptr, *ret;
  struct omp_mem_header *data;

  if (__builtin_expect (ptr == NULL, 0))
    return ialias_call (omp_aligned_alloc) (1, size, allocator);

  if (__builtin_expect (size == 0, 0))
    {
      ialias_call (omp_free) (ptr, free_allocator);
      return NULL;
    }

  data = &((struct omp_mem_header *) ptr)[-1];
  free_allocator = data->allocator;

retry:
  new_alignment = sizeof (void *);
  if (allocator == omp_null_allocator)
    allocator = free_allocator;

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      if (new_alignment < allocator_data->alignment)
        new_alignment = allocator_data->alignment;
    }
  else
    allocator_data = NULL;
  if (free_allocator > omp_max_predefined_alloc)
    free_allocator_data = (struct omp_allocator_data *) free_allocator;
  else
    free_allocator_data = NULL;
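  /* Despite its name, old_alignment is the offset of the user pointer from
     the start of the raw allocation, i.e. the header plus any alignment
     padding that was inserted when the block was originally allocated.  */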
  old_alignment = (uintptr_t) ptr - (uintptr_t) (data->ptr);

  new_size = sizeof (struct omp_mem_header);
  if (new_alignment > sizeof (void *))
    new_size += new_alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;
  old_size = data->size;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      size_t prev_size = 0;
      /* Check if we can use realloc.  Don't use it if extra alignment
         was used previously or newly, because realloc might return a pointer
         with different alignment and then we'd need to memmove the data
         again.  */
      if (free_allocator_data
          && free_allocator_data == allocator_data
          && new_alignment == sizeof (void *)
          && old_alignment == sizeof (struct omp_mem_header))
        prev_size = old_size;
      if (new_size > prev_size
          && new_size - prev_size > allocator_data->pool_size)
        goto fail;
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (new_size > prev_size)
            {
              if (__builtin_add_overflow (used_pool_size, new_size - prev_size,
                                          &new_pool_size)
                  || new_pool_size > allocator_data->pool_size)
                goto fail;
            }
          else
            new_pool_size = used_pool_size + new_size - prev_size;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (new_size > prev_size)
        {
          if (__builtin_add_overflow (allocator_data->used_pool_size,
                                      new_size - prev_size,
                                      &used_pool_size)
              || used_pool_size > allocator_data->pool_size)
            {
              gomp_mutex_unlock (&allocator_data->lock);
              goto fail;
            }
        }
      else
        used_pool_size = (allocator_data->used_pool_size
                          + new_size - prev_size);
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      if (prev_size)
        new_ptr = realloc (data->ptr, new_size);
      else
        new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size,
                              prev_size - new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size - prev_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
      else if (prev_size)
        {
          ret = (char *) new_ptr + sizeof (struct omp_mem_header);
          ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
          ((struct omp_mem_header *) ret)[-1].size = new_size;
          ((struct omp_mem_header *) ret)[-1].allocator = allocator;
          return ret;
        }
    }
  else if (new_alignment == sizeof (void *)
           && old_alignment == sizeof (struct omp_mem_header)
           && (free_allocator_data == NULL
               || free_allocator_data->pool_size == ~(uintptr_t) 0))
    {
      new_ptr = realloc (data->ptr, new_size);
      if (new_ptr == NULL)
        goto fail;
      ret = (char *) new_ptr + sizeof (struct omp_mem_header);
      ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
      ((struct omp_mem_header *) ret)[-1].size = new_size;
      ((struct omp_mem_header *) ret)[-1].allocator = allocator;
      return ret;
    }
  else
    {
      new_ptr = malloc (new_size);
      if (new_ptr == NULL)
        goto fail;
    }
  if (new_alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) new_ptr
                     + sizeof (struct omp_mem_header)
                     + new_alignment - sizeof (void *))
                    & ~(new_alignment - 1));
  else
    ret = (char *) new_ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = new_ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  if (old_size - old_alignment < size)
    size = old_size - old_alignment;
  memcpy (ret, ptr, size);
  if (__builtin_expect (free_allocator_data
                        && free_allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
#ifdef HAVE_SYNC_BUILTINS
      __atomic_add_fetch (&free_allocator_data->used_pool_size, -data->size,
                          MEMMODEL_RELAXED);
#else
      gomp_mutex_lock (&free_allocator_data->lock);
      free_allocator_data->used_pool_size -= data->size;
      gomp_mutex_unlock (&free_allocator_data->lock);
#endif
    }
  free (data->ptr);
  return ret;

fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if (new_alignment > sizeof (void *)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}