parallel.c 9.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342
  1. /* Copyright (C) 2005-2022 Free Software Foundation, Inc.
  2. Contributed by Richard Henderson <rth@redhat.com>.
  3. This file is part of the GNU Offloading and Multi Processing Library
  4. (libgomp).
  5. Libgomp is free software; you can redistribute it and/or modify it
  6. under the terms of the GNU General Public License as published by
  7. the Free Software Foundation; either version 3, or (at your option)
  8. any later version.
  9. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
  10. WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  11. FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. more details.
  13. Under Section 7 of GPL version 3, you are granted additional
  14. permissions described in the GCC Runtime Library Exception, version
  15. 3.1, as published by the Free Software Foundation.
  16. You should have received a copy of the GNU General Public License and
  17. a copy of the GCC Runtime Library Exception along with this program;
  18. see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  19. <http://www.gnu.org/licenses/>. */
  20. /* This file handles the (bare) PARALLEL construct. */
  21. #include "libgomp.h"
  22. #include <limits.h>
/* Determine the number of threads to be launched for a PARALLEL construct.
   This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
   SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
   If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
   is not present, SPECIFIED is 0.

   COUNT, when non-zero, is an upper bound on useful parallelism (e.g. the
   number of sections in a SECTIONS construct) and is only consulted when
   dynamic adjustment of threads is enabled.

   Besides computing the answer, this also charges the launched threads
   against the contention group's THREADS_BUSY counter when a finite
   thread-limit is in effect.  */
unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long busy;
  struct gomp_thread_pool *pool;

  icv = gomp_icv (false);

  /* An IF clause that evaluated to false forces a team of one.  */
  if (specified == 1)
    return 1;

  if (thr->ts.active_level >= 1
  /* Accelerators with fixed thread counts require this to return 1 for
     nested parallel regions.  */
#if !defined(__AMDGCN__) && !defined(__nvptx__)
      && icv->max_active_levels_var <= 1
#endif
      )
    return 1;
  else if (thr->ts.active_level >= icv->max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
	max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
	max_num_threads = count;
    }

  /* UINT_MAX stands for infinity.  With no finite thread-limit (the
     common case, hence the branch hint) there is nothing to account,
     so skip the THREADS_BUSY bookkeeping below entirely.  */
  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* The threads_busy counter lives in thread_pool, if there
     isn't a thread_pool yet, there must be just one thread
     in the contention group.  If thr->team is NULL, this isn't
     nested parallel, so there is just one thread in the
     contention group as well, no need to handle it atomically.  */
  pool = thr->thread_pool;
  if (thr->ts.team == NULL || pool == NULL)
    {
      num_threads = max_num_threads;
      if (num_threads > icv->thread_limit_var)
	num_threads = icv->thread_limit_var;
      if (pool)
	pool->threads_busy = num_threads;
      return num_threads;
    }

#ifdef HAVE_SYNC_BUILTINS
  /* Atomically reserve our threads: re-read BUSY each attempt, clamp the
     request so BUSY + (num_threads - 1) never exceeds the limit (the
     current thread is already counted, hence the "+ 1" / "- 1"), and
     retry if another team start raced us on the counter.  */
  do
    {
      busy = pool->threads_busy;
      num_threads = max_num_threads;
      if (icv->thread_limit_var - busy + 1 < num_threads)
	num_threads = icv->thread_limit_var - busy + 1;
    }
  while (__sync_val_compare_and_swap (&pool->threads_busy,
				      busy, busy + num_threads - 1)
	 != busy);
#else
  /* No atomics available: do the same clamp-and-charge under the
     global managed-threads lock.  */
  gomp_mutex_lock (&gomp_managed_threads_lock);
  num_threads = max_num_threads;
  busy = pool->threads_busy;
  if (icv->thread_limit_var - busy + 1 < num_threads)
    num_threads = icv->thread_limit_var - busy + 1;
  pool->threads_busy += num_threads - 1;
  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif

  return num_threads;
}
  107. void
  108. GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
  109. {
  110. num_threads = gomp_resolve_num_threads (num_threads, 0);
  111. gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads),
  112. NULL);
  113. }
/* Tear down the current team.  When a finite thread-limit is in effect,
   also release this team's reservation from the contention group's
   THREADS_BUSY counter (charged in gomp_resolve_num_threads).  */
void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  /* thread_limit_var == UINT_MAX means "no limit"; that is the common
     case, so the accounting path is marked unlikely.  */
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      /* Capture the team size before gomp_team_end destroys the team.  */
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
	{
	  /* If not nested, there is just one thread in the
	     contention group left, no need for atomicity.
	     (thr->ts.team is re-read here: gomp_team_end has popped
	     the just-ended team, so NULL now means "back at the
	     outermost level".)  */
	  if (thr->ts.team == NULL)
	    thr->thread_pool->threads_busy = 1;
	  else
	    {
#ifdef HAVE_SYNC_BUILTINS
	      /* Subtract the nthreads - 1 extra threads we charged;
		 1UL - nthreads wraps to the right negative delta for
		 the unsigned add.  */
	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
				    1UL - nthreads);
#else
	      gomp_mutex_lock (&gomp_managed_threads_lock);
	      thr->thread_pool->threads_busy -= nthreads - 1;
	      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	    }
	}
    }
  else
    gomp_team_end ();
}
ialias (GOMP_parallel_end)
  147. void
  148. GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads,
  149. unsigned int flags)
  150. {
  151. num_threads = gomp_resolve_num_threads (num_threads, 0);
  152. gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads),
  153. NULL);
  154. fn (data);
  155. ialias_call (GOMP_parallel_end) ();
  156. }
  157. unsigned
  158. GOMP_parallel_reductions (void (*fn) (void *), void *data,
  159. unsigned num_threads, unsigned int flags)
  160. {
  161. struct gomp_taskgroup *taskgroup;
  162. num_threads = gomp_resolve_num_threads (num_threads, 0);
  163. uintptr_t *rdata = *(uintptr_t **)data;
  164. taskgroup = gomp_parallel_reduction_register (rdata, num_threads);
  165. gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads),
  166. taskgroup);
  167. fn (data);
  168. ialias_call (GOMP_parallel_end) ();
  169. gomp_sem_destroy (&taskgroup->taskgroup_sem);
  170. free (taskgroup);
  171. return num_threads;
  172. }
  173. bool
  174. GOMP_cancellation_point (int which)
  175. {
  176. if (!gomp_cancel_var)
  177. return false;
  178. struct gomp_thread *thr = gomp_thread ();
  179. struct gomp_team *team = thr->ts.team;
  180. if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
  181. {
  182. if (team == NULL)
  183. return false;
  184. return team->work_share_cancelled != 0;
  185. }
  186. else if (which & GOMP_CANCEL_TASKGROUP)
  187. {
  188. if (thr->task->taskgroup)
  189. {
  190. if (thr->task->taskgroup->cancelled)
  191. return true;
  192. if (thr->task->taskgroup->workshare
  193. && thr->task->taskgroup->prev
  194. && thr->task->taskgroup->prev->cancelled)
  195. return true;
  196. }
  197. /* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
  198. as #pragma omp cancel parallel also cancels all explicit
  199. tasks. */
  200. }
  201. if (team)
  202. return gomp_team_barrier_cancelled (&team->barrier);
  203. return false;
  204. }
  205. ialias (GOMP_cancellation_point)
/* Implement #pragma omp cancel for the construct kind(s) in WHICH.
   With DO_CANCEL false (the cancel directive's IF clause evaluated to
   false) this degenerates to a cancellation point.  Returns true when
   the current construct is (now) cancelled.  */
bool
GOMP_cancel (int which, bool do_cancel)
{
  /* Cancellation support disabled (OMP_CANCELLATION unset): no-op.  */
  if (!gomp_cancel_var)
    return false;
  if (!do_cancel)
    return ialias_call (GOMP_cancellation_point) (which);
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      /* In orphaned worksharing region, all we want to cancel
	 is current thread.  */
      if (team != NULL)
	team->work_share_cancelled = 1;
      return true;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup)
	{
	  struct gomp_taskgroup *taskgroup = thr->task->taskgroup;
	  /* A workshare-internal taskgroup forwards cancellation to
	     its enclosing user taskgroup.  */
	  if (taskgroup->workshare && taskgroup->prev)
	    taskgroup = taskgroup->prev;
	  if (!taskgroup->cancelled)
	    {
	      /* NOTE(review): TEAM is dereferenced here without a NULL
		 check; this relies on a taskgroup only existing inside
		 a team — confirm against task.c.  */
	      gomp_mutex_lock (&team->task_lock);
	      taskgroup->cancelled = true;
	      gomp_mutex_unlock (&team->task_lock);
	    }
	}
      return true;
    }
  /* GOMP_CANCEL_PARALLEL: cancel the whole team via its barrier.  */
  team->team_cancelled = 1;
  gomp_team_barrier_cancel (team);
  return true;
}
  243. /* The public OpenMP API for thread and team related inquiries. */
  244. int
  245. omp_get_num_threads (void)
  246. {
  247. struct gomp_team *team = gomp_thread ()->ts.team;
  248. return team ? team->nthreads : 1;
  249. }
  250. int
  251. omp_get_thread_num (void)
  252. {
  253. return gomp_thread ()->ts.team_id;
  254. }
  255. /* This wasn't right for OpenMP 2.5. Active region used to be non-zero
  256. when the IF clause doesn't evaluate to false, starting with OpenMP 3.0
  257. it is non-zero with more than one thread in the team. */
  258. int
  259. omp_in_parallel (void)
  260. {
  261. return gomp_thread ()->ts.active_level > 0;
  262. }
  263. int
  264. omp_get_level (void)
  265. {
  266. return gomp_thread ()->ts.level;
  267. }
  268. int
  269. omp_get_ancestor_thread_num (int level)
  270. {
  271. struct gomp_team_state *ts = &gomp_thread ()->ts;
  272. if (level < 0 || level > ts->level)
  273. return -1;
  274. for (level = ts->level - level; level > 0; --level)
  275. ts = &ts->team->prev_ts;
  276. return ts->team_id;
  277. }
  278. int
  279. omp_get_team_size (int level)
  280. {
  281. struct gomp_team_state *ts = &gomp_thread ()->ts;
  282. if (level < 0 || level > ts->level)
  283. return -1;
  284. for (level = ts->level - level; level > 0; --level)
  285. ts = &ts->team->prev_ts;
  286. if (ts->team == NULL)
  287. return 1;
  288. else
  289. return ts->team->nthreads;
  290. }
  291. int
  292. omp_get_active_level (void)
  293. {
  294. return gomp_thread ()->ts.active_level;
  295. }
  296. ialias (omp_get_num_threads)
  297. ialias (omp_get_thread_num)
  298. ialias (omp_in_parallel)
  299. ialias (omp_get_level)
  300. ialias (omp_get_ancestor_thread_num)
  301. ialias (omp_get_team_size)
  302. ialias (omp_get_active_level)