offload_engine.h 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724
  1. /*
  2. Copyright (c) 2014-2016 Intel Corporation. All Rights Reserved.
  3. Redistribution and use in source and binary forms, with or without
  4. modification, are permitted provided that the following conditions
  5. are met:
  6. * Redistributions of source code must retain the above copyright
  7. notice, this list of conditions and the following disclaimer.
  8. * Redistributions in binary form must reproduce the above copyright
  9. notice, this list of conditions and the following disclaimer in the
  10. documentation and/or other materials provided with the distribution.
  11. * Neither the name of Intel Corporation nor the names of its
  12. contributors may be used to endorse or promote products derived
  13. from this software without specific prior written permission.
  14. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  15. "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  16. LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  17. A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  18. HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  19. SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  20. LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  21. DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  22. THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  23. (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #ifndef OFFLOAD_ENGINE_H_INCLUDED
  27. #define OFFLOAD_ENGINE_H_INCLUDED
  28. #include <limits.h>
  29. #include <bitset>
  30. #include <list>
  31. #include <set>
  32. #include <map>
  33. #include "offload_common.h"
  34. #include "coi/coi_client.h"
  35. #define SIGNAL_HAS_COMPLETED ((OffloadDescriptor *)-1)
  36. const int64_t no_stream = -1;
  37. // Address range
  38. class MemRange {
  39. public:
  40. MemRange() : m_start(0), m_length(0) {}
  41. MemRange(const void *addr, uint64_t len) : m_start(addr), m_length(len) {}
  42. const void* start() const {
  43. return m_start;
  44. }
  45. const void* end() const {
  46. return static_cast<const char*>(m_start) + m_length;
  47. }
  48. uint64_t length() const {
  49. return m_length;
  50. }
  51. // returns true if given range overlaps with another one
  52. bool overlaps(const MemRange &o) const {
  53. // Two address ranges A[start, end) and B[start,end) overlap
  54. // if A.start < B.end and A.end > B.start.
  55. return start() < o.end() && end() > o.start();
  56. }
  57. // returns true if given range contains the other range
  58. bool contains(const MemRange &o) const {
  59. return start() <= o.start() && o.end() <= end();
  60. }
  61. private:
  62. const void* m_start;
  63. uint64_t m_length;
  64. };
  65. // Data associated with a pointer variable
  66. class PtrData {
  67. public:
  68. PtrData(const void *addr, uint64_t len) :
  69. cpu_addr(addr, len), cpu_buf(0),
  70. mic_addr(0), alloc_disp(0), mic_buf(0), mic_offset(0),
  71. ref_count(0), is_static(false), is_omp_associate(false)
  72. {}
  73. //
  74. // Copy constructor
  75. //
  76. PtrData(const PtrData& ptr):
  77. cpu_addr(ptr.cpu_addr), cpu_buf(ptr.cpu_buf),
  78. mic_addr(ptr.mic_addr), alloc_disp(ptr.alloc_disp),
  79. mic_buf(ptr.mic_buf), mic_offset(ptr.mic_offset),
  80. ref_count(ptr.ref_count), is_static(ptr.is_static),
  81. is_omp_associate(ptr.is_omp_associate),
  82. var_alloc_type(0)
  83. {}
  84. bool operator<(const PtrData &o) const {
  85. // Variables are sorted by the CPU start address.
  86. // Overlapping memory ranges are considered equal.
  87. return (cpu_addr.start() < o.cpu_addr.start()) &&
  88. !cpu_addr.overlaps(o.cpu_addr);
  89. }
  90. long add_reference() {
  91. if (is_omp_associate || (is_static && !var_alloc_type)) {
  92. return LONG_MAX;
  93. }
  94. #ifndef TARGET_WINNT
  95. return __sync_fetch_and_add(&ref_count, 1);
  96. #else // TARGET_WINNT
  97. return _InterlockedIncrement(&ref_count) - 1;
  98. #endif // TARGET_WINNT
  99. }
  100. long remove_reference() {
  101. if (is_omp_associate || (is_static && !var_alloc_type)) {
  102. return LONG_MAX;
  103. }
  104. #ifndef TARGET_WINNT
  105. return __sync_sub_and_fetch(&ref_count, 1);
  106. #else // TARGET_WINNT
  107. return _InterlockedDecrement(&ref_count);
  108. #endif // TARGET_WINNT
  109. }
  110. long get_reference() const {
  111. if (is_omp_associate || (is_static && !var_alloc_type)) {
  112. return LONG_MAX;
  113. }
  114. return ref_count;
  115. }
  116. public:
  117. // CPU address range
  118. const MemRange cpu_addr;
  119. // CPU and MIC buffers
  120. COIBUFFER cpu_buf;
  121. COIBUFFER mic_buf;
  122. // placeholder for buffer address on mic
  123. uint64_t mic_addr;
  124. uint64_t alloc_disp;
  125. // additional offset to pointer data on MIC for improving bandwidth for
  126. // data which is not 4K aligned
  127. uint32_t mic_offset;
  128. // if true buffers are created from static memory
  129. bool is_static;
  130. // true if MIC buffer created by omp_target_associate
  131. bool is_omp_associate;
  132. bool var_alloc_type;
  133. mutex_t alloc_ptr_data_lock;
  134. private:
  135. // reference count for the entry
  136. long ref_count;
  137. };
  138. typedef std::list<PtrData*> PtrDataList;
  139. class PtrDataTable {
  140. public:
  141. typedef std::set<PtrData> PtrSet;
  142. PtrData* find_ptr_data(const void *ptr) {
  143. m_ptr_lock.lock();
  144. PtrSet::iterator res = list.find(PtrData(ptr, 0));
  145. m_ptr_lock.unlock();
  146. if (res == list.end()) {
  147. return 0;
  148. }
  149. return const_cast<PtrData*>(res.operator->());
  150. }
  151. PtrData* insert_ptr_data(const void *ptr, uint64_t len, bool &is_new) {
  152. m_ptr_lock.lock();
  153. std::pair<PtrSet::iterator, bool> res =
  154. list.insert(PtrData(ptr, len));
  155. PtrData* ptr_data = const_cast<PtrData*>(res.first.operator->());
  156. m_ptr_lock.unlock();
  157. is_new = res.second;
  158. if (is_new) {
  159. // It's necessary to lock as soon as possible.
  160. // unlock must be done at call site of insert_ptr_data at
  161. // branch for is_new
  162. ptr_data->alloc_ptr_data_lock.lock();
  163. }
  164. return ptr_data;
  165. }
  166. void remove_ptr_data(const void *ptr) {
  167. m_ptr_lock.lock();
  168. list.erase(PtrData(ptr, 0));
  169. m_ptr_lock.unlock();
  170. }
  171. private:
  172. PtrSet list;
  173. mutex_t m_ptr_lock;
  174. };
  175. // Data associated with automatic variable
  176. class AutoData {
  177. public:
  178. AutoData(const void *addr, uint64_t len) :
  179. cpu_addr(addr, len), ref_count(0)
  180. {}
  181. bool operator<(const AutoData &o) const {
  182. // Variables are sorted by the CPU start address.
  183. // Overlapping memory ranges are considered equal.
  184. return (cpu_addr.start() < o.cpu_addr.start()) &&
  185. !cpu_addr.overlaps(o.cpu_addr);
  186. }
  187. long add_reference() {
  188. #ifndef TARGET_WINNT
  189. return __sync_fetch_and_add(&ref_count, 1);
  190. #else // TARGET_WINNT
  191. return _InterlockedIncrement(&ref_count) - 1;
  192. #endif // TARGET_WINNT
  193. }
  194. long remove_reference() {
  195. #ifndef TARGET_WINNT
  196. return __sync_sub_and_fetch(&ref_count, 1);
  197. #else // TARGET_WINNT
  198. return _InterlockedDecrement(&ref_count);
  199. #endif // TARGET_WINNT
  200. }
  201. long nullify_reference() {
  202. #ifndef TARGET_WINNT
  203. return __sync_lock_test_and_set(&ref_count, 0);
  204. #else // TARGET_WINNT
  205. return _InterlockedExchange(&ref_count,0);
  206. #endif // TARGET_WINNT
  207. }
  208. long get_reference() const {
  209. return ref_count;
  210. }
  211. public:
  212. // CPU address range
  213. const MemRange cpu_addr;
  214. private:
  215. // reference count for the entry
  216. long ref_count;
  217. };
// Set of automatic variables
  219. typedef std::set<AutoData> AutoSet;
  220. // Target image data
  221. struct TargetImage
  222. {
  223. TargetImage(const char *_name, const void *_data, uint64_t _size,
  224. const char *_origin, uint64_t _offset) :
  225. name(_name), data(_data), size(_size),
  226. origin(_origin), offset(_offset)
  227. {}
  228. // library name
  229. const char* name;
  230. // contents and size
  231. const void* data;
  232. uint64_t size;
  233. // file of origin and offset within that file
  234. const char* origin;
  235. uint64_t offset;
  236. };
  237. typedef std::list<TargetImage> TargetImageList;
  238. // dynamic library and Image associated with lib
  239. struct DynLib
  240. {
  241. DynLib(const char *_name, const void *_data,
  242. COILIBRARY _lib) :
  243. name(_name), data(_data), lib(_lib)
  244. {}
  245. // library name
  246. const char* name;
  247. // contents
  248. const void* data;
  249. COILIBRARY lib;
  250. };
  251. typedef std::list<DynLib> DynLibList;
  252. // Data associated with persistent auto objects
  253. struct PersistData
  254. {
  255. PersistData(const void *addr, uint64_t routine_num,
  256. uint64_t size, uint64_t thread) :
  257. stack_cpu_addr(addr), routine_id(routine_num), thread_id(thread)
  258. {
  259. stack_ptr_data = new PtrData(0, size);
  260. }
  261. // 1-st key value - beginning of the stack at CPU
  262. const void * stack_cpu_addr;
  263. // 2-nd key value - identifier of routine invocation at CPU
  264. uint64_t routine_id;
  265. // 3-rd key value - thread identifier
  266. uint64_t thread_id;
  267. // corresponded PtrData; only stack_ptr_data->mic_buf is used
  268. PtrData * stack_ptr_data;
  269. // used to get offset of the variable in stack buffer
  270. char * cpu_stack_addr;
  271. };
  272. typedef std::list<PersistData> PersistDataList;
  273. // Data associated with stream
  274. struct Stream
  275. {
  276. Stream(int device, int num_of_cpus) :
  277. m_number_of_cpus(num_of_cpus), m_pipeline(0), m_last_offload(0),
  278. m_device(device)
  279. {}
  280. ~Stream() {
  281. if (m_pipeline) {
  282. COI::PipelineDestroy(m_pipeline);
  283. }
  284. }
  285. COIPIPELINE get_pipeline(void) {
  286. return(m_pipeline);
  287. }
  288. int get_device(void) {
  289. return(m_device);
  290. }
  291. int get_cpu_number(void) {
  292. return(m_number_of_cpus);
  293. }
  294. void set_pipeline(COIPIPELINE pipeline) {
  295. m_pipeline = pipeline;
  296. }
  297. OffloadDescriptor* get_last_offload(void) {
  298. return(m_last_offload);
  299. }
  300. void set_last_offload(OffloadDescriptor* last_offload) {
  301. m_last_offload = last_offload;
  302. }
  303. static Stream* find_stream(uint64_t handle, bool remove);
  304. static _Offload_stream add_stream(int device, int number_of_cpus) {
  305. _Offload_stream result;
  306. m_stream_lock.lock();
  307. result = ++m_streams_count;
  308. all_streams[m_streams_count] = new Stream(device, number_of_cpus);
  309. m_stream_lock.unlock();
  310. return(result);
  311. }
  312. static uint64_t get_streams_count() {
  313. return m_streams_count;
  314. }
  315. typedef std::map<uint64_t, Stream*> StreamMap;
  316. static uint64_t m_streams_count;
  317. static StreamMap all_streams;
  318. static mutex_t m_stream_lock;
  319. int m_device;
  320. // number of cpus
  321. int m_number_of_cpus;
  322. // The pipeline associated with the stream
  323. COIPIPELINE m_pipeline;
  324. // The last offload occured via the stream
  325. OffloadDescriptor* m_last_offload;
  326. // Cpus used by the stream
  327. std::bitset<COI_MAX_HW_THREADS> m_stream_cpus;
  328. };
  329. typedef std::map<uint64_t, Stream*> StreamMap;
  330. typedef std::bitset<COI_MAX_HW_THREADS> micLcpuMask;
// Node of a doubly linked list of cpus used by streams, kept ordered by
// 'count' (see the prev/next invariants on each link below).
typedef struct CpuEl{
uint64_t count; // number of streams using the cpu
struct CpuEl* prev; // cpu with the same or lesser count
struct CpuEl* next; // cpu with the same or greater count
} CpuEl;
// class representing a single engine: one target device, its COI process,
// pending library images, pointer/automatic-variable association tables,
// signals and streams.
struct Engine {
    friend void __offload_init_library_once(void);
    friend void __offload_fini_library(void);

// Index of a cpu element within the m_cpus array.
#define CPU_INDEX(x) (x - m_cpus)

// Check a COIRESULT: if the remote process died, finalize it (verbosely)
// and exit; on any other failure report the error and exit.
#define check_result(res, tag, ...) \
    { \
        if (res == COI_PROCESS_DIED) { \
            fini_process(true); \
            exit(1); \
        } \
        if (res != COI_SUCCESS) { \
            __liboffload_error_support(tag, __VA_ARGS__); \
            exit(1); \
        } \
    }

    // logical (user-visible) device index
    int get_logical_index() const {
        return m_index;
    }

    // physical device index
    int get_physical_index() const {
        return m_physical_index;
    }

    const COIPROCESS& get_process() const {
        return m_process;
    }

    // true once initialized with no library additions pending
    // (add_lib resets this to false)
    bool get_ready() {
        return m_ready;
    }

    uint64_t get_thread_id(void);

    // initialize device
    void init(void);

    // unload library
    void unload_library(const void *data, const char *name);

    // add new library; marks the engine not-ready until it is loaded
    void add_lib(const TargetImage &lib)
    {
        m_lock.lock();
        m_ready = false;
        m_images.push_back(lib);
        m_lock.unlock();
    }

    // run the compute function on the pipeline associated with stream
    COIRESULT compute(
        _Offload_stream stream,
        const std::list<COIBUFFER> &buffers,
        const void* data,
        uint16_t data_size,
        void* ret,
        uint16_t ret_size,
        uint32_t num_deps,
        const COIEVENT* deps,
        COIEVENT* event
    );

#ifdef MYO_SUPPORT
    // temporary workaround for blocking behavior for myoiLibInit/Fini calls
    void init_myo(COIEVENT *event) {
        COIRESULT res;
        res = COI::PipelineRunFunction(get_pipeline(),
                                       m_funcs[c_func_myo_init],
                                       0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       event);
        check_result(res, c_pipeline_run_func, m_index, res);
    }

    void fini_myo(COIEVENT *event) {
        COIRESULT res;
        res = COI::PipelineRunFunction(get_pipeline(),
                                       m_funcs[c_func_myo_fini],
                                       0, 0, 0, 0, 0, 0, 0, 0, 0,
                                       event);
        check_result(res, c_pipeline_run_func, m_index, res);
    }
#endif // MYO_SUPPORT

    //
    // Memory association table
    //
    PtrData* find_ptr_data(const void *ptr) {
        return m_ptr_set.find_ptr_data(ptr);
    }

    PtrData* find_targetptr_data(const void *ptr) {
        return m_targetptr_set.find_ptr_data(ptr);
    }

    PtrData* insert_ptr_data(const void *ptr, uint64_t len, bool &is_new) {
        return m_ptr_set.insert_ptr_data(ptr, len, is_new);
    }

    PtrData* insert_targetptr_data(const void *ptr, uint64_t len,
                                   bool &is_new) {
        return m_targetptr_set.insert_ptr_data(ptr, len, is_new);
    }

    void remove_ptr_data(const void *ptr) {
        m_ptr_set.remove_ptr_data(ptr);
    }

    void remove_targetptr_data(const void *ptr) {
        m_targetptr_set.remove_ptr_data(ptr);
    }

    //
    // Automatic variables
    //
    // NOTE(review): the AutoSet comes from get_auto_vars(), which is
    // documented below as per-calling-thread — presumably why no lock is
    // taken here; verify against callers.
    AutoData* find_auto_data(const void *ptr) {
        AutoSet &auto_vars = get_auto_vars();
        AutoSet::iterator res = auto_vars.find(AutoData(ptr, 0));
        if (res == auto_vars.end()) {
            return 0;
        }
        return const_cast<AutoData*>(res.operator->());
    }

    AutoData* insert_auto_data(const void *ptr, uint64_t len) {
        AutoSet &auto_vars = get_auto_vars();
        std::pair<AutoSet::iterator, bool> res =
            auto_vars.insert(AutoData(ptr, len));
        return const_cast<AutoData*>(res.first.operator->());
    }

    void remove_auto_data(const void *ptr) {
        get_auto_vars().erase(AutoData(ptr, 0));
    }

    //
    // Signals
    //
    void add_signal(const void *signal, OffloadDescriptor *desc) {
        m_signal_lock.lock();
        m_signal_map[signal] = desc;
        m_signal_lock.unlock();
    }

    // Returns the descriptor registered for signal, or 0 if none.  With
    // remove=true the slot is overwritten with SIGNAL_HAS_COMPLETED rather
    // than erased, so later lookups still see the (completed) signal.
    OffloadDescriptor* find_signal(const void *signal, bool remove) {
        OffloadDescriptor *desc = 0;
        m_signal_lock.lock();
        {
            SignalMap::iterator it = m_signal_map.find(signal);
            if (it != m_signal_map.end()) {
                desc = it->second;
                if (remove) {
                    it->second = SIGNAL_HAS_COMPLETED;
                }
            }
        }
        m_signal_lock.unlock();
        return desc;
    }

    // Marks the offload bound to signal as completed (no-op if not found).
    void complete_signaled_ofld(const void *signal) {
        m_signal_lock.lock();
        {
            SignalMap::iterator it = m_signal_map.find(signal);
            if (it != m_signal_map.end()) {
                it->second = SIGNAL_HAS_COMPLETED;
            }
        }
        m_signal_lock.unlock();
    }

    void stream_destroy(_Offload_stream handle);
    void move_cpu_el_after(CpuEl* cpu_what, CpuEl* cpu_after);
    void print_stream_cpu_list(const char *);

    COIPIPELINE get_pipeline(_Offload_stream stream);

    // NOTE(review): returns the stream map BY VALUE (a copy) — confirm
    // callers do not expect to mutate the engine's map through it.
    StreamMap get_stream_map() {
        return m_stream_map;
    }

    // stop device process
    void fini_process(bool verbose);

    // list of stacks active at the engine
    PersistDataList m_persist_list;

private:
    // NOTE(review): m_num_cores, m_num_threads, m_cpus contents, m_cpu_head
    // list and m_funcs are not initialized here — presumably filled in by
    // init()/init_process(); verify before relying on them earlier.
    Engine() : m_index(-1), m_physical_index(-1), m_process(0), m_ready(false),
               m_proc_number(0), m_assigned_cpus(0), m_cpus(0), m_cpu_head(0)
    {}

    ~Engine() {
        m_ready = false;
        // release all streams created for this engine
        for (StreamMap::iterator it = m_stream_map.begin();
             it != m_stream_map.end(); it++) {
            Stream * stream = it->second;
            delete stream;
        }
        // shut down the device process quietly
        if (m_process != 0) {
            fini_process(false);
        }
        // cpu mask ownership was taken in set_cpu_mask
        if (m_assigned_cpus) {
            delete m_assigned_cpus;
        }
    }

    // set indexes
    void set_indexes(int logical_index, int physical_index) {
        m_index = logical_index;
        m_physical_index = physical_index;
    }

    // set CPU mask; takes ownership (deleted in the destructor)
    void set_cpu_mask(micLcpuMask *cpu_mask)
    {
        m_assigned_cpus = cpu_mask;
    }

    // start process on device
    void init_process();

    void load_libraries(void);
    void init_ptr_data(void);

    // performs library initialization on the device side
    pid_t init_device(void);

private:
    // get pipeline associated with a calling thread
    COIPIPELINE get_pipeline(void);

    // get automatic vars set associated with the calling thread
    AutoSet& get_auto_vars(void);

    // destructor for thread data
    static void destroy_thread_data(void *data);

private:
    typedef std::set<PtrData> PtrSet;
    typedef std::map<const void*, OffloadDescriptor*> SignalMap;

    // device indexes
    int m_index;
    int m_physical_index;

    // cpu mask
    micLcpuMask *m_assigned_cpus;

    // number of COI pipes created for the engine
    long m_proc_number;

    // process handle
    COIPROCESS m_process;

    // If false, device either has not been initialized or new libraries
    // have been added.
    bool m_ready;
    mutex_t m_lock;

    // List of libraries to be loaded
    TargetImageList m_images;

    // var tables
    PtrDataTable m_ptr_set;
    PtrDataTable m_targetptr_set;

    // signals
    SignalMap m_signal_map;
    mutex_t m_signal_lock;

    // streams
    StreamMap m_stream_map;
    mutex_t m_stream_lock;

    int m_num_cores;
    int m_num_threads;
    CpuEl* m_cpus;
    CpuEl* m_cpu_head;

    // List of dynamic libraries to be registered
    DynLibList m_dyn_libs;

    // constants for accessing device function handles
    enum {
        c_func_compute = 0,
#ifdef MYO_SUPPORT
        c_func_myo_init,
        c_func_myo_fini,
#endif // MYO_SUPPORT
        c_func_init,
        c_func_var_table_size,
        c_func_var_table_copy,
        c_func_set_stream_affinity,
        c_funcs_total
    };
    static const char* m_func_names[c_funcs_total];

    // device function handles
    COIFUNCTION m_funcs[c_funcs_total];

    // int -> name mapping for device signals
    static const int c_signal_max = 32;
    static const char* c_signal_names[c_signal_max];
};
  588. #endif // OFFLOAD_ENGINE_H_INCLUDED