openmp/libomptarget/src/device.cpp @ changeset 173:0572611fdcc8 (llvm10, llvm12)

summary:  reorganization done
author:   Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:     Mon, 25 May 2020 11:55:54 +0900
parents:  1d019706d866
children: 2e18cbf3894f
//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <string>

/// Map between Device ID (i.e. openmp device id) and its DeviceTy.
DevicesTy Devices;

int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  DataMapMtx.lock();

  // Check if entry exists
  for (auto &HT : HostDataToTargetMap) {
    if ((uintptr_t)HstPtrBegin == HT.HstPtrBegin) {
      // Mapping already exists
      bool isValid = HT.HstPtrBegin == (uintptr_t)HstPtrBegin &&
                     HT.HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                     HT.TgtPtrBegin == (uintptr_t)TgtPtrBegin;
      DataMapMtx.unlock();
      if (isValid) {
        DP("Attempt to re-associate the same device ptr+offset with the same "
           "host ptr, nothing to do\n");
        return OFFLOAD_SUCCESS;
      } else {
        DP("Not allowed to re-associate a different device ptr+offset with the "
           "same host ptr\n");
        return OFFLOAD_FAIL;
      }
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  HostDataToTargetTy newEntry((uintptr_t)HstPtrBegin /*HstPtrBase*/,
                              (uintptr_t)HstPtrBegin /*HstPtrBegin*/,
                              (uintptr_t)HstPtrBegin + Size /*HstPtrEnd*/,
                              (uintptr_t)TgtPtrBegin /*TgtPtrBegin*/,
                              true /*IsRefCountINF*/);

  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
     ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
     DPxPTR(newEntry.HstPtrBase), DPxPTR(newEntry.HstPtrBegin),
     DPxPTR(newEntry.HstPtrEnd), DPxPTR(newEntry.TgtPtrBegin));
  HostDataToTargetMap.push_front(newEntry);

  DataMapMtx.unlock();

  return OFFLOAD_SUCCESS;
}

int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  DataMapMtx.lock();

  // Check if entry exists
  for (HostDataToTargetListTy::iterator ii = HostDataToTargetMap.begin();
       ii != HostDataToTargetMap.end(); ++ii) {
    if ((uintptr_t)HstPtrBegin == ii->HstPtrBegin) {
      // Mapping exists
      if (ii->isRefCountInf()) {
        DP("Association found, removing it\n");
        HostDataToTargetMap.erase(ii);
        DataMapMtx.unlock();
        return OFFLOAD_SUCCESS;
      } else {
        DP("Trying to disassociate a pointer which was not mapped via "
           "omp_target_associate_ptr\n");
        break;
      }
    }
  }

  // Mapping not found
  DataMapMtx.unlock();
  DP("Association not found\n");
  return OFFLOAD_FAIL;
}

// Get ref count of map entry containing HstPtrBegin
uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  uint64_t RefCnt = 0;

  DataMapMtx.lock();
  for (auto &HT : HostDataToTargetMap) {
    if (hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd) {
      DP("DeviceTy::getMapEntry: requested entry found\n");
      RefCnt = HT.getRefCount();
      break;
    }
  }
  DataMapMtx.unlock();

  if (RefCnt == 0) {
    DP("DeviceTy::getMapEntry: requested entry not found\n");
  }

  return RefCnt;
}

LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%ld)...\n", DPxPTR(hp),
     Size);
  for (lr.Entry = HostDataToTargetMap.begin();
       lr.Entry != HostDataToTargetMap.end(); ++lr.Entry) {
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp + Size) <= HT.HstPtrEnd;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;

    if (lr.Flags.IsContained || lr.Flags.ExtendsBefore ||
        lr.Flags.ExtendsAfter) {
      break;
    }
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}
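// --- Illustrative sketch (not part of device.cpp) ---------------------------
// The three LookupResult flags computed in lookupMapping() above are a plain
// interval test between the requested host range [hp, hp+Size) and a mapped
// range [HstPtrBegin, HstPtrEnd). The standalone sketch below reproduces that
// logic with ordinary integers; the struct and function names are invented
// for the example and do not exist in libomptarget.
#include <cstdint>
#include <cstdio>

struct ExampleLookupFlags {
  bool IsContained;
  bool ExtendsBefore;
  bool ExtendsAfter;
};

static ExampleLookupFlags classifyRegion(uintptr_t hp, int64_t Size,
                                         uintptr_t MapBegin, uintptr_t MapEnd) {
  ExampleLookupFlags f;
  // Entirely inside the mapped entry.
  f.IsContained = hp >= MapBegin && hp < MapEnd && hp + Size <= MapEnd;
  // Starts before the entry but overlaps its beginning.
  f.ExtendsBefore = hp < MapBegin && hp + Size > MapBegin;
  // Overlaps the entry but runs past its end.
  f.ExtendsAfter = hp < MapEnd && hp + Size > MapEnd;
  return f;
}

static void exampleLookupUsage() {
  // Assume an entry that maps the host range [0x1000, 0x2000).
  ExampleLookupFlags a = classifyRegion(0x1800, 0x400, 0x1000, 0x2000);
  ExampleLookupFlags b = classifyRegion(0x1800, 0x1000, 0x1000, 0x2000);
  printf("a contained: %d, b extends after: %d\n", a.IsContained,
         b.ExtendsAfter); // a is fully contained; b spills past the entry.
}
// -----------------------------------------------------------------------------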
// Used by target_data_begin
// Return the target pointer begin (where the data will be moved).
// Allocate memory if this is the first occurrence of this mapping.
// Increment the reference counter.
// If NULL is returned, then either data allocation failed or the user tried
// to do an illegal mapping.
void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
                                 int64_t Size, bool &IsNew, bool &IsHostPtr,
                                 bool IsImplicit, bool UpdateRefCount,
                                 bool HasCloseModifier) {
  void *rc = NULL;
  IsHostPtr = false;
  IsNew = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // cause the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (lr.Flags.IsContained ||
      ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *lr.Entry;
    IsNew = false;

    if (UpdateRefCount)
      HT.incRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
       ", Size=%ld,%s RefCount=%s\n", (IsImplicit ? " (implicit)" : ""),
       DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
       (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    DP("Explicit extension of mapping is not allowed.\n");
  } else if (Size) {
    // If unified shared memory is active, implicitly mapped variables that
    // are not privatized use the host address. Any explicitly mapped
    // variables also use the host address where correctness is not impeded.
    // In all other cases maps are respected.
    // In addition to the mapping rules above, the close map modifier forces
    // the mapping of the variable to the device.
    if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
        !HasCloseModifier) {
      DP("Return HstPtrBegin " DPxMOD " Size=%ld RefCount=%s\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size,
         (UpdateRefCount ? " updated" : ""));
      IsHostPtr = true;
      rc = HstPtrBegin;
    } else {
      // If it is not contained and Size > 0, we should create a new entry
      // for it.
      IsNew = true;
      uintptr_t tp = (uintptr_t)RTL->data_alloc(RTLDeviceID, Size, HstPtrBegin);
      DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
         ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(HstPtrBase),
         DPxPTR(HstPtrBegin), DPxPTR((uintptr_t)HstPtrBegin + Size),
         DPxPTR(tp));
      HostDataToTargetMap.push_front(
          HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                             (uintptr_t)HstPtrBegin + Size, tp));
      rc = (void *)tp;
    }
  }

  DataMapMtx.unlock();
  return rc;
}
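// --- Illustrative sketch (not part of device.cpp) ---------------------------
// getOrAllocTgtPtr() is what ultimately services user-level mapping requests:
// the first mapping of a host range allocates device memory, while repeated
// mappings of the same range only increment its reference count. The sketch
// below shows that behaviour through the standard OpenMP API; it assumes a
// compiler built with offloading support and at least one available device.
#include <omp.h>
#include <cstdio>

static void exampleMappingRefCount() {
  int buf[1024] = {0};

  // First enter: allocates device memory for buf, RefCount becomes 1.
  #pragma omp target enter data map(to: buf)
  // Second enter of the same range: no new allocation, RefCount becomes 2.
  #pragma omp target enter data map(to: buf)

  printf("buf present on device %d: %d\n", omp_get_default_device(),
         omp_target_is_present(buf, omp_get_default_device()));

  // Each exit decrements the reference count; the device copy is released
  // only when the count reaches zero, i.e. after the second exit below.
  #pragma omp target exit data map(release: buf)
  #pragma omp target exit data map(release: buf)
}
// -----------------------------------------------------------------------------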
// Used by target_data_begin, target_data_end, target_data_update and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from target_data_end.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                               bool UpdateRefCount, bool &IsHostPtr) {
  void *rc = NULL;
  IsHostPtr = false;
  IsLast = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    IsLast = HT.getRefCount() == 1;

    if (!IsLast && UpdateRefCount)
      HT.decRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
       ", Size=%ld,%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
       (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%ld RefCount=%s\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size,
       (UpdateRefCount ? " updated" : ""));
    IsHostPtr = true;
    rc = HstPtrBegin;
  }

  DataMapMtx.unlock();
  return rc;
}

// Return the target pointer begin (where the data will be moved).
// Lock-free version called when loading global symbols from the fat binary.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
                            bool HasCloseModifier) {
  if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && !HasCloseModifier)
    return OFFLOAD_SUCCESS;

  // Check if the pointer is contained in any sub-nodes.
  int rc;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (ForceDelete)
      HT.resetRefCount();
    if (HT.decRefCount() == 0) {
      DP("Deleting tgt data " DPxMOD " of size %ld\n", DPxPTR(HT.TgtPtrBegin),
         Size);
      RTL->data_delete(RTLDeviceID, (void *)HT.TgtPtrBegin);
      DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%ld\n", (ForceDelete ? " (forced)" : ""),
         DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
      HostDataToTargetMap.erase(lr.Entry);
    }
    rc = OFFLOAD_SUCCESS;
  } else {
    DP("Section to delete (hst addr " DPxMOD ") does not exist in the"
       " allocated memory\n", DPxPTR(HstPtrBegin));
    rc = OFFLOAD_FAIL;
  }

  DataMapMtx.unlock();
  return rc;
}
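// --- Illustrative sketch (not part of device.cpp) ---------------------------
// associatePtr() earlier in this file records user-provided device memory with
// an infinite reference count, so deallocTgtPtr() never frees it; the user
// stays responsible for the device allocation. The sketch below shows the
// matching user-level calls from the standard OpenMP API; it assumes at least
// one offload device and ignores return codes for brevity.
#include <omp.h>
#include <cstdlib>

static void exampleManualAssociation(size_t N) {
  int dev = omp_get_default_device();
  double *host = (double *)malloc(N * sizeof(double));

  // Allocate device memory explicitly and tie it to the host buffer.
  void *devPtr = omp_target_alloc(N * sizeof(double), dev);
  omp_target_associate_ptr(host, devPtr, N * sizeof(double),
                           /*device_offset=*/0, dev);

  // Mapped constructs now find the existing association (RefCount=INF) and
  // reuse devPtr instead of allocating a fresh device copy.
  #pragma omp target data map(tofrom: host[0:N])
  {
    // ... offloaded work on host[0:N] ...
  }

  // Undo the association, then release the memory we allocated ourselves.
  omp_target_disassociate_ptr(host, dev);
  omp_target_free(devPtr, dev);
  free(host);
}
// -----------------------------------------------------------------------------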
/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(RTLs->RequiresFlags);
  int32_t rc = RTL->init_device(RTLDeviceID);
  if (rc == OFFLOAD_SUCCESS) {
    IsInit = true;
  }
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

// Load binary to device.
__tgt_target_table *DeviceTy::load_binary(void *Img) {
  RTL->Mtx.lock();
  __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
  RTL->Mtx.unlock();
  return rc;
}

// Submit data to device
int32_t DeviceTy::data_submit(void *TgtPtrBegin, void *HstPtrBegin,
                              int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize)
    return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
  else
    return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
                                  AsyncInfoPtr);
}

// Retrieve data from device
int32_t DeviceTy::data_retrieve(void *HstPtrBegin, void *TgtPtrBegin,
                                int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize)
    return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
  else
    return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin,
                                    Size, AsyncInfoPtr);
}

// Run region on device
int32_t DeviceTy::run_region(void *TgtEntryPtr, void **TgtVarsPtr,
                             ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                             __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_region || !RTL->synchronize)
    return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
                           TgtVarsSize);
  else
    return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                 TgtOffsets, TgtVarsSize, AsyncInfoPtr);
}

// Run team region on device.
int32_t DeviceTy::run_team_region(void *TgtEntryPtr, void **TgtVarsPtr,
                                  ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                                  int32_t NumTeams, int32_t ThreadLimit,
                                  uint64_t LoopTripCount,
                                  __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize)
    return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
                                LoopTripCount);
  else
    return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                      TgtOffsets, TgtVarsSize, NumTeams,
                                      ThreadLimit, LoopTripCount, AsyncInfoPtr);
}
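// --- Illustrative sketch (not part of device.cpp) ---------------------------
// data_submit(), data_retrieve() and the run_*() wrappers above all follow the
// same dispatch rule: take the plugin's asynchronous entry point only when the
// caller passed an __tgt_async_info object and the plugin provides both the
// async call and synchronize(); otherwise fall back to the blocking call. The
// standalone sketch below reproduces that rule with invented types and
// function pointers; none of these names exist in libomptarget.
struct ExampleAsyncInfo {
  void *Queue; // e.g. a device-side stream/queue in a real plugin
};

struct ExamplePlugin {
  int (*submit)(const void *Data, int Size);
  int (*submit_async)(const void *Data, int Size, ExampleAsyncInfo *Info);
  int (*synchronize)(ExampleAsyncInfo *Info);
};

// Prefer the asynchronous path, but use the synchronous one whenever the
// caller or the plugin cannot support async execution.
static int exampleSubmit(ExamplePlugin &P, const void *Data, int Size,
                         ExampleAsyncInfo *Info) {
  if (!Info || !P.submit_async || !P.synchronize)
    return P.submit(Data, Size);
  return P.submit_async(Data, Size, Info);
}
// -----------------------------------------------------------------------------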
%d\n", device_num, Device.RTLDeviceID, Device.IsInit); // Init the device if not done before if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) { DP("Failed to init device %d\n", device_num); return false; } DP("Device %d is ready to use.\n", device_num); return true; }