path: root/Tests/CudaOnly
author     Robert Maynard <robert.maynard@kitware.com>   2018-03-27 18:59:34 (GMT)
committer  Brad King <brad.king@kitware.com>             2018-03-28 13:38:43 (GMT)
commit     41eab150a8ef42bbebff18ff84652e9da1ef4e75 (patch)
tree       80799f497ace9a2151b7d900e4f71fadf8353443 /Tests/CudaOnly
parent     88c7abb7409c235d411c4cc899377385b91075e2 (diff)
CUDA: Pass more link libraries to device linking
Previously we dropped non-target items from the device link line because nvcc rejects paths to shared library files, and only with target items do we know the kind of library. However, this also prevents projects from linking to system-provided libraries like `cublas_device` that contain device code.

Fix this by passing more link items to device linking. Items that are not file paths, such as `-lfoo`, can simply be passed unconditionally. Items that are targets known to be shared libraries can still be skipped. Items that are paths to library files can be passed directly if they end in `.a`. Otherwise, pass them using `-Xnvlink` to bypass nvcc's front-end. The nvlink tool knows to ignore shared library files.

Issue: #16317
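To make the classification concrete, here is a minimal standalone C++ sketch of the per-item decision described above. It is not CMake's actual implementation; the function name and the isKnownSharedTarget flag are hypothetical and exist only to illustrate the rules (pass non-path items as-is, skip shared-library targets, pass `.a` paths directly, wrap everything else in `-Xnvlink`):

#include <iostream>
#include <string>
#include <vector>

// Hypothetical helper (not CMake's real code): decide how a link item
// would be forwarded to nvcc when building the device link line.
std::string classifyForDeviceLink(const std::string& item,
                                  bool isKnownSharedTarget)
{
  // Targets known to be shared libraries can still be skipped.
  if (isKnownSharedTarget) {
    return "";
  }
  // Items that are not file paths, such as "-lfoo", pass unconditionally.
  if (!item.empty() && item[0] == '-') {
    return item;
  }
  // Paths to static archives can be given to nvcc directly.
  if (item.size() >= 2 && item.compare(item.size() - 2, 2, ".a") == 0) {
    return item;
  }
  // Everything else bypasses the nvcc front-end; nvlink itself knows to
  // ignore shared library files.
  return "-Xnvlink " + item;
}

int main()
{
  const std::vector<std::string> items = { "-lcublas_device", "libfoo.a",
                                           "/usr/lib/libbar.so" };
  for (const std::string& item : items) {
    std::cout << item << " -> " << classifyForDeviceLink(item, false)
              << "\n";
  }
  return 0;
}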
Diffstat (limited to 'Tests/CudaOnly')
-rw-r--r--  Tests/CudaOnly/CMakeLists.txt                             1
-rw-r--r--  Tests/CudaOnly/LinkSystemDeviceLibraries/CMakeLists.txt  15
-rw-r--r--  Tests/CudaOnly/LinkSystemDeviceLibraries/main.cu         77
3 files changed, 93 insertions(+), 0 deletions(-)
diff --git a/Tests/CudaOnly/CMakeLists.txt b/Tests/CudaOnly/CMakeLists.txt
index 5ad6e6b..565baca 100644
--- a/Tests/CudaOnly/CMakeLists.txt
+++ b/Tests/CudaOnly/CMakeLists.txt
@@ -2,6 +2,7 @@
ADD_TEST_MACRO(CudaOnly.EnableStandard CudaOnlyEnableStandard)
ADD_TEST_MACRO(CudaOnly.ExportPTX CudaOnlyExportPTX)
ADD_TEST_MACRO(CudaOnly.GPUDebugFlag CudaOnlyGPUDebugFlag)
+ADD_TEST_MACRO(CudaOnly.LinkSystemDeviceLibraries CudaOnlyLinkSystemDeviceLibraries)
ADD_TEST_MACRO(CudaOnly.ResolveDeviceSymbols CudaOnlyResolveDeviceSymbols)
ADD_TEST_MACRO(CudaOnly.SeparateCompilation CudaOnlySeparateCompilation)
ADD_TEST_MACRO(CudaOnly.WithDefs CudaOnlyWithDefs)
diff --git a/Tests/CudaOnly/LinkSystemDeviceLibraries/CMakeLists.txt b/Tests/CudaOnly/LinkSystemDeviceLibraries/CMakeLists.txt
new file mode 100644
index 0000000..62be1e6
--- /dev/null
+++ b/Tests/CudaOnly/LinkSystemDeviceLibraries/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 3.8)
+project(CudaOnlyLinkSystemDeviceLibraries CUDA)
+
+string(APPEND CMAKE_CUDA_FLAGS " -gencode arch=compute_35,code=compute_35 -gencode arch=compute_35,code=sm_35")
+set(CMAKE_CUDA_STANDARD 11)
+
+add_executable(CudaOnlyLinkSystemDeviceLibraries main.cu)
+set_target_properties( CudaOnlyLinkSystemDeviceLibraries
+ PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
+target_link_libraries( CudaOnlyLinkSystemDeviceLibraries PRIVATE cublas_device)
+
+if(APPLE)
+  # Help the static cuda runtime find the driver (libcuda.dylib) at runtime.
+ set_property(TARGET CudaOnlyLinkSystemDeviceLibraries PROPERTY BUILD_RPATH ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES})
+endif()
diff --git a/Tests/CudaOnly/LinkSystemDeviceLibraries/main.cu b/Tests/CudaOnly/LinkSystemDeviceLibraries/main.cu
new file mode 100644
index 0000000..7eecec1
--- /dev/null
+++ b/Tests/CudaOnly/LinkSystemDeviceLibraries/main.cu
@@ -0,0 +1,77 @@
+
+#include <cublas_v2.h>
+#include <cuda_runtime.h>
+#include <iostream>
+
+__global__ void deviceCublasSgemm(int n, float alpha, float beta,
+ const float* d_A, const float* d_B,
+ float* d_C)
+{
+ cublasHandle_t cnpHandle;
+ cublasStatus_t status = cublasCreate(&cnpHandle);
+
+ if (status != CUBLAS_STATUS_SUCCESS) {
+ return;
+ }
+
+ // Call function defined in the cublas_device system static library.
+ // This way we can verify that we properly pass system libraries to the
+ // device link line
+ status = cublasSgemm(cnpHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha,
+ d_A, n, d_B, n, &beta, d_C, n);
+
+ cublasDestroy(cnpHandle);
+}
+
+int choose_cuda_device()
+{
+ int nDevices = 0;
+ cudaError_t err = cudaGetDeviceCount(&nDevices);
+ if (err != cudaSuccess) {
+ std::cerr << "Failed to retrieve the number of CUDA enabled devices"
+ << std::endl;
+ return 1;
+ }
+ for (int i = 0; i < nDevices; ++i) {
+ cudaDeviceProp prop;
+ cudaError_t err = cudaGetDeviceProperties(&prop, i);
+ if (err != cudaSuccess) {
+ std::cerr << "Could not retrieve properties from CUDA device " << i
+ << std::endl;
+ return 1;
+ }
+
+ if (prop.major > 3 || (prop.major == 3 && prop.minor >= 5)) {
+ err = cudaSetDevice(i);
+ if (err != cudaSuccess) {
+ std::cout << "Could not select CUDA device " << i << std::endl;
+ } else {
+ return 0;
+ }
+ }
+ }
+
+ std::cout << "Could not find a CUDA enabled card supporting compute >=3.5"
+ << std::endl;
+ return 1;
+}
+
+int main(int argc, char** argv)
+{
+ int ret = choose_cuda_device();
+ if (ret) {
+ return 0;
+ }
+
+ // initial values that will make sure that the cublasSgemm won't actually
+ // do any work
+ int n = 0;
+ float alpha = 1;
+ float beta = 1;
+ float* d_A = nullptr;
+ float* d_B = nullptr;
+ float* d_C = nullptr;
+ deviceCublasSgemm<<<1, 1>>>(n, alpha, beta, d_A, d_B, d_C);
+
+ return 0;
+}