-rw-r--r--  .gitlab-ci.yml                                  | 10
-rw-r--r--  .gitlab/ci/configure_nvhpc_ninja.cmake          |  5
-rw-r--r--  .gitlab/ci/ctest_exclusions.cmake               | 10
-rw-r--r--  .gitlab/ci/docker/nvhpc22.9/Dockerfile          |  6
-rwxr-xr-x  .gitlab/ci/docker/nvhpc22.9/install_deps.sh     | 11
-rw-r--r--  .gitlab/ci/env_nvhpc_ninja.sh                   |  5
-rw-r--r--  .gitlab/os-linux.yml                            | 19
-rw-r--r--  Tests/CudaOnly/SeparateCompilationPTX/main.cu   |  9
8 files changed, 71 insertions(+), 4 deletions(-)
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 24e19c7..fa477a7 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -237,6 +237,16 @@ t:fedora37-makefiles-nospace:
CMAKE_CI_BUILD_NAME: fedora37_makefiles_nospace
    CMAKE_CI_JOB_NIGHTLY: "true"

+t:nvhpc22.9-ninja:
+ extends:
+ - .nvhpc_ninja
+ - .cmake_test_linux_release
+ - .linux_x86_64_v3_builder_tags_cuda
+ - .run_dependent
+ - .needs_centos6_x86_64
+ variables:
+ CMAKE_CI_JOB_NIGHTLY: "true"
+
t:cuda9.2-nvidia:
extends:
- .cuda9.2_nvidia
diff --git a/.gitlab/ci/configure_nvhpc_ninja.cmake b/.gitlab/ci/configure_nvhpc_ninja.cmake
new file mode 100644
index 0000000..ca8ba93
--- /dev/null
+++ b/.gitlab/ci/configure_nvhpc_ninja.cmake
@@ -0,0 +1,5 @@
+set(CMake_TEST_CUDA "NVIDIA" CACHE STRING "")
+
+set(configure_no_sccache 1)
+
+include("${CMAKE_CURRENT_LIST_DIR}/configure_external_test.cmake")
diff --git a/.gitlab/ci/ctest_exclusions.cmake b/.gitlab/ci/ctest_exclusions.cmake
index 89a5ace..b29e785 100644
--- a/.gitlab/ci/ctest_exclusions.cmake
+++ b/.gitlab/ci/ctest_exclusions.cmake
@@ -27,6 +27,16 @@ if ("$ENV{CMAKE_CONFIGURATION}" MATCHES "_jom")
)
endif()

+if ("$ENV{CMAKE_CONFIGURATION}" MATCHES "nvhpc_")
+ list(APPEND test_exclusions
+ # FIXME(#24187): This test fails with NVHPC as the CUDA host compiler.
+ "^CudaOnly.SeparateCompilationPTX$"
+
+ # FIXME(#24188): FindCUDAToolkit breaks on some symlink layouts.
+ "^Cuda.Toolkit$"
+ )
+endif()
+
string(REPLACE ";" "|" test_exclusions "${test_exclusions}")
if (test_exclusions)
set(test_exclusions "(${test_exclusions})")
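Note: the exclusions above are collapsed into a single alternation regex. A minimal sketch of passing such a pattern straight to CTest by hand (the CI's actual driver may feed it through ctest_test() instead; assuming no other exclusions apply to this configuration):

    # hypothetical manual run that skips the tests excluded for nvhpc_ configurations
    ctest --exclude-regex '(^CudaOnly.SeparateCompilationPTX$|^Cuda.Toolkit$)' --output-on-failure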
diff --git a/.gitlab/ci/docker/nvhpc22.9/Dockerfile b/.gitlab/ci/docker/nvhpc22.9/Dockerfile
new file mode 100644
index 0000000..90e7d12
--- /dev/null
+++ b/.gitlab/ci/docker/nvhpc22.9/Dockerfile
@@ -0,0 +1,6 @@
+# https://catalog.ngc.nvidia.com/orgs/nvidia/containers/nvhpc/tags
+FROM nvcr.io/nvidia/nvhpc:22.9-devel-cuda_multi-ubuntu22.04
+MAINTAINER Brad King <brad.king@kitware.com>
+
+COPY install_deps.sh /root/install_deps.sh
+RUN sh /root/install_deps.sh
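Note: a plausible local build of this image, using the tag that os-linux.yml references below (the CI's actual image publishing workflow is not part of this diff):

    docker build -t kitware/cmake:ci-nvhpc22.9-x86_64-2022-11-22 .gitlab/ci/docker/nvhpc22.9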
diff --git a/.gitlab/ci/docker/nvhpc22.9/install_deps.sh b/.gitlab/ci/docker/nvhpc22.9/install_deps.sh
new file mode 100755
index 0000000..51ee410
--- /dev/null
+++ b/.gitlab/ci/docker/nvhpc22.9/install_deps.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+apt-get update
+
+# Install development tools.
+apt-get install -y \
+ curl
+
+apt-get clean
diff --git a/.gitlab/ci/env_nvhpc_ninja.sh b/.gitlab/ci/env_nvhpc_ninja.sh
new file mode 100644
index 0000000..687403d
--- /dev/null
+++ b/.gitlab/ci/env_nvhpc_ninja.sh
@@ -0,0 +1,5 @@
+export CC=nvc
+export CXX=nvc++
+export FC=nvfortran
+export CUDACXX=nvcc
+export CUDAHOSTCXX=nvc++
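Note: CMake reads CC, CXX, FC, CUDACXX, and CUDAHOSTCXX from the environment on the first configure to initialize the corresponding compiler settings. A minimal sketch of reproducing the job's toolchain selection by hand (the build directory name is an assumption):

    . .gitlab/ci/env_nvhpc_ninja.sh      # export the NVHPC compilers
    cmake -G Ninja -S . -B build-nvhpc   # environment is consulted only on the first configure
    cmake --build build-nvhpc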
diff --git a/.gitlab/os-linux.yml b/.gitlab/os-linux.yml
index 8fc6ae1..f2946c1 100644
--- a/.gitlab/os-linux.yml
+++ b/.gitlab/os-linux.yml
@@ -255,6 +255,18 @@
CMAKE_CONFIGURATION: inteloneapi_makefiles
    CMAKE_GENERATOR: "Unix Makefiles"

+### NVHPC Compiler
+
+.nvhpc:
+ image: "kitware/cmake:ci-nvhpc22.9-x86_64-2022-11-22"
+ variables:
+ CMAKE_ARCH: x86_64
+
+.nvhpc_ninja:
+ extends: .nvhpc
+ variables:
+ CMAKE_CONFIGURATION: nvhpc_ninja
+
### CUDA builds

.cuda9.2:
@@ -392,6 +404,13 @@
- docker
    - linux

+.linux_x86_64_v3_builder_tags_cuda:
+ tags:
+ - cmake
+ - cuda-rt
+ - docker
+ - linux-x86_64-v3
+
.linux_builder_tags_radeon:
tags:
- cmake
diff --git a/Tests/CudaOnly/SeparateCompilationPTX/main.cu b/Tests/CudaOnly/SeparateCompilationPTX/main.cu
index 164cde5..f94beff 100644
--- a/Tests/CudaOnly/SeparateCompilationPTX/main.cu
+++ b/Tests/CudaOnly/SeparateCompilationPTX/main.cu
@@ -21,10 +21,11 @@ int main()
  cuCtxCreate(&context, 0, device);

  CUmodule module;
- cuModuleLoadData(&module, kernels);
- if (module == nullptr) {
- std::cerr << "Failed to load the embedded ptx" << std::endl;
+ CUresult result = cuModuleLoadData(&module, kernels);
+ std::cout << "module pointer: " << module << '\n';
+ if (result != CUDA_SUCCESS || module == nullptr) {
+ std::cerr << "Failed to load the embedded ptx with error: "
+ << static_cast<unsigned int>(result) << '\n';
return 1;
}
- std::cout << module << std::endl;
}
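Note: with this change the test checks the returned CUresult in addition to the module pointer, so the driver error code is reported on failure. A hedged sketch for running just this test while triaging the FIXME above, assuming a CUDA-capable machine and an existing CMake build tree:

    ctest -R '^CudaOnly.SeparateCompilationPTX$' --output-on-failure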