Merge pull request #3307 from psychocoderHPC/topic-updateCupla
update to cupla 0.3.0-dev
sbastrakov authored Aug 10, 2020
2 parents 360f616 + 7dcee29 commit 9ee70b0
Showing 500 changed files with 11,127 additions and 6,333 deletions.
6 changes: 6 additions & 0 deletions thirdParty/cupla/.gitignore
@@ -8,6 +8,12 @@
/*.cbp
/*.layout

# Visual Studio Code configuration files
.vscode

# JetBrains project files
.idea/

# python byte code
*.pyc

176 changes: 67 additions & 109 deletions thirdParty/cupla/.gitlab-ci.yml
@@ -1,120 +1,78 @@
.base_job:
script:
# the default build type is Release
# if necessary, you can rerun the pipeline with another build type: https://docs.gitlab.com/ee/ci/pipelines.html#manually-executing-pipelines
# to change the build type, you must set the environment variable CUPLA_BUILD_TYPE
- if [[ ! -v CUPLA_BUILD_TYPE ]] ; then
CUPLA_BUILD_TYPE=Release ;
fi
- echo "number of processor threads $(nproc)"
- $CXX --version
- cmake --version
# print boost version
- echo -e "#include <boost/version.hpp>\n#include <iostream>\nint main() { std::cout << BOOST_VERSION << std::endl; return 0; }" | $CXX -x c++ - -o boost_version >/dev/null || { echo 0; }
- echo "Boost version $(./boost_version)"
- export cupla_DIR=$CI_PROJECT_DIR
# use one build directory for all build configurations
- mkdir build
- cd build
- echo "Build type-> $CUPLA_BUILD_TYPE"
# ALPAKA_ACCS contains the backends, which are used for each build
# the backends are set in the specialized base jobs .base_gcc, .base_clang and .base_cuda
- for CMAKE_FLAGS in $ALPAKA_ACCS ; do
echo "###################################################"
&& echo "# Example Matrix Multiplication (adapted original)"
&& echo "###################################################"
&& echo "can not run with CPU_B_SEQ_T_SEQ due to missing elements layer in original SDK example"
&& echo "CPU_B_SEQ_T_OMP2/THREADS too many threads necessary (256)"
&& if [[ $CMAKE_FLAGS =~ -*DALPAKA_ACC_GPU_CUDA_ENABLE=ON.* ]]; then
cmake $cupla_DIR/example/CUDASamples/matrixMul/ $CMAKE_FLAGS -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
&& make -j
&& time ./matrixMul -wA=64 -wB=64 -hA=64 -hB=64
&& rm -r * ;
fi
&& echo "###################################################"
&& echo "# Example Async API (adapted original)"
&& echo "###################################################"
&& echo "can not run with CPU_B_SEQ_T_SEQ due to missing elements layer in original SDK example"
&& echo "CPU_B_SEQ_T_OMP2/THREADS too many threads necessary (512)"
&& if [[ $CMAKE_FLAGS =~ -*DALPAKA_ACC_GPU_CUDA_ENABLE=ON.* ]]; then
cmake $cupla_DIR/example/CUDASamples/asyncAPI/ $CMAKE_FLAGS -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
&& make -j
&& time ./asyncAPI
&& rm -r * ;
fi
&& echo "###################################################"
&& echo "# Example Async API (added elements layer)"
&& echo "###################################################"
&& cmake $cupla_DIR/example/CUDASamples/asyncAPI_tuned/ $CMAKE_FLAGS -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
&& make -j
&& time ./asyncAPI_tuned
&& rm -r *
&& echo "###################################################"
&& echo "Example vectorAdd (added elements layer)"
&& echo "###################################################"
&& cmake $cupla_DIR/example/CUDASamples/vectorAdd/ $CMAKE_FLAGS -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
&& make -j
&& time ./vectorAdd 100000
&& rm -r *
&& echo "###################################################"
&& echo "Example cuplaVectorAdd (added elements layer)"
&& echo "###################################################"
&& cmake $cupla_DIR/example/CUDASamples/cuplaVectorAdd/ $CMAKE_FLAGS -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
&& make -j
&& time ./cuplaVectorAdd 100000
&& rm -r * ;
done
################################################################################
# CUPLA_CXX : {g++, clang++}
# [g++] : {5, 6, 7, 8, 9} <list>
# [clang++] : {4.0, 5.0, 6.0, 7, 8, 9, 10} <list>
# CUPLA_BOOST_VERSIONS : {1.65.1, 1.66.0, 1.67.0, 1.68.0, 1.69.0, 1.70.0, 1.71.0, 1.72.0, 1.73.0} <list>
# CUPLA_BUILD_TYPE : {Debug, Release}
# CUPLA_CMAKE_ARGS : <string>
include:
- local: '/script/compiler_base.yml'

.base_gcc:
cuda92:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda9.2
variables:
GIT_SUBMODULE_STRATEGY: normal
CXX: g++
CC: gcc
ALPAKA_ACCS: "-DALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON
-DALPAKA_ACC_CPU_B_SEQ_T_OMP2_ENABLE=ON
-DALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE=ON"
# -DALPAKA_ACC_CPU_B_SEQ_T_THREADS_ENABLE=ON
extends: .base_job
# x86_64 tag is used to get a multi-core CPU for the tests
tags:
- x86_64
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda

cuda100:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda10.0
variables:
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda

cuda101:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda10.1
variables:
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda

cuda102:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda10.2
variables:
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda

.base_clang:
gcc1:
variables:
GIT_SUBMODULE_STRATEGY: normal
CXX: clang++
CC: clang
ALPAKA_ACCS: "-DALPAKA_ACC_CPU_B_SEQ_T_SEQ_ENABLE=ON
-DALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE=ON"
# -DALPAKA_ACC_CPU_B_SEQ_T_OMP2_ENABLE=ON
# -DALPAKA_ACC_CPU_B_SEQ_T_THREADS_ENABLE=ON
extends: .base_job
# x86_64 tag is used to get a multi-core CPU for the tests
tags:
- x86_64
CUPLA_CXX: "g++-5 g++-6 g++-7 g++-8 g++-9"
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0"
extends: .base_gcc

.base_cuda:
gcc2:
variables:
GIT_SUBMODULE_STRATEGY: normal
CXX: g++
CC: gcc
ALPAKA_ACCS: "-DALPAKA_ACC_GPU_CUDA_ENABLE=ON"
before_script:
- nvidia-smi
- nvcc --version
extends: .base_job
tags:
- cuda
- intel
CUPLA_CXX: "g++-5 g++-6 g++-7 g++-8 g++-9"
CUPLA_BOOST_VERSIONS: "1.68.0 1.69.0 1.70.0"
extends: .base_gcc

gcc7:
image: registry.gitlab.com/hzdr/cupla-docker/gcc7:latest
gcc3:
variables:
CUPLA_CXX: "g++-5 g++-6 g++-7 g++-8 g++-9"
CUPLA_BOOST_VERSIONS: "1.71.0 1.72.0 1.73.0"
extends: .base_gcc

clang7:
image: registry.gitlab.com/hzdr/cupla-docker/clang7:latest
clang:
variables:
CUPLA_CXX: "clang++-5.0 clang++-6.0 clang++-7 clang++-8 clang++-9 clang++-10"
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_clang

cuda9:
image: registry.gitlab.com/hzdr/cupla-docker/cuda9:latest
extends: .base_cuda
cudaClang92:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda9.2Clang
variables:
CUPLA_CXX: "clang++-8 clang++-9 clang++-10"
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda_clang

cudaClang100:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda10.0Clang
variables:
CUPLA_CXX: "clang++-8 clang++-9 clang++-10"
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda_clang

cudaClang101:
image: registry.gitlab.com/hzdr/crp/alpaka-group-container/alpaka-ci:cuda10.1Clang
variables:
CUPLA_CXX: "clang++-9 clang++-10"
CUPLA_BOOST_VERSIONS: "1.65.1 1.66.0 1.67.0 1.68.0 1.69.0 1.70.0 1.71.0 1.72.0 1.73.0"
extends: .base_cuda_clang
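The `.base_job` script above detects the installed Boost release by compiling a throwaway program that prints `BOOST_VERSION`. A minimal sketch of the same probe, assuming `g++` and the Boost headers are on the default search paths:

```bash
# Compile a one-liner that prints BOOST_VERSION (assumes g++ and Boost headers are installed).
echo -e "#include <boost/version.hpp>\n#include <iostream>\nint main() { std::cout << BOOST_VERSION << std::endl; return 0; }" \
    | g++ -x c++ - -o boost_version
./boost_version   # prints e.g. 107300 for Boost 1.73.0
```

`BOOST_VERSION` encodes major * 100000 + minor * 100 + patch, so 1.73.0 prints 107300.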
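One iteration of the CI loop can also be reproduced locally. This is a hedged sketch, not the project's official build documentation: the checkout path and the single OpenMP2 backend flag are assumptions, and only the `vectorAdd` example from the script above is built.

```bash
# Build and run the vectorAdd example for one backend, mirroring the CI loop body.
export cupla_DIR=$HOME/src/cupla                        # assumed cupla checkout location
CUPLA_BUILD_TYPE=${CUPLA_BUILD_TYPE:-Release}           # same default the CI script applies
CMAKE_FLAGS="-DALPAKA_ACC_CPU_B_OMP2_T_SEQ_ENABLE=ON"   # one backend picked from ALPAKA_ACCS

mkdir -p build && cd build
cmake "$cupla_DIR/example/CUDASamples/vectorAdd/" $CMAKE_FLAGS \
      -DCMAKE_BUILD_TYPE=$CUPLA_BUILD_TYPE
make -j
time ./vectorAdd 100000
```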
2 changes: 1 addition & 1 deletion thirdParty/cupla/INSTALL.md
@@ -14,7 +14,7 @@ Requirements
- `export CMAKE_PREFIX_PATH=$CUPLA_ROOT:$CMAKE_PREFIX_PATH`
- example:
- `mkdir -p $HOME/src`
- `git clone git://github.com/alpaka-group/cupla.git $HOME/src/cupla`
- `git clone https://github.com/alpaka-group/cupla.git $HOME/src/cupla`
- `cd $HOME/src/cupla`
- `export CUPLA_ROOT=$HOME/src/cupla`
- use a different alpaka installation:
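The INSTALL.md hunk above switches the clone URL from the unauthenticated `git://` protocol to `https://`. Collected into one shell session, the listed setup steps look roughly like this (paths follow the example in the file; treat it as a sketch, not the full installation guide):

```bash
# Clone cupla and export the variables INSTALL.md asks for.
mkdir -p $HOME/src
git clone https://github.com/alpaka-group/cupla.git $HOME/src/cupla
cd $HOME/src/cupla
export CUPLA_ROOT=$HOME/src/cupla
export CMAKE_PREFIX_PATH=$CUPLA_ROOT:$CMAKE_PREFIX_PATH
```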
