-
Notifications
You must be signed in to change notification settings - Fork 6
/
CMakeLists.txt
executable file
·165 lines (142 loc) · 3.86 KB
/
CMakeLists.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
# Catkin/ROS-era minimum; note that CMAKE_CXX_STANDARD (used below) is only
# honored by CMake >= 3.1, which is why the -std flag is also set explicitly.
cmake_minimum_required(VERSION 2.8.3)
project(tensorrt_yolo4)
# When ON, the CUDA / TensorRT / CUDNN detection sections below print the
# libraries and headers they found.
option(CUDA_VERBOSE "Verbose output of CUDA modules" OFF)
# ---------------------------------------------------------------------------
# CUDA detection: sets CUDA_AVAIL ON/OFF and locates cuBLAS next to the
# toolkit so the inference library can link against it.
# ---------------------------------------------------------------------------
option(CUDA_AVAIL "CUDA available" OFF)
find_package(CUDA)
if(CUDA_FOUND)
  # cuBLAS ships with the CUDA toolkit; search both the lib and lib64
  # layouts (64-bit Linux installs place libraries under lib64).
  find_library(CUBLAS_LIBRARIES cublas HINTS
    ${CUDA_TOOLKIT_ROOT_DIR}/lib
    ${CUDA_TOOLKIT_ROOT_DIR}/lib64
  )
  if(CUDA_VERBOSE)
    message(STATUS "CUDA is available!")
    message(STATUS "CUDA Libs: ${CUDA_LIBRARIES}")
    message(STATUS "CUDA Headers: ${CUDA_INCLUDE_DIRS}")
  endif()
  set(CUDA_AVAIL ON)
else()
  message(STATUS "CUDA NOT FOUND")
  set(CUDA_AVAIL OFF)
endif()
# ---------------------------------------------------------------------------
# TensorRT detection: sets TRT_AVAIL ON/OFF.  All four runtime libraries are
# required to build the node.
# ---------------------------------------------------------------------------
option(TRT_AVAIL "TensorRT available" OFF)
# try to find the TensorRT modules
find_library(NVINFER NAMES nvinfer)
find_library(NVPARSERS NAMES nvparsers)
find_library(NVCAFFE_PARSER NAMES nvcaffe_parser)
find_library(NVINFER_PLUGIN NAMES nvinfer_plugin)
if(NVINFER AND NVPARSERS AND NVCAFFE_PARSER AND NVINFER_PLUGIN)
  if(CUDA_VERBOSE)
    message(STATUS "TensorRT is available!")
    message(STATUS "NVINFER: ${NVINFER}")
    message(STATUS "NVPARSERS: ${NVPARSERS}")
    message(STATUS "NVCAFFE_PARSER: ${NVCAFFE_PARSER}")
    # NVINFER_PLUGIN is part of the availability check above, so report it too.
    message(STATUS "NVINFER_PLUGIN: ${NVINFER_PLUGIN}")
  endif()
  set(TRT_AVAIL ON)
else()
  message(STATUS "TensorRT is NOT Available")
  set(TRT_AVAIL OFF)
endif()
# ---------------------------------------------------------------------------
# CUDNN detection: sets CUDNN_AVAIL ON/OFF.
# NOTE(review): the previous name list referenced __cudnn_ver_suffix,
# __libpath_cudart and __cudnn_lib_win_name, none of which are defined
# anywhere in this file, so they always expanded empty and the effective
# names were just libcudnn.so / libcudnn.dylib.  Spell the names out
# explicitly; the plain "cudnn" entry lets find_library apply the
# platform's own prefix/suffix (covering Windows as well).
# ---------------------------------------------------------------------------
option(CUDNN_AVAIL "CUDNN available" OFF)
find_library(CUDNN_LIBRARY
  NAMES cudnn libcudnn.so libcudnn.dylib
  PATHS $ENV{LD_LIBRARY_PATH} ${CUDNN_ROOT_DIR} ${PC_CUDNN_LIBRARY_DIRS} ${CMAKE_INSTALL_PREFIX}
  PATH_SUFFIXES lib lib64 bin
  DOC "CUDNN library." )
if(CUDNN_LIBRARY)
  if(CUDA_VERBOSE)
    message(STATUS "CUDNN is available!")
    message(STATUS "CUDNN_LIBRARY: ${CUDNN_LIBRARY}")
  endif()
  set(CUDNN_AVAIL ON)
else()
  message(STATUS "CUDNN is NOT Available")
  set(CUDNN_AVAIL OFF)
endif()
# Ensure the data directory (target for the downloaded caffemodel/prototxt)
# exists.  file(MAKE_DIRECTORY) is portable and idempotent, unlike spawning
# an external `mkdir -p` at configure time (which fails on Windows), so no
# EXISTS guard is needed.
set(PATH "${CMAKE_CURRENT_SOURCE_DIR}/data")
file(MAKE_DIRECTORY "${PATH}")
# Build the node only when the full GPU stack (TensorRT + CUDA + CUDNN) was
# detected above; otherwise fall through to a stub catkin package so the
# workspace still configures.
if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
find_package(catkin REQUIRED COMPONENTS
roscpp
roslib
autoware_perception_msgs
cv_bridge
)
# CMAKE_CXX_STANDARD is a no-op before CMake 3.1 (minimum here is 2.8.3),
# hence the explicit -std=c++11 flag as well.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -O3")
catkin_package(
CATKIN_DEPENDS
roscpp
roslib
autoware_perception_msgs
cv_bridge
)
# Directory-scoped includes: catkin convention; applies to all targets below.
include_directories(
include
lib/include
${catkin_INCLUDE_DIRS}
)
set(SOURCE_FILES
src/tensorrt_yolo4_main.cpp
src/tensorrt_yolo4_ros.cpp
)
add_executable(tensorrt_yolo4
${SOURCE_FILES}
)
# Make sure generated message headers exist before compiling the node.
add_dependencies(tensorrt_yolo4
${catkin_EXPORTED_TARGETS}
)
# CUDA kernels (custom TensorRT plugin layers) compiled via nvcc.
# cuda_add_library uses legacy plain-form linking, so keyword-less
# target_link_libraries is kept throughout for consistency.
cuda_add_library(gpu_tensorrt_yolo4_lib
lib/src/UpsampleLayer.cu
lib/src/YoloLayer.cu
lib/src/MishLayer.cu
)
target_link_libraries(gpu_tensorrt_yolo4_lib
${CUDA_LIBRARIES}
)
# Host-side TensorRT engine wrapper (network build, calibration, plugins).
add_library(tensorrt_yolo4_lib
lib/src/EntroyCalibrator.cpp
lib/src/TrtNet.cpp
lib/src/UpsampleLayer.cpp
lib/src/YoloLayer.cpp
)
target_link_libraries(tensorrt_yolo4_lib
${NVINFER}
${NVCAFFE_PARSER}
${NVINFER_PLUGIN}
${CUDA_LIBRARIES}
${CUBLAS_LIBRARIES}
${CUDA_curand_LIBRARY}
${CUDNN_LIBRARY}
gpu_tensorrt_yolo4_lib
)
target_link_libraries(tensorrt_yolo4
${catkin_LIBRARIES}
tensorrt_yolo4_lib
)
# Standard catkin install layout: libraries, the node executable, headers,
# and the runtime resources (launch files and the data directory).
install(TARGETS
gpu_tensorrt_yolo4_lib
tensorrt_yolo4_lib
tensorrt_yolo4
ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
install(DIRECTORY include/
DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}/${PROJECT_NAME}/
)
install(
DIRECTORY
launch
data
DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
)
else()
# GPU stack not found: register an empty catkin package so dependent
# packages in the workspace still resolve.
find_package(catkin REQUIRED)
catkin_package()
message("tensorrt_yolo4 won't be built, CUDA and/or TensorRT were not found.")
endif()