Skip to content

Commit

Permalink
Unify release (#94)
Browse files Browse the repository at this point in the history
  • Loading branch information
Leonz5288 authored Oct 13, 2021
1 parent 1d4caec commit cc57da4
Show file tree
Hide file tree
Showing 57 changed files with 1,135 additions and 1,188 deletions.
18 changes: 11 additions & 7 deletions .github/workflows/presubmit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ jobs:
uses: actions/setup-python@v2
with:
python-version: 3.8

- name: Setup git & clang-format
run: |
git config user.email "[email protected]"
Expand All @@ -41,19 +41,19 @@ jobs:
git fetch upstream master
sudo apt install clang-format-10
- name: Cache PIP
- name: Cache PIP
uses: actions/cache@v2
with:
path: ~/.cache/pip
key: ${{ hashFiles('setup.py') }}-${{ hashFiles('requirements_lint.txt') }}
key: ${{ hashFiles('setup.py') }}-${{ hashFiles('requirements_dev.txt') }}

- name: Install requirements
- name: Install requirements
run: |
python3 -m pip install --user -r requirements_lint.txt
python3 -m pip install --user -r requirements_dev.txt
- name: Check code format
run: |
python3 python/taichi/code_format.py
python3 misc/code_format.py
git checkout -b _enforced_format
git commit -am "enforce code format" || true
# exit with 1 if there were differences:
Expand Down Expand Up @@ -106,6 +106,7 @@ jobs:
env:
CI_SETUP_CMAKE_ARGS: -DTI_WITH_OPENGL:BOOL=OFF -DTI_WITH_CC:BOOL=ON -DTI_WITH_VULKAN:BOOL=OFF -DTI_BUILD_TESTS:BOOL=ON
CXX: clang++
PROJECT_NAME: taichi

- name: Test
run: .github/workflows/scripts/unix_test.sh
Expand Down Expand Up @@ -153,6 +154,7 @@ jobs:
env:
CI_SETUP_CMAKE_ARGS: -DTI_WITH_OPENGL:BOOL=OFF -DTI_WITH_CC:BOOL=${{ matrix.with_cc }} -DTI_WITH_VULKAN:BOOL=OFF -DTI_BUILD_TESTS:BOOL=${{ matrix.with_cpp_tests }}
CXX: clang++
PROJECT_NAME: taichi
# [DEBUG] Copy this step around to enable debugging inside Github Action instances.
#- name: Setup tmate session
# uses: mxschmitt/action-tmate@v3
Expand Down Expand Up @@ -185,6 +187,7 @@ jobs:
LLVM_PATH: /opt/taichi-llvm-10.0.0/bin
LLVM_DIR: /opt/taichi-llvm-10.0.0/lib/cmake/llvm
CXX: clang++-8
PROJECT_NAME: taichi

- name: Test
run: .github/workflows/scripts/unix_test.sh
Expand Down Expand Up @@ -285,6 +288,7 @@ jobs:
env:
CI_SETUP_CMAKE_ARGS: -DTI_WITH_OPENGL:BOOL=OFF -DTI_WITH_CUDA:BOOL=OFF -DTI_WITH_CC:BOOL=OFF -DTI_WITH_VULKAN:BOOL=OFF -DTI_BUILD_TESTS:BOOL=ON
CXX: clang++
PROJECT_NAME: taichi

- name: Test
run: |
Expand Down
34 changes: 9 additions & 25 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,25 +47,15 @@ jobs:
# automatically. So we don't activate and use desired python version
# directly.
export PATH=/home/buildbot/miniconda3/envs/$PYTHON/bin:$PATH
TAICHI_REPO_DIR=`pwd`
export PATH=$LLVM_LIB_ROOT_DIR/bin:/usr/local/cuda/bin:$PATH
export LLVM_DIR=$LLVM_LIB_ROOT_DIR/lib/cmake/llvm
export CXX=clang++-8
python3 -m pip uninstall taichi taichi-nightly -y
python3 -m pip install -r requirements_dev.txt
python3 -m pip install twine
cd python
git fetch origin master
export TAICHI_CMAKE_ARGS=$CI_SETUP_CMAKE_ARGS
python3 build.py build --project_name $PROJECT_NAME
cd ..
NUM_WHL=`ls dist/*.whl | wc -l`
if [ $NUM_WHL -ne 1 ]; then echo 'ERROR: created more than 1 whl.' && exit 1; fi
python3 -m pip install dist/*.whl
export PATH=/usr/local/cuda/bin:$PATH
.github/workflows/scripts/unix_build.sh
env:
LLVM_LIB_ROOT_DIR: /opt/taichi-llvm-10.0.0
LLVM_PATH: /opt/taichi-llvm-10.0.0/bin
LLVM_DIR: /opt/taichi-llvm-10.0.0/lib/cmake/llvm
BUILD_NUM_THREADS: 8
CI_SETUP_CMAKE_ARGS: -DTI_WITH_VULKAN:BOOL=ON -DTI_WITH_OPENGL:BOOL=ON -DTI_WITH_CC:BOOL=OFF -DTI_BUILD_TESTS:BOOL=${{ matrix.with_cpp_tests }}
CXX: clang++-8
PROJECT_NAME: ${{ matrix.name }}
PYTHON: ${{ matrix.python }}

Expand All @@ -78,18 +68,11 @@ jobs:
- name: Test
run: |
export PATH=/home/buildbot/miniconda3/envs/$PYTHON/bin:$PATH
python3 examples/algorithm/laplace.py
export DISPLAY=:1
hash -r
glewinfo
ti diagnose
ti changelog
ti test -vr2 -t2 -k "not ndarray and not torch"
# ndarray test might OOM if run with -t2.
# FIXME: unify this with presubmit.yml to avoid further divergence
ti test -vr2 -t1 -k "ndarray or torch"
.github/workflows/scripts/unix_test.sh
env:
PYTHON: ${{ matrix.python }}
DISPLAY: :1
GPU_TEST: ON

- name: Upload PyPI
env:
Expand All @@ -100,6 +83,7 @@ jobs:
PYTHON: ${{ matrix.python }}
run: |
export PATH=/home/buildbot/miniconda3/envs/$PYTHON/bin:$PATH
python3 -m pip install twine
cd python
if [ $PROJECT_NAME == "taichi-nightly" ]; then export PYPI_PWD="$NIGHT_PWD" && python3 build.py upload --skip_build --testpypi --project_name $PROJECT_NAME
elif [ $PROJECT_NAME == "taichi" ]; then export PYPI_PWD="$PROD_PWD" && python3 build.py upload --skip_build; fi
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/scripts/unix_build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ python3 -m pip uninstall taichi taichi-nightly -y
python3 -m pip install -r requirements_dev.txt
cd python
git fetch origin master
TAICHI_CMAKE_ARGS=$CI_SETUP_CMAKE_ARGS python3 build.py build
TAICHI_CMAKE_ARGS=$CI_SETUP_CMAKE_ARGS python3 build.py build --project_name $PROJECT_NAME
cd ..
export NUM_WHL=`ls dist/*.whl | wc -l`
if [ $NUM_WHL -ne 1 ]; then echo `ERROR: created more than 1 whl.` && exit 1; fi
Expand Down
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -47,3 +47,6 @@
[submodule "external/SPIRV-Tools"]
path = external/SPIRV-Tools
url = https://github.com/KhronosGroup/SPIRV-Tools
[submodule "external/SPIRV-Cross"]
path = external/SPIRV-Cross
url = https://github.com/KhronosGroup/SPIRV-Cross
11 changes: 10 additions & 1 deletion cmake/TaichiCore.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,12 @@ file(GLOB TAICHI_OPENGL_REQUIRED_SOURCE
"taichi/backends/opengl/codegen_opengl.*"
"taichi/backends/opengl/struct_opengl.*"
)
file(GLOB TAICHI_VULKAN_REQUIRED_SOURCE "taichi/backends/vulkan/runtime.h" "taichi/backends/vulkan/runtime.cpp")
file(GLOB TAICHI_VULKAN_REQUIRED_SOURCE
"taichi/backends/vulkan/runtime.h"
"taichi/backends/vulkan/runtime.cpp"
"taichi/backends/vulkan/snode_struct_compiler.cpp"
"taichi/backends/vulkan/snode_struct_compiler.h"
)

list(REMOVE_ITEM TAICHI_CORE_SOURCE ${TAICHI_BACKEND_SOURCE})

Expand Down Expand Up @@ -256,6 +261,10 @@ else()
message(STATUS "TI_WITH_CUDA_TOOLKIT = OFF")
endif()

add_subdirectory(external/SPIRV-Cross)
target_include_directories(${CORE_LIBRARY_NAME} PRIVATE external/SPIRV-Cross)
target_link_libraries(${CORE_LIBRARY_NAME} spirv-cross-glsl spirv-cross-core)

if (TI_WITH_VULKAN)
# Vulkan libs
# https://cmake.org/cmake/help/latest/module/FindVulkan.html
Expand Down
5 changes: 3 additions & 2 deletions examples/minimal.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,9 @@


@ti.kernel
def p():
def p() -> ti.f32:
print(42)
return 40 + 2


p()
print(p())
1 change: 1 addition & 0 deletions external/SPIRV-Cross
Submodule SPIRV-Cross added at 97a438
2 changes: 1 addition & 1 deletion python/taichi/code_format.py → misc/code_format.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
# TODO(#2223): Make `isort` a required package in a future release
print('Please install `isort` or the formatter may not work')

repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
_yapf_config_path = os.path.join(repo_dir, 'misc', '.style.yapf')


Expand Down
115 changes: 108 additions & 7 deletions python/taichi/lang/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import shutil
import tempfile
import time
from contextlib import contextmanager
from copy import deepcopy as _deepcopy

import taichi.lang.linalg_impl
Expand Down Expand Up @@ -31,6 +32,8 @@
from taichi.linalg import SparseMatrix, SparseMatrixBuilder, SparseSolver
from taichi.misc.util import deprecated
from taichi.profiler import KernelProfiler, get_default_kernel_profiler
from taichi.profiler.kernelmetrics import (CuptiMetric, default_cupti_metrics,
get_predefined_cupti_metrics)
from taichi.snode.fields_builder import FieldsBuilder
from taichi.type.annotations import any_arr, ext_arr, template

Expand Down Expand Up @@ -115,11 +118,12 @@ def print_kernel_profile_info(mode='count'):
"""Print the profiling results of Taichi kernels.
To enable this profiler, set ``kernel_profiler=True`` in ``ti.init()``.
The default print mode is ``COUNT`` mode: print the statistical results (min,max,avg time) of Taichi kernels,
another mode ``TRACE``: print the records of launched Taichi kernels with specific profiling metrics (time, memory load/store and core utilization etc.)
``'count'`` mode: print the statistics (min,max,avg time) of launched kernels,
``'trace'`` mode: print the records of launched kernels with specific profiling metrics (time, memory load/store and core utilization etc.),
and defaults to ``'count'``.
Args:
mode (str): the way to print profiling results
mode (str): the way to print profiling results.
Example::
Expand All @@ -133,15 +137,17 @@ def print_kernel_profile_info(mode='count'):
>>> var[0] = 1.0
>>> compute()
>>> ti.print_kernel_profile_info() #[1]
>>> ti.print_kernel_profile_info()
>>> # equivalent calls :
>>> # ti.print_kernel_profile_info('count')
>>> ti.print_kernel_profile_info('trace')
Note:
[1] Currently the result of `KernelProfiler` could be incorrect on OpenGL
Currently the result of `KernelProfiler` could be incorrect on OpenGL
backend due to its lack of support for `ti.sync()`.
For advanced mode of `KernelProfiler`, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
"""
get_default_kernel_profiler().print_info(mode)

Expand All @@ -155,7 +161,7 @@ def query_kernel_profile_info(name):
name (str): kernel name.
Returns:
struct KernelProfilerQueryResult with member varaibles(counter, min, max, avg)
KernelProfilerQueryResult (class): with member variables(counter, min, max, avg)
Example::
Expand Down Expand Up @@ -204,11 +210,106 @@ def kernel_profiler_total_time():
"""Get elapsed time of all kernels recorded in KernelProfiler.
Returns:
time (double): total time in second
time (float): total time in second.
"""
return get_default_kernel_profiler().get_total_time()


def set_kernel_profile_metrics(metric_list=default_cupti_metrics):
"""Set metrics that will be collected by the CUPTI toolkit.
Args:
metric_list (list): a list of :class:`~taichi.lang.CuptiMetric()` instances, default value: :data:`~taichi.lang.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # If no parameter is given, Taichi will print its pre-defined metrics list
>>> ti.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.get_predefined_cupti_metrics('shared_access')
>>> global_op_atom = ti.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # metrics setting will be retained until the next configuration
>>> ti.set_kernel_profile_metrics(profiling_metrics)
>>> for i in range(16):
>>> reduction()
>>> ti.print_kernel_profile_info('trace')
Note:
Metrics setting will be retained until the next configuration.
"""
get_default_kernel_profiler().set_metrics(metric_list)


@contextmanager
def collect_kernel_profile_metrics(metric_list=default_cupti_metrics):
"""Set temporary metrics that will be collected by the CUPTI toolkit within this context.
Args:
metric_list (list): a list of :class:`~taichi.lang.CuptiMetric()` instances, default value: :data:`~taichi.lang.default_cupti_metrics`.
Example::
>>> import taichi as ti
>>> ti.init(kernel_profiler=True, arch=ti.cuda)
>>> num_elements = 128*1024*1024
>>> x = ti.field(ti.f32, shape=num_elements)
>>> y = ti.field(ti.f32, shape=())
>>> y[None] = 0
>>> @ti.kernel
>>> def reduction():
>>> for i in x:
>>> y[None] += x[i]
>>> # If no parameter is given, Taichi will print its pre-defined metrics list
>>> ti.get_predefined_cupti_metrics()
>>> # get Taichi pre-defined metrics
>>> profiling_metrics = ti.get_predefined_cupti_metrics('device_utilization')
>>> global_op_atom = ti.CuptiMetric(
>>> name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
>>> header=' global.atom ',
>>> format=' {:8.0f} ')
>>> # add user defined metrics
>>> profiling_metrics += [global_op_atom]
>>> # metrics setting is temporary, and will be cleared when exiting this context.
>>> with ti.collect_kernel_profile_metrics(profiling_metrics):
>>> for i in range(16):
>>> reduction()
>>> ti.print_kernel_profile_info('trace')
Note:
The configuration of the ``metric_list`` will be cleared when exiting this context.
"""
get_default_kernel_profiler().set_metrics(metric_list)
yield get_default_kernel_profiler()
get_default_kernel_profiler().set_metrics()


@deprecated('memory_profiler_print()', 'print_memory_profile_info()')
def memory_profiler_print():
return print_memory_profile_info()
Expand Down
Loading

0 comments on commit cc57da4

Please sign in to comment.