Repo Sync (#299)
anakinxc authored Aug 11, 2023
1 parent 702512f commit bb5cb11
Showing 72 changed files with 995 additions and 785 deletions.
6 changes: 6 additions & 0 deletions .bazelrc
@@ -24,6 +24,12 @@ build --enable_platform_specific_config
build --cxxopt=-std=c++17
build --host_cxxopt=-std=c++17

build:avx --copt=-mavx
build:avx --host_copt=-mavx
build:avx --copt=-DCHECK_AVX
build:avx --host_copt=-DCHECK_AVX


# Binary safety flags
build --copt=-fPIC
build --copt=-fstack-protector-strong
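Note: the avx entries above define a named Bazel config group rather than flags that are always on; presumably a build opts in explicitly, e.g. with bazel build --config=avx //..., which then adds -mavx and -DCHECK_AVX to both target and host compilations (a usage sketch based on how .bazelrc config groups behave, not something shown in this commit).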
9 changes: 6 additions & 3 deletions .github/pull_request_template.md
@@ -1,8 +1,11 @@
### What problem does this PR solve?
# Pull Request

## What problem does this PR solve?

Issue Number: Fixed #

### Possible side effects?
## Possible side effects?

- Performance:

- Backward compatibility:
- Backward compatibility:
7 changes: 5 additions & 2 deletions .vscode/settings.json
@@ -35,7 +35,10 @@
},
"editor.formatOnSave": false,
},
"python.formatting.provider": "black",
"python.formatting.provider": "none",
"esbonio.sphinx.confDir": "",
"git.ignoreLimitWarning": true
"git.ignoreLimitWarning": true,
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
}
}
21 changes: 20 additions & 1 deletion bazel/patches/grpc.patch
@@ -1,5 +1,24 @@
diff --git a/bazel/grpc_deps.bzl b/bazel/grpc_deps.bzl
index 5e65a65df4..03bbd2361e 100644
--- a/bazel/grpc_deps.bzl
+++ b/bazel/grpc_deps.bzl
@@ -57,12 +57,12 @@ def grpc_deps():

native.bind(
name = "libssl",
- actual = "@boringssl//:ssl",
+ actual = "@com_github_openssl_openssl//:openssl",
)

native.bind(
name = "libcrypto",
- actual = "@boringssl//:crypto",
+ actual = "@com_github_openssl_openssl//:openssl",
)

native.bind(
diff --git a/bazel/grpc_extra_deps.bzl b/bazel/grpc_extra_deps.bzl
index 4d8afa3..6aa977a 100644
index 4d8afa3131..6aa977a08d 100644
--- a/bazel/grpc_extra_deps.bzl
+++ b/bazel/grpc_extra_deps.bzl
@@ -53,7 +53,7 @@ def grpc_extra_deps(ignore_version_differences = False):
10 changes: 6 additions & 4 deletions bazel/repositories.bzl
@@ -18,7 +18,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

SECRETFLOW_GIT = "https://github.com/secretflow"

YACL_COMMIT_ID = "5d6112505d52cfa7a27cd10e364e1c9893e50c8f"
YACL_COMMIT_ID = "ff20dff1476071ca885c69bee94d2b3bdf85034c"

def spu_deps():
_bazel_platform()
@@ -266,14 +266,16 @@ def _com_github_microsoft_seal():
)

def _com_github_eigenteam_eigen():
EIGEN_COMMIT = "66e8f38891841bf88ee976a316c0c78a52f0cee5"
EIGEN_SHA256 = "01fcd68409c038bbcfd16394274c2bf71e2bb6dda89a2319e23fc59a2da17210"
maybe(
http_archive,
name = "com_github_eigenteam_eigen",
sha256 = "c1b115c153c27c02112a0ecbf1661494295d9dcff6427632113f2e4af9f3174d",
sha256 = EIGEN_SHA256,
build_file = "@spulib//bazel:eigen.BUILD",
strip_prefix = "eigen-3.4",
strip_prefix = "eigen-{commit}".format(commit = EIGEN_COMMIT),
urls = [
"https://gitlab.com/libeigen/eigen/-/archive/3.4/eigen-3.4.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/{commit}/eigen-{commit}.tar.gz".format(commit = EIGEN_COMMIT),
],
)

2 changes: 2 additions & 0 deletions examples/python/ml/BUILD.bazel
@@ -18,12 +18,14 @@ package(default_visibility = ["//visibility:public"])

py_test(
name = "ml_test",
timeout = "long",
srcs = ["ml_test.py"],
data = [
"//examples/python/conf",
],
tags = [
"manual",
"no-sandbox",
],
deps = [
"//examples/python/ml/flax_mlp",
46 changes: 7 additions & 39 deletions examples/python/ml/ml_test.py
@@ -19,8 +19,6 @@
import unittest
from time import perf_counter
import os
import warnings
from io import StringIO

import multiprocess
import numpy.testing as npt
@@ -36,48 +34,15 @@
logger = logging.getLogger(ppd.__name__)
logger.setLevel(level=logging.WARN)


FILENAME = "ml_test_run_data.csv"


def read_history_record():
return pd.read_csv(StringIO(content), index_col=False)


_test_perf_table = read_history_record()


def compute_history_average():
test_pints = _test_perf_table["name"].unique()

avg_dict = {}
for tp in test_pints:
history = _test_perf_table[_test_perf_table["name"] == tp]
avg = history["duration"].mean()
avg_dict[tp] = avg

return avg_dict


_perf_history_avg = compute_history_average()
_test_perf_table = pd.DataFrame({'name': [], 'duration': []})


def add_profile_data(name, duration):
global _test_perf_table
# Check data
valid = True
if name in _perf_history_avg:
history_avg = _perf_history_avg[name]
if duration > 1.05 * history_avg:
warnings.warn(
f"Testpoint {name} is more than 5% slower in this job, history average = {history_avg}, current = {duration}.\n"
)
valid = True

if valid:
# Save result to table
new_row = pd.DataFrame({'name': name, 'duration': duration}, index=[0])
_test_perf_table = pd.concat([_test_perf_table, new_row], ignore_index=True)
# Save result to table
new_row = pd.DataFrame({'name': name, 'duration': duration}, index=[0])
_test_perf_table = pd.concat([_test_perf_table, new_row], ignore_index=True)


def profile_test_point(foo, *args, **kwargs):
@@ -96,6 +61,9 @@ def profile_test_point(foo, *args, **kwargs):

def save_perf_report():
buf = _test_perf_table.to_csv(index=False)
p = os.path.expanduser(os.path.join('~', '.ml_test_perf.csv'))
with open(p, '+w') as f:
f.write(buf)


class UnitTests(unittest.TestCase):
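The net effect of this change is that ml_test.py no longer compares timings against a bundled performance history; it only records (name, duration) rows and writes them to ~/.ml_test_perf.csv. A minimal, self-contained sketch of that simplified flow follows; the helper bodies mirror the diff above, while the demo test point is a hypothetical stand-in that is not part of the commit.

import os
from time import perf_counter

import pandas as pd

_test_perf_table = pd.DataFrame({'name': [], 'duration': []})


def add_profile_data(name, duration):
    # Append one (name, duration) row to the in-memory table.
    global _test_perf_table
    new_row = pd.DataFrame({'name': name, 'duration': duration}, index=[0])
    _test_perf_table = pd.concat([_test_perf_table, new_row], ignore_index=True)


def save_perf_report():
    # Dump the collected timings to ~/.ml_test_perf.csv.
    buf = _test_perf_table.to_csv(index=False)
    p = os.path.expanduser(os.path.join('~', '.ml_test_perf.csv'))
    with open(p, 'w') as f:
        f.write(buf)


if __name__ == '__main__':
    start = perf_counter()
    sum(range(1_000_000))  # hypothetical stand-in for a test point
    add_profile_data('demo_test_point', perf_counter() - start)
    save_perf_report()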
28 changes: 5 additions & 23 deletions libspu/core/ndarray_ref.cc
@@ -333,30 +333,14 @@ NdArrayRef NdArrayRef::slice_scalar_at(const Index& indices) const {
&at(indices) - buf()->data<std::byte>());
}

NdArrayRef NdArrayRef::transpose(const Axes& permutation) const {
std::vector<int64_t> perm(shape().size());
if (permutation.empty()) {
for (size_t i = 0; i < perm.size(); ++i) {
perm[i] = static_cast<int64_t>(shape().size()) - 1 - i;
}
} else {
std::vector<int64_t> reverse_permutation(shape().size(), -1);
SPU_ENFORCE(permutation.size() == shape().size(),
"axes don't match array, permutation = {}, input shape = {}",
permutation, shape());

for (size_t i = 0; i < permutation.size(); i++) {
auto axis = permutation[i];
SPU_ENFORCE(reverse_permutation[axis] == -1,
"repeated axis in transpose");
reverse_permutation[axis] = i;
perm[i] = axis;
}
}
NdArrayRef NdArrayRef::transpose(const Axes& perm) const {
// sanity check.
SPU_ENFORCE_EQ(perm.size(), shape().size());
std::set<int64_t> uniq(perm.begin(), perm.end());
SPU_ENFORCE_EQ(uniq.size(), perm.size(), "perm={} is not unique", perm);

Shape ret_shape(shape().size());
Strides ret_strides(strides().size());

for (size_t i = 0; i < shape().size(); i++) {
ret_shape[i] = shape()[perm[i]];
ret_strides[i] = strides()[perm[i]];
@@ -440,8 +424,6 @@ NdArrayRef NdArrayRef::pad(const NdArrayRef& padding_value,
const auto& result_shape = result.shape();
const auto& input_shape = shape();

// auto elsize = result.elsize();

yacl::parallel_for(0, numel(), 1024, [&](int64_t begin, int64_t end) {
auto unflatten = unflattenIndex(begin, input_shape);

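The rewritten transpose drops the old default of reversing the axes when no permutation is given; it now requires an explicit permutation that names every axis exactly once, then permutes shape and strides without touching the underlying buffer. A rough Python illustration of that metadata-only permutation (an analogy for the semantics, not the NdArrayRef implementation):

def transpose_meta(shape, strides, perm):
    # perm must cover every axis exactly once.
    assert len(perm) == len(shape)
    assert len(set(perm)) == len(perm), f"perm={perm} is not unique"
    ret_shape = [shape[p] for p in perm]
    ret_strides = [strides[p] for p in perm]
    return ret_shape, ret_strides


# A row-major 2x3x4 array transposed with perm = (2, 0, 1):
print(transpose_meta([2, 3, 4], [12, 4, 1], [2, 0, 1]))  # ([4, 2, 3], [1, 12, 4])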
14 changes: 6 additions & 8 deletions libspu/device/pphlo/pphlo_executor.cc
@@ -428,13 +428,12 @@ void execute(OpExecutor *executor, SPUContext *sctx, SymbolScope *sscope,
config.outputFeatureDimension = dnums.getOutputFeatureDimension();
config.outputSpatialDimensions = dnums.getOutputSpatialDimensions();

spu::Value result;
SPU_ENFORCE(
dnums.getInputSpatialDimensions().size() == 2,
"Convolution with more than 2 spatial dimensions is not supported");

if (dnums.getInputSpatialDimensions().size() == 2) {
result = kernel::hlo::Convolution2D(sctx, lhs, rhs, config, ret_shape);
} else {
result = kernel::hlo::Convolution(sctx, lhs, rhs, config, ret_shape);
}
spu::Value result =
kernel::hlo::Convolution2D(sctx, lhs, rhs, config, ret_shape);

addValue(sscope, op.getResult(), std::move(result), opts);
}
@@ -579,8 +578,7 @@ void execute(OpExecutor *executor, SPUContext *sctx, SymbolScope *sscope,
}
}

// auto ret = kernel::hlo::SelectAndScatterNaive(
auto ret = kernel::hlo::SelectAndScatterExpanded(
auto ret = kernel::hlo::SelectAndScatter(
sctx, operand, source, init_val, window_shape, window_strides,
window_padding,
[&](const spu::Value &selected, const spu::Value &current) {
7 changes: 3 additions & 4 deletions libspu/kernel/hal/BUILD.bazel
@@ -50,6 +50,7 @@ spu_cc_library(
hdrs = ["ring.h"],
deps = [
":prot_wrapper",
":shape_ops",
"//libspu/core:context",
],
)
@@ -265,11 +266,9 @@ spu_cc_library(
srcs = ["shape_ops.cc"],
hdrs = ["shape_ops.h"],
deps = [
":ring",
":type_cast",
# Please DONT add extra dependency here.
":prot_wrapper",
"//libspu/core:context",
"//libspu/core:prelude",
"//libspu/core:vectorize",
],
)

11 changes: 10 additions & 1 deletion libspu/kernel/hal/fxp_base.cc
@@ -271,13 +271,22 @@ Value f_mmul(SPUContext* ctx, const Value& x, const Value& y) {

Value f_conv2d(SPUContext* ctx, const Value& x, const Value& y,
const Strides& window_strides) {
SPU_TRACE_HAL_LEAF(ctx, x, y);
SPU_TRACE_HAL_LEAF(ctx, x, y, window_strides);

SPU_ENFORCE(x.isFxp() && y.isFxp() && x.dtype() == y.dtype());

return _trunc(ctx, _conv2d(ctx, x, y, window_strides)).setDtype(x.dtype());
}

Value f_tensordot(SPUContext* ctx, const Value& x, const Value& y,
const Index& ix, const Index& iy) {
SPU_TRACE_HAL_LEAF(ctx, x, y, ix, iy);

SPU_ENFORCE(x.isFxp() && y.isFxp() && x.dtype() == y.dtype());

return _trunc(ctx, _tensordot(ctx, x, y, ix, iy)).setDtype(x.dtype());
}

Value f_div(SPUContext* ctx, const Value& x, const Value& y) {
SPU_TRACE_HAL_LEAF(ctx, x, y);

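The new f_tensordot follows the same pattern as f_mmul and f_conv2d: contract via _tensordot on the fixed-point representation, then apply a single truncation and restore the dtype. Assuming ix and iy list the axes of x and y to contract, in the usual tensordot convention, the contraction semantics can be pictured with NumPy (an illustration only, not the SPU kernel):

import numpy as np

x = np.arange(24).reshape(2, 3, 4)
y = np.arange(12).reshape(4, 3)

# Contract axis 2 of x with axis 0 of y, and axis 1 of x with axis 1 of y,
# i.e. ix = [2, 1], iy = [0, 1]; the uncontracted axes remain in the result.
z = np.tensordot(x, y, axes=([2, 1], [0, 1]))
print(z.shape)  # (2,)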
3 changes: 3 additions & 0 deletions libspu/kernel/hal/fxp_base.h
@@ -60,6 +60,9 @@ Value f_mmul(SPUContext* ctx, const Value& x, const Value& y);
Value f_conv2d(SPUContext* ctx, const Value& x, const Value& y,
const Strides& window_strides);

Value f_tensordot(SPUContext* ctx, const Value& x, const Value& y,
const Index& ix, const Index& iy);

Value f_div(SPUContext* ctx, const Value& x, const Value& y);

Value f_equal(SPUContext* ctx, const Value& x, const Value& y);
7 changes: 7 additions & 0 deletions libspu/kernel/hal/integer.cc
@@ -90,4 +90,11 @@ Value i_conv2d(SPUContext* ctx, const Value& x, const Value& y,
return _conv2d(ctx, x, y, window_strides).setDtype(x.dtype());
}

Value i_tensordot(SPUContext* ctx, const Value& x, const Value& y,
const Index& ix, const Index& iy) {
SPU_TRACE_HAL_LEAF(ctx, x, y, ix, iy);
ENSURE_INT_AND_DTYPE_MATCH(x, y);
return _tensordot(ctx, x, y, ix, iy).setDtype(x.dtype());
}

} // namespace spu::kernel::hal
3 changes: 3 additions & 0 deletions libspu/kernel/hal/integer.h
@@ -37,6 +37,9 @@ Value i_mul(SPUContext* ctx, const Value& x, const Value& y);

Value i_mmul(SPUContext* ctx, const Value& x, const Value& y);

Value i_tensordot(SPUContext* ctx, const Value& x, const Value& y,
const Index& ix, const Index& iy);

Value i_conv2d(SPUContext* ctx, const Value& x, const Value& y,
const Strides& window_strides);

(The remaining changed files in this commit are not shown here.)
