Repo sync #391

Merged: 2 commits, Nov 8, 2023
2 changes: 1 addition & 1 deletion bazel/repositories.bzl
@@ -18,7 +18,7 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

SECRETFLOW_GIT = "https://github.com/secretflow"

YACL_COMMIT_ID = "f933d7ff4caf0d9f7ea84cc3e9f51a9a6ee9eeca"
YACL_COMMIT_ID = "6be4330542e92b6503317c45a999c99e654ced58"

def spu_deps():
_rules_cuda()
12 changes: 11 additions & 1 deletion libspu/compiler/passes/BUILD.bazel
@@ -89,10 +89,10 @@ spu_cc_library(
name = "hlo_legalize_to_pphlo",
srcs = ["hlo_legalize_to_pphlo.cc"],
hdrs = ["passes.h"],
include_prefix = "tensorflow/compiler/xla/mlir_hlo/include",
deps = [
":map_stablehlo_to_pphlo_op",
":pass_details",
":utils",
":visibility_inference",
"//libspu/compiler/common:compilation_context",
"//libspu/core:prelude",
@@ -204,6 +204,7 @@ spu_cc_library(
hdrs = ["passes.h"],
deps = [
":pass_details",
":utils",
"//libspu/dialect:pphlo_dialect",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:TransformUtils",
@@ -258,6 +259,15 @@ spu_cc_library(
],
)

spu_cc_library(
name = "utils",
srcs = ["utils.cc"],
hdrs = ["utils.h"],
deps = [
"@llvm-project//mlir:IR",
],
)

spu_cc_library(
name = "convert_push_down",
srcs = ["convert_push_down.cc"],
14 changes: 1 addition & 13 deletions libspu/compiler/passes/expand_secret_gather.cc
@@ -20,6 +20,7 @@
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "libspu/compiler/passes/pass_details.h"
#include "libspu/compiler/passes/utils.h"
#include "libspu/dialect/pphlo_ops.h"

namespace mlir::pphlo {
@@ -51,19 +52,6 @@ std::vector<int64_t> DeleteDimensions(llvm::ArrayRef<int64_t> dims_to_delete,
return result;
}

mlir::DenseIntElementsAttr
ConvertDimensions(OpBuilder *builder, llvm::ArrayRef<int64_t> op_dimensions) {
llvm::SmallVector<APInt, 8> dimensions;
dimensions.reserve(op_dimensions.size());
for (auto value : op_dimensions) {
dimensions.emplace_back(APInt(64, value));
}

return DenseIntElementsAttr::get(
RankedTensorType::get(dimensions.size(), builder->getIntegerType(64)),
dimensions);
}

// Computes how many trips a loop implementing this gather op would take.
int64_t GatherLoopTripCount(GatherOp op) {
auto start_indices = op.getStartIndices();
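Note: ConvertDimensions is not dropped; it moves to the new shared utils target (see libspu/compiler/passes/utils.cc below), and this pass keeps using it through the added utils.h include. A hedged sketch of the call-site shape, with illustrative arguments that are not from this diff:

// Hypothetical call site: build a tensor<2xi64> attr holding [0, 1].
mlir::DenseIntElementsAttr makeDims(mlir::OpBuilder &builder) {
  return ConvertDimensions(&builder, {0, 1});
}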
43 changes: 33 additions & 10 deletions libspu/compiler/passes/hlo_legalize_to_pphlo.cc
@@ -14,9 +14,7 @@

// This file implements logic for lowering HLO dialect to pphlo dialect.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
@@ -30,6 +28,7 @@

#include "libspu/compiler/passes/map_stablehlo_to_pphlo_op.h"
#include "libspu/compiler/passes/pass_details.h"
#include "libspu/compiler/passes/utils.h"
#include "libspu/compiler/passes/value_visibility_map.h"
#include "libspu/compiler/passes/visibility_inference.h"
#include "libspu/core/prelude.h"
@@ -983,9 +982,13 @@ struct HloToPPHloOpConverter<stablehlo::SelectAndScatterOp>
auto result_type = HloToPPHloTypeConverter::getTypeWithVisibility(
op.getType(), vis_.getValueVisibility(op.getResult()));

if (op.getPadding().has_value() &&
(!op.getPaddingAttr().isSplat() ||
op.getPaddingAttr().getSplatValue<int64_t>() != 0)) {
bool has_padding = op.getPadding().has_value() &&
(!op.getPaddingAttr().isSplat() ||
op.getPaddingAttr().getSplatValue<int64_t>() != 0);

SelectAndScatterOp new_op;

if (has_padding) {
auto rank =
op->getOperandTypes()[0].dyn_cast<RankedTensorType>().getRank();
llvm::SmallVector<int64_t, 2> padding_low(rank, 0);
@@ -1003,12 +1006,32 @@
builder.getI64TensorAttr(padding_low),
builder.getI64TensorAttr(padding_high),
builder.getI64TensorAttr(padding_interior));
}

auto new_op = rewriter.replaceOpWithNewOp<pphlo::SelectAndScatterOp>(
op, result_type, materialized_operand, adaptor.getSource(),
materialized_init_value, op.getWindowDimensionsAttr(),
op.getWindowStridesAttr());
new_op = rewriter.create<pphlo::SelectAndScatterOp>(
op->getLoc(), materialized_operand.getType(), materialized_operand,
adaptor.getSource(), materialized_init_value,
op.getWindowDimensionsAttr(), op.getWindowStridesAttr());

llvm::SmallVector<int64_t, 2> slice_end(
new_op.getType().dyn_cast<RankedTensorType>().getShape().begin(),
new_op.getType().dyn_cast<RankedTensorType>().getShape().end());

for (size_t idx = 0; idx < slice_end.size(); ++idx) {
slice_end[idx] -= padding_high[idx];
}

// Slice back
rewriter.replaceOpWithNewOp<pphlo::SliceOp>(
op, result_type, new_op, ConvertDimensions(&builder, padding_low),
ConvertDimensions(&builder, slice_end),
ConvertDimensions(&builder,
llvm::SmallVector<int64_t>(slice_end.size(), 1)));
} else {
new_op = rewriter.replaceOpWithNewOp<pphlo::SelectAndScatterOp>(
op, result_type, materialized_operand, adaptor.getSource(),
materialized_init_value, op.getWindowDimensionsAttr(),
op.getWindowStridesAttr());
}

// Convert the region signature.
TypeConverter::SignatureConversion select_sig_conversion(
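Worked shape arithmetic for this rewrite, using the second test case added in hlo_to_pphlo_select_and_scatter.mlir below (a sketch under that test's shapes, not general code): the operand tensor<128x16x16x64xf32> is padded high by [0, 1, 1, 0], the new pphlo.select_and_scatter therefore yields the padded 128x17x17x64 shape, and the trailing pphlo.slice trims it back.

// Hedged sketch: slice bounds for the padded result, mirroring the loop above.
int64_t padded_shape[4] = {128, 17, 17, 64};  // operand shape after padding
int64_t padding_high[4] = {0, 1, 1, 0};       // from the test's padding attr
int64_t slice_end[4];
for (int i = 0; i < 4; ++i) {
  slice_end[i] = padded_shape[i] - padding_high[i];  // -> {128, 16, 16, 64}
}
// For this test case padding_low is all zeros, so pphlo.slice(start = {0,0,0,0},
// end = slice_end, strides = {1,1,1,1}) restores the 128x16x16x64 result type.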
36 changes: 36 additions & 0 deletions libspu/compiler/passes/utils.cc
@@ -0,0 +1,36 @@
// Copyright 2023 Ant Group Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "libspu/compiler/passes/utils.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"

namespace mlir::pphlo {

mlir::DenseIntElementsAttr
ConvertDimensions(OpBuilder *builder, llvm::ArrayRef<int64_t> op_dimensions) {
llvm::SmallVector<APInt, 8> dimensions;
dimensions.reserve(op_dimensions.size());
for (auto value : op_dimensions) {
dimensions.emplace_back(APInt(64, value));
}

return DenseIntElementsAttr::get(
RankedTensorType::get(dimensions.size(), builder->getIntegerType(64)),
dimensions);
}

} // namespace mlir::pphlo
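A minimal usage sketch of the relocated helper (hypothetical call site; ctx is an MLIRContext* assumed to be in scope):

// Build a tensor<3xi64> DenseIntElementsAttr holding [0, 2, 4]; each value
// is widened to a 64-bit APInt exactly as in the loop above.
mlir::OpBuilder builder(ctx);
auto attr = ConvertDimensions(&builder, {0, 2, 4});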
13 changes: 7 additions & 6 deletions libspu/mpc/semi2k/sort_test.h → libspu/compiler/passes/utils.h
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "gtest/gtest.h"
#include "yacl/link/link.h"
#pragma once

#include "libspu/mpc/api_test_params.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"

namespace spu::mpc::test {
namespace mlir::pphlo {

class PermuteTest : public ::testing::TestWithParam<OpTestParams> {};
mlir::DenseIntElementsAttr
ConvertDimensions(OpBuilder *builder, llvm::ArrayRef<int64_t> op_dimensions);

} // namespace spu::mpc::test
} // namespace mlir::pphlo
19 changes: 18 additions & 1 deletion libspu/compiler/tests/hlo_to_pphlo_select_and_scatter.mlir
@@ -20,4 +20,21 @@ func.func @main(%arg0: tensor<128x5x5x32xf32>, %arg1: tensor<128x4x4x32xf32>, %a
"stablehlo.return"(%1) : (tensor<f32>) -> ()
}) {padding = dense<0> : tensor<4x2xi64>, window_dimensions = dense<[1, 2, 2, 1]> : tensor<4xi64>, window_strides = dense<1> : tensor<4xi64>} : (tensor<128x5x5x32xf32>, tensor<128x4x4x32xf32>, tensor<f32>) -> tensor<128x5x5x32xf32>
return %0 : tensor<128x5x5x32xf32>
}
}

// -----

func.func @main(%arg0: tensor<128x16x16x64xf32>, %arg1: tensor<128x8x8x64xf32>, %arg2: tensor<f32>) -> tensor<128x16x16x64xf32> {
// CHECK: "pphlo.select_and_scatter"
// CHECK: "pphlo.slice"
%0 = "stablehlo.select_and_scatter"(%arg0, %arg1, %arg2) ({
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = stablehlo.compare GE, %arg3, %arg4 : (tensor<f32>, tensor<f32>) -> tensor<i1>
stablehlo.return %1 : tensor<i1>
}, {
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = stablehlo.add %arg3, %arg4 : tensor<f32>
stablehlo.return %1 : tensor<f32>
}) {padding = dense<[[0, 0], [0, 1], [0, 1], [0, 0]]> : tensor<4x2xi64>, window_dimensions = dense<[1, 3, 3, 1]> : tensor<4xi64>, window_strides = dense<[1, 2, 2, 1]> : tensor<4xi64>} : (tensor<128x16x16x64xf32>, tensor<128x8x8x64xf32>, tensor<f32>) -> tensor<128x16x16x64xf32>
return %0 : tensor<128x16x16x64xf32>
}
3 changes: 1 addition & 2 deletions libspu/core/context.h
@@ -93,8 +93,7 @@ class KernelEvalContext final {
Type, // type of type
uint128_t, // ring constant
int64_t, //
SignType, //
std::vector<Value> // for sort
SignType //
>;

SPUContext* sctx_;
30 changes: 30 additions & 0 deletions libspu/device/pphlo/pphlo_executor_test.cc
@@ -2263,6 +2263,36 @@ func.func @main(%arg0: tensor<20xi32>) -> (tensor<20xi32>, tensor<20xi32>) {
r.verifyOutput(expected_ret1.data(), 1);
}

TEST_P(ExecutorTest, MixedPayloadDescending) {
xt::xarray<int32_t> op = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
99, 97, 98, 96, 91, 11, 12, 13, 14, 15};
xt::xarray<int32_t> expected_ret0 = {99, 98, 97, 96, 91, 15, 14, 13, 12, 11,
10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
xt::xarray<int32_t> expected_ret1 = {10, 12, 11, 13, 14, 19, 18, 17, 16, 15,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

Runner r(std::get<0>(GetParam()), std::get<1>(GetParam()),
std::get<2>(GetParam()));

r.addInput(op, VIS_SECRET);

r.run(r.compileMHlo(
R"(
func.func @main(%arg0: tensor<20xi32>) -> (tensor<20xi32>, tensor<20xi32>) {
%0 = "mhlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<20xi32>
%1:2 = "mhlo.sort"(%arg0, %0) ({
^bb0(%arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<i32>):
%2 = mhlo.compare GT, %arg1, %arg2 : (tensor<i32>, tensor<i32>) -> tensor<i1>
mhlo.return %2 : tensor<i1>
}) {dimension = 0 : i64, is_stable = true} : (tensor<20xi32>, tensor<20xi32>) -> (tensor<20xi32>, tensor<20xi32>)
return %1#0, %1#1: tensor<20xi32>, tensor<20xi32>
})",
{VIS_SECRET}),
2);
r.verifyOutput(expected_ret0.data(), 0);
r.verifyOutput(expected_ret1.data(), 1);
}

INSTANTIATE_TEST_SUITE_P(
ExecutorTestInstances, ExecutorTest,
testing::Combine(testing::Values(4, 3, 2),
39 changes: 23 additions & 16 deletions libspu/kernel/hal/prot_wrapper.cc
@@ -116,22 +116,6 @@ Value _trunc_v(SPUContext* ctx, const Value& in, size_t bits, SignType sign) {
SPU_TRACE_HAL_DISP(ctx, in, bits, sign);
return mpc::trunc_v(ctx, in, bits, sign);
}
std::vector<Value> _sort_s(SPUContext* ctx, absl::Span<Value const> x) {
SPU_TRACE_HAL_DISP(ctx, x.size());
// FIXME(jimi): formalize mpc sort api

// As pass absl::Span in dynDispatch is dangerous, we initialize a new vector
// here. And the copy of value is cheap, so it's ok.
std::vector<Value> x_val(x.begin(), x.end());
auto ret = dynDispatch<std::vector<Value>>(ctx, "sort_a", x_val);
SPU_ENFORCE_EQ(x_val.size(), ret.size(),
"sorted results and inputs sizes should match");

for (size_t i = 0; i < x_val.size(); ++i) {
ret[i].setDtype(x_val[i].dtype());
}
return ret;
}

// p<->s
MAP_UNARY_OP(p2s)
@@ -228,4 +212,27 @@ MAP_OPTIONAL_BINARY_OP(equal_ss)
MAP_OPTIONAL_BINARY_OP(equal_sp)
MAP_BINARY_OP(equal_pp)

#define MAP_OPTIONAL_PERM_OP(NAME) \
Value _##NAME(SPUContext* ctx, const Value& x, const Value& y) { \
SPU_TRACE_HAL_DISP(ctx, x, y); \
SPU_ENFORCE(x.shape().ndim() == 1, "x should be a 1-d tensor"); \
auto ret = mpc::NAME(ctx, x, y); \
SPU_ENFORCE(ret.has_value(), "{} api not implemented", #NAME); \
return ret.value(); \
} // namespace spu::kernel::hal

MAP_OPTIONAL_PERM_OP(perm_ss);
MAP_OPTIONAL_PERM_OP(perm_sp);
MAP_OPTIONAL_PERM_OP(inv_perm_ss);
MAP_OPTIONAL_PERM_OP(inv_perm_sp);

Value _rand_perm_s(SPUContext* ctx, const Shape& shape) {
SPU_TRACE_HAL_DISP(ctx, shape);
SPU_ENFORCE(shape.ndim() == 1, "shape should be a 1-d");

auto ret = mpc::rand_perm_s(ctx, shape);
SPU_ENFORCE(ret.has_value(), "rand_perm_s api not implemented");
return ret.value();
}

} // namespace spu::kernel::hal
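For readability, MAP_OPTIONAL_PERM_OP(perm_ss) above expands to approximately:

Value _perm_ss(SPUContext* ctx, const Value& x, const Value& y) {
  SPU_TRACE_HAL_DISP(ctx, x, y);
  SPU_ENFORCE(x.shape().ndim() == 1, "x should be a 1-d tensor");
  auto ret = mpc::perm_ss(ctx, x, y);  // returns an optional Value
  SPU_ENFORCE(ret.has_value(), "perm_ss api not implemented");
  return ret.value();
}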
7 changes: 6 additions & 1 deletion libspu/kernel/hal/prot_wrapper.h
@@ -115,7 +115,12 @@ Value _make_p(SPUContext* ctx, uint128_t init, const Shape& shape);
Value _rand_p(SPUContext* ctx, const Shape& shape);
Value _rand_s(SPUContext* ctx, const Shape& shape);

std::vector<Value> _sort_s(SPUContext* ctx, absl::Span<Value const> x);
// FIXME: temporary API
Value _rand_perm_s(SPUContext* ctx, const Shape& shape);
Value _perm_ss(SPUContext* ctx, const Value& x, const Value& perm);
Value _perm_sp(SPUContext* ctx, const Value& x, const Value& perm);
Value _inv_perm_ss(SPUContext* ctx, const Value& x, const Value& perm);
Value _inv_perm_sp(SPUContext* ctx, const Value& x, const Value& perm);

// NOLINTEND(readability-identifier-naming)

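One plausible composition of the new primitives (an illustration only: demo_round_trip is hypothetical, not part of this diff, and assumes perm_ss/inv_perm_ss are mutual inverses under the same permutation):

// Hedged sketch: shuffle a secret 1-d value with a random secret permutation,
// then undo it; sharing one perm across columns keeps payload rows aligned.
Value demo_round_trip(SPUContext* ctx, const Value& x) {
  Value perm = _rand_perm_s(ctx, x.shape());  // random secret permutation
  Value shuffled = _perm_ss(ctx, x, perm);    // apply perm to x
  return _inv_perm_ss(ctx, shuffled, perm);   // recover the original order
}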