Adds Cleartext float via sytorch (#186)
* float added from kanav-gpt in sytorch

* cleartext_fp added in Onnxbridge

* i64->T in tensor.h

* debug statement

* typecast corrected in module.h

* removing debug statement

* added testing for sytorch float ct

* rename tests
drunkenlegend authored Aug 2, 2023
1 parent 01285e9 commit c6e60b1
Showing 13 changed files with 650 additions and 30 deletions.
82 changes: 74 additions & 8 deletions .github/workflows/onnx_bridge.yml
@@ -13,7 +13,7 @@ on:
 # A workflow run is made up of one or more jobs that can run sequentially or in parallel
 jobs:
 
-  LLAMA:
+  Sytorch-LLAMA:
     # The type of runner that the job will run on
     runs-on: ubuntu-latest
     container:
@@ -37,15 +37,15 @@ jobs:
         run: |
           apt-get update -y
-      - name: LLAMA
+      - name: Sytorch LLAMA
         if: always()
         run: |
           cd OnnxBridge/tests/
           pytest --backend LLAMA -v -k 'lenet and not batch'
           pytest --backend LLAMA -v -k 'hinet and not batch'
         shell: bash
 
-  LLAMA-batch:
+  Sytorch-LLAMA-batch:
     # The type of runner that the job will run on
     runs-on: ubuntu-latest
     container:
@@ -69,15 +69,15 @@ jobs:
         run: |
           apt-get update -y
-      - name: LLAMA Batch
+      - name: Sytorch LLAMA Batch
         if: always()
         run: |
           cd OnnxBridge/tests/
           pytest --backend LLAMA -v -k 'lenet and batch' --batch_size 2
           pytest --backend LLAMA -v -k 'hinet and batch' --batch_size 5
         shell: bash
 
-  LLAMA-ct:
+  Sytorch-LLAMA-ct:
     # The type of runner that the job will run on
     runs-on: ubuntu-latest
     container:
@@ -101,7 +101,7 @@ jobs:
         run: |
           apt-get update -y
-      - name: LLAMA Cleartext
+      - name: Sytorch LLAMA Cleartext
         if: always()
         run: |
           cd OnnxBridge/tests/
@@ -110,7 +110,7 @@ jobs:
           pytest --backend CLEARTEXT_LLAMA -v -k 'chexpert and not batch'
         shell: bash
 
-  LLAMA-ct-batch:
+  Sytorch-LLAMA-ct-batch:
     # The type of runner that the job will run on
     runs-on: ubuntu-latest
     container:
@@ -134,14 +134,80 @@ jobs:
         run: |
           apt-get update -y
-      - name: LLAMA Cleartext Batch
+      - name: Sytorch LLAMA Cleartext Batch
         if: always()
         run: |
           cd OnnxBridge/tests/
           pytest --backend CLEARTEXT_LLAMA -v -k 'lenet and batch' --batch_size 2
           pytest --backend CLEARTEXT_LLAMA -v -k 'hinet and batch' --batch_size 5
         shell: bash
 
+  Sytorch-ct-fp:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container:
+      image: drunkenlegend/onnxbridge:latest
+      options: --user root
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      - name: Update Git
+        run: |
+          add-apt-repository ppa:git-core/ppa -y
+          apt-get update
+          apt-get install git -y
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          submodules: 'true'
+
+      - name: Install dependencies
+        run: |
+          apt-get update -y
+      - name: Sytorch Cleartext Floating Point
+        if: always()
+        run: |
+          cd OnnxBridge/tests/
+          pytest --backend CLEARTEXT_fp -v -k 'lenet and not batch'
+          pytest --backend CLEARTEXT_fp -v -k 'hinet and not batch'
+          pytest --backend CLEARTEXT_fp -v -k 'chexpert and not batch'
+        shell: bash
+
+  Sytorch-ct-fp-batch:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+    container:
+      image: drunkenlegend/onnxbridge:latest
+      options: --user root
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      - name: Update Git
+        run: |
+          add-apt-repository ppa:git-core/ppa -y
+          apt-get update
+          apt-get install git -y
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          submodules: 'true'
+
+      - name: Install dependencies
+        run: |
+          apt-get update -y
+      - name: Sytorch Cleartext Floating Point Batch
+        if: always()
+        run: |
+          cd OnnxBridge/tests/
+          pytest --backend CLEARTEXT_fp -v -k 'lenet and batch' --batch_size 2
+          pytest --backend CLEARTEXT_fp -v -k 'hinet and batch' --batch_size 5
+        shell: bash
+
 
   Secfloat:
     # The type of runner that the job will run on
     runs-on: ubuntu-latest
2 changes: 1 addition & 1 deletion OnnxBridge/LLAMA/compile_llama.sh
@@ -44,7 +44,7 @@ find_package(Threads REQUIRED)
 add_subdirectory($sytorch_dir/ext/cryptoTools $pd/cryptoTools)
 add_subdirectory($sytorch_dir/ext/llama $pd/llama)
 add_executable($BINARY_NAME
-    ../$FSS_CPP_FILE $sytorch_dir/src/sytorch/random.cpp $sytorch_dir/src/sytorch/backend/cleartext.cpp
+    ../$FSS_CPP_FILE $sytorch_dir/src/sytorch/random.cpp $sytorch_dir/src/sytorch/backend/cleartext.cpp $sytorch_dir/src/sytorch/backend/float.cpp
 )
 target_include_directories($BINARY_NAME
     PUBLIC
43 changes: 42 additions & 1 deletion OnnxBridge/LLAMA/sytorchBackendRep.py
@@ -137,6 +137,45 @@ def cleartext_post(code_list, program, scale, mode, indent):
     )
 
 
+def cleartext_fp_post(code_list, program, scale, mode, indent):
+    # Input
+    n = program[0].shape[0]
+    c = program[0].shape[1]
+    dims = program[0].shape[2:]
+    # n, c, h, w = program[0].shape
+    code_list.append(
+        f"""
+int main(int argc, char**__argv){'{'}
+    prngWeights.SetSeed(osuCrypto::toBlock(0, 0));
+    prngStr.SetSeed(osuCrypto::toBlock(time(NULL)));
+    int party = atoi(__argv[1]);
+    std::string ip = "127.0.0.1";
+    srand(time(NULL));
+    const u64 scale = 0;
+    if (party == 0) {'{'}
+        Net<float> net;
+        net.init(scale);
+        std::string weights_file = __argv[2];
+        net.load(weights_file);
+        Tensor<float> input({'{'}{iterate_list([n]+ dims +[c])}{'}'});
+        input.input_nchw(scale);
+        print_dot_graph(net.root);
+        net.forward(input);
+        net.activation.print();
+        return 0;
+    {'}'}
+{'}'}
+"""
+    )
+
+
 def llama_pre(code_list, program, scale, mode, bitlength, indent):
     code_list.append("#include <sytorch/backend/llama_extended.h>")
     code_list.append("#include <sytorch/layers/layers.h>")
@@ -265,7 +304,7 @@ def prepare_export(program, var_dict, value_info, mode, scale, bitlength, backen
 
     # Start CPP program
     number_of_nodes = 0
-    if backend == "CLEARTEXT_LLAMA":
+    if backend == "CLEARTEXT_LLAMA" or backend == "CLEARTEXT_fp":
         cleartext_pre(code_list, program, scale, mode, indent)
     elif backend == "LLAMA":
         llama_pre(code_list, program, scale, mode, bitlength, indent)
@@ -320,6 +359,8 @@ def prepare_export(program, var_dict, value_info, mode, scale, bitlength, backen
 
     if backend == "CLEARTEXT_LLAMA":
         cleartext_post(code_list, program, scale, mode, indent)
+    elif backend == "CLEARTEXT_fp":
+        cleartext_fp_post(code_list, program, scale, mode, indent)
     elif backend == "LLAMA":
         llama_post(code_list, program, scale, mode, bitlength, indent)
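For concreteness, the C++ that cleartext_fp_post emits for a hypothetical model with a (1, 3, 224, 224) NCHW input would look roughly as follows. The channel dimension moves to the end because the tensor shape is built from [n] + dims + [c], and scale is pinned to 0 because the float backend needs no fixed-point scaling. This is a sketch assuming iterate_list comma-joins its argument; the actual file is emitted by prepare_export and compiled against sytorch (including the new backend/float.cpp) by compile_llama.sh.

// Sketch: expansion of the cleartext_fp_post template for a hypothetical
// (1, 3, 224, 224) NCHW input.
int main(int argc, char **__argv)
{
    prngWeights.SetSeed(osuCrypto::toBlock(0, 0));
    prngStr.SetSeed(osuCrypto::toBlock(time(NULL)));
    int party = atoi(__argv[1]); // cleartext runs use party 0 only
    std::string ip = "127.0.0.1";
    srand(time(NULL));
    const u64 scale = 0; // floats carry their own precision, so no scaling
    if (party == 0)
    {
        Net<float> net; // the generated model class, instantiated over float
        net.init(scale);
        std::string weights_file = __argv[2];
        net.load(weights_file);
        Tensor<float> input({1, 224, 224, 3}); // [n] + dims + [c]
        input.input_nchw(scale); // consumes the piped-in NCHW input stream
        print_dot_graph(net.root);
        net.forward(input);
        net.activation.print();
        return 0;
    }
}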

6 changes: 3 additions & 3 deletions OnnxBridge/backend.py
@@ -89,7 +89,7 @@ def preprocess_model(cls, model_fname, logging_level, backend):
             logger.error("Model Not Supported")
             sys.exit()
 
-        if backend in ["CLEARTEXT_LLAMA", "LLAMA"]:
+        if backend in ["CLEARTEXT_LLAMA", "LLAMA", "CLEARTEXT_fp"]:
             weights_path = optimizations.dump_model_weights_as_dat(
                 model, model_abs_dir, model_name
             )
@@ -151,7 +151,7 @@ def is_compatible(cls, model, backend, device: str = "2PC", **kwargs):
         ]
         if backend in ["SECFLOAT", "SECFLOAT_CLEARTEXT"]:
             implemented = implemented_secfloat
-        elif backend in ["CLEARTEXT_LLAMA", "LLAMA"]:
+        elif backend in ["CLEARTEXT_LLAMA", "LLAMA", "CLEARTEXT_fp"]:
             implemented = implemented_sytorch
         for node in model.graph.node:
             if node.op_type not in implemented:
@@ -217,7 +217,7 @@ def prepare(
             backend_rep = FzpcBackendRep(
                 program, value_info, var_dict, path, file_name[:-5], backend
             )
-        elif backend in ["CLEARTEXT_LLAMA", "LLAMA"]:
+        elif backend in ["CLEARTEXT_LLAMA", "LLAMA", "CLEARTEXT_fp"]:
            backend_rep = SytorchBackendRep(
                program, value_info, var_dict, path, file_name[:-5]
            )
14 changes: 10 additions & 4 deletions OnnxBridge/main.py
@@ -6,7 +6,13 @@
 
 
 def parse_args():
-    backend = ["CLEARTEXT_LLAMA", "LLAMA", "SECFLOAT", "SECFLOAT_CLEARTEXT"]
+    backend = [
+        "CLEARTEXT_LLAMA",
+        "LLAMA",
+        "SECFLOAT",
+        "SECFLOAT_CLEARTEXT",
+        "CLEARTEXT_fp",
+    ]
     parser = argparse.ArgumentParser()
     parser.add_argument("--path", required=True, type=str, help="Path to the Model.")
     parser.add_argument(
@@ -32,7 +38,7 @@ def parse_args():
     )
     parser.add_argument(
         "--generate",
-        required=any(b in argv for b in [backend[2], backend[3]]),
+        required=True,
         type=str,
         choices=["code", "executable"],
         default="code",
@@ -49,7 +55,7 @@ def main():
     mode = "u64" if args.backend == "LLAMA" else "i64"
 
     # Export the Model as Secfloat and writes to a cpp file
-    if args.backend in ["CLEARTEXT_LLAMA", "LLAMA"]:
+    if args.backend in ["CLEARTEXT_LLAMA", "LLAMA", "CLEARTEXT_fp"]:
         main_path = os.path.dirname(os.path.abspath(__file__))
         file_path = os.path.join(main_path, "LLAMA")
         backendrep.export_model(mode, args.scale, args.bitlength, args.backend)
@@ -65,7 +71,7 @@ def main():
         os.system(
             f"{file_path}/compile_secfloat.sh {args.path[:-5]}_secfloat{ct}.cpp"
         )
-    elif args.backend in ["CLEARTEXT_LLAMA", "LLAMA"]:
+    elif args.backend in ["CLEARTEXT_LLAMA", "LLAMA", "CLEARTEXT_fp"]:
         os.system(
             f"{file_path}/compile_llama.sh {args.path[:-5]}_{args.backend}_{args.scale}.cpp"
         )
10 changes: 8 additions & 2 deletions OnnxBridge/tests/conftest.py
@@ -9,8 +9,14 @@ def pytest_addoption(parser):
     parser.addoption(
         "--backend",
         action="store",
-        choices=["CLEARTEXT_LLAMA", "LLAMA", "SECFLOAT", "SECFLOAT_CLEARTEXT"],
-        help="backend : CLEARTEXT_LLAMA | LLAMA | SECFLOAT | SECFLOAT_CLEARTEXT",
+        choices=[
+            "CLEARTEXT_LLAMA",
+            "LLAMA",
+            "SECFLOAT",
+            "SECFLOAT_CLEARTEXT",
+            "CLEARTEXT_fp",
+        ],
+        help="backend : CLEARTEXT_LLAMA | CLEARTEXT_fp | LLAMA | SECFLOAT | SECFLOAT_CLEARTEXT",
         required=True,
     )
     parser.addoption(
12 changes: 12 additions & 0 deletions OnnxBridge/tests/utils.py
@@ -41,6 +41,10 @@ def compile_model(backend):
         os.system(
             f"python3 {ezpc_dir}/OnnxBridge/main.py --path model.onnx --generate executable --backend {backend} --scale 15 --bitlength 40 "
         )
+    elif backend == "CLEARTEXT_fp":
+        os.system(
+            f"python3 {ezpc_dir}/OnnxBridge/main.py --path model.onnx --generate executable --backend {backend} "
+        )
     elif backend == "SECFLOAT" or backend == "SECFLOAT_CLEARTEXT":
         os.system(
             f"python3 {ezpc_dir}/OnnxBridge/main.py --path model.onnx --generate executable --backend {backend} "
@@ -62,6 +66,14 @@ def run_backend(backend, input):
         os.system(
             f"./model_CLEARTEXT_LLAMA_15 0 model_input_weights.dat < {input} > {raw_output}"
         )
+    elif backend == "CLEARTEXT_fp":
+        # check if model compiled
+        assert os.path.exists("model_CLEARTEXT_fp_0")
+        assert os.path.exists("model_input_weights.dat")
+
+        os.system(
+            f"./model_CLEARTEXT_fp_0 0 model_input_weights.dat < {input} > {raw_output}"
+        )
     elif backend == "LLAMA":
         # check if model compiled
         assert os.path.exists("model_LLAMA_15")
38 changes: 38 additions & 0 deletions sytorch/include/sytorch/backend/default.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <sytorch/backend/cleartext.h>
+#include <sytorch/backend/float.h>
+
+template <typename T>
+Backend<T> *defaultBackend()
+{
+    if constexpr (std::is_floating_point<T>::value)
+    {
+        return new FloatClearText<T>();
+    }
+    else
+    {
+        return new ClearText<T>();
+    }
+}
+
+template <typename T>
+inline T type_cast(float val);
+
+template <>
+float type_cast(float val)
+{
+    return val;
+}
+
+template <>
+i64 type_cast(float val)
+{
+    return (i64)val;
+}
+
+template <>
+u64 type_cast(float val)
+{
+    return (u64(i64(val)));
+}
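Two details of this new header are worth spelling out. First, backend selection is a compile-time decision: if constexpr discards the branch that does not match, so defaultBackend<float>() yields the new FloatClearText while integer element types keep ClearText. Second, the u64 specialization of type_cast deliberately routes through i64: converting a negative float straight to an unsigned integer type is undefined behavior in C++, whereas float to i64 to u64 yields the two's-complement bit pattern the fixed-point code expects. Below is a minimal standalone sketch of the same dispatch pattern, using hypothetical stand-in classes rather than sytorch's real Backend hierarchy.

#include <cstdint>
#include <cstdio>
#include <type_traits>

using i64 = std::int64_t;
using u64 = std::uint64_t;

// Hypothetical stand-ins, just to make the dispatch observable; the real
// classes live in sytorch's cleartext.h and float.h.
template <typename T>
struct Backend
{
    virtual const char *name() const = 0;
    virtual ~Backend() = default;
};

template <typename T>
struct ClearText : Backend<T>
{
    const char *name() const override { return "ClearText (fixed-point)"; }
};

template <typename T>
struct FloatClearText : Backend<T>
{
    const char *name() const override { return "FloatClearText (native float)"; }
};

// Same shape as defaultBackend() above: the branch not taken is discarded
// at compile time, so each instantiation references only one backend class.
template <typename T>
Backend<T> *defaultBackend()
{
    if constexpr (std::is_floating_point<T>::value)
        return new FloatClearText<T>();
    else
        return new ClearText<T>();
}

int main()
{
    Backend<float> *fb = defaultBackend<float>();
    Backend<i64> *ib = defaultBackend<i64>();
    std::printf("float -> %s\n", fb->name()); // FloatClearText (native float)
    std::printf("i64   -> %s\n", ib->name()); // ClearText (fixed-point)

    // Why type_cast<u64> goes through i64: float -> u64 is undefined for
    // negative values, while float -> i64 -> u64 wraps to the two's-complement
    // pattern, e.g. -1.5f becomes u64(i64(-1)) == 0xFFFFFFFFFFFFFFFF.
    float w = -1.5f;
    u64 bits = (u64)(i64)w;
    std::printf("type_cast<u64>(-1.5f) = %llu\n", (unsigned long long)bits);

    delete fb;
    delete ib;
    return 0;
}

Compiled with any C++17 compiler (for instance g++ -std=c++17), the sketch prints the float instantiation picking FloatClearText and the integer one picking ClearText.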