From 33bd53b46e18e68d060d57e7856ad9f95473a460 Mon Sep 17 00:00:00 2001 From: schillic Date: Sun, 3 Dec 2023 23:29:05 +0100 Subject: [PATCH] add files --- .github/workflows/SpellCheck.yml | 13 + .github/workflows/TagBot.yml | 1 + .github/workflows/ci_PR.yml | 2 +- .gitignore | 3 + .typos.toml | 3 + LICENSE | 2 +- Project.toml | 17 +- README.md | 48 +-- docs/Project.toml | 3 + docs/make.jl | 21 +- docs/src/about.md | 6 +- docs/src/assets/logo.png | Bin 0 -> 20803 bytes docs/src/index.md | 12 +- docs/src/lib/BackwardAlgorithms.md | 19 + docs/src/lib/BidirectionalAlgorithms.md | 17 + docs/src/lib/ForwardAlgorithms.md | 23 + src/BackwardAlgorithms/BackwardAlgorithm.jl | 8 + src/BackwardAlgorithms/BackwardAlgorithms.jl | 22 + src/BackwardAlgorithms/BoxBackward.jl | 55 +++ .../PartitioningLeakyReLU.jl | 52 +++ src/BackwardAlgorithms/PolyhedraBackward.jl | 354 +++++++++++++++ src/BackwardAlgorithms/backward_default.jl | 117 +++++ src/BackwardAlgorithms/simplify_sets.jl | 12 + .../BidirectionalAlgorithm.jl | 6 + .../BidirectionalAlgorithms.jl | 19 + .../SimpleBidirectional.jl | 40 ++ .../bidirectional_default.jl | 89 ++++ src/ForwardAlgorithms/BoxForward.jl | 46 ++ src/ForwardAlgorithms/ConcreteForward.jl | 16 + src/ForwardAlgorithms/DeepZ.jl | 90 ++++ src/ForwardAlgorithms/DefaultForward.jl | 35 ++ src/ForwardAlgorithms/ForwardAlgorithm.jl | 6 + src/ForwardAlgorithms/ForwardAlgorithms.jl | 31 ++ src/ForwardAlgorithms/LazyForward.jl | 11 + src/ForwardAlgorithms/SplitForward.jl | 42 ++ src/ForwardAlgorithms/Verisig.jl | 94 ++++ src/ForwardAlgorithms/forward_default.jl | 53 +++ src/ForwardAlgorithms/init.jl | 6 + .../init_ReachabilityAnalysis.jl | 38 ++ src/JuliaReachTemplatePkg.jl | 7 - src/NeuralNetworkReachability.jl | 14 + test/AISoLA2023/BoxAffineMap.jl | 8 + test/AISoLA2023/forward_backward.jl | 81 ++++ test/AISoLA2023/leaky_relu.jl | 55 +++ test/AISoLA2023/motivation.jl | 73 ++++ test/AISoLA2023/parabola.jl | 31 ++ test/AISoLA2023/parabola.network | 32 ++ .../PartitioningLeakyReLU.jl | 23 + test/BackwardAlgorithms/backward.jl | 408 ++++++++++++++++++ test/BackwardAlgorithms/simplify_sets.jl | 17 + test/BidirectionalAlgorithms/bidirectional.jl | 80 ++++ test/ForwardAlgorithms/forward.jl | 336 +++++++++++++++ test/Project.toml | 12 + test/example_networks.jl | 15 + test/optional_dependencies_not_loaded.jl | 10 + test/runtests.jl | 44 +- 56 files changed, 2605 insertions(+), 73 deletions(-) create mode 100644 .github/workflows/SpellCheck.yml create mode 100644 .typos.toml create mode 100644 docs/src/assets/logo.png create mode 100644 docs/src/lib/BackwardAlgorithms.md create mode 100644 docs/src/lib/BidirectionalAlgorithms.md create mode 100644 docs/src/lib/ForwardAlgorithms.md create mode 100755 src/BackwardAlgorithms/BackwardAlgorithm.jl create mode 100644 src/BackwardAlgorithms/BackwardAlgorithms.jl create mode 100644 src/BackwardAlgorithms/BoxBackward.jl create mode 100644 src/BackwardAlgorithms/PartitioningLeakyReLU.jl create mode 100644 src/BackwardAlgorithms/PolyhedraBackward.jl create mode 100644 src/BackwardAlgorithms/backward_default.jl create mode 100644 src/BackwardAlgorithms/simplify_sets.jl create mode 100644 src/BidirectionalAlgorithms/BidirectionalAlgorithm.jl create mode 100644 src/BidirectionalAlgorithms/BidirectionalAlgorithms.jl create mode 100644 src/BidirectionalAlgorithms/SimpleBidirectional.jl create mode 100644 src/BidirectionalAlgorithms/bidirectional_default.jl create mode 100644 src/ForwardAlgorithms/BoxForward.jl create mode 100644 
src/ForwardAlgorithms/ConcreteForward.jl create mode 100644 src/ForwardAlgorithms/DeepZ.jl create mode 100644 src/ForwardAlgorithms/DefaultForward.jl create mode 100755 src/ForwardAlgorithms/ForwardAlgorithm.jl create mode 100644 src/ForwardAlgorithms/ForwardAlgorithms.jl create mode 100644 src/ForwardAlgorithms/LazyForward.jl create mode 100644 src/ForwardAlgorithms/SplitForward.jl create mode 100644 src/ForwardAlgorithms/Verisig.jl create mode 100644 src/ForwardAlgorithms/forward_default.jl create mode 100644 src/ForwardAlgorithms/init.jl create mode 100644 src/ForwardAlgorithms/init_ReachabilityAnalysis.jl delete mode 100644 src/JuliaReachTemplatePkg.jl create mode 100644 src/NeuralNetworkReachability.jl create mode 100644 test/AISoLA2023/BoxAffineMap.jl create mode 100644 test/AISoLA2023/forward_backward.jl create mode 100644 test/AISoLA2023/leaky_relu.jl create mode 100644 test/AISoLA2023/motivation.jl create mode 100644 test/AISoLA2023/parabola.jl create mode 100644 test/AISoLA2023/parabola.network create mode 100644 test/BackwardAlgorithms/PartitioningLeakyReLU.jl create mode 100755 test/BackwardAlgorithms/backward.jl create mode 100644 test/BackwardAlgorithms/simplify_sets.jl create mode 100755 test/BidirectionalAlgorithms/bidirectional.jl create mode 100755 test/ForwardAlgorithms/forward.jl mode change 100644 => 100755 test/Project.toml create mode 100755 test/example_networks.jl create mode 100644 test/optional_dependencies_not_loaded.jl mode change 100644 => 100755 test/runtests.jl diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml new file mode 100644 index 0000000..fe7db59 --- /dev/null +++ b/.github/workflows/SpellCheck.yml @@ -0,0 +1,13 @@ +name: Spell Check + +on: [pull_request] + +jobs: + typos-check: + name: check spelling + runs-on: ubuntu-latest + steps: + - name: Checkout Actions Repository + uses: actions/checkout@v4 + - name: Check spelling + uses: crate-ci/typos@master diff --git a/.github/workflows/TagBot.yml b/.github/workflows/TagBot.yml index 6887673..6d2efc1 100644 --- a/.github/workflows/TagBot.yml +++ b/.github/workflows/TagBot.yml @@ -14,3 +14,4 @@ jobs: - uses: JuliaRegistries/TagBot@v1 with: token: ${{ secrets.GITHUB_TOKEN }} + ssh: ${{ secrets.DOCUMENTER_KEY }} diff --git a/.github/workflows/ci_PR.yml b/.github/workflows/ci_PR.yml index 58853b6..f0e842a 100644 --- a/.github/workflows/ci_PR.yml +++ b/.github/workflows/ci_PR.yml @@ -22,7 +22,7 @@ jobs: arch: - x64 include: - - version: '1.2' # test on oldest supported version + - version: '1.6' # test on oldest supported version arch: x64 os: ubuntu-latest env: diff --git a/.gitignore b/.gitignore index b54c2eb..73bf2ff 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,6 @@ docs/site/ .vscode Manifest.toml + +*.pdf +*.png diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 0000000..19132a5 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,3 @@ +[default.extend-words] +# do not correct the following strings: +BA = "BA" diff --git a/LICENSE b/LICENSE index fb61163..f0ad8ea 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2019 JuliaReach +Copyright (c) 2023 JuliaReach Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Project.toml b/Project.toml index a731430..6fef6d1 100644 --- a/Project.toml +++ b/Project.toml @@ -1,8 +1,19 @@ -name = "JuliaReachTemplatePkg" -uuid = "2b714139-d377-45b9-9f15-0355f7430025" +name = 
"NeuralNetworkReachability" +uuid = "5de1e908-2f08-45bf-a571-ac88a54f7e7f" version = "0.1.0" [deps] +ControllerFormats = "02ac4b2c-022a-44aa-84a5-ea45a5754bcc" +LazySets = "b4f0291d-fe17-52bc-9479-3d1a343d9043" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +ReachabilityBase = "379f33d0-9447-4353-bd03-d664070e549f" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +Requires = "ae029012-a4dd-5104-9daa-d747884805df" [compat] -julia = "1" +ControllerFormats = "0.2" +LazySets = "2.11.1" +ReachabilityBase = "0.2.1" +Reexport = "0.2, 1" +Requires = "0.5, 1" +julia = "1.6" diff --git a/README.md b/README.md index 8c4992b..09f20bc 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,19 @@ -# JuliaReachTemplatePkg.jl +# NeuralNetworkReachability.jl | **Documentation** | **Status** | **Community** | **License** | |:-----------------:|:----------:|:-------------:|:-----------:| -| [![docs-dev][dev-img]][dev-url] | [![CI][ci-img]][ci-url] [![codecov][cov-img]][cov-url] | [![gitter][chat-img]][chat-url] | [![license][lic-img]][lic-url] | +| [![docs-dev][dev-img]][dev-url] | [![CI][ci-img]][ci-url] [![codecov][cov-img]][cov-url] | [![zulip][chat-img]][chat-url] | [![license][lic-img]][lic-url] | [dev-img]: https://img.shields.io/badge/docs-latest-blue.svg -[dev-url]: https://juliareach.github.io/JuliaReachTemplatePkg.jl/dev/ -[ci-img]: https://github.com/JuliaReach/JuliaReachTemplatePkg.jl/workflows/CI/badge.svg -[ci-url]: https://github.com/JuliaReach/JuliaReachTemplatePkg.jl/actions/workflows/ci.yml -[cov-img]: https://codecov.io/github/JuliaReach/JuliaReachTemplatePkg.jl/coverage.svg -[cov-url]: https://app.codecov.io/github/JuliaReach/JuliaReachTemplatePkg.jl +[dev-url]: https://juliareach.github.io/NeuralNetworkReachability.jl/dev/ +[ci-img]: https://github.com/JuliaReach/NeuralNetworkReachability.jl/workflows/CI/badge.svg +[ci-url]: https://github.com/JuliaReach/NeuralNetworkReachability.jl/actions/workflows/ci.yml +[cov-img]: https://codecov.io/github/JuliaReach/NeuralNetworkReachability.jl/coverage.svg +[cov-url]: https://app.codecov.io/github/JuliaReach/NeuralNetworkReachability.jl [chat-img]: https://img.shields.io/badge/zulip-join_chat-brightgreen.svg [chat-url]: https://julialang.zulipchat.com/#narrow/stream/278609-juliareach [lic-img]: https://img.shields.io/github/license/mashape/apistatus.svg -[lic-url]: https://github.com/JuliaReach/JuliaReachTemplatePkg.jl/blob/master/LICENSE +[lic-url]: https://github.com/JuliaReach/NeuralNetworkReachability.jl/blob/master/LICENSE -`JuliaReachTemplatePkg` is a [Julia](http://julialang.org) template package for -the JuliaReach ecosystem. - -## Installing - -This package requires Julia v1.0 or later. -We refer to the [official documentation](https://julialang.org/downloads) on how -to install and run Julia on your system. - -Depending on your needs, choose an appropriate command from the following list -and enter it in Julia's REPL. -To activate the `pkg` mode, type `]` (and to leave it, type ``). - -#### [Install the latest release version](https://pkgdocs.julialang.org/v1/managing-packages/#Adding-registered-packages-1) - -```julia -pkg> add JuliaReachTemplatePkg -``` - -#### Install the latest development version - -```julia -pkg> add JuliaReachTemplatePkg#master -``` - -#### [Clone the package for development](https://pkgdocs.julialang.org/v1/managing-packages/#developing) - -```julia -pkg> dev JuliaReachTemplatePkg -``` +`NeuralNetworkReachability.jl` is a [Julia](http://julialang.org) package to symbolically analyze +neural networks. 
diff --git a/docs/Project.toml b/docs/Project.toml index dfa65cd..1814eb3 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,2 +1,5 @@ [deps] Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" + +[compat] +Documenter = "1" diff --git a/docs/make.jl b/docs/make.jl index 19f4984..3512392 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,15 +1,18 @@ -using Documenter, JuliaReachTemplatePkg +using Documenter, NeuralNetworkReachability -DocMeta.setdocmeta!(JuliaReachTemplatePkg, :DocTestSetup, - :(using JuliaReachTemplatePkg); recursive=true) +DocMeta.setdocmeta!(NeuralNetworkReachability, :DocTestSetup, + :(using NeuralNetworkReachability); recursive=true) -makedocs(; sitename="JuliaReachTemplatePkg.jl", - modules=[JuliaReachTemplatePkg], +makedocs(; sitename="NeuralNetworkReachability.jl", + modules=[NeuralNetworkReachability], format=Documenter.HTML(; prettyurls=get(ENV, "CI", nothing) == "true", - assets=["assets/aligned.css"]), + collapselevel=1, assets=["assets/aligned.css"]), + pagesonly=true, pages=["Home" => "index.md", - "About" => "about.md"], - strict=true) + "Library" => Any["ForwardAlgorithms" => "lib/ForwardAlgorithms.md", + "BackwardAlgorithms" => "lib/BackwardAlgorithms.md", + "BidirectionalAlgorithms" => "lib/BidirectionalAlgorithms.md"], + "About" => "about.md"]) -deploydocs(; repo="github.com/JuliaReach/JuliaReachTemplatePkg.jl.git", +deploydocs(; repo="github.com/JuliaReach/NeuralNetworkReachability.jl.git", push_preview=true) diff --git a/docs/src/about.md b/docs/src/about.md index 56ea728..310be8e 100644 --- a/docs/src/about.md +++ b/docs/src/about.md @@ -13,7 +13,7 @@ If you like this package, consider contributing! [Creating an issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-an-issue) in the -[JuliaReachTemplatePkg GitHub issue tracker](https://github.com/JuliaReach/JuliaReachTemplatePkg.jl/issues) +[NeuralNetworkReachability GitHub issue tracker](https://github.com/JuliaReach/NeuralNetworkReachability.jl/issues) to report a bug, open a discussion about existing functionality, or suggest new functionality is appreciated. @@ -53,7 +53,7 @@ To run the unit tests locally, you can do: ```julia julia> using Pkg -julia> Pkg.test("JuliaReachTemplatePkg") +julia> Pkg.test("NeuralNetworkReachability") ``` We also advise adding new unit tests when adding new features to ensure @@ -86,7 +86,7 @@ $ julia --color=yes docs/make.jl ## Credits -Here we list the names of the maintainers of the `JuliaReachTemplatePkg.jl` +Here we list the names of the maintainers of the `NeuralNetworkReachability.jl` library, as well as past and present contributors (in alphabetic order). 
### Core developers

diff --git a/docs/src/assets/logo.png b/docs/src/assets/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..b2a163b03af1ef1ee7eb3a22500eafa2d2d82207
GIT binary patch
literal 20803
[base85-encoded PNG data (20803 bytes) omitted]
[the hunks between docs/src/assets/logo.png and the middle of src/BackwardAlgorithms/PolyhedraBackward.jl were garbled together with the binary image data and are omitted]

diff --git a/src/BackwardAlgorithms/PolyhedraBackward.jl b/src/BackwardAlgorithms/PolyhedraBackward.jl
new file mode 100644
--- /dev/null
+++ b/src/BackwardAlgorithms/PolyhedraBackward.jl
[beginning of file omitted; the patch resumes inside the generic _backward_1D]
+    # ax + b <= h <=> ax <= h - b
+    # ax + b >= l <=> -ax <= b - l
+    l, h = low(Y, 1), high(Y, 1)
+    a = vec(W)
+    N = promote_type(eltype(l), eltype(b))
+    if eltype(a) != N
+        a = Vector{N}(a)
+    end
+    return HPolyhedron([HalfSpace(a, h - b[1]), HalfSpace(-a, b[1] - l)])
+end
+
+# specialization for HalfSpace
+function _backward_1D(Y::HalfSpace, W::AbstractMatrix, b::AbstractVector,
+                      ::PolyhedraBackward)
+    @assert dim(Y) == size(W, 1) == length(b) == 1
+    # if Y = cx <= d (normalized: x <= d/c), then X should have one constraint:
+    # ax + b <= d/c <=> ax <= d/c - b
+    a = vec(W)
+    offset = Y.b / Y.a[1] - b[1]
+    N = promote_type(eltype(a), typeof(offset))
+    if eltype(a) != N
+        a = Vector{N}(a)
+    end
+    if typeof(offset) != N
+        offset = N(offset)
+    end
+    return HalfSpace(a, offset)
+end
+
+# apply inverse affine map to n-dimensional Y
+function _backward_nD(Y::LazySet, W::AbstractMatrix, b::AbstractVector,
+                      ::PolyhedraBackward)
+    return _backward_affine_map(W, Y, b)
+end
+
+# apply inverse affine map to universe
+function backward(Y::Universe{N}, W::AbstractMatrix, b::AbstractVector,
+                  ::PolyhedraBackward) where {N}
+    @assert dim(Y) == size(W, 1) == length(b)
+    return Universe{N}(size(W, 2))
+end
+
+# apply inverse affine map to union of sets
+function backward(Y::UnionSetArray, W::AbstractMatrix, b::AbstractVector,
+                  algo::PolyhedraBackward)
+    return _backward_union(Y, W, b, algo)
+end
+
+# apply inverse leaky ReLU activation function
+function backward(Y::LazySet, act::LeakyReLU, algo::PolyhedraBackward)
+    return _backward_pwa(Y, act, algo)
+end
+
+function _backward_pwa(Y::LazySet{N}, act::ActivationFunction,
+                       ::PolyhedraBackward) where {N}
+    @assert is_polyhedral(Y) "expected a polyhedron, got $(typeof(Y))"
+
+    out = LazySet{N}[]
+    n = dim(Y)
+
+    for (Pj, αj) in pwa_partitioning(act, n, N)
+        # inverse affine map
+        αA, αb = αj
+        if iszero(αA)
+            # constant case
+            if αb ∈ Y
+                push!(out, Pj)
+            end
+        else
+            # injective case
+            R = affine_map_inverse(αA, Y, αb)
+            X = intersection(Pj, R)
+            push!(out, X)
+        end
+    end
+
+    filter!(!isempty, out)
+    return simplify_union(out; n=dim(Y), N=N)
+end
+
+# apply inverse ReLU activation function
+function backward(Y::LazySet, act::ReLU, ::PolyhedraBackward)
+    n = dim(Y)
+    if n == 1
+        X = _backward_1D(Y, act)
+    elseif n == 2
+        X = _backward_2D(Y, act)
+    else
+        X = _backward_nD(Y, act)
+    end
+    return simplify_set(X)
+end
+
+function _backward_1D(Y::LazySet{N}, ::ReLU) where {N}
+    l, h = extrema(Y, 1)
+    if !_leq(l, zero(N))  # l > 0
+        if isinf(h)
+            # positive but unbounded from above
+            return HalfSpace(N[-1], N(-l))
+        else
+            # positive and bounded from above, so ReLU⁻¹ = Id
+            return Y
+        end
+    elseif isinf(h)
+        # unbounded everywhere
+        return Universe{N}(1)
+    else
+        # bounded from above
+        return HalfSpace(N[1], N(h))
+    end
+end
+
+# more efficient special case for Interval
+function _backward_1D(Y::Interval{N}, ::ReLU) where {N}
+    if !_leq(min(Y), zero(N))
+        return Y
+    else
+        return HalfSpace(N[1], max(Y))
+    end
+end
+
+# apply inverse ReLU activation function to 2D polyhedron
+function _backward_2D(Y::LazySet{N}, ::ReLU) where {N}
+    @assert is_polyhedral(Y) "expected a polyhedron, got $(typeof(Y))"
+
+    out = LazySet{N}[]
+
+    # intersect with nonnegative quadrant
+    Q₊ = HPolyhedron([HalfSpace(N[-1, 0], zero(N)),
+                      HalfSpace(N[0, -1], zero(N))])
+    if Y ⊆ Q₊
+        Y₊ = Y
+    else
+        Y₊ = intersection(Y, Q₊)
+    end
+    if isempty(Y₊)  # pre-image is empty if image was not nonnegative
+        return EmptySet{N}(dim(Y))
+    end
+    if !_leq(high(Y₊, 1), zero(N)) && !_leq(high(Y₊, 2), zero(N))  # at least one positive point (assuming convexity)
+        push!(out, Y₊)
+    end
+
+    # intersect with x-axis
+    H₋x = HalfSpace(N[0, 1], zero(N))
+    Rx = intersection(Y₊, H₋x)
+    isempty_Rx = isempty(Rx)
+    if !isempty_Rx
+        _extend_relu_2d_xaxis!(out, Rx, H₋x, N)
+    end
+
+    # intersect with y-axis
+    H₋y = HalfSpace(N[1, 0], zero(N))
+    Ry = intersection(Y₊, H₋y)
+    isempty_Ry = isempty(Ry)
+    if !isempty_Ry
+        _extend_relu_2d_yaxis!(out, Ry, H₋y, N)
+    end
+
+    # if the origin is contained, the nonpositive quadrant is part of the solution
+    if !isempty_Rx && !isempty_Ry && N[0, 0] ∈ Y₊
+        Tz = HPolyhedron([H₋y, H₋x])
+        push!(out, Tz)
+    end
+
+    return simplify_union(out; n=dim(Y), N=N)
+end
+
+function _extend_relu_2d_xaxis!(out, R::AbstractPolyhedron, H₋, N)
+    h = high(R, 1)
+    if h <= zero(N)  # not relevant, case handled elsewhere
+        return
+    end
+    l = low(R, 1)
+    if isinf(h)  # upper-bound constraint redundant
+        T = HPolyhedron([H₋, HalfSpace(N[-1, 0], -l)])
+    else
+        T = HPolyhedron([H₋, HalfSpace(N[1, 0], h), HalfSpace(N[-1, 0], -l)])
+    end
+    push!(out, T)
+    return nothing
+end
+
+function _extend_relu_2d_yaxis!(out, R::AbstractPolyhedron, H₋, N)
+    h = high(R, 2)
+    if h <= zero(N)  # not relevant, case handled elsewhere
+        return
+    end
+    l = low(R, 2)
+    if isinf(h)  # upper-bound constraint redundant
+        T = HPolyhedron([H₋, HalfSpace(N[0, -1], -l)])
+    else
+        T = HPolyhedron([H₋, HalfSpace(N[0, 1], h), HalfSpace(N[0, -1], -l)])
+    end
+    push!(out, T)
+    return nothing
+end
+
+# apply inverse ReLU activation function to arbitrary polyhedron
+#
+# First compute the intersection Y₊ with the nonnegative orthant.
+# Use a bitvector v, where entry 1 means "nonnegative" and entry 0 means "0".
+# For instance, in 2D, v = (1, 1) stands for the positive orthant and v = (0, 1)
+# stands for "x = 0". For each orthant, the corresponding preimage is the
+# inverse linear map of Y₊ under Diagonal(v). Since this is a special case, it
+# maps a linear constraint cx <= d to a new constraint whose normal vector is
+# [c1v1, c2v2, ...], and since v is a bitvector, it acts as a filter.
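+#
+# Illustrative example (not from the original comment): for n = 2 and
+# v = (1, 0), i.e., the orthant x₁ >= 0, x₂ <= 0, the preimage contains all x
+# with Diagonal(v) * x = (x₁, 0) ∈ Y₊. A constraint 2x₁ + 3x₂ <= 6 of Y₊ thus
+# becomes 2x₁ + 0x₂ <= 6 (the coefficients at 0-entries of v are filtered out),
+# and the result is intersected with the orthant itself.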
+function _backward_nD(Y::LazySet{N}, ::ReLU) where {N}
+    @assert is_polyhedral(Y) "expected a polyhedron, got $(typeof(Y))"
+
+    out = LazySet{N}[]
+    n = dim(Y)
+
+    # intersect with nonnegative orthant
+    Q₊ = HPolyhedron([HalfSpace(SingleEntryVector(i, n, -one(N)), zero(N)) for i in 1:n])
+    Y₊ = intersection(Y, Q₊)
+    if isempty(Y₊)  # pre-image is empty if image was not nonnegative
+        return EmptySet{N}(dim(Y))
+    end
+
+    # find dimensions in which the set is positive (to save case distinctions)
+    skip = falses(n)
+    @inbounds for i in 1:n
+        if !_leq(low(Y₊, i), zero(N))
+            skip[i] = true
+        end
+    end
+    fix = trues(n)  # value at non-skip indices does not matter
+
+    for v in BitvectorIterator(skip, fix, false)
+        if !any(v)
+            # nonpositive orthant: case needs special treatment
+            if zeros(N, n) ∈ Y₊
+                Tz = HPolyhedron([HalfSpace(SingleEntryVector(i, n, one(N)), zero(N)) for i in 1:n])
+                push!(out, Tz)
+            end
+            continue
+        elseif all(v)
+            # nonnegative orthant: more efficient treatment of special case
+            # add if set contains at least one positive point
+            if all(!_leq(high(Y₊, i), zero(N)) for i in 1:n)
+                push!(out, Y₊)
+            end
+            # last iteration
+            break
+        end
+
+        # inverse linear map
+        R = _linear_map_inverse(v, Y₊)
+
+        # compute orthant corresponding to v
+        constraints = HalfSpace{N,SingleEntryVector{N}}[]
+        @inbounds for i in 1:n
+            if v[i]
+                push!(constraints, HalfSpace(SingleEntryVector(i, n, -one(N)), zero(N)))
+            else
+                push!(constraints, HalfSpace(SingleEntryVector(i, n, one(N)), zero(N)))
+            end
+        end
+        O = HPolyhedron(constraints)
+
+        # intersect preimage of Y with orthant
+        X = intersection(R, O)
+
+        push!(out, X)
+    end
+
+    filter!(!isempty, out)
+    return simplify_union(out; n=dim(Y), N=N)
+end
+
+# d is a vector representing a diagonal matrix
+function _linear_map_inverse(d::AbstractVector{<:Number}, P::LazySet)
+    constraints_P = constraints_list(P)
+    constraints_MP = LazySets._preallocate_constraints(constraints_P)
+    has_undefs = false
+    N = promote_type(eltype(d), eltype(P))
+    @inbounds for (i, c) in enumerate(constraints_P)
+        cinv = _linear_map_inverse_mult(d, c.a)
+        if iszero(cinv)
+            if zero(N) <= c.b
+                # constraint is redundant
+                has_undefs = true
+            else
+                # constraint is infeasible
+                return EmptySet{N}(length(cinv))
+            end
+        else
+            constraints_MP[i] = HalfSpace(cinv, c.b)
+        end
+    end
+    if has_undefs  # there were redundant constraints, so remove them
+        constraints_MP = [constraints_MP[i]
+                          for i in 1:length(constraints_MP)
+                          if isassigned(constraints_MP, i)]
+    end
+    if isempty(constraints_MP)
+        # in the current usage, each dimension is constrained and d is nonzero, so
+        # at least one constraint must remain
+        # COV_EXCL_START
+        return Universe{N}(length(d))
+        # COV_EXCL_STOP
+    end
+    return HPolyhedron(constraints_MP)
+end
+
+function _linear_map_inverse_mult(d::AbstractVector{<:Number}, a)
+    return [d[i] * a[i] for i in eachindex(a)]
+end
+
+function _linear_map_inverse_mult(d::AbstractVector{Bool}, a)
+    return [d[i] ? a[i] : zero(eltype(a)) for i in eachindex(a)]
+end
+
+# disambiguation
+for T in (:ReLU, :LeakyReLU)
+    @eval begin
+        function backward(Y::UnionSetArray, act::$T, algo::PolyhedraBackward)
+            return _backward_union(Y, act, algo)
+        end
+    end
+end
diff --git a/src/BackwardAlgorithms/backward_default.jl b/src/BackwardAlgorithms/backward_default.jl
new file mode 100644
index 0000000..5c07fc6
--- /dev/null
+++ b/src/BackwardAlgorithms/backward_default.jl
@@ -0,0 +1,117 @@
+# backpropagate y through network
+function backward(y, net::FeedforwardNetwork, algo::BackwardAlgorithm)
+    @inbounds for L in reverse(layers(net))
+        y = backward(y, L, algo)
+
+        # early termination check
+        if y isa EmptySet
+            y = EmptySet(dim_in(net))
+            break
+        end
+    end
+    return y
+end
+
+# backpropagate y through layer
+function backward(y, L::DenseLayerOp, algo::BackwardAlgorithm)
+    x = backward(y, L.activation, algo)  # apply inverse activation function
+    remove_constraints(algo, x) && remove_constraints!(x)
+    if x isa EmptySet
+        return x
+    end
+    w = backward(x, L.weights, L.bias, algo)  # apply inverse affine map
+    remove_constraints(algo, w) && remove_constraints!(w)
+    return w
+end
+
+remove_constraints!(::LazySet) = nothing
+
+function remove_constraints!(P::LazySets.HPoly)
+    m1 = length(P.constraints)
+    remove_redundant_constraints!(P)
+    m2 = length(P.constraints)
+    # println("$(m1 - m2)/$m1 constraints removed")
+    return nothing
+end
+
+# apply inverse affine map to Y
+function backward(Y, W::AbstractMatrix, b::AbstractVector, ::BackwardAlgorithm)
+    return _backward_affine_map(W, Y, b)
+end
+
+# try to invert the matrix
+function _backward_affine_map(W::AbstractMatrix, y::AbstractVector, b::AbstractVector)
+    return inv(W) * (y .- b)
+end
+
+function _backward_affine_map(W::AbstractMatrix, Y::LazySet, b::AbstractVector)
+    X = affine_map_inverse(W, Y, b)
+    return simplify_set(X)
+end
+
+# apply inverse affine map to a union of sets
+function backward(Y::UnionSetArray, W::AbstractMatrix, b::AbstractVector,
+                  algo::BackwardAlgorithm)
+    return _backward_union(Y, W, b, algo)
+end
+
+function _backward_union(Y::UnionSetArray{N}, W::AbstractMatrix,
+                         b::AbstractVector, algo::BackwardAlgorithm) where {N}
+    @assert dim(Y) == size(W, 1) == length(b)
+    out = []
+    for Yi in array(Y)
+        append_sets!(out, backward(Yi, W, b, algo))
+    end
+    filter!(!isempty, out)
+    return simplify_union(out; n=size(W, 2), N=N)
+end
+
+append_sets!(Xs, X::LazySet) = push!(Xs, X)
+append_sets!(Xs, X::UnionSetArray) = append!(Xs, array(X))
+
+# apply inverse piecewise-affine activation function to a union of sets
+# COV_EXCL_START
+for T in (:ReLU, :LeakyReLU)
+    @eval begin
+        function backward(Y::UnionSetArray, act::$T, algo::BackwardAlgorithm)
+            return _backward_union(Y, act, algo)
+        end
+    end
+end
+# COV_EXCL_STOP
+
+function _backward_union(Y::LazySet{N}, act::ActivationFunction,
+                         algo::BackwardAlgorithm) where {N}
+    out = []
+    for Yi in array(Y)
+        Xs = backward(Yi, act, algo)
+        if !(Xs isa EmptySet)
+            append_sets!(out, Xs)
+        end
+    end
+    return simplify_union(out; n=dim(Y), N=N)
+end
+
+function backward(y::AbstractVector, act::ActivationFunction, ::BackwardAlgorithm)
+    return _inverse(y, act)
+end
+
+_inverse(x::AbstractVector, act::ActivationFunction) = [_inverse(xi, act) for xi in x]
+_inverse(x::Number, ::ReLU) = x >= zero(x) ? x : zero(x)
+_inverse(x::Number, ::Sigmoid) = @. -log(1 / x - 1)
+_inverse(x::Number, act::LeakyReLU) = x >= zero(x) ? x : x / act.slope
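+# Worked examples (illustrative, not from the original file):
+#   _inverse(2.0, ReLU()) == 2.0
+#   _inverse(0.5, Sigmoid()) == 0.0        (since σ(0) = 0.5)
+#   _inverse(-0.2, LeakyReLU(0.1)) == -2.0 (since 0.1 * (-2.0) = -0.2)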
+
+# activation functions must be explicitly supported for sets
+function backward(X::LazySet, act::ActivationFunction, algo::BackwardAlgorithm)
+    throw(ArgumentError("activation function $act not supported by algorithm " *
+                        "$algo for set type $(typeof(X))"))
+end
+
+# disambiguation: apply inverse identity activation function to Y
+for T in (:AbstractVector, :LazySet, :UnionSetArray)
+    @eval begin
+        function backward(y::$T, ::Id, ::BackwardAlgorithm)
+            return y
+        end
+    end
+end
diff --git a/src/BackwardAlgorithms/simplify_sets.jl b/src/BackwardAlgorithms/simplify_sets.jl
new file mode 100644
index 0000000..7cb632e
--- /dev/null
+++ b/src/BackwardAlgorithms/simplify_sets.jl
@@ -0,0 +1,12 @@
+simplify_set(X::EmptySet) = X
+simplify_set(X::LazySet{N}) where {N} = isempty(X) ? EmptySet{N}(dim(X)) : X
+
+function simplify_union(sets::AbstractVector; n=1, N=Float64)
+    if length(sets) > 1
+        return UnionSetArray([X for X in sets])  # allocate set-specific array
+    elseif length(sets) == 1
+        return sets[1]
+    else
+        return EmptySet{N}(n)
+    end
+end
diff --git a/src/BidirectionalAlgorithms/BidirectionalAlgorithm.jl b/src/BidirectionalAlgorithms/BidirectionalAlgorithm.jl
new file mode 100644
index 0000000..1745e14
--- /dev/null
+++ b/src/BidirectionalAlgorithms/BidirectionalAlgorithm.jl
@@ -0,0 +1,6 @@
+"""
+    BidirectionalAlgorithm
+
+Abstract supertype of bidirectional algorithms.
+"""
+abstract type BidirectionalAlgorithm end
diff --git a/src/BidirectionalAlgorithms/BidirectionalAlgorithms.jl b/src/BidirectionalAlgorithms/BidirectionalAlgorithms.jl
new file mode 100644
index 0000000..1092e6a
--- /dev/null
+++ b/src/BidirectionalAlgorithms/BidirectionalAlgorithms.jl
@@ -0,0 +1,19 @@
+module BidirectionalAlgorithms
+
+using ..ForwardAlgorithms
+using ..ForwardAlgorithms: ForwardAlgorithm, _forward_store
+using ..BackwardAlgorithms
+using ..BackwardAlgorithms: BackwardAlgorithm
+using ControllerFormats
+using LazySets
+
+export bidirectional,
+       SimpleBidirectional,
+       PolyhedraBidirectional,
+       BoxBidirectional
+
+include("BidirectionalAlgorithm.jl")
+include("bidirectional_default.jl")
+include("SimpleBidirectional.jl")
+
+end  # module
diff --git a/src/BidirectionalAlgorithms/SimpleBidirectional.jl b/src/BidirectionalAlgorithms/SimpleBidirectional.jl
new file mode 100644
index 0000000..096f70e
--- /dev/null
+++ b/src/BidirectionalAlgorithms/SimpleBidirectional.jl
@@ -0,0 +1,40 @@
+"""
+    SimpleBidirectional{FA<:ForwardAlgorithm, BA<:BackwardAlgorithm} <: BidirectionalAlgorithm
+
+Simple bidirectional algorithm parametric in a forward and backward algorithm.
+
+### Fields
+
+- `fwd_algo` -- forward algorithm
+- `bwd_algo` -- backward algorithm
+"""
+struct SimpleBidirectional{FA<:ForwardAlgorithm,BA<:BackwardAlgorithm} <: BidirectionalAlgorithm
+    fwd_algo::FA
+    bwd_algo::BA
+end
+
+# getter of forward algorithm
+function fwd(algo::SimpleBidirectional)
+    return algo.fwd_algo
+end
+
+# getter of backward algorithm
+function bwd(algo::SimpleBidirectional)
+    return algo.bwd_algo
+end
+
+# intersection of forward and backward result with box algorithms
+function _fwd_bwd_intersection(::SimpleBidirectional{<:BoxForward,<:BoxBackward},
+                               X::LazySet, Y::LazySet)
+    return box_approximation(intersection(X, Y))
+end
+
+# convenience constructor that uses the polyhedra algorithms
+function PolyhedraBidirectional()
+    return SimpleBidirectional(ConcreteForward(), PolyhedraBackward())
+end
+
+# convenience constructor that uses the box algorithms
+function BoxBidirectional()
+    return SimpleBidirectional(BoxForward(), BoxBackward())
+end
diff --git a/src/BidirectionalAlgorithms/bidirectional_default.jl b/src/BidirectionalAlgorithms/bidirectional_default.jl
new file mode 100644
index 0000000..84f14b9
--- /dev/null
+++ b/src/BidirectionalAlgorithms/bidirectional_default.jl
@@ -0,0 +1,89 @@
+# propagate X forward and Y backward through network
+function bidirectional(X, Y, net::FeedforwardNetwork,
+                       algo::BidirectionalAlgorithm=SimpleBidirectional();
+                       get_intermediate_results::Bool=false)
+    # forward propagation
+    forward_results = _forward_store(X, net, fwd(algo))
+    if get_intermediate_results
+        intermediate_results = Vector{LazySet}(undef, length(forward_results) + 1)
+        @inbounds for k in eachindex(forward_results)
+            intermediate_results[k + 1] = forward_results[k][2]
+        end
+        fill_result = 1
+    end
+
+    bwd_algo = bwd(algo)
+    first_iteration = true
+    forward_result = undef
+
+    # backward propagation with intersection
+    @inbounds for k in length(net):-1:1
+        layer = layers(net)[k]
+        X_k = forward_results[k]
+
+        # take intersection with forward set
+        XY_after_activation = _fwd_bwd_intersection(algo, X_k[2], Y)
+        if get_intermediate_results
+            intermediate_results[k + 1] = XY_after_activation
+        end
+
+        if first_iteration
+            forward_result = XY_after_activation
+            first_iteration = false
+        end
+
+        # early termination (needed for compatibility)
+        if XY_after_activation isa EmptySet
+            Y = XY_after_activation
+            if get_intermediate_results
+                fill_result = k
+            end
+            break
+        end
+
+        # apply inverse activation function
+        Y_before_activation = backward(XY_after_activation, layer.activation,
+                                       bwd_algo)
+
+        # take intersection with forward set
+        XY_before_activation = _fwd_bwd_intersection(algo, X_k[1], Y_before_activation)
+
+        # early termination (needed for compatibility)
+        if XY_before_activation isa EmptySet
+            Y = XY_before_activation
+            if get_intermediate_results
+                fill_result = k
+            end
+            break
+        end
+
+        # apply inverse affine map
+        Y = backward(XY_before_activation, layer.weights, layer.bias, bwd_algo)
+    end
+
+    if Y isa EmptySet
+        # preimage is empty
+        backward_result = EmptySet{eltype(Y)}(dim(X))
+    else
+        # take intersection with initial set
+        backward_result = _fwd_bwd_intersection(algo, Y, X)
+    end
+
+    if get_intermediate_results
+        @inbounds for k in 2:fill_result
+            intermediate_results[k] = EmptySet{eltype(Y)}(dim(intermediate_results[k]))
+        end
+        @inbounds intermediate_results[1] = backward_result
+
+        # better output type
+        intermediate_results = [e for e in intermediate_results]
+    end
+
+    return get_intermediate_results ?
+           intermediate_results : (backward_result, forward_result)
+end
+
+# intersection of forward and backward result
+function _fwd_bwd_intersection(::BidirectionalAlgorithm, X::LazySet, Y::LazySet)
+    return intersection(X, Y)
+end
diff --git a/src/ForwardAlgorithms/BoxForward.jl b/src/ForwardAlgorithms/BoxForward.jl
new file mode 100644
index 0000000..1fdf490
--- /dev/null
+++ b/src/ForwardAlgorithms/BoxForward.jl
@@ -0,0 +1,46 @@
+"""
+    BoxForward{AMA<:ForwardAlgorithm} <: ForwardAlgorithm
+
+Forward algorithm that uses a box approximation for non-identity activations and
+applies the affine map according to the specified algorithm.
+
+### Fields
+
+- `affine_map_algorithm` -- algorithm to apply for affine maps
+"""
+struct BoxForward{AMA<:ForwardAlgorithm} <: ForwardAlgorithm
+    affine_map_algorithm::AMA
+end
+
+# default constructor: compute concrete affine map (which is a zonotope)
+function BoxForward()
+    return BoxForward(ConcreteForward())
+end
+
+# apply affine map according to the algorithm options
+function forward(X::LazySet, W::AbstractMatrix, b::AbstractVector, algo::BoxForward)
+    return forward(X, W, b, algo.affine_map_algorithm)
+end
+
+# apply ReLU activation function (exploits `Box(ReLU(X)) = ReLU(Box(X))`)
+function forward(X::LazySet, ::ReLU, ::BoxForward)
+    return rectify(box_approximation(X))
+end
+
+# apply monotonic activation function
+for ACT in (:Sigmoid, :Tanh)
+    @eval function forward(X::LazySet, act::$ACT, ::BoxForward)
+        l, h = extrema(X)
+        return Hyperrectangle(; low=act(l), high=act(h))
+    end
+end
+
+# apply leaky-ReLU activation function
+function forward(X::LazySet, act::LeakyReLU, ::BoxForward)
+    l, h = extrema(X)
+    if !(any(isinf, l) || any(isinf, h))
+        return Hyperrectangle(; low=act(l), high=act(h))
+    else
+        error("not implemented")
+    end
+end
diff --git a/src/ForwardAlgorithms/ConcreteForward.jl b/src/ForwardAlgorithms/ConcreteForward.jl
new file mode 100644
index 0000000..d9f024d
--- /dev/null
+++ b/src/ForwardAlgorithms/ConcreteForward.jl
@@ -0,0 +1,16 @@
+"""
+    ConcreteForward <: ForwardAlgorithm
+
+Forward algorithm that uses concrete set operations.
+"""
+struct ConcreteForward <: ForwardAlgorithm end
+
+# apply concrete affine map
+function forward(X::LazySet, W::AbstractMatrix, b::AbstractVector, ::ConcreteForward)
+    return affine_map(W, X, b)
+end
+
+# apply ReLU activation function
+function forward(X::LazySet, ::ReLU, ::ConcreteForward)
+    return concretize(rectify(X))
+end
diff --git a/src/ForwardAlgorithms/DeepZ.jl b/src/ForwardAlgorithms/DeepZ.jl
new file mode 100644
index 0000000..6ef48b1
--- /dev/null
+++ b/src/ForwardAlgorithms/DeepZ.jl
@@ -0,0 +1,90 @@
+"""
+    DeepZ <: ForwardAlgorithm
+
+Forward algorithm based on zonotopes for ReLU, sigmoid, and tanh activation
+functions from [1].
+
+[1]: Singh et al.: *Fast and Effective Robustness Certification*, NeurIPS 2018.
+""" +struct DeepZ <: ForwardAlgorithm end + +# apply affine map +function forward(Z, W::AbstractMatrix, b::AbstractVector, ::DeepZ) + return affine_map(W, Z, b) +end + +# apply ReLU activation function (implemented in LazySets) +function forward(Z::AbstractZonotope, ::ReLU, ::DeepZ) + return overapproximate(Rectification(Z), Zonotope) +end + +# apply sigmoid activation function +function forward(Z::AbstractZonotope, ::Sigmoid, ::DeepZ) + f(x) = _sigmoid_DeepZ(x) + f′(x) = _sigmoid2_DeepZ(x) + return _overapproximate_zonotope(Z, f, f′) +end + +# apply tanh activation function +function forward(Z::AbstractZonotope, ::Tanh, ::DeepZ) + f(x) = tanh(x) + f′(x) = 1 - tanh(x)^2 + return _overapproximate_zonotope(Z, f, f′) +end + +function _sigmoid_DeepZ(x::Number) + ex = exp(x) + return ex / (1 + ex) +end + +function _sigmoid2_DeepZ(x::Number) + ex = exp(x) + return ex / (1 + ex)^2 +end + +function _overapproximate_zonotope(Z::AbstractZonotope{N}, f, f′) where {N} + c = copy(center(Z)) + G = copy(genmat(Z)) + n, m = size(G) + row_idx = Vector{Int}() + μ_idx = Vector{N}() + + @inbounds for i in 1:n + lx, ux = low(Z, i), high(Z, i) + uy = f(ux) + + if _isapprox(lx, ux) + c[i] = uy + for j in 1:m + G[i, j] = zero(N) + end + else + ly = f(lx) + λ = min(f′(lx), f′(ux)) + μ₁ = (uy + ly - λ * (ux + lx)) / 2 + # Note: there is a typo in the paper (missing parentheses) + μ₂ = (uy - ly - λ * (ux - lx)) / 2 + c[i] = c[i] * λ + μ₁ + for j in 1:m + G[i, j] = G[i, j] * λ + end + push!(row_idx, i) + push!(μ_idx, μ₂) + end + end + + q = length(row_idx) + if q >= 1 + Gnew = zeros(N, n, q) + j = 1 + @inbounds for i in row_idx + Gnew[i, j] = μ_idx[j] + j += 1 + end + Gout = hcat(G, Gnew) + else + Gout = G + end + + return Zonotope(c, remove_zero_columns(Gout)) +end diff --git a/src/ForwardAlgorithms/DefaultForward.jl b/src/ForwardAlgorithms/DefaultForward.jl new file mode 100644 index 0000000..a608c59 --- /dev/null +++ b/src/ForwardAlgorithms/DefaultForward.jl @@ -0,0 +1,35 @@ +""" + DefaultForward <: ForwardAlgorithm + +Default forward algorithm, which works for vector-like inputs. +""" +struct DefaultForward <: ForwardAlgorithm end + +# propagating set through network not supported (exception below) +function forward(::LazySet, ::FeedforwardNetwork, algo::DefaultForward) + throw(ArgumentError("cannot apply $(typeof(algo)) to a set input")) +end + +# propagate singleton through network +function forward(X::AbstractSingleton, net::FeedforwardNetwork, + algo::DefaultForward=DefaultForward()) + x = element(X) + y = forward(x, net, algo) + return Singleton(y) +end + +function forward(X::AbstractSingleton, net::FeedforwardNetwork, ::ForwardAlgorithm) + return forward(X, net, DefaultForward()) +end + +# propagate singleton through network and store all intermediate results +function _forward_store(X::AbstractSingleton, net::FeedforwardNetwork, + algo::DefaultForward=DefaultForward()) + x = element(X) + results = _forward_store(x, net, algo) + return [(Singleton(y), Singleton(z)) for (y, z) in results] +end + +function _forward_store(X::AbstractSingleton, net::FeedforwardNetwork, ::ForwardAlgorithm) + return _forward_store(X, net, DefaultForward()) +end diff --git a/src/ForwardAlgorithms/ForwardAlgorithm.jl b/src/ForwardAlgorithms/ForwardAlgorithm.jl new file mode 100755 index 0000000..ca91015 --- /dev/null +++ b/src/ForwardAlgorithms/ForwardAlgorithm.jl @@ -0,0 +1,6 @@ +""" + ForwardAlgorithm + +Abstract supertype of forward algorithms. 
+""" +abstract type ForwardAlgorithm end diff --git a/src/ForwardAlgorithms/ForwardAlgorithms.jl b/src/ForwardAlgorithms/ForwardAlgorithms.jl new file mode 100644 index 0000000..9c27919 --- /dev/null +++ b/src/ForwardAlgorithms/ForwardAlgorithms.jl @@ -0,0 +1,31 @@ +module ForwardAlgorithms + +using ControllerFormats +using LazySets +using LazySets: remove_zero_columns +using ReachabilityBase.Comparison: _isapprox +using ReachabilityBase.Require: require +using Requires + +export forward, + DefaultForward, + ConcreteForward, + LazyForward, + BoxForward, + SplitForward, + DeepZ, + Verisig + +include("ForwardAlgorithm.jl") +include("DefaultForward.jl") +include("forward_default.jl") +include("ConcreteForward.jl") +include("LazyForward.jl") +include("BoxForward.jl") +include("SplitForward.jl") +include("DeepZ.jl") +include("Verisig.jl") + +include("init.jl") + +end # module diff --git a/src/ForwardAlgorithms/LazyForward.jl b/src/ForwardAlgorithms/LazyForward.jl new file mode 100644 index 0000000..51cb870 --- /dev/null +++ b/src/ForwardAlgorithms/LazyForward.jl @@ -0,0 +1,11 @@ +""" + LazyForward <: ForwardAlgorithm + +Forward algorithm that uses lazy set operations. +""" +struct LazyForward <: ForwardAlgorithm end + +# apply lazy ReLU activation function +function forward(X::LazySet, ::ReLU, ::LazyForward) + return Rectification(X) +end diff --git a/src/ForwardAlgorithms/SplitForward.jl b/src/ForwardAlgorithms/SplitForward.jl new file mode 100644 index 0000000..e47a6c4 --- /dev/null +++ b/src/ForwardAlgorithms/SplitForward.jl @@ -0,0 +1,42 @@ +""" + SplitForward{S<:ForwardAlgorithm,FS,FM} <: ForwardAlgorithm + +Forward algorithm that splits a set, then computes the image under the neural +network, and finally merges the resulting sets again, all according to a policy. + +### Fields + +- `algo` -- algorithm to be applied between splitting and merging +- `split_function` -- function for splitting +- `merge_function` -- function for merging +""" +struct SplitForward{FA<:ForwardAlgorithm,FS,FM} <: ForwardAlgorithm + algo::FA + split_function::FS + merge_function::FM +end + +# default constructor +function SplitForward(algo::ForwardAlgorithm) + # box approximation and split in two sets per dimension + split_fun = X -> split(box_approximation(X), 2 * ones(Int, dim(X))) + # box approximation of the union + merge_fun = X -> box_approximation(X) + return SplitForward(algo, split_fun, merge_fun) +end + +# split X, propagate sets through network, and merge +function forward(X::LazySet, net::FeedforwardNetwork, algo::SplitForward) + X_split = algo.split_function(X) + Y_union = UnionSetArray() + for X0 in X_split + Y = forward(X0, net, algo.algo) + push!(array(Y_union), Y) + end + return algo.merge_function(Y_union) +end + +# disambiguation for singleton +function forward(X::AbstractSingleton, net::FeedforwardNetwork, ::SplitForward) + return forward(X, net, DefaultForward()) +end diff --git a/src/ForwardAlgorithms/Verisig.jl b/src/ForwardAlgorithms/Verisig.jl new file mode 100644 index 0000000..824414e --- /dev/null +++ b/src/ForwardAlgorithms/Verisig.jl @@ -0,0 +1,94 @@ +""" + Verisig{R} <: ForwardAlgorithm + +Forward algorithm for sigmoid and tanh activation functions from [1]. + +### Fields + +- `algo` -- reachability algorithm of type `TMJets` + +### Notes + +The implementation is known to be unsound in some cases. + +The implementation currently only supports neural networks with a single hidden +layer. 
+ +[1] Ivanov et al.: *Verisig: verifying safety properties of hybrid systems with +neural network controllers*, HSCC 2019. +""" +struct Verisig{R} <: ForwardAlgorithm + algo::R +end + +# default constructor +function Verisig() + require(@__MODULE__, :ReachabilityAnalysis; fun_name="Verisig") + + return Verisig(_Verisig_default_algorithm()) +end + +function forward(X::LazySet, net::FeedforwardNetwork, algo::Verisig) + require(@__MODULE__, :ReachabilityAnalysis; fun_name="forward") + + return _forward(X, net, algo) +end + +# disambiguation for singleton +function forward(X::AbstractSingleton, net::FeedforwardNetwork, ::Verisig) + return forward(X, net, DefaultForward()) +end + +function load_Verisig() + return quote + using .ReachabilityAnalysis: TMJets, IVP, BlackBoxContinuousSystem, solve, evaluate, + TaylorModelN + + function _Verisig_default_algorithm() + return TMJets(; abstol=1e-14, orderQ=2, orderT=6) + end + + function _forward(X::LazySet, net::FeedforwardNetwork, algo::Verisig) + @assert algo.algo isa TMJets "reachability algorithm of type " * + "$(typeof(algo.algo)) is not supported" + xᴾ = X + + for layer in layers(net) + W = layer.weights + b = layer.bias + m = length(layer) + act = layer.activation + + xᴶ = W * xᴾ + b # affine map + if act isa Id + xᴾ = xᴶ + continue + elseif act isa Sigmoid + xᴾ = Singleton(fill(0.5, m)) + act! = _Verisig_sigmoid! + elseif act isa Tanh + xᴾ = Singleton(fill(0.0, m)) + act! = _Verisig_tanh! + else + throw(ArgumentError("unsupported activation function: $act")) + end + X0 = _cartesian_product(xᴶ, xᴾ) + ivp = IVP(BlackBoxContinuousSystem(act!, 2 * m), X0) + sol = solve(ivp; tspan=(0.0, 1.0), alg=algo.algo) + # obtain final reach set (Vector{TaylorModelN}) + xᴾ = evaluate(sol.F.Xk[end], 1.0)[(m + 1):(2 * m)] + end + return xᴾ + end + + function _cartesian_product(X::AffineMap, Y::Singleton) + return X × Y + end + + function _cartesian_product(X::Vector{<:TaylorModelN}, Y::Singleton) + # not implemented + return error("the Verisig algorithm for multiple hidden layers " * + "is not implemented yet") + end + end # quote +end # function load_Verisig() diff --git a/src/ForwardAlgorithms/forward_default.jl b/src/ForwardAlgorithms/forward_default.jl new file mode 100644 index 0000000..8303240 --- /dev/null +++ b/src/ForwardAlgorithms/forward_default.jl @@ -0,0 +1,53 @@ +# propagate x through network +function forward(x, net::FeedforwardNetwork, algo::ForwardAlgorithm=DefaultForward()) + for L in layers(net) + x = forward(x, L, algo) + end + return x +end + +# propagate x through network and store all intermediate results +function _forward_store(x, net::FeedforwardNetwork, algo::ForwardAlgorithm) + results = Vector{Tuple}() + for L in layers(net) + y, z = _forward_intermediate(x, L, algo) + push!(results, (y, z)) + x = z + end + return results +end + +# propagate x through layer +function forward(x, L::DenseLayerOp, algo::ForwardAlgorithm) + y = forward(x, L.weights, L.bias, algo) # apply affine map + z = forward(y, L.activation, algo) # apply activation function + return z +end + +# propagate x through layer and return all intermediate results +function _forward_intermediate(x, L::DenseLayerOp, algo::ForwardAlgorithm) + y = forward(x, L.weights, L.bias, algo) # apply affine map + z = forward(y, L.activation, algo) # apply activation function + return y, z +end + +# apply affine map +function forward(x, W::AbstractMatrix, b::AbstractVector, ::ForwardAlgorithm) + return W * x + b +end + +# apply activation function +function forward(x, 
act::ActivationFunction, ::ForwardAlgorithm) + return act(x) +end + +# activation functions must be explicitly supported for sets +function forward(X::LazySet, act::ActivationFunction, algo::ForwardAlgorithm) + throw(ArgumentError("activation function $act not supported by algorithm " * + "$algo for set type $(typeof(X))")) +end + +# identity activation is automatically supported for sets +function forward(X::LazySet, ::Id, ::ForwardAlgorithm) + return X +end diff --git a/src/ForwardAlgorithms/init.jl b/src/ForwardAlgorithms/init.jl new file mode 100644 index 0000000..2e4e29b --- /dev/null +++ b/src/ForwardAlgorithms/init.jl @@ -0,0 +1,6 @@ +# optional dependencies +function __init__() + @require ReachabilityAnalysis = "1e97bd63-91d1-579d-8e8d-501d2b57c93f" begin + include("init_ReachabilityAnalysis.jl") + end +end diff --git a/src/ForwardAlgorithms/init_ReachabilityAnalysis.jl b/src/ForwardAlgorithms/init_ReachabilityAnalysis.jl new file mode 100644 index 0000000..3058110 --- /dev/null +++ b/src/ForwardAlgorithms/init_ReachabilityAnalysis.jl @@ -0,0 +1,38 @@ +# COV_EXCL_START +using .ReachabilityAnalysis: @taylorize, TaylorIntegration, TaylorN, Taylor1 + +# Verisig helper functions (need to be defined in this separate file because of +# the macro) + +# Eq. (4)-(6) +# d(σ(x))/dx = σ(x) * (1 - σ(x)) +# g(t, x) = σ(tx) = 1 / (1 + exp(-tx)) +# dg(t, x)/dt = g'(t, x) = x * g(t, x) * (1 - g(t, x)) +@taylorize function _Verisig_sigmoid!(dx, x, p, t) + n = div(length(x), 2) + for i in 1:n + xᴶ = x[i] + xᴾ = x[n + i] + dx[i] = zero(xᴶ) + dx[n + i] = xᴶ * (xᴾ - xᴾ^2) + end + return dx +end + +# Footnote 3 +# d(tanh(x))/dx = 1 - tanh(x)^2 +# g(t, x) = tanh(tx) +# dg(t, x)/dt = g'(t, x) = x * (1 - g(t, x)^2) +@taylorize function _Verisig_tanh!(dx, x, p, t) + n = div(length(x), 2) + for i in 1:n + xᴶ = x[i] + xᴾ = x[n + i] + dx[i] = zero(xᴶ) + dx[n + i] = xᴶ * (1 - xᴾ^2) + end + return dx +end + +eval(load_Verisig()) +# COV_EXCL_STOP diff --git a/src/JuliaReachTemplatePkg.jl b/src/JuliaReachTemplatePkg.jl deleted file mode 100644 index 76312bc..0000000 --- a/src/JuliaReachTemplatePkg.jl +++ /dev/null @@ -1,7 +0,0 @@ -module JuliaReachTemplatePkg - -function foo() - return println("hello world") -end - -end # module diff --git a/src/NeuralNetworkReachability.jl b/src/NeuralNetworkReachability.jl new file mode 100644 index 0000000..4b06b6c --- /dev/null +++ b/src/NeuralNetworkReachability.jl @@ -0,0 +1,14 @@ +module NeuralNetworkReachability + +using Reexport + +include("ForwardAlgorithms/ForwardAlgorithms.jl") +@reexport using .ForwardAlgorithms + +include("BackwardAlgorithms/BackwardAlgorithms.jl") +@reexport using .BackwardAlgorithms + +include("BidirectionalAlgorithms/BidirectionalAlgorithms.jl") +@reexport using .BidirectionalAlgorithms + +end # module diff --git a/test/AISoLA2023/BoxAffineMap.jl b/test/AISoLA2023/BoxAffineMap.jl new file mode 100644 index 0000000..3dd4ebb --- /dev/null +++ b/test/AISoLA2023/BoxAffineMap.jl @@ -0,0 +1,8 @@ +using NeuralNetworkReachability.ForwardAlgorithms: ForwardAlgorithm +import NeuralNetworkReachability.ForwardAlgorithms: forward + +struct BoxAffineMap <: ForwardAlgorithm end + +function forward(X::LazySet, W::AbstractMatrix, b::AbstractVector, ::BoxAffineMap) + return box_approximation(W * X + b) +end diff --git a/test/AISoLA2023/forward_backward.jl b/test/AISoLA2023/forward_backward.jl new file mode 100644 index 0000000..d5f5582 --- /dev/null +++ b/test/AISoLA2023/forward_backward.jl @@ -0,0 +1,81 @@ +using LazySets: center + +function propagate(X, Z, net; 
k=4, Y=BallInf(0.5 * ones(dim_out(net.layers[1])), 0.5)) + algo = SimpleBidirectional(BoxForward(BoxAffineMap()), BoxBackward()) + Xs = [] + Ys = [] + Zs = [] + push!(Xs, X) + push!(Ys, Y) + push!(Zs, Z) + + for i in 1:k + Xi, Yi, Zi = bidirectional(X, Z, net, algo; get_intermediate_results=true) + push!(Xs, Xi) + push!(Ys, Yi) + push!(Zs, Zi) + X = Xi + Z = Zi + if isempty(X) || isempty(Z) + break + end + end + + return (Xs, Ys, Zs) +end + +function extract_bounds(Xs, Ys, Zs) + ε = 0.01 + b1 = [isempty(X) ? (k > 1 ? (center(Xs[k - 1], 1) + ε, center(Xs[k - 1], 1) - ε) : (0.5, 0.5)) : + (low(X, 1), high(X, 1)) for (k, X) in enumerate(Xs)] + b2 = [isempty(X) ? (k > 1 ? (center(Xs[k - 1], 2) + ε, center(Xs[k - 1], 2) - ε) : (0.5, 0.5)) : + (low(X, 2), high(X, 2)) for (k, X) in enumerate(Xs)] + b3 = [isempty(Y) ? (k > 1 ? (center(Ys[k - 1], 1) + ε, center(Ys[k - 1], 1) - ε) : (0.5, 0.5)) : + (low(Y, 1), high(Y, 1)) for (k, Y) in enumerate(Ys)] + b4 = [isempty(Y) ? (k > 1 ? (center(Ys[k - 1], 2) + ε, center(Ys[k - 1], 2) - ε) : (0.5, 0.5)) : + (low(Y, 2), high(Y, 2)) for (k, Y) in enumerate(Ys)] + b5 = [isempty(Z) ? (k > 1 ? (center(Zs[k - 1], 1) + ε, center(Zs[k - 1], 1) - ε) : (0.5, 0.5)) : + (low(Z, 1), high(Z, 1)) for (k, Z) in enumerate(Zs)] + return b1, b2, b3, b4, b5 +end + +# neural network +W1 = [-4.60248 4.74295; + -3.19378 2.90011] +b1 = [2.74108, + -1.49695] +l1 = DenseLayerOp(W1, b1, Sigmoid()) +W2 = [-4.57199 4.64925;] +b2 = [2.10176] +l2 = DenseLayerOp(W2, b2, Sigmoid()) +net = FeedforwardNetwork([l1, l2]) + +# low-high domain +dom1 = cartesian_product(Interval(0.0, 0.2), Interval(0.8, 1.0)) +codom1 = Interval(0.0, 1.0) +Xs1, Ys1, Zs1 = propagate(dom1, codom1, net) +bounds1 = extract_bounds(Xs1, Ys1, Zs1) + +# low-low domain +dom2 = cartesian_product(Interval(0.0, 0.2), Interval(0.0, 0.2)) +codom2 = Interval(0.0, 1.0) +Xs2, Ys2, Zs2 = propagate(dom2, codom2, net) +bounds2 = extract_bounds(Xs2, Ys2, Zs2) + +# high-full domain + high codomain +dom3 = cartesian_product(Interval(0.8, 1.0), Interval(0.0, 1.0)) +codom3 = Interval(0.5, 1.0) +Xs3, Ys3, Zs3 = propagate(dom3, codom3, net) +bounds3 = extract_bounds(Xs3, Ys3, Zs3) + +# high-full domain + low codomain +dom4 = cartesian_product(Interval(0.8, 1.0), Interval(0.0, 1.0)) +codom4 = Interval(0.0, 0.5) +Xs4, Ys4, Zs4 = propagate(dom4, codom4, net) +bounds4 = extract_bounds(Xs4, Ys4, Zs4) + +# high-high domain + high codomain +dom5 = cartesian_product(Interval(0.8, 1.0), Interval(0.8, 1.0)) +codom5 = Interval(0.5, 1.0) +Xs5, Ys5, Zs5 = propagate(dom5, codom5, net) +bounds5 = extract_bounds(Xs5, Ys5, Zs5) diff --git a/test/AISoLA2023/leaky_relu.jl b/test/AISoLA2023/leaky_relu.jl new file mode 100644 index 0000000..8bf12e3 --- /dev/null +++ b/test/AISoLA2023/leaky_relu.jl @@ -0,0 +1,55 @@ +using NeuralNetworkReachability.BackwardAlgorithms: _linear_map_inverse + +W1 = [0.30 0.53 + 0.77 0.42] +b1 = [0.43 + -0.42] +l1 = DenseLayerOp(W1, b1, LeakyReLU(0.01)) +W2 = [0.17 -0.07 + 0.71 -0.06] +b2 = [-0.01 + 0.49] +l2 = DenseLayerOp(W2, b2, LeakyReLU(0.02)) +W3 = [0.35 0.17 + -0.04 0.08] +b3 = [0.03 + 0.17] +l3 = DenseLayerOp(W3, b3, Id()) +layers = [l1, l2, l3] +net = FeedforwardNetwork(layers) + +dom = BallInf([0.5, 0.5], 0.5) + +Y = forward(dom, net, BoxForward(BoxAffineMap())) + +# box approximation +algo = BoxBackward() +X3a = box_approximation(backward(Y, l3, algo)) +X2a = box_approximation(backward(X3a, l2, algo)) +X1a = backward(X2a, l1, algo) + +# polyhedral computation +quadrants = [HPolyhedron([HalfSpace([-1.0, 0], 0.0), 
HalfSpace([0, -1.0], 0.0)]), + HPolyhedron([HalfSpace([-1.0, 0], 0.0), HalfSpace([0, 1.0], 0.0)]), + HPolyhedron([HalfSpace([1.0, 0], 0.0), HalfSpace([0, -1.0], 0.0)]), + HPolyhedron([HalfSpace([1.0, 0], 0.0), HalfSpace([0, 1.0], 0.0)])] + +# manual implementation +algo = PolyhedraBackward() +X3b = backward(Y, l3, algo) +X2P = filter!(!isempty, [intersection(X3b, Q) for Q in quadrants]) +X2A = UnionSetArray([X2P[1], # _linear_map_inverse([1.0, 1.0], X2P[1]), # identity + _linear_map_inverse([l2.activation.slope, 1.0], X2P[2])]) +X2b = backward(X2A, l2.weights, l2.bias, algo) +X1P = filter!(!isempty, array(flatten(UnionSetArray([intersection(X2b, Q) for Q in quadrants])))) +X1A = UnionSetArray([X1P[1], # _linear_map_inverse([1.0, 1.0], X1P[1]), # identity + X1P[2], # _linear_map_inverse([1.0, 1.0], X1P[2]), # identity + _linear_map_inverse([1.0, l1.activation.slope], X1P[3]), + _linear_map_inverse([l1.activation.slope, l1.activation.slope], X1P[4])]) +X1b = backward(X1A, l1.weights, l1.bias, algo) + +# algorithm +algo = PolyhedraBackward() +X1c = backward(Y, net, algo) + +@test isequivalent(X1b, X1c) diff --git a/test/AISoLA2023/motivation.jl b/test/AISoLA2023/motivation.jl new file mode 100644 index 0000000..b6586e1 --- /dev/null +++ b/test/AISoLA2023/motivation.jl @@ -0,0 +1,73 @@ +############# +# Example 1 # +############# + +W = [-0.46 0.32;] +b = [2.0] +Y = Interval(2.0, 3.0) +algo = PolyhedraBackward() + +X = backward(Y, W, b, algo) + +############# +# Example 3 # +############# + +Y = X + +X = backward(Y, ReLU(), algo) + +@test length(X) == 3 # one set is empty +X₁ = X[1] +X₂ = X[2] +X₃ = ∅(2) +X₄ = X[3] + +############# +# Example 4 # +############# + +# neural network + +W1 = [0.30 0.53 + 0.77 0.42] +b1 = [0.43 + -0.42] +l1 = DenseLayerOp(W1, b1, ReLU()) +W2 = [0.17 -0.07 + 0.71 -0.06] +b2 = [-0.01 + 0.49] +l2 = DenseLayerOp(W2, b2, ReLU()) +W3 = [0.35 0.17 + -0.04 0.08] +b3 = [0.03 + 0.17] +l3 = DenseLayerOp(W3, b3, Id()) +layers = [l1, l2, l3] +net = FeedforwardNetwork(layers) + +# samples + +dom = BallInf([0.5, 0.5], 0.5) +seed = 100 +x = sample(dom, 300; seed=seed) +y = net.(x) +x1 = [x[i] for i in eachindex(x) if y[i][1] >= y[i][2]] +x2 = [x[i] for i in eachindex(x) if y[i][1] < y[i][2]] +y1 = [yi for yi in y if yi[1] >= yi[2]] +y2 = [yi for yi in y if yi[1] < yi[2]] + +# forward image under domain + +Y = forward(dom, net, ConcreteForward()) + +# backward image under subset y2 >= y1 + +algo = PolyhedraBackward() +Y1 = HalfSpace([1.0, -1.0], 0.0) +Y2 = backward(Y1, l3, algo) +Y3 = backward(Y2, ReLU(), algo) +Y4 = backward(Y3, W2, b2, algo) +Y5 = backward(Y4, ReLU(), algo) +Y6 = backward(Y5, W1, b1, algo) diff --git a/test/AISoLA2023/parabola.jl b/test/AISoLA2023/parabola.jl new file mode 100644 index 0000000..b766edb --- /dev/null +++ b/test/AISoLA2023/parabola.jl @@ -0,0 +1,31 @@ +actual(x) = x^2 / 20 +dom = Interval(-20.0, 20.0) +codom = Interval(0.0, 20.0) + +net = read_POLAR(joinpath(@__DIR__, "parabola.network")) +algo = PolyhedraBackward() +N = 500 +seed = 0 +s = sample(dom, N; seed=seed) +x_train = hcat(sort!(Float32.(getindex.(s, 1)))...) 
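+# ground-truth outputs for the sorted samples; the POLAR network read above
+# approximates `actual` on `dom`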
+y_train = actual.(x_train) +arr_train = vec([Singleton([xi, yi]) for (xi, yi) in zip(x_train, y_train)]) +x_net = [[x] for k in 1:N for x in x_train[1, k]] # requires different type +y_net = net.(x_net) +arr_net = vec([Singleton([xi[1], yi[1]]) for (xi, yi) in zip(x_net, y_net)]) + +for k in 1:20 + local Y = Interval(k - 1, k) + local preimage = backward(Y, net, algo) +end + +# preimage of [100, 105] +Y = Interval(100, 105) +preimage = backward(Y, net, algo) +for (i, X) in enumerate(array(preimage)) + Xi = convert(Interval, X) +end + +# preimage of [-1, 0] +Y = HalfSpace([1.0], 0.0) +preimage = backward(Y, net, algo) diff --git a/test/AISoLA2023/parabola.network b/test/AISoLA2023/parabola.network new file mode 100644 index 0000000..fc401fe --- /dev/null +++ b/test/AISoLA2023/parabola.network @@ -0,0 +1,32 @@ +1 +1 +2 +3 +3 +ReLU +ReLU +Affine +0.6323467493057251 +0.06393361836671829 +0.17108699679374695 +-2.2501626014709473 +-0.7978671789169312 +-2.274787425994873 +0.7802020311355591 +2.174086570739746 +0.38730230927467346 +-2.416353702545166 +-0.7222397923469543 +-0.7464914321899414 +1.0229864120483398 +-1.228379726409912 +-0.13510894775390625 +-0.6387206315994263 +-0.31919407844543457 +0.0 +1.917153000831604 +1.0097975730895996 +0.8264592289924622 +0.33117377758026123 +0.0 +1.0 diff --git a/test/BackwardAlgorithms/PartitioningLeakyReLU.jl b/test/BackwardAlgorithms/PartitioningLeakyReLU.jl new file mode 100644 index 0000000..cb2ddbc --- /dev/null +++ b/test/BackwardAlgorithms/PartitioningLeakyReLU.jl @@ -0,0 +1,23 @@ +@testset "PartitioningLeakyReLU" begin + using NeuralNetworkReachability.BackwardAlgorithms: pwa_partitioning, PartitioningLeakyReLU + + # pwa_partitioning + @test pwa_partitioning(ReLU(), 3, Float32) == PartitioningLeakyReLU{Float32}(3, 0.0f0) + @test pwa_partitioning(LeakyReLU(0.1), 3, Float32) == PartitioningLeakyReLU{Float32}(3, 0.1f0) + + # PartitioningLeakyReLU + P = PartitioningLeakyReLU(2, 0.1) + @test length(P) == 4 + + ev = [(HPolyhedron([HalfSpace([-1.0, 0.0], 0.0), HalfSpace([0.0, -1.0], 0.0)]), + ([1.0 0; 0 1], [0.0, 0])), + (HPolyhedron([HalfSpace([-1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 0.0)]), + ([1 0; 0 0.1], [0.0, 0])), + (HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, -1.0], 0.0)]), + ([0.1 0; 0 1], [0.0, 0])), + (HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 0.0)]), + ([0.1 0; 0 0.1], [0.0, 0]))] + rv = collect(P) + @test length(rv) == 4 && length(unique(rv)) == 4 + @test ev[1] ∈ rv && ev[2] ∈ rv && ev[3] ∈ rv && ev[4] ∈ rv +end diff --git a/test/BackwardAlgorithms/backward.jl b/test/BackwardAlgorithms/backward.jl new file mode 100755 index 0000000..9cab768 --- /dev/null +++ b/test/BackwardAlgorithms/backward.jl @@ -0,0 +1,408 @@ +@testset "PolyhedraBackward algorithm" begin + # invalid input: 2D but not a polyhedron or union + Y = Ball2([-1.0, 1.0], 1.0) + @test_throws AssertionError backward(Y, ReLU(), PolyhedraBackward()) +end + +@testset "Backward affine map" begin + # all algorithms + for algo in (BoxBackward(), PolyhedraBackward()) + # invalid input: incompatible matrix/vector dimensions in affine map + Y = Singleton([1.0, 1.0]) + @test_throws AssertionError backward(Y, rand(2, 3), rand(1), algo) + + # union + W = hcat([1.0]) + b = [1.0] + Y = UnionSetArray([Singleton([2.0]), Singleton([3.0])]) + @test UnionSetArray([Singleton([1.0]), Singleton([2.0])]) ⊆ backward(Y, W, b, algo) + end + + # exact algorithms + for algo in (PolyhedraBackward(),) + # 1D affine map + W = hcat([2.0]) + b = [1.0] + x = [1.0] + X = Singleton(x) 
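+        # round trip: Y is the exact affine image of X, so an exact backward
+        # algorithm must recover the preimage X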
+ Y = affine_map(W, X, b) + y = element(Y) + @test backward(y, W, b, algo) == x + @test isequivalent(backward(Y, W, b, algo), X) + # non-polytope + Y = convert(HPolyhedron, Y) + @test isequivalent(backward(Y, W, b, algo), X) + # half-space + Y = HalfSpace([1.0], 2.0) + X = HalfSpace([1.0], 0.5) + @test isequivalent(backward(Y, W, b, algo), X) + # mixed numeric types + @test isequivalent(backward(HalfSpace([1.0f0], 2.0f0), W, [1.0f0], algo), X) + @test isequivalent(backward(Y, Float32.(W), b, algo), X) + + # 2D affine map + W = hcat([2.0 3.0; -1.0 -2.0]) + b = [1.0, -2.0] + X = Singleton([1.0, 2.0]) + Y = affine_map(W, X, b) + @test isequivalent(backward(Y, W, b, algo), X) + + # 1D-2D affine map + W = hcat([2.0; -1.0]) + b = [1.0, -2.0] + X = Singleton([1.0]) + Y = affine_map(W, X, b) + @test isequivalent(backward(Y, W, b, algo), X) + + # 2D-1D affine map + W = hcat([2.0 3.0]) + b = [1.0] + X = Singleton([1.0, 2.0]) + Y = affine_map(W, X, b) + X2 = backward(Y, W, b, algo) + @test X ⊆ X2 + # special case: Interval output + W = hcat([2.0 3.0]) + b = [1.0] + X = LineSegment([1.0, 2.0], [3.0, 4.0]) + Y = convert(Interval, affine_map(W, X, b)) + X2 = backward(Y, W, b, algo) + H1 = HalfSpace([2.0, 3.0], 18.0) + H2 = HalfSpace([-2.0, -3.0], -8.0) + @test X ⊆ X2 && (X2.constraints == [H1, H2] || X2.constraints == [H2, H1]) + # special case: HalfSpace output + Y = HalfSpace([2.0], 38.0) + @test backward(Y, W, b, algo) == H1 + # special case: Universe output + Y = Universe(1) + @test backward(Y, W, b, algo) == Universe(2) + end + + # approximate algorithms + for algo in (BoxBackward(),) + # 1D affine map + W = hcat([2.0]) + b = [1.0] + X = Singleton([1.0]) + Y = affine_map(W, X, b) + @test backward(Y, W, b, PolyhedraBackward()) ⊆ backward(Y, W, b, algo) + + # 2D affine map + W = hcat([2.0 3.0; -1.0 -2.0]) + b = [1.0, -2.0] + X = Singleton([1.0, 2.0]) + Y = affine_map(W, X, b) + @test backward(Y, W, b, PolyhedraBackward()) ⊆ backward(Y, W, b, algo) + + # 1D-2D affine map + W = hcat([2.0; -1.0]) + b = [1.0, -2.0] + X = Singleton([1.0]) + Y = affine_map(W, X, b) + @test backward(Y, W, b, PolyhedraBackward()) ⊆ backward(Y, W, b, algo) + end +end + +@testset "Backward Id activation" begin + y = [1.0, 2] + Y = BallInf(y, 0.1) + Yu = UnionSetArray([Y, Y]) + + # exact algorithms + for algo in (BoxBackward(), PolyhedraBackward()) + @test backward(y, Id(), algo) == y + + @test isequivalent(backward(Y, Id(), algo), Y) + @test isequivalent(backward(Yu, Id(), algo), Yu) + end +end + +@testset "Backward ReLU activation" begin + # exact results for all algorithms + for algo in (BoxBackward(), PolyhedraBackward()) + # vector + y = [1.0, 2] + @test backward(y, ReLU(), algo) == y + + # 1D + Y = HalfSpace([-2.0], 1.0) + @test backward(Y, ReLU(), algo) == Universe(1) + Y = Universe(1) + @test backward(Y, ReLU(), algo) == Universe(1) + + # 2D, strictly negative + Y = LineSegment([-1.0, -1.0], [-2.0, -2.0]) + @test backward(Y, ReLU(), algo) == EmptySet(2) + end + + # exact algorithms + for algo in (PolyhedraBackward(),) + # 1D + Y = BallInf([2.0], 1.0) + @test backward(Y, ReLU(), algo) == Y + Y = HalfSpace([2.0], 1.0) + @test backward(Y, ReLU(), algo) == HalfSpace([1.0], 0.5) + Y = HalfSpace([-2.0], -1.0) + @test backward(Y, ReLU(), algo) == HalfSpace([-1.0], -0.5) + Y = Interval(1.0, 2.0) + @test backward(Y, ReLU(), algo) == Y + Y = Interval(-1.0, 2.0) + @test backward(Y, ReLU(), algo) == HalfSpace([1.0], 2.0) + + # 2D + Pneg = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 0.0)]) + Px = 
HPolyhedron([HalfSpace([0.0, 1.0], 0.0), HalfSpace([1.0, 0.0], 2.0), + HalfSpace([-1.0, 0.0], -1.0)]) + Py = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 2.0), + HalfSpace([0.0, -1.0], -1.0)]) + Qx = HPolyhedron([HalfSpace([0.0, 1.0], 0.0), HalfSpace([1.0, 0.0], 2.0), + HalfSpace([-1.0, 0.0], 0.0)]) + Qy = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 2.0), + HalfSpace([0.0, -1.0], 0.0)]) + # strictly positive + Y = LineSegment([1.0, 1.0], [2.0, 2.0]) + @test backward(Y, ReLU(), algo) == Y + # only origin + Y = Singleton([0.0, 0.0]) + @test backward(Y, ReLU(), algo) == Pneg + # origin + positive + Y = LineSegment([0.0, 0.0], [2.0, 2.0]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Pneg]) + # only x-axis + Y = LineSegment([1.0, 0.0], [2.0, 0.0]) + @test backward(Y, ReLU(), algo) == Px + # positive + x-axis + Y = VPolygon([[1.0, 0.0], [2.0, 2.0], [2.0, 0.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Px]) + # only y-axis + Y = LineSegment([0.0, 1.0], [0.0, 2.0]) + @test backward(Y, ReLU(), algo) == Py + # positive + y-axis + Y = VPolygon([[0.0, 1.0], [2.0, 2.0], [0.0, 2.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Py]) + # positive + both axes + Y = VPolygon([[0.0, 1.0], [0.0, 2.0], [1.0, 0.0], [2.0, 0.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Px, Py]) + # positive + x-axis + origin + Y = VPolygon([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Qx, Pneg]) + # positive + y-axis + origin + Y = VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 2.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Qy, Pneg]) + # positive + both axes + origin + Y = VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0]]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y, Qx, Qy, Pneg]) + # origin + negative + Y = LineSegment([0.0, 0.0], [-2.0, -2.0]) + @test backward(Y, ReLU(), algo) == Pneg + # positive + negative + both axes + origin + Y = VPolygon([[-1.0, -1.0], [-1.0, 3.0], [3.0, -1.0]]) + X = backward(Y, ReLU(), algo) + @test X isa UnionSetArray && length(X) == 4 && X[2:4] == [Qx, Qy, Pneg] && + isequivalent(X[1], VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0]])) + # unbounded + Y = HalfSpace([-1.0, 0.0], -1.0) + X = backward(Y, ReLU(), algo) + ## union is too complex -> only perform partial tests + @test X ⊆ Y && low(X) == [1.0, -Inf] && high(X) == [Inf, Inf] + Y = HalfSpace([0.0, -1.0], -1.0) + X = backward(Y, ReLU(), algo) + ## union is too complex -> only perform partial tests + @test X ⊆ Y && low(X) == [-Inf, 1.0] && high(X) == [Inf, Inf] + # union + Y = UnionSetArray([LineSegment([1.0, 1.0], [2.0, 2.0]), Singleton([0.0, 0.0])]) + @test backward(Y, ReLU(), algo) == UnionSetArray([Y[1], Pneg]) + + # 3D + # positive point + Y = Singleton([1.0, 1.0, 1.0]) + @test backward(Y, ReLU(), algo) == Y + # positive + negative + both axes + origin + Y = BallInf(zeros(3), 1.0) + X = backward(Y, ReLU(), algo) # result: x <= 1 && y <= 1 && z <= 1 + # union is too complex -> only perform partial tests + @test X isa UnionSetArray && length(X) == 8 + @test all(high(X, i) == 1.0 for i in 1:3) + @test all(low(X, i) == -Inf for i in 1:3) + + # union + Y = UnionSetArray([Singleton([2.0]), Singleton([3.0])]) + @test Y == backward(Y, ReLU(), algo) + end + + # approximate algorithms + for algo in (BoxBackward(),) + # 1D + Y = BallInf([2.0], 1.0) + @test Y ⊆ backward(Y, ReLU(), algo) + Y = HalfSpace([2.0], 1.0) + @test HalfSpace([1.0], 0.5) ⊆ backward(Y, ReLU(), algo) + Y = HalfSpace([-2.0], -1.0) + @test 
HalfSpace([-1.0], -0.5) ⊆ backward(Y, ReLU(), algo) + + # 2D + Pneg = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 0.0)]) + Px = HPolyhedron([HalfSpace([0.0, 1.0], 0.0), HalfSpace([1.0, 0.0], 2.0), + HalfSpace([-1.0, 0.0], -1.0)]) + Py = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 2.0), + HalfSpace([0.0, -1.0], -1.0)]) + Qx = HPolyhedron([HalfSpace([0.0, 1.0], 0.0), HalfSpace([1.0, 0.0], 2.0), + HalfSpace([-1.0, 0.0], 0.0)]) + Qy = HPolyhedron([HalfSpace([1.0, 0.0], 0.0), HalfSpace([0.0, 1.0], 2.0), + HalfSpace([0.0, -1.0], 0.0)]) + # strictly positive + Y = LineSegment([1.0, 1.0], [2.0, 2.0]) + @test Y ⊆ backward(Y, ReLU(), algo) + # only origin + Y = Singleton([0.0, 0.0]) + @test Pneg ⊆ backward(Y, ReLU(), algo) + # origin + positive + Y = LineSegment([0.0, 0.0], [2.0, 2.0]) + @test UnionSetArray([Y, Pneg]) ⊆ backward(Y, ReLU(), algo) + # only x-axis + Y = LineSegment([1.0, 0.0], [2.0, 0.0]) + @test Px ⊆ backward(Y, ReLU(), algo) + # positive + x-axis + Y = VPolygon([[1.0, 0.0], [2.0, 2.0], [2.0, 0.0]]) + @test UnionSetArray([Y, Px]) ⊆ backward(Y, ReLU(), algo) + # only y-axis + Y = LineSegment([0.0, 1.0], [0.0, 2.0]) + @test Py ⊆ backward(Y, ReLU(), algo) + # positive + y-axis + Y = VPolygon([[0.0, 1.0], [2.0, 2.0], [0.0, 2.0]]) + @test UnionSetArray([Y, Py]) ⊆ backward(Y, ReLU(), algo) + # positive + both axes + Y = VPolygon([[0.0, 1.0], [0.0, 2.0], [1.0, 0.0], [2.0, 0.0]]) + @test UnionSetArray([Y, Px, Py]) ⊆ backward(Y, ReLU(), algo) + # positive + x-axis + origin + Y = VPolygon([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0]]) + @test UnionSetArray([Y, Qx, Pneg]) ⊆ backward(Y, ReLU(), algo) + # positive + y-axis + origin + Y = VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 2.0]]) + @test UnionSetArray([Y, Qy, Pneg]) ⊆ backward(Y, ReLU(), algo) + # positive + both axes + origin + Y = VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0]]) + @test UnionSetArray([Y, Qx, Qy, Pneg]) ⊆ backward(Y, ReLU(), algo) + # origin + negative + Y = LineSegment([0.0, 0.0], [-2.0, -2.0]) + @test backward(Y, ReLU(), algo) == Pneg + # positive + negative + both axes + origin + Y = VPolygon([[-1.0, -1.0], [-1.0, 3.0], [3.0, -1.0]]) + X = backward(Y, ReLU(), algo) + @test UnionSetArray([VPolygon([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0]]), Qx, Qy, Pneg]) ⊆ X + # unbounded + Y = HalfSpace([-1.0, 0.0], -1.0) + @test Y ⊆ backward(Y, ReLU(), algo) + Y = HalfSpace([0.0, -1.0], -1.0) + @test Y ⊆ backward(Y, ReLU(), algo) + # # union + Y = UnionSetArray([LineSegment([1.0, 1.0], [2.0, 2.0]), Singleton([0.0, 0.0])]) + @test UnionSetArray([Y[1], Pneg]) ⊆ backward(Y, ReLU(), algo) + + # 3D + # positive point + Y = Singleton([1.0, 1.0, 1.0]) + @test Y ⊆ backward(Y, ReLU(), algo) + # positive + negative + both axes + origin + Y = BallInf(zeros(3), 1.0) + X = backward(Y, ReLU(), algo) # result: x <= 1 && y <= 1 && z <= 1 + @test all(high(X, i) >= 1 for i in 1:3) + @test all(low(X, i) == -Inf for i in 1:3) + + # union + Y = UnionSetArray([Singleton([2.0]), Singleton([3.0])]) + @test Y ⊆ backward(Y, ReLU(), algo) + end +end + +@testset "Backward sigmoid activation" begin + x = [0.0, 1] + y = Sigmoid()(x) + Y = BallInf(y, 0.0) + + # all algorithms + for algo in (BoxBackward(), PolyhedraBackward(), DummyBackward()) + @test backward(y, Sigmoid(), algo) == x + end + + # exact algorithms + for algo in (BoxBackward(),) + @test isequivalent(backward(Y, Sigmoid(), algo), Singleton(x)) + end + + # algorithms not supporting sigmoid activation + for algo in (PolyhedraBackward(),) + @test_throws ArgumentError backward(Y, 
Sigmoid(), algo) + end +end + +@testset "Backward leaky-ReLU activation" begin + lr = LeakyReLU(0.1) + x = [0.0, 1] + X = Singleton(x) + y = lr(x) + Y = BallInf(y, 0.0) + + # all algorithms + for algo in (BoxBackward(), PolyhedraBackward(), DummyBackward()) + @test backward(y, lr, algo) == x + end + + # exact algorithms + for algo in (PolyhedraBackward(),) + @test isequivalent(backward(Y, lr, algo), X) + + Y2 = BallInf([0.0], 1.0) + lr0 = LeakyReLU(0.0) + X2 = backward(Y2, lr0, algo) # equivalent to HalfSpace([1.0], 1.0) + # union is too complex -> only perform partial tests + @test high(X2) == [1.0] && low(X2) == [-Inf] + end + + # exact algorithms (in this case) + for algo in (BoxBackward(),) + @test isequivalent(backward(Y, lr, algo), X) + end +end + +@testset "Backward layer" begin + W = [1.0 0; 0 1] + b = [1.0, 0] + L = DenseLayerOp(W, b, Id()) + y = [2.0, 1] + Y = BallInf(y, 0.1) + + for algo in (BoxBackward(), PolyhedraBackward()) + @test backward(y, L, algo) == [1.0, 1] + + @test [1.0, 1] ∈ backward(Y, L, algo) + end +end + +@testset "Backward network" begin + # 2D network + N = example_network_222() + x = [1.0, 2.0] + Y = Singleton(N(x)) + for algo in (BoxBackward(), PolyhedraBackward()) + X = backward(Y, N, algo) + @test x ∈ X + x = [-4.0, 0.0] + @test Singleton(N(x)) == Y && x ∈ X + end + + # 1D/2D network + N = example_network_1221() + X = Interval(2.5, 5.0) + Y = convert(Interval, forward(X, N, ConcreteForward())) + + # exact algorithms + for algo in (PolyhedraBackward(),) + @test isequivalent(X, backward(Y, N, algo)) + end + + # approximate algorithms + for algo in (BoxBackward(),) + @test X ⊆ backward(Y, N, algo) + end +end diff --git a/test/BackwardAlgorithms/simplify_sets.jl b/test/BackwardAlgorithms/simplify_sets.jl new file mode 100644 index 0000000..fe6c11a --- /dev/null +++ b/test/BackwardAlgorithms/simplify_sets.jl @@ -0,0 +1,17 @@ +@testset "Set simplification" begin + # simplify_set + using NeuralNetworkReachability.BackwardAlgorithms: simplify_set + X = BallInf([0.0], 0.0) + @test simplify_set(X) == X + X = HPolyhedron([HalfSpace([1.0], -1.0), HalfSpace([-1.0], -1.0)]) + @test simplify_set(X) == EmptySet(1) + X = HPolyhedron([HalfSpace([1.0], 1.0), HalfSpace([-1.0], 1.0)]) + @test simplify_set(X) == X + + # simplify_union + using NeuralNetworkReachability.BackwardAlgorithms: simplify_union + @test simplify_union([]) == EmptySet(1) + @test simplify_union([]; n=3) == EmptySet(3) + @test simplify_union([X]) == X + @test simplify_union([X, X]) == UnionSetArray([X, X]) +end diff --git a/test/BidirectionalAlgorithms/bidirectional.jl b/test/BidirectionalAlgorithms/bidirectional.jl new file mode 100755 index 0000000..7ca0014 --- /dev/null +++ b/test/BidirectionalAlgorithms/bidirectional.jl @@ -0,0 +1,80 @@ +@testset "Bidirectional with initial point" begin + algoB = BoxBidirectional() + algoP = PolyhedraBidirectional() + algoPB = SimpleBidirectional(ConcreteForward(), BoxBackward()) + algoBP = SimpleBidirectional(BoxForward(), PolyhedraBackward()) + + N = example_network_222() + + x = [1.0, 1.0] + @test forward(x, N) == [-5.0, 5.0] + + # initial point + X = Singleton(x) + # empty preimage + Y = Singleton([5.0, 5.0]) + for algo in (algoB, algoP, algoPB, algoBP) + X2, Y2 = bidirectional(X, Y, N, algo) + @test X2 == EmptySet(2) + + res = bidirectional(X, Y, N, algo; get_intermediate_results=true) + @test res[1] == EmptySet(2) + end + # nonempty preimage + Y = HalfSpace([1.0, -1.0], 0.0) + for algo in (algoB, algoP, algoPB, algoBP) + X2, Y2 = bidirectional(X, Y, N, algo) + 
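+        # forward(x) = [-5, 5] satisfies the constraint Y, so the refinement
+        # must keep the initial point and yield an output set intersecting Y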
@test isequivalent(X, X2) && !isdisjoint(Y, Y2) + + res = bidirectional(X, Y, N, algo; get_intermediate_results=true) + @test !isdisjoint(Y, res[end]) && isequivalent(X, res[1]) + end +end + +@testset "Bidirectional with initial set" begin + algoB = BoxBidirectional() + algoP = PolyhedraBidirectional() + algoPB = SimpleBidirectional(ConcreteForward(), BoxBackward()) + algoBP = SimpleBidirectional(BoxForward(), PolyhedraBackward()) + + N = example_network_222() + + x = [1.0, 1.0] + X = BallInf(x, 1.0) + # empty preimage + Y = Singleton([5.0, 5.0]) + for algo in (algoB, algoP, algoPB, algoBP) + X2, Y2 = bidirectional(X, Y, N, algo) + @test X2 == EmptySet(2) + + res = bidirectional(X, Y, N, algo; get_intermediate_results=true) + @test res[1] == EmptySet(2) + end + # nonempty preimage + Y = HalfSpace([1.0, -1.0], 0.0) + for algo in (algoB, algoP, algoBP, algoPB) + X2, Y2 = bidirectional(X, Y, N, algo) + @test isequivalent(X, X2) && !isdisjoint(Y, Y2) + + res = bidirectional(X, Y, N, algo; get_intermediate_results=true) + @test isequivalent(X, res[1]) && !isdisjoint(Y, res[end]) + end +end + +@testset "Bidirectional with unusual empty intermediate set" begin + # the network is the identity for the diagonal line segment, but the box + # algorithm turns it into a box; the exact backward algorithm detects + # emptiness before the activation function + W = [1.0 0; 0 1] + b = [0.0, 0] + N = FeedforwardNetwork([DenseLayerOp(W, b, ReLU())]) + X = LineSegment([1.0, 1], [2.0, 2]) + Y = Singleton([1.0, 2]) + algo = SimpleBidirectional(BoxForward(), PolyhedraBackward()) + + X2, Y2 = bidirectional(X, Y, N, algo) + @test isequivalent(Y, Y2) && X2 == EmptySet(2) + + X2, Y2 = bidirectional(X, Y, N, algo; get_intermediate_results=true) + @test isequivalent(Y, Y2) && X2 == EmptySet(2) +end diff --git a/test/ForwardAlgorithms/forward.jl b/test/ForwardAlgorithms/forward.jl new file mode 100755 index 0000000..1f10872 --- /dev/null +++ b/test/ForwardAlgorithms/forward.jl @@ -0,0 +1,336 @@ +@testset "Forward affine map" begin + W = [2.0 3; 4 5; 6 7] + b = [1.0, 2, 3] + x = [1.0, 2] + X = BallInf(x, 0.1) + + @test forward(x, W, b, DefaultForward()) == [9.0, 16, 23] + + for algo in (DefaultForward(), ConcreteForward(), LazyForward(), + BoxForward(), BoxForward(LazyForward()), + SplitForward(DefaultForward()), DeepZ(), Verisig(nothing)) + Y = concretize(forward(X, W, b, algo)) + @test isequivalent(Y, affine_map(W, X, b)) + end +end + +@testset "Forward Id activation" begin + x = [1.0, 2] + X = BallInf(x, 0.1) + + @test forward(x, Id(), DefaultForward()) == Id()(x) + + # exact algorithms + for algo in (DefaultForward(), ConcreteForward(), LazyForward(), + BoxForward(), BoxForward(LazyForward()), + SplitForward(DefaultForward()), DeepZ(), Verisig(nothing)) + @test isequivalent(forward(X, Id(), algo), X) + end +end + +@testset "Forward ReLU activation" begin + x1 = [1.0, 2] + x2 = [1.0, -2] + X1 = BallInf(x1, 0.1) + X2 = BallInf(x2, 0.1) + X3 = LineSegment([0.0, -1], [2.0, 1]) + + Y2 = LineSegment([0.9, 0], [1.1, 0]) + Y3 = UnionSet(LineSegment([0.0, 0], [1.0, 0]), LineSegment([1.0, 0], [2.0, 1])) + @test forward(x1, ReLU(), DefaultForward()) == ReLU()(x1) + @test forward(x2, ReLU(), DefaultForward()) == ReLU()(x2) + + # exact algorithms + for algo in (ConcreteForward(), LazyForward()) + @test isequivalent(concretize(forward(X1, ReLU(), algo)), X1) + @test isequivalent(concretize(forward(X2, ReLU(), algo)), Y2) + # equivalence check between unions (not available out of the box) + @test Y3 ⊆ concretize(forward(X3, ReLU(), 
algo))
+        Z3 = concretize(forward(X3, ReLU(), algo))
+        @test Z3 isa UnionSetArray && length(Z3) == 2 &&
+              (isequivalent(Z3[1], Y3[1]) || isequivalent(Z3[1], Y3[2])) &&
+              (isequivalent(Z3[2], Y3[1]) || isequivalent(Z3[2], Y3[2]))
+    end
+
+    # approximate algorithms
+    for algo in (BoxForward(), BoxForward(LazyForward()), DeepZ())
+        @test isequivalent(concretize(forward(X1, ReLU(), algo)), X1)
+        @test isequivalent(concretize(forward(X2, ReLU(), algo)), Y2)
+        @test Y3 ⊆ concretize(forward(X3, ReLU(), algo))
+    end
+
+    # algorithms currently not supporting ReLU activation
+    for algo in (SplitForward(ConcreteForward()),)
+        @test_broken forward(X1, ReLU(), algo)
+    end
+
+    # algorithms not supporting ReLU activation
+    for algo in (DefaultForward(), Verisig(nothing))
+        @test_throws ArgumentError forward(X1, ReLU(), algo)
+    end
+end
+
+@testset "Forward sigmoid activation" begin
+    x1 = [1.0, 2]
+    x2 = [1.0, -2]
+    X1 = BallInf(x1, 0.1)
+    X2 = BallInf(x2, 0.1)
+    X3 = LineSegment([0.0, -1], [2.0, 1])
+
+    Y1 = VPolytope(convex_hull([forward(x, Sigmoid(), DefaultForward()) for x in vertices(X1)]))
+    @test forward(x1, Sigmoid(), DefaultForward()) == Sigmoid()(x1)
+
+    # approximate algorithms
+    for algo in (BoxForward(), BoxForward(LazyForward()), DeepZ())
+        @test Y1 ⊆ forward(X1, Sigmoid(), algo)
+    end
+
+    # DeepZ() has a special case for flat sets
+    for algo in (DeepZ(),)
+        x2 = [1.0, 1.0]
+        X2 = BallInf(x2, 0.0)
+        Y = forward(X2, Sigmoid(), algo)
+        @test isequivalent(Singleton(Sigmoid()(x2)), concretize(Y))
+        X3 = LineSegment(x2, [1.0, 2.0])
+        Y = forward(X3, Sigmoid(), algo)
+        Y_exact3 = VPolygon(convex_hull([Sigmoid()(x) for x in vertices(X3)]))
+        @test Y_exact3 ⊆ concretize(Y)
+    end
+
+    # algorithms not supporting sigmoid activation
+    for algo in (DefaultForward(), ConcreteForward(), LazyForward(),
+                 SplitForward(ConcreteForward()), Verisig(nothing))
+        @test_throws ArgumentError forward(X1, Sigmoid(), algo)
+    end
+end
+
+@testset "Forward tanh activation" begin
+    x = [1.0, 2]
+    X = BallInf(x, 0.1)
+
+    Y = VPolytope(convex_hull([forward(x, Tanh(), DefaultForward()) for x in vertices(X)]))
+    @test forward(x, Tanh(), DefaultForward()) == Tanh()(x)
+
+    # approximate algorithms
+    for algo in (BoxForward(), BoxForward(LazyForward()), DeepZ())
+        @test Y ⊆ forward(X, Tanh(), algo)
+    end
+
+    # algorithms not supporting tanh activation
+    for algo in (DefaultForward(), ConcreteForward(), LazyForward(),
+                 SplitForward(ConcreteForward()), Verisig(nothing))
+        @test_throws ArgumentError forward(X, Tanh(), algo)
+    end
+end
+
+@testset "Forward LeakyReLU activation" begin
+    x = [1.0, 2]
+    X = BallInf(x, 0.1)
+
+    Y = VPolytope(convex_hull([forward(x, LeakyReLU(0.1), DefaultForward()) for x in vertices(X)]))
+    @test forward(x, LeakyReLU(0.1), DefaultForward()) == LeakyReLU(0.1)(x)
+
+    # approximate algorithms
+    for algo in (BoxForward(), BoxForward(LazyForward()))
+        @test Y ⊆ forward(X, LeakyReLU(0.1), algo)
+    end
+
+    # algorithms currently not supporting leaky-ReLU activation
+    for algo in (ConcreteForward(), LazyForward(), SplitForward(ConcreteForward()))
+        @test_broken forward(X, LeakyReLU(0.1), algo)
+    end
+
+    # algorithms not supporting leaky-ReLU activation
+    for algo in (DefaultForward(), DeepZ(), Verisig(nothing))
+        @test_throws ArgumentError forward(X, LeakyReLU(0.1), algo)
+    end
+end
+
+@testset "Forward layer" begin
+    W = [2.0 3; 4 5; 6 7]
+    b = [1.0, 2, 3]
+    L = DenseLayerOp(W, b, Id())
+    x = [1.0, 2]
+    X = BallInf(x, 0.1)
+
+    @test forward(x, L, DefaultForward()) == [9.0, 16, 23]
+
+    for algo in
(DefaultForward(), ConcreteForward(), LazyForward(), + BoxForward(), BoxForward(LazyForward()), + SplitForward(DefaultForward()), DeepZ(), Verisig(nothing)) + Y = concretize(forward(X, L, algo)) + @test isequivalent(Y, affine_map(W, X, b)) + end +end + +@testset "Forward network" begin + W1 = [2.0 3; 4 5; 6 7] + b1 = [1.0, 2, 3] + L1 = DenseLayerOp(W1, b1, Id()) + W2 = [1.0 1 1; 2 2 2] + b2 = [-1.0, -2] + L2 = DenseLayerOp(W2, b2, Id()) + N = FeedforwardNetwork([L1, L2]) + x = [1.0, 2] + X = BallInf(x, 0.1) + + Y = affine_map(W2, affine_map(W1, X, b1), b2) + @test forward(x, N, DefaultForward()) == [47.0, 94] + + # exact algorithms (in this case) + for algo in (ConcreteForward(), LazyForward(), BoxForward(), + BoxForward(LazyForward()), DeepZ(), Verisig()) + @test isequivalent(concretize(forward(X, N, algo)), Y) + end + # approximate algorithms + for algo in (SplitForward(ConcreteForward()),) + @test Y ⊆ forward(X, N, algo) + end + for algo in (DefaultForward(),) + @test_throws ArgumentError forward(X, N, algo) + end +end + +@testset "Forward network singleton" begin + # singleton computation uses vector and hence works for each algorithm + for (N, y) in ((example_network_222(ReLU()), [-5.0, 5.0]), + (example_network_232(ReLU()), [-8.0, 8.0]), + (example_network_222(Sigmoid()), [-2.0179862099620918, 2.0179862099620918]), + (example_network_232(Sigmoid()), [-4.211161945852107, 4.211161945852107]), + (example_network_222(Tanh()), [-0.000670700260932966, 0.000670700260932966]), + (example_network_232(Tanh()), [-2.2854531681282273, 2.2854531681282273]), + (example_network_222(LeakyReLU(0.1)), [-4.2, 4.2]), + (example_network_232(LeakyReLU(0.1)), [-7.2, 7.2])) + x = Singleton([1.0, 1.0]) + + # forward with singleton + yv = forward(element(x), N) + @test yv == y + for algo in (DefaultForward(), ConcreteForward(), LazyForward(), + BoxForward(), BoxForward(LazyForward()), + SplitForward(DefaultForward()), DeepZ(), Verisig(nothing)) + ys = forward(x, N, algo) + @test isequivalent(concretize(ys), Singleton(y)) + end + + # _forward_store with singleton + using NeuralNetworkReachability.ForwardAlgorithms: _forward_store + yv = _forward_store(element(x), N, DefaultForward()) + @test length(yv) == length(N) + @test yv[end][2] == y + for algo in (DefaultForward(), ConcreteForward(), LazyForward(), + BoxForward(), BoxForward(LazyForward()), + SplitForward(DefaultForward()), DeepZ(), Verisig(nothing)) + ys = _forward_store(x, N, algo) + @test length(ys) == length(N) + for i in eachindex(ys) + for j in 1:2 + @test isequivalent(concretize(ys[i][j]), Singleton(yv[i][j])) + end + end + end + end +end + +@testset "Forward ReLU network" begin + N = example_network_232(ReLU()) + X = BallInf([1.0, 1.0], 0.1) + Y_exact = LineSegment([-9.7, 9.7], [-6.3, 6.3]) + + # exact algorithms (in this case) + for algo in (ConcreteForward(), LazyForward(), DeepZ()) + Y = forward(X, N, algo) + @test isequivalent(concretize(Y), Y_exact) + end + + # approximate algorithms + for algo in (BoxForward(), BoxForward(LazyForward()), SplitForward(ConcreteForward())) + Y = forward(X, N, algo) + @test Y_exact ⊆ concretize(Y) + end + + # algorithms not supporting ReLU activation + for algo in (DefaultForward(), Verisig()) + @test_throws ArgumentError forward(X, N, algo) + end +end + +@testset "Forward leaky-ReLU network" begin + N = example_network_232(LeakyReLU(0.1)) + X = BallInf([1.0, 1.0], 0.1) + Y_exact = LineSegment([-8.92, 8.92], [-5.48, 5.48]) + + # approximate algorithms + for algo in (BoxForward(), BoxForward(LazyForward())) + Y = 
forward(X, N, algo) + @test Y_exact ⊆ concretize(Y) + end + + # algorithms currently not supporting leaky-ReLU activation + for algo in (ConcreteForward(), LazyForward(), + SplitForward(ConcreteForward()), DeepZ()) + @test_broken forward(X, N, algo) + end + + # algorithms not supporting ReLU activation + for algo in (DefaultForward(), Verisig()) + @test_throws ArgumentError forward(X, N, algo) + end + + # algorithms currently not supporting unbounded inputs + X = Line([1.0, 1], 1.0) + for algo in (BoxForward(), BoxForward(LazyForward())) + @test_broken forward(X, N, algo) + end +end + +@testset "Forward sigmoid and tanh networks" begin + for act in (Sigmoid(), Tanh()) + N = example_network_232(act) + X = BallInf([1.0, 1.0], 0.1) + # in this case the convex enclosure is exact + Y_exact = VPolygon(convex_hull([forward(x, N) for x in vertices(X)])) + + # approximate algorithms + for algo in (BoxForward(), BoxForward(LazyForward()), SplitForward(DeepZ()), + DeepZ()) + Y = forward(X, N, algo) + @test Y_exact ⊆ concretize(Y) + end + + # Verisig result has a special type + Y = forward(X, N, Verisig()) + if act == Sigmoid() + @test Y_exact ⊆ overapproximate(Y, Zonotope) + elseif act == Tanh() + # this is a known case where the algorithm is unsound + @test_broken Y_exact ⊆ overapproximate(Y, Zonotope) + else + error("unexpected case") + end + + # algorithms not supporting sigmoid activation + for algo in (DefaultForward(), ConcreteForward(), LazyForward(), + SplitForward(ConcreteForward())) + @test_throws ArgumentError forward(X, N, algo) + end + end +end + +@testset "Forward sigmoid multiple hidden layers" begin + N = example_network_1221(Sigmoid()) + X = Interval(1.0, 2.0) + # in this case the convex enclosure is exact + ch = convex_hull([forward(x, N) for x in vertices(X)]) + Y_exact = Interval(ch[1][1], ch[2][1]) + + # approximate algorithms + for algo in (BoxForward(), BoxForward(LazyForward()), SplitForward(DeepZ()), + DeepZ()) + Y = forward(X, N, algo) + @test Y_exact ⊆ concretize(Y) + end + + # not supported yet + @test_broken forward(X, N, Verisig()) +end diff --git a/test/Project.toml b/test/Project.toml old mode 100644 new mode 100755 index d04682e..640b9cf --- a/test/Project.toml +++ b/test/Project.toml @@ -1,4 +1,16 @@ [deps] +CDDLib = "3391f64e-dcde-5f30-b752-e11513730f60" +ControllerFormats = "02ac4b2c-022a-44aa-84a5-ea45a5754bcc" +LazySets = "b4f0291d-fe17-52bc-9479-3d1a343d9043" +Optim = "429524aa-4258-5aef-a3af-852621145aeb" +Polyhedra = "67491407-f73d-577b-9b50-8179a7c68029" +ReachabilityAnalysis = "1e97bd63-91d1-579d-8e8d-501d2b57c93f" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] +CDDLib = "0.6 - 0.9" +ControllerFormats = "0.2" +LazySets = "2.11.1" +Optim = "1" +Polyhedra = "0.6 - 0.7" +ReachabilityAnalysis = "0.22" diff --git a/test/example_networks.jl b/test/example_networks.jl new file mode 100755 index 0000000..bb859d7 --- /dev/null +++ b/test/example_networks.jl @@ -0,0 +1,15 @@ +function example_network_222(act::ActivationFunction=ReLU()) + return FeedforwardNetwork([DenseLayerOp([1.0 2.0; -1.0 -2.0], [1.0, -1.0], act), + DenseLayerOp([-1.0 -2.0; 1.0 2.0], [-1.0, 1.0], Id())]) +end + +function example_network_232(act::ActivationFunction=ReLU()) + return FeedforwardNetwork([DenseLayerOp([1.0 2.0; -1.0 -2.0; 3.0 -3.0], [1.0, -1.0, 1.0], act), + DenseLayerOp([-1.0 -2.0 -3.0; 1.0 2.0 3.0], [-1.0, 1.0], Id())]) +end + +function example_network_1221(act::ActivationFunction=ReLU()) + return FeedforwardNetwork([DenseLayerOp(hcat([1.0; 1.0]), [1.5, 1.5], act), + 
DenseLayerOp([2.0 2.0; 2.0 2.0], [2.5, 2.5], act), + DenseLayerOp([3.0 3.0;], [3.5], Id())]) +end diff --git a/test/optional_dependencies_not_loaded.jl b/test/optional_dependencies_not_loaded.jl new file mode 100644 index 0000000..ba69519 --- /dev/null +++ b/test/optional_dependencies_not_loaded.jl @@ -0,0 +1,10 @@ +for dummy in [1] + N = example_network_222() + X = BallInf(zeros(2), 1.0) + + # Verisig + if !isdefined(@__MODULE__, :ReachabilityAnalysis) + @test_throws AssertionError Verisig() + @test_throws AssertionError forward(X, N, Verisig(nothing)) + end +end diff --git a/test/runtests.jl b/test/runtests.jl old mode 100644 new mode 100755 index 31a2cd3..daf2307 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,5 +1,43 @@ -using Test, JuliaReachTemplatePkg +using Test, NeuralNetworkReachability +using ControllerFormats, LazySets -@time @testset "foo" begin - JuliaReachTemplatePkg.foo() +include("example_networks.jl") + +@testset "Optional dependencies (not loaded)" begin + include("optional_dependencies_not_loaded.jl") end + +# load optional dependencies +import ReachabilityAnalysis, Polyhedra, CDDLib, Optim + +@testset "ForwardAlgorithms" begin + include("ForwardAlgorithms/forward.jl") +end +@testset "BackwardAlgorithms" begin + struct DummyBackward <: NeuralNetworkReachability.BackwardAlgorithms.BackwardAlgorithm + end + + include("BackwardAlgorithms/simplify_sets.jl") + include("BackwardAlgorithms/PartitioningLeakyReLU.jl") + include("BackwardAlgorithms/backward.jl") +end +@testset "BidirectionalAlgorithms" begin + include("BidirectionalAlgorithms/bidirectional.jl") +end +@testset "AISoLA 2023" begin + include("AISoLA2023/BoxAffineMap.jl") + @testset "AISoLA 2023" begin + include("AISoLA2023/motivation.jl") + end + @testset "AISoLA 2023" begin + include("AISoLA2023/parabola.jl") + end + @testset "AISoLA 2023" begin + include("AISoLA2023/leaky_relu.jl") + end + @testset "AISoLA 2023" begin + include("AISoLA2023/forward_backward.jl") + end +end + +@test isempty(detect_ambiguities(NeuralNetworkReachability))
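
For reference, a minimal sketch of how the new API fits together (illustration
only, not part of the diff; the network mirrors `example_network_222` from the
tests, and the box-based algorithms are the coarsest choices):

    using NeuralNetworkReachability, LazySets, ControllerFormats

    # two-layer ReLU network
    L1 = DenseLayerOp([1.0 2.0; -1.0 -2.0], [1.0, -1.0], ReLU())
    L2 = DenseLayerOp([-1.0 -2.0; 1.0 2.0], [-1.0, 1.0], Id())
    net = FeedforwardNetwork([L1, L2])

    X = BallInf([1.0, 1.0], 0.1)                  # input set
    forward(X, net, BoxForward())                 # overapproximate the image of X

    Y = Singleton(forward([1.0, 1.0], net))       # an exact output point
    backward(Y, net, BoxBackward())               # overapproximate its preimage

    H = HalfSpace([1.0, -1.0], 0.0)               # output constraint
    bidirectional(X, H, net, BoxBidirectional())  # jointly refine input and output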