From 1e61d636c882f909ba715d9c82f68a8663ae4834 Mon Sep 17 00:00:00 2001
From: Oscar Dowson
Date: Mon, 9 Sep 2024 14:09:24 +1200
Subject: [PATCH] [docs] fix doc build and avoid segfault via PythonCall (#103)

---
 .gitignore                               |  1 -
 docs/src/developers/design_principles.md |  4 ++
 docs/src/manual/predictors.md            |  4 ++
 docs/src/tutorials/model.pt              | Bin 0 -> 3200 bytes
 docs/src/tutorials/pytorch.jl            | 70 +++++++++++------------
 5 files changed, 43 insertions(+), 36 deletions(-)
 create mode 100644 docs/src/tutorials/model.pt

diff --git a/.gitignore b/.gitignore
index f37f52d..84946e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,5 +3,4 @@ dev
 docs/.CondaPkg
 docs/build
 docs/src/tutorials/*.md
-docs/src/tutorials/model.pt
 test/.CondaPkg
diff --git a/docs/src/developers/design_principles.md b/docs/src/developers/design_principles.md
index 203d517..1b1346b 100644
--- a/docs/src/developers/design_principles.md
+++ b/docs/src/developers/design_principles.md
@@ -1,3 +1,7 @@
+```@meta
+CurrentModule = MathOptAI
+```
+
 # Design principles
 
 This project is inspired by two existing projects:
diff --git a/docs/src/manual/predictors.md b/docs/src/manual/predictors.md
index 2e45680..4553044 100644
--- a/docs/src/manual/predictors.md
+++ b/docs/src/manual/predictors.md
@@ -1,3 +1,7 @@
+```@meta
+CurrentModule = MathOptAI
+```
+
 # Predictors
 
 The main entry point for embedding prediction models into JuMP is
diff --git a/docs/src/tutorials/model.pt b/docs/src/tutorials/model.pt
new file mode 100644
index 0000000000000000000000000000000000000000..aca3b15b7460dba3d32d70f54197449b2dba297b
GIT binary patch
literal 3200
[3200 bytes of base85-encoded binary data omitted]

literal 0
HcmV?d00001

diff --git a/docs/src/tutorials/pytorch.jl b/docs/src/tutorials/pytorch.jl
index fe5e705..2342ebc 100644
--- a/docs/src/tutorials/pytorch.jl
+++ b/docs/src/tutorials/pytorch.jl
@@ -11,7 +11,7 @@
 
 # ## Python integration
 
-# This tutorial uses [PythonCall.jl](https://github.com/JuliaPy/PythonCall.jl)
+# MathOptAI uses [PythonCall.jl](https://github.com/JuliaPy/PythonCall.jl)
 # to call from Julia into Python.
 #
 # See [CondaPkg.jl](https://github.com/JuliaPy/CondaPkg.jl) for more control
@@ -31,7 +31,6 @@ using Test
 import Ipopt
 import MathOptAI
 import Plots
-import PythonCall
 
 # ## Training a model
 
@@ -44,46 +43,47 @@
 # The model is unimportant, but for this example, we are trying to fit noisy
 # observations of the function ``f(x) = x^2 - 2x``.
 
-filename = joinpath(@__DIR__, "model.pt")
-PythonCall.pyexec(
-    """
-    import torch
-
-    model = torch.nn.Sequential(
-        torch.nn.Linear(1, 16),
-        torch.nn.ReLU(),
-        torch.nn.Linear(16, 1),
-    )
-
-    n = 1024
-    x = torch.arange(-2, 2 + 4 / (n - 1), 4 / (n - 1)).reshape(n, 1)
-    for epoch in range(100):
-        N = torch.normal(torch.zeros(n, 1), torch.ones(n, 1))
-        y = x ** 2 -2 * x + 0.1 * N
-        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
-        model.train()
-        loss_fn = torch.nn.MSELoss()
-        loss = loss_fn(model(x), y)
-        loss.backward()
-        optimizer.step()
-        optimizer.zero_grad()
-        if epoch % 10 == 0:
-            print(f"Epoch {epoch}: train={(loss.item()):>8f}")
-
-    torch.save(model, filename)
-    """,
-    @__MODULE__,
-    (; filename = filename),
-)
+# In Python, I ran:
+# ```python
+# #!/usr/bin/python3
+# import torch
+# model = torch.nn.Sequential(
+#     torch.nn.Linear(1, 16),
+#     torch.nn.ReLU(),
+#     torch.nn.Linear(16, 1),
+# )
+#
+# n = 1024
+# x = torch.arange(-2, 2 + 4 / (n - 1), 4 / (n - 1)).reshape(n, 1)
+# loss_fn = torch.nn.MSELoss()
+# optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
+# for epoch in range(100):
+#     optimizer.zero_grad()
+#     N = torch.normal(torch.zeros(n, 1), torch.ones(n, 1))
+#     y = x ** 2 - 2 * x + 0.1 * N
+#     loss = loss_fn(model(x), y)
+#     loss.backward()
+#     optimizer.step()
+#
+# torch.save(model, "model.pt")
+# ```
 
 # ## JuMP model
 
-# Load a model from Pytorch using [`MathOptAI.PytorchModel`](@ref).
+# Our goal for this JuMP model is to load the neural network from PyTorch into
+# the objective function, and then minimize the objective for different fixed
+# values of `x` to recreate the function that the neural network has learned to
+# approximate.
+
+# First, create a JuMP model:
 
 model = Model(Ipopt.Optimizer)
 set_silent(model)
 @variable(model, x)
-ml_model = MathOptAI.PytorchModel(filename)
+
+# Then, load the model from PyTorch using [`MathOptAI.PytorchModel`](@ref):
+
+ml_model = MathOptAI.PytorchModel(joinpath(@__DIR__, "model.pt"))
 y = MathOptAI.add_predictor(model, ml_model, [x])
 @objective(model, Min, only(y))
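+
+# ## Recovering the learned function
+
+# To recreate the function that the neural network has learned, we can fix `x`
+# to a grid of values and re-solve, recording the optimal objective value at
+# each point. This is a minimal sketch using standard JuMP and Plots calls;
+# the names `X` and `Y`, the grid size, and the plot labels are illustrative:
+
+X = range(-2, 2; length = 100)
+Y = map(X) do x_val
+    fix(x, x_val)     ## fix the decision variable to this input value
+    optimize!(model)  ## the objective is now the network output at x_val
+    return objective_value(model)
+end
+Plots.plot(v -> v^2 - 2v, X; label = "f(x) = x^2 - 2x")
+Plots.plot!(X, Y; label = "Neural network")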