From 5bf10ce592a76a4a21eaf642ee8b25009d178b0f Mon Sep 17 00:00:00 2001 From: Anthony <125415978+apchytr@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:30:11 -0500 Subject: [PATCH] Updating tensorflow tests to work with lab dev (#533) **Context:** Optimizer and training tests need to be updated for the new `lab_dev` classes. **Description of the Change:** Updated the optimizer and training tests to use `lab_dev`: the existing `test_opt.py` is kept as `test_opt_lab.py`, its `lab_dev` port is added as the new `test_opt_lab_dev.py`, and the fixes this required are made to `Rgate`, the optimizer, and related modules (illustrative usage sketches follow the diff). --- mrmustard/lab_dev/states/__init__.py | 2 +- .../states/{gstate.py => gaussian_state.py} | 0 mrmustard/lab_dev/transformations/rgate.py | 16 +- mrmustard/training/optimizer.py | 14 +- mrmustard/training/parameter_update.py | 2 +- ...{test_gstate.py => test_gaussian_state.py} | 0 .../test_transformations/test_rgate.py | 26 +- tests/test_training/test_callbacks.py | 19 +- .../{test_opt.py => test_opt_lab.py} | 0 tests/test_training/test_opt_lab_dev.py | 592 ++++++++++++++++++ tests/test_training/test_trainer.py | 20 +- tests/test_utils/test_serialize.py | 2 +- 12 files changed, 646 insertions(+), 47 deletions(-) rename mrmustard/lab_dev/states/{gstate.py => gaussian_state.py} (100%) rename tests/test_lab_dev/test_states/{test_gstate.py => test_gaussian_state.py} (100%) rename tests/test_training/{test_opt.py => test_opt_lab.py} (100%) create mode 100644 tests/test_training/test_opt_lab_dev.py diff --git a/mrmustard/lab_dev/states/__init__.py b/mrmustard/lab_dev/states/__init__.py index 51725b465..1ba4593c6 100644 --- a/mrmustard/lab_dev/states/__init__.py +++ b/mrmustard/lab_dev/states/__init__.py @@ -23,7 +23,7 @@ from .bargmann_eigenstate import BargmannEigenstate from .coherent import Coherent from .displaced_squeezed import DisplacedSqueezed -from .gstate import GKet, GDM +from .gaussian_state import GKet, GDM from .number import Number from .quadrature_eigenstate import QuadratureEigenstate from .squeezed_vacuum import SqueezedVacuum diff --git a/mrmustard/lab_dev/states/gstate.py b/mrmustard/lab_dev/states/gaussian_state.py similarity index 100% rename from mrmustard/lab_dev/states/gstate.py rename to mrmustard/lab_dev/states/gaussian_state.py diff --git a/mrmustard/lab_dev/transformations/rgate.py b/mrmustard/lab_dev/transformations/rgate.py index c27ffcfb7..cf10e5c60 100644 --- a/mrmustard/lab_dev/transformations/rgate.py +++ b/mrmustard/lab_dev/transformations/rgate.py @@ -40,7 +40,7 @@ class Rgate(Unitary): >>> import numpy as np >>> from mrmustard.lab_dev import Rgate - >>> unitary = Rgate(modes=[1, 2], phi=0.1) + >>> unitary = Rgate(modes=[1, 2], theta=0.1) >>> assert unitary.modes == [1, 2] Args: @@ -55,17 +55,19 @@ class Rgate(Unitary): def __init__( self, modes: Sequence[int], - phi: float | Sequence[float] = 0.0, - phi_trainable: bool = False, - phi_bounds: tuple[float | None, float | None] = (0.0, None), + theta: float | Sequence[float] = 0.0, + theta_trainable: bool = False, + theta_bounds: tuple[float | None, float | None] = (0.0, None), ): super().__init__(name="Rgate") - (phis,) = list(reshape_params(len(modes), phi=phi)) - self.parameters.add_parameter(make_parameter(phi_trainable, phis, "phi", phi_bounds)) + (thetas,) = list(reshape_params(len(modes), theta=theta)) + self.parameters.add_parameter( + make_parameter(theta_trainable, thetas, "theta", theta_bounds) + ) self._representation = self.from_ansatz( modes_in=modes, modes_out=modes, ansatz=PolyExpAnsatz.from_function( - fn=triples.rotation_gate_Abc, theta=self.parameters.phi + fn=triples.rotation_gate_Abc, theta=self.parameters.theta ), ).representation diff --git
a/mrmustard/training/optimizer.py b/mrmustard/training/optimizer.py index e528260bb..29d76a008 100644 --- a/mrmustard/training/optimizer.py +++ b/mrmustard/training/optimizer.py @@ -29,7 +29,10 @@ update_symplectic, update_unitary, ) -from mrmustard.lab import Circuit + +import mrmustard.lab as mrml + +from mrmustard.lab_dev import Circuit __all__ = ["Optimizer"] @@ -188,6 +191,11 @@ def _get_trainable_params(trainable_items, root_tag: str = "optimized"): for i, item in enumerate(trainable_items): owner_tag = f"{root_tag}[{i}]" if isinstance(item, Circuit): + for j, op in enumerate(item.components): + tag = f"{owner_tag}:{item.__class__.__qualname__}/_ops[{j}]" + tagged_vars = op.parameters.tagged_variables(tag) + trainables.append(tagged_vars.items()) + elif isinstance(item, mrml.Circuit): for j, op in enumerate(item.ops): tag = f"{owner_tag}:{item.__class__.__qualname__}/_ops[{j}]" tagged_vars = op.parameter_set.tagged_variables(tag) @@ -196,6 +204,10 @@ def _get_trainable_params(trainable_items, root_tag: str = "optimized"): tag = f"{owner_tag}:{item.__class__.__qualname__}" tagged_vars = item.parameter_set.tagged_variables(tag) trainables.append(tagged_vars.items()) + elif hasattr(item, "parameters"): + tag = f"{owner_tag}:{item.__class__.__qualname__}" + tagged_vars = item.parameters.tagged_variables(tag) + trainables.append(tagged_vars.items()) elif math.from_backend(item) and math.is_trainable(item): # the created parameter is wrapped into a list because the case above # returns a list, hence ensuring we have a list of lists diff --git a/mrmustard/training/parameter_update.py b/mrmustard/training/parameter_update.py index 24b0e46e9..485f13841 100644 --- a/mrmustard/training/parameter_update.py +++ b/mrmustard/training/parameter_update.py @@ -19,7 +19,7 @@ from mrmustard.utils.typing import Tensor from mrmustard import math -from .parameter import Trainable +from ..utils.typing import Trainable def update_symplectic(grads_and_vars: Sequence[tuple[Tensor, Trainable]], symplectic_lr: float): diff --git a/tests/test_lab_dev/test_states/test_gstate.py b/tests/test_lab_dev/test_states/test_gaussian_state.py similarity index 100% rename from tests/test_lab_dev/test_states/test_gstate.py rename to tests/test_lab_dev/test_states/test_gaussian_state.py diff --git a/tests/test_lab_dev/test_transformations/test_rgate.py b/tests/test_lab_dev/test_transformations/test_rgate.py index 3be74949b..e29acd00f 100644 --- a/tests/test_lab_dev/test_transformations/test_rgate.py +++ b/tests/test_lab_dev/test_transformations/test_rgate.py @@ -29,21 +29,21 @@ class TestRgate: """ modes = [[0], [1, 2], [7, 9]] - phis = [[1], 1, [1, 2]] + thetas = [[1], 1, [1, 2]] - @pytest.mark.parametrize("modes,phi", zip(modes, phis)) - def test_init(self, modes, phi): - gate = Rgate(modes, phi) + @pytest.mark.parametrize("modes,theta", zip(modes, thetas)) + def test_init(self, modes, theta): + gate = Rgate(modes, theta) assert gate.name == "Rgate" assert gate.modes == [modes] if not isinstance(modes, list) else sorted(modes) def test_init_error(self): - with pytest.raises(ValueError, match="phi"): - Rgate(modes=[0, 1], phi=[2, 3, 4]) + with pytest.raises(ValueError, match="theta"): + Rgate(modes=[0, 1], theta=[2, 3, 4]) def test_representation(self): - rep1 = Rgate(modes=[0], phi=0.1).ansatz + rep1 = Rgate(modes=[0], theta=0.1).ansatz assert math.allclose( rep1.A, [ @@ -56,7 +56,7 @@ def test_representation(self): assert math.allclose(rep1.b, np.zeros((1, 2))) assert math.allclose(rep1.c, [1.0 + 0.0j]) - rep2 = 
Rgate(modes=[0, 1], phi=[0.1, 0.3]).ansatz + rep2 = Rgate(modes=[0, 1], theta=[0.1, 0.3]).ansatz assert math.allclose( rep2.A, [ @@ -71,7 +71,7 @@ def test_representation(self): assert math.allclose(rep2.b, np.zeros((1, 4))) assert math.allclose(rep2.c, [1.0 + 0.0j]) - rep3 = Rgate(modes=[1], phi=0.1).ansatz + rep3 = Rgate(modes=[1], theta=0.1).ansatz assert math.allclose( rep3.A, [ @@ -89,11 +89,11 @@ def test_trainable_parameters(self): gate2 = Rgate([0], 1, True, (-2, 2)) with pytest.raises(AttributeError): - gate1.parameters.phi.value = 3 + gate1.parameters.theta.value = 3 - gate2.parameters.phi.value = 2 - assert gate2.parameters.phi.value == 2 + gate2.parameters.theta.value = 2 + assert gate2.parameters.theta.value == 2 def test_representation_error(self): with pytest.raises(ValueError): - Rgate(modes=[0], phi=[0.1, 0.2]).ansatz + Rgate(modes=[0], theta=[0.1, 0.2]).ansatz diff --git a/tests/test_training/test_callbacks.py b/tests/test_training/test_callbacks.py index d0d1cae04..02dbf6fb7 100644 --- a/tests/test_training/test_callbacks.py +++ b/tests/test_training/test_callbacks.py @@ -18,9 +18,7 @@ import tensorflow as tf from mrmustard import math, settings -from mrmustard.lab.circuit import Circuit -from mrmustard.lab.gates import BSgate, S2gate -from mrmustard.lab.states import Vacuum +from mrmustard.lab_dev import Circuit, BSgate, S2gate, Vacuum from mrmustard.training import Optimizer, TensorboardCallback from ..conftest import skip_np @@ -33,25 +31,26 @@ def test_tensorboard_callback(tmp_path): settings.SEED = 42 i, k = 2, 3 r = np.arcsinh(1.0) + state_in = Vacuum((0, 1, 2, 3)) s2_0, s2_1, bs = ( - S2gate(r=r, phi=0.0, phi_trainable=True)[0, 1], - S2gate(r=r, phi=0.0, phi_trainable=True)[2, 3], + S2gate((0, 1), r=r, phi=0.0, phi_trainable=True), + S2gate((2, 3), r=r, phi=0.0, phi_trainable=True), BSgate( + (1, 2), theta=np.arccos(np.sqrt(k / (i + k))) + 0.1 * settings.rng.normal(), phi=settings.rng.normal(), theta_trainable=True, phi_trainable=True, - )[1, 2], + ), ) - circ = Circuit([s2_0, s2_1, bs]) - state_in = Vacuum(num_modes=4) + circ = Circuit([state_in, s2_0, s2_1, bs]) cutoff = 1 + i + k free_var = math.new_variable([1.1, -0.2], None, "free_var") def cost_fn(): return tf.abs( - (state_in >> circ).ket(cutoffs=[cutoff] * 4)[i, 1, i + k - 1, k] + circ.contract().fock_array((cutoff,) * 4)[i, 1, i + k - 1, k] ) ** 2 + tf.reduce_sum(free_var**2) tbcb = TensorboardCallback( @@ -64,7 +63,7 @@ def cost_fn(): opt = Optimizer(euclidean_lr=0.01) opt.minimize(cost_fn, by_optimizing=[circ, free_var], max_steps=300, callbacks={"tb": tbcb}) - assert np.allclose(np.cos(bs.theta.value) ** 2, k / (i + k), atol=1e-2) + assert np.allclose(np.cos(bs.parameters.theta.value) ** 2, k / (i + k), atol=1e-2) assert tbcb.logdir.exists() assert len(list(tbcb.writter_logdir.glob("events*"))) > 0 assert len(opt.callback_history["tb"]) == (len(opt.opt_history) - 1) // tbcb.steps_per_call diff --git a/tests/test_training/test_opt.py b/tests/test_training/test_opt_lab.py similarity index 100% rename from tests/test_training/test_opt.py rename to tests/test_training/test_opt_lab.py diff --git a/tests/test_training/test_opt_lab_dev.py b/tests/test_training/test_opt_lab_dev.py new file mode 100644 index 000000000..766dc5332 --- /dev/null +++ b/tests/test_training/test_opt_lab_dev.py @@ -0,0 +1,592 @@ +# Copyright 2022 Xanadu Quantum Technologies Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the Optimizer class""" + +import numpy as np +import tensorflow as tf +from hypothesis import given +from hypothesis import strategies as st +from thewalrus.symplectic import two_mode_squeezing + +from mrmustard import math, settings +from mrmustard.lab_dev import ( + Circuit, + Sgate, + S2gate, + Vacuum, + BSgate, + Ggate, + Interferometer, + Rgate, + Dgate, + RealInterferometer, + DisplacedSqueezed, + SqueezedVacuum, + GKet, + Number, + TwoModeSqueezedVacuum, +) +from mrmustard.math.parameters import Variable, update_euclidean +from mrmustard.physics.gaussian import von_neumann_entropy, number_means +from mrmustard.training import Optimizer +from mrmustard.training.callbacks import Callback + +from ..conftest import skip_np + + +class TestOptimizer: + r""" + Tests for the ``Optimizer`` class. + """ + + @given(n=st.integers(0, 3)) + def test_S2gate_coincidence_prob(self, n): + """Testing the optimal probability of obtaining |n,n> from a two mode squeezed vacuum""" + skip_np() + + settings.SEED = 40 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + S = TwoModeSqueezedVacuum( + (0, 1), r=abs(settings.rng.normal(loc=1.0, scale=0.1)), r_trainable=True + ) + + def cost_fn(): + return -math.abs(S.fock_array((n + 1, n + 1))[n, n]) ** 2 + + def cb(optimizer, cost, trainables, **kwargs): # pylint: disable=unused-argument + return { + "cost": cost, + "lr": optimizer.learning_rate[update_euclidean], + "num_trainables": len(trainables), + } + + opt = Optimizer(euclidean_lr=0.01) + opt.minimize(cost_fn, by_optimizing=[S], max_steps=300, callbacks=cb) + + expected = 1 / (n + 1) * (n / (n + 1)) ** n + assert np.allclose(-cost_fn(), expected, atol=1e-5) + + cb_result = opt.callback_history.get("cb") + assert {res["num_trainables"] for res in cb_result} == {1} + assert {res["lr"] for res in cb_result} == {0.01} + assert [res["cost"] for res in cb_result] == opt.opt_history[1:] + + @given(i=st.integers(1, 5), k=st.integers(1, 5)) + def test_hong_ou_mandel_optimizer(self, i, k): + """Finding the optimal beamsplitter transmission to get Hong-Ou-Mandel dip + This generalizes the single photon Hong-Ou-Mandel effect to the many photon setting + see Eq. 20 of https://journals.aps.org/prresearch/pdf/10.1103/PhysRevResearch.3.043065 + which lacks a square root in the right hand side. 
+ """ + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + r = np.arcsinh(1.0) + cutoff = 1 + i + k + + state = TwoModeSqueezedVacuum((0, 1), r=r, phi_trainable=True) + state2 = TwoModeSqueezedVacuum((2, 3), r=r, phi_trainable=True) + bs = BSgate( + (1, 2), + theta=np.arccos(np.sqrt(k / (i + k))) + 0.1 * settings.rng.normal(), + phi=settings.rng.normal(), + theta_trainable=True, + phi_trainable=True, + ) + circ = Circuit([state, state2, bs]) + + def cost_fn(): + return math.abs(circ.contract().fock_array((cutoff,) * 4)[i, 1, i + k - 1, k]) ** 2 + + opt = Optimizer(euclidean_lr=0.01) + opt.minimize( + cost_fn, + by_optimizing=[circ], + max_steps=300, + callbacks=[Callback(tag="null_cb", steps_per_call=3)], + ) + assert np.allclose(np.cos(bs.parameters.theta.value) ** 2, k / (i + k), atol=1e-2) + assert "null_cb" in opt.callback_history + assert len(opt.callback_history["null_cb"]) == (len(opt.opt_history) - 1) // 3 + + def test_learning_two_mode_squeezing(self): + """Finding the optimal beamsplitter transmission to make a pair of single photons""" + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + state_in = Vacuum((0, 1)) + s_gate = Sgate( + (0, 1), + r=abs(settings.rng.normal(size=2)), + phi=settings.rng.normal(size=2), + r_trainable=True, + phi_trainable=True, + ) + bs_gate = BSgate( + (0, 1), + theta=settings.rng.normal(), + phi=settings.rng.normal(), + theta_trainable=True, + phi_trainable=True, + ) + circ = Circuit([state_in, s_gate, bs_gate]) + + def cost_fn(): + amps = circ.contract().fock_array((2, 2)) + return -math.abs(amps[1, 1]) ** 2 + math.abs(amps[0, 1]) ** 2 + + opt = Optimizer(euclidean_lr=0.05) + + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300) + assert np.allclose(-cost_fn(), 0.25, atol=1e-5) + + def test_learning_two_mode_Ggate(self): + """Finding the optimal Ggate to make a pair of single photons""" + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + G = GKet((0, 1), symplectic_trainable=True) + + def cost_fn(): + amps = G.fock_array((2, 2)) + return -math.abs(amps[1, 1]) ** 2 + math.abs(amps[0, 1]) ** 2 + + opt = Optimizer(symplectic_lr=0.5, euclidean_lr=0.01) + + opt.minimize(cost_fn, by_optimizing=[G], max_steps=500) + assert np.allclose(-cost_fn(), 0.25, atol=1e-4) + + def test_learning_two_mode_Interferometer(self): + """Finding the optimal Interferometer to make a pair of single photons""" + skip_np() + + settings.SEED = 4 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + state_in = Vacuum((0, 1)) + s_gate = Sgate( + (0, 1), + r=settings.rng.normal(size=2) ** 2, + phi=settings.rng.normal(size=2), + r_trainable=True, + phi_trainable=True, + ) + interferometer = Interferometer((0, 1), unitary_trainable=True) + circ = Circuit([state_in, s_gate, interferometer]) + + def cost_fn(): + amps = circ.contract().fock_array((2, 2)) + return -math.abs(amps[1, 1]) ** 2 + math.abs(amps[0, 1]) ** 2 + + opt = Optimizer(unitary_lr=0.5, euclidean_lr=0.01) + + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000) + assert np.allclose(-cost_fn(), 0.25, atol=1e-5) + + def test_learning_two_mode_RealInterferometer(self): + """Finding the optimal Interferometer to make a pair of single photons""" + skip_np() + + settings.SEED = 2 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + state_in = Vacuum((0, 1)) + s_gate = 
Sgate( + (0, 1), + r=settings.rng.normal(size=2) ** 2, + phi=settings.rng.normal(size=2), + r_trainable=True, + phi_trainable=True, + ) + r_inter = RealInterferometer((0, 1), orthogonal_trainable=True) + + circ = Circuit([state_in, s_gate, r_inter]) + + def cost_fn(): + amps = circ.contract().fock_array((2, 2)) + return -math.abs(amps[1, 1]) ** 2 + math.abs(amps[0, 1]) ** 2 + + opt = Optimizer(orthogonal_lr=0.5, euclidean_lr=0.01) + + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=1000) + assert np.allclose(-cost_fn(), 0.25, atol=1e-5) + + def test_learning_four_mode_Interferometer(self): + """Finding the optimal Interferometer to make a NOON state with N=2""" + skip_np() + + settings.SEED = 4 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + solution_U = np.array( + [ + [ + -0.47541806 + 0.00045878j, + -0.41513474 - 0.27218387j, + -0.11065812 - 0.39556922j, + -0.29912017 + 0.51900235j, + ], + [ + -0.05246398 + 0.5209089j, + -0.29650069 - 0.40653082j, + 0.57434638 - 0.04417284j, + 0.28230532 - 0.24738672j, + ], + [ + 0.28437557 + 0.08773767j, + 0.18377764 - 0.66496587j, + -0.5874942 - 0.19866946j, + 0.2010813 - 0.10210844j, + ], + [ + -0.63173183 - 0.11057324j, + -0.03468292 + 0.15245454j, + -0.25390362 - 0.2244298j, + 0.18706333 - 0.64375049j, + ], + ] + ) + perturbed = ( + Interferometer((0, 1, 2, 3), unitary=solution_U) + >> BSgate((0, 1), settings.rng.normal(scale=0.01)) + >> BSgate((2, 3), settings.rng.normal(scale=0.01)) + >> BSgate((1, 2), settings.rng.normal(scale=0.01)) + >> BSgate((0, 3), settings.rng.normal(scale=0.01)) + ) + X = perturbed.symplectic[0] + perturbed_U = X[:4, :4] + 1j * X[4:, :4] + + state_in = Vacuum((0, 1, 2, 3)) + s_gate = Sgate( + (0, 1, 2, 3), + r=settings.rng.normal(loc=np.arcsinh(1.0), scale=0.01, size=4), + r_trainable=True, + ) + interferometer = Interferometer((0, 1, 2, 3), unitary=perturbed_U, unitary_trainable=True) + + circ = Circuit([state_in, s_gate, interferometer]) + + def cost_fn(): + amps = circ.contract().fock_array((3, 3, 3, 3)) + return -math.abs((amps[1, 1, 2, 0] + amps[1, 1, 0, 2]) / np.sqrt(2)) ** 2 + + opt = Optimizer(unitary_lr=0.05) + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=200) + assert np.allclose(-cost_fn(), 0.0625, atol=1e-5) + + def test_learning_four_mode_RealInterferometer(self): + """Finding the optimal Interferometer to make a NOON state with N=2""" + skip_np() + + settings.SEED = 6 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + solution_O = np.array( + [ + [0.5, -0.5, 0.5, 0.5], + [-0.5, -0.5, -0.5, 0.5], + [0.5, 0.5, -0.5, 0.5], + [0.5, -0.5, -0.5, -0.5], + ] + ) + solution_S = (np.arcsinh(1.0), np.array([0.0, np.pi / 2, -np.pi, -np.pi / 2])) + perturbed = ( + RealInterferometer((0, 1, 2, 3), orthogonal=solution_O) + >> BSgate((0, 1), settings.rng.normal(scale=0.01)) + >> BSgate((2, 3), settings.rng.normal(scale=0.01)) + >> BSgate((1, 2), settings.rng.normal(scale=0.01)) + >> BSgate((0, 3), settings.rng.normal(scale=0.01)) + ) + perturbed_O = perturbed.symplectic[0][:4, :4] + + state_in = Vacuum((0, 1, 2, 3)) + s_gate = Sgate( + (0, 1, 2, 3), + r=solution_S[0] + settings.rng.normal(scale=0.01, size=4), + phi=solution_S[1] + settings.rng.normal(scale=0.01, size=4), + r_trainable=True, + phi_trainable=True, + ) + r_inter = RealInterferometer( + (0, 1, 2, 3), orthogonal=perturbed_O, orthogonal_trainable=True + ) + + circ = Circuit([state_in, s_gate, r_inter]) + + def cost_fn(): + amps = circ.contract().fock_array((2, 2, 3, 3)) + return
-math.abs((amps[1, 1, 0, 2] + amps[1, 1, 2, 0]) / np.sqrt(2)) ** 2 + + opt = Optimizer() + + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=200) + assert np.allclose(-cost_fn(), 0.0625, atol=1e-5) + + def test_squeezing_hong_ou_mandel_optimizer(self): + """Finding the optimal squeezing parameter to get Hong-Ou-Mandel dip in time + see https://www.pnas.org/content/117/52/33107/tab-article-info + """ + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + r = np.arcsinh(1.0) + + state_in = Vacuum((0, 1, 2, 3)) + S_01 = S2gate((0, 1), r=r, phi=0.0, phi_trainable=True) + S_23 = S2gate((2, 3), r=r, phi=0.0, phi_trainable=True) + S_12 = S2gate( + (1, 2), r=1.0, phi=settings.rng.normal(), r_trainable=True, phi_trainable=True + ) + + circ = Circuit([state_in, S_01, S_23, S_12]) + + def cost_fn(): + return math.abs(circ.contract().fock_array((2, 2, 2, 2))[1, 1, 1, 1]) ** 2 + + opt = Optimizer(euclidean_lr=0.001) + opt.minimize(cost_fn, by_optimizing=[circ], max_steps=300) + assert np.allclose(np.sinh(S_12.parameters.r.value) ** 2, 1, atol=1e-2) + + def test_parameter_passthrough(self): + """Same as the test above, but with param passthrough""" + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + r = np.arcsinh(1.0) + r_var = Variable(r, "r", (0.0, None)) + phi_var = Variable(settings.rng.normal(), "phi", (None, None)) + + state_in = Vacuum((0, 1, 2, 3)) + s2_gate0 = S2gate((0, 1), r=r, phi=0.0, phi_trainable=True) + s2_gate1 = S2gate((2, 3), r=r, phi=0.0, phi_trainable=True) + s2_gate2 = S2gate((1, 2), r=r_var, phi=phi_var) + + circ = Circuit([state_in, s2_gate0, s2_gate1, s2_gate2]) + + def cost_fn(): + return math.abs(circ.contract().fock_array((2, 2, 2, 2))[1, 1, 1, 1]) ** 2 + + opt = Optimizer(euclidean_lr=0.001) + opt.minimize(cost_fn, by_optimizing=[r_var, phi_var], max_steps=300) + assert np.allclose(np.sinh(r_var.value) ** 2, 1, atol=1e-2) + + def test_making_thermal_state_as_one_half_two_mode_squeezed_vacuum(self): + """Optimizes a Ggate on two modes so as to prepare a state with the same entropy + and mean photon number as a thermal state""" + skip_np() + + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + def thermal_entropy(nbar): + return -(nbar * np.log((nbar) / (1 + nbar)) - np.log(1 + nbar)) + + nbar = 1.4 + S_init = two_mode_squeezing(np.arcsinh(1.0), 0.0) + S = thermal_entropy(nbar) + + G = Ggate((0, 1), symplectic=S_init, symplectic_trainable=True) + + def cost_fn(): + state = Vacuum((0, 1)) >> G + + state0 = state[0] + state1 = state[1] + + cov0, mean0, _ = [x[0] for x in state0.phase_space(s=0)] + cov1, mean1, _ = [x[0] for x in state1.phase_space(s=0)] + + num_mean0 = number_means(cov0, mean0)[0] + num_mean1 = number_means(cov1, mean1)[0] + + entropy = von_neumann_entropy(cov0) + return (num_mean0 - nbar) ** 2 + (entropy - S) ** 2 + (num_mean1 - nbar) ** 2 + + opt = Optimizer(symplectic_lr=0.1) + opt.minimize(cost_fn, by_optimizing=[G], max_steps=50) + S = math.asnumpy(G.parameters.symplectic.value) + cov = S @ S.T + assert np.allclose(cov, two_mode_squeezing(2 * np.arcsinh(np.sqrt(nbar)), 0.0)) + + def test_opt_backend_param(self): + """Test the optimization of a backend parameter defined outside a gate.""" + skip_np() + + # rotated displaced squeezed state + settings.SEED = 42 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + rotation_angle = np.pi / 2 + 
target_state = SqueezedVacuum((0,), r=1.0, phi=rotation_angle) + + # angle of rotation gate + r_angle = math.new_variable(0, bounds=(0, np.pi), name="r_angle") + # trainable squeezing + S = Sgate((0,), r=0.1, phi=0, r_trainable=True, phi_trainable=False) + + def cost_fn_sympl(): + state_out = Vacuum((0,)) >> S >> Rgate((0,), theta=r_angle) + return 1 - math.abs((state_out >> target_state.dual) ** 2) + + opt = Optimizer(symplectic_lr=0.1, euclidean_lr=0.05) + opt.minimize(cost_fn_sympl, by_optimizing=[S, r_angle]) + + assert np.allclose(math.asnumpy(r_angle), rotation_angle / 2, atol=1e-4) + + def test_dgate_optimization(self): + """Test that Dgate is optimized correctly.""" + skip_np() + + settings.SEED = 24 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + dgate = Dgate((0,), x_trainable=True, y_trainable=True) + target_state = DisplacedSqueezed((0,), r=0.0, x=0.1, y=0.2).fock_array((40,)) + + def cost_fn(): + state_out = Vacuum((0,)) >> dgate + return -math.abs(math.sum(math.conj(state_out.fock_array((40,))) * target_state)) ** 2 + + opt = Optimizer() + opt.minimize(cost_fn, by_optimizing=[dgate]) + + assert np.allclose(dgate.parameters.x.value, 0.1, atol=0.01) + assert np.allclose(dgate.parameters.y.value, 0.2, atol=0.01) + + def test_sgate_optimization(self): + """Test that Sgate is optimized correctly.""" + skip_np() + + settings.SEED = 25 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + sgate = Sgate((0,), r=0.2, phi=0.1, r_trainable=True, phi_trainable=True) + target_state = SqueezedVacuum((0,), r=0.1, phi=0.2).fock_array((40,)) + + def cost_fn(): + state_out = Vacuum((0,)) >> sgate + + return -math.abs(math.sum(math.conj(state_out.fock_array((40,))) * target_state)) ** 2 + + opt = Optimizer() + opt.minimize(cost_fn, by_optimizing=[sgate]) + + assert np.allclose(sgate.parameters.r.value, 0.1, atol=0.01) + assert np.allclose(sgate.parameters.phi.value, 0.2, atol=0.01) + + def test_bsgate_optimization(self): + """Test that BSgate is optimized correctly.""" + skip_np() + + settings.SEED = 25 + rng = tf.random.get_global_generator() + rng.reset_from_seed(settings.SEED) + + G = GKet((0, 1)) + + bsgate = BSgate((0, 1), 0.05, 0.1, theta_trainable=True, phi_trainable=True) + target_state = (G >> BSgate((0, 1), 0.1, 0.2)).fock_array((40, 40)) + + def cost_fn(): + state_out = G >> bsgate + + return ( + -math.abs(math.sum(math.conj(state_out.fock_array((40, 40))) * target_state)) ** 2 + ) + + opt = Optimizer() + opt.minimize(cost_fn, by_optimizing=[bsgate]) + + assert np.allclose(bsgate.parameters.theta.value, 0.1, atol=0.01) + assert np.allclose(bsgate.parameters.phi.value, 0.2, atol=0.01) + + def test_squeezing_grad_from_fock(self): + """Test that the gradient of a squeezing gate is computed from the fock representation.""" + skip_np() + + squeezing = Sgate((0,), r=1.0, r_trainable=True) + og_r = math.asnumpy(squeezing.parameters.r.value) + + def cost_fn(): + return -((Number((0,), 2) >> squeezing >> Vacuum((0,)).dual) ** 2) + + opt = Optimizer(euclidean_lr=0.05) + opt.minimize(cost_fn, by_optimizing=[squeezing], max_steps=100) + + assert squeezing.parameters.r.value != og_r + + def test_displacement_grad_from_fock(self): + """Test that the gradient of a displacement gate is computed from the fock representation.""" + skip_np() + + disp = Dgate((0,), x=1.0, y=0.5, x_trainable=True, y_trainable=True) + og_x = math.asnumpy(disp.parameters.x.value) + og_y = math.asnumpy(disp.parameters.y.value) + + def cost_fn(): + return
-((Number((0,), 2) >> disp >> Vacuum((0,)).dual) ** 2) + + opt = Optimizer(euclidean_lr=0.05) + opt.minimize(cost_fn, by_optimizing=[disp], max_steps=100) + assert og_x != disp.parameters.x.value + assert og_y != disp.parameters.y.value + + def test_bsgate_grad_from_fock(self): + """Test that the gradient of a beamsplitter gate is computed from the fock representation.""" + skip_np() + + sq = SqueezedVacuum((0,), r=1.0, r_trainable=True) + og_r = math.asnumpy(sq.parameters.r.value) + + def cost_fn(): + return -( + ( + sq + >> Number((1,), 1) + >> BSgate((0, 1), 0.5) + >> (Vacuum((0,)) >> Number((1,), 1)).dual + ) + ** 2 + ) + + opt = Optimizer(euclidean_lr=0.05) + opt.minimize(cost_fn, by_optimizing=[sq], max_steps=100) + + assert og_r != sq.parameters.r.value diff --git a/tests/test_training/test_trainer.py b/tests/test_training/test_trainer.py index 292a462f1..fe10b2021 100644 --- a/tests/test_training/test_trainer.py +++ b/tests/test_training/test_trainer.py @@ -32,8 +32,7 @@ except ImportError: ray_available = False -from mrmustard.lab import Dgate, Gaussian, Ggate, Vacuum -from mrmustard.physics import fidelity +from mrmustard.lab_dev import Dgate, GKet, Ggate, Vacuum from mrmustard.training import Optimizer from mrmustard.training.trainer import map_trainer, train_device, update_pop @@ -48,8 +47,8 @@ def make_circ(x=0.0, return_type=None): math.change_backend("tensorflow") - circ = Ggate(num_modes=1, symplectic_trainable=True) >> Dgate( - x=x, x_trainable=True, y_trainable=True + circ = Ggate((0,), symplectic_trainable=True) >> Dgate( + (0,), x=x, x_trainable=True, y_trainable=True ) return ( [circ] if return_type == "list" else {"circ": circ} if return_type == "dict" else circ @@ -60,9 +59,9 @@ def cost_fn(circ=make_circ(0.1), y_targ=0.0): math.change_backend("tensorflow") - target = Gaussian(1) >> Dgate(-0.1, y_targ) - s = Vacuum(1) >> circ - return -fidelity(s, target) + target = GKet((0,)) >> Dgate((0,), -0.1, y_targ) + s = Vacuum((0,)) >> circ + return -math.abs((s >> target.dual) ** 2) return make_circ, cost_fn @@ -145,12 +144,7 @@ def test_circ_optimize(self, tasks, return_type): # pylint: disable=redefined-o @pytest.mark.parametrize( "metric_fns", [ - {"is_gaussian": lambda c: c.is_gaussian, "foo": lambda _: 17.0}, - [ - lambda c: c.modes, - len, - ], - lambda c: (Vacuum(1) >> c >> c >> c).fock_probabilities([5]), + lambda c: (Vacuum((0,)) >> c >> c >> c).fock_array((5,)), ], ) def test_circ_optimize_metrics(self, metric_fns): # pylint: disable=redefined-outer-name diff --git a/tests/test_utils/test_serialize.py b/tests/test_utils/test_serialize.py index d19a82de0..c5f47e378 100644 --- a/tests/test_utils/test_serialize.py +++ b/tests/test_utils/test_serialize.py @@ -180,7 +180,7 @@ def test_all_components_serializable(self): BSgate([1, 2], theta=0.1, theta_trainable=True, theta_bounds=(-0.5, 0.5)), Dgate([0], x=1.1, y=2.2), Identity([1, 2]), - Rgate([1, 2], phi=0.1), + Rgate([1, 2], theta=0.1), S2gate([0, 1], 1, 1), Sgate([0], 0.1, 0.2, r_trainable=True), FockDamping([0], damping=0.1),
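**Usage sketches (illustrative, not part of the patch; they assume only the `lab_dev` API exercised in the diffs above):**

1. `Rgate` now names its angle `theta`, and the companion kwargs become `theta_trainable` and `theta_bounds`. A minimal sketch of the renamed constructor, following `test_rgate.py`:

```python
from mrmustard.lab_dev import Rgate

# Before this patch the angle was called `phi`: Rgate(modes=[1, 2], phi=0.1)
gate = Rgate(modes=[1, 2], theta=0.1)
assert gate.modes == [1, 2]

# The trainable variant renames its companion kwargs too.
trainable = Rgate([0], theta=1.0, theta_trainable=True, theta_bounds=(-2, 2))
trainable.parameters.theta.value = 0.5  # assignable because theta_trainable=True
```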
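2. `Optimizer._get_trainable_params` now dispatches on three kinds of items: `lab_dev` circuits (ops in `.components`, variables via `op.parameters.tagged_variables(tag)`), legacy `lab` circuits (`.ops` / `.parameter_set`), and bare components exposing `.parameters`. A self-contained sketch of that dispatch order; `ParamSet`, `Op`, `DevCircuit`, and `LegacyCircuit` are hypothetical stand-ins rather than mrmustard classes, and the real function additionally handles raw backend tensors:

```python
class ParamSet:
    """Hypothetical stand-in for a mrmustard parameter set."""

    def __init__(self, variables):
        self._vars = variables

    def tagged_variables(self, tag):
        # one tagged entry per variable, mirroring the tagging scheme in the diff
        return {f"{tag}/{name}": var for name, var in self._vars.items()}


class Op:
    """Hypothetical gate stand-in exposing both parameter APIs."""

    def __init__(self, params):
        self.parameters = params  # lab_dev style
        self.parameter_set = params  # legacy lab style


class DevCircuit:
    """Stand-in for mrmustard.lab_dev.Circuit."""

    def __init__(self, components):
        self.components = components


class LegacyCircuit:
    """Stand-in for mrmustard.lab.Circuit."""

    def __init__(self, ops):
        self.ops = ops


def get_trainable_params(items, root_tag="optimized"):
    trainables = []
    for i, item in enumerate(items):
        owner_tag = f"{root_tag}[{i}]"
        if isinstance(item, DevCircuit):  # lab_dev circuits come first
            for j, op in enumerate(item.components):
                tag = f"{owner_tag}:{item.__class__.__qualname__}/_ops[{j}]"
                trainables.append(op.parameters.tagged_variables(tag).items())
        elif isinstance(item, LegacyCircuit):  # then legacy lab circuits
            for j, op in enumerate(item.ops):
                tag = f"{owner_tag}:{item.__class__.__qualname__}/_ops[{j}]"
                trainables.append(op.parameter_set.tagged_variables(tag).items())
        elif hasattr(item, "parameters"):  # bare lab_dev components
            tag = f"{owner_tag}:{item.__class__.__qualname__}"
            trainables.append(item.parameters.tagged_variables(tag).items())
    # flatten the dict views into (tag, variable) pairs
    return [pair for group in trainables for pair in group]


pairs = get_trainable_params([DevCircuit([Op(ParamSet({"theta": 0.1}))])])
assert pairs[0][0] == "optimized[0]:DevCircuit/_ops[0]/theta"
```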
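3. The cost functions change shape as well: instead of `(state_in >> circ).ket(cutoffs=...)`, the input state becomes a component of the `Circuit` and amplitudes are read from `Circuit.contract().fock_array(shape)`. A sketch of the pattern used throughout `test_opt_lab_dev.py`:

```python
import numpy as np

from mrmustard import math
from mrmustard.lab_dev import Circuit, S2gate, Vacuum
from mrmustard.training import Optimizer

# The vacuum input is now a component of the circuit itself.
circ = Circuit(
    [
        Vacuum((0, 1)),
        S2gate((0, 1), r=np.arcsinh(1.0), phi=0.0, r_trainable=True, phi_trainable=True),
    ]
)


def cost_fn():
    # contract the circuit and read Fock amplitudes up to the given shape;
    # here the |1, 1> coincidence probability is maximized
    amps = circ.contract().fock_array((2, 2))
    return -math.abs(amps[1, 1]) ** 2


opt = Optimizer(euclidean_lr=0.01)
opt.minimize(cost_fn, by_optimizing=[circ], max_steps=100)
```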
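4. Fidelity-style costs drop `mrmustard.physics.fidelity` in favor of overlaps with the dual state: `state_out >> target.dual` contracts the output state with the dual of the target, and the squared modulus of that amplitude is the pure-state fidelity, so the cost vanishes when the states match. A sketch following `test_opt_backend_param` and the updated cost function in `test_trainer.py`:

```python
from mrmustard import math
from mrmustard.lab_dev import Sgate, SqueezedVacuum, Vacuum
from mrmustard.training import Optimizer

target = SqueezedVacuum((0,), r=0.1, phi=0.2)
sgate = Sgate((0,), r=0.2, phi=0.1, r_trainable=True, phi_trainable=True)


def cost_fn():
    state_out = Vacuum((0,)) >> sgate
    # <target|state_out> via the dual; 1 - |overlap|^2 is minimized at a match
    return 1 - math.abs((state_out >> target.dual) ** 2)


opt = Optimizer(euclidean_lr=0.05)
opt.minimize(cost_fn, by_optimizing=[sgate], max_steps=200)
```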