From a20212b18ed94cc5436eaab5ffa12f7494ec8e4b Mon Sep 17 00:00:00 2001 From: Jake Arkinstall <65358059+jake-arkinstall@users.noreply.github.com> Date: Tue, 9 Jul 2024 11:31:35 +0100 Subject: [PATCH] Added .gitattributes to enforce unix line endings. The rest is done by git --- .gitattributes | 3 + docs/modules/structured_state.rst | 122 +- examples/README.md | 18 +- pytket/extensions/cutensornet/general.py | 236 +- .../cutensornet/structured_state/general.py | 854 +++---- .../cutensornet/structured_state/mps.py | 2132 ++++++++--------- .../cutensornet/structured_state/mps_gate.py | 860 +++---- .../cutensornet/structured_state/mps_mpo.py | 1164 ++++----- .../structured_state/simulation.py | 844 +++---- .../cutensornet/structured_state/ttn.py | 1864 +++++++------- .../cutensornet/structured_state/ttn_gate.py | 1350 +++++------ tests/test_structured_state.py | 1818 +++++++------- 12 files changed, 5634 insertions(+), 5631 deletions(-) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..43ff080a --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +* text=auto +* text eol=lf +*.png -text diff --git a/docs/modules/structured_state.rst b/docs/modules/structured_state.rst index e6b658d7..38d22104 100644 --- a/docs/modules/structured_state.rst +++ b/docs/modules/structured_state.rst @@ -1,61 +1,61 @@ -Structured state evolution -========================== - -.. automodule:: pytket.extensions.cutensornet.structured_state - - -Simulation -~~~~~~~~~~ - -.. autofunction:: pytket.extensions.cutensornet.structured_state.simulate - -.. autoenum:: pytket.extensions.cutensornet.structured_state.SimulationAlgorithm() - :members: - -.. autoclass:: pytket.extensions.cutensornet.structured_state.Config() - - .. automethod:: __init__ - - -Classes -~~~~~~~ - -.. autoclass:: pytket.extensions.cutensornet.structured_state.StructuredState() - - .. automethod:: is_valid - .. automethod:: apply_gate - .. automethod:: apply_unitary - .. automethod:: apply_scalar - .. automethod:: vdot - .. automethod:: sample - .. automethod:: measure - .. automethod:: postselect - .. automethod:: expectation_value - .. automethod:: get_fidelity - .. automethod:: get_statevector - .. automethod:: get_amplitude - .. automethod:: get_qubits - .. automethod:: get_byte_size - .. automethod:: get_device_id - .. automethod:: update_libhandle - .. automethod:: copy - -.. autoclass:: pytket.extensions.cutensornet.structured_state.TTNxGate() - - .. automethod:: __init__ - -.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxGate() - - .. automethod:: __init__ - .. automethod:: add_qubit - -.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxMPO() - - .. automethod:: __init__ - .. automethod:: add_qubit - - -Miscellaneous -~~~~~~~~~~~~~ - -.. autofunction:: pytket.extensions.cutensornet.structured_state.prepare_circuit_mps +Structured state evolution +========================== + +.. automodule:: pytket.extensions.cutensornet.structured_state + + +Simulation +~~~~~~~~~~ + +.. autofunction:: pytket.extensions.cutensornet.structured_state.simulate + +.. autoenum:: pytket.extensions.cutensornet.structured_state.SimulationAlgorithm() + :members: + +.. autoclass:: pytket.extensions.cutensornet.structured_state.Config() + + .. automethod:: __init__ + + +Classes +~~~~~~~ + +.. autoclass:: pytket.extensions.cutensornet.structured_state.StructuredState() + + .. automethod:: is_valid + .. automethod:: apply_gate + .. automethod:: apply_unitary + .. 
automethod:: apply_scalar + .. automethod:: vdot + .. automethod:: sample + .. automethod:: measure + .. automethod:: postselect + .. automethod:: expectation_value + .. automethod:: get_fidelity + .. automethod:: get_statevector + .. automethod:: get_amplitude + .. automethod:: get_qubits + .. automethod:: get_byte_size + .. automethod:: get_device_id + .. automethod:: update_libhandle + .. automethod:: copy + +.. autoclass:: pytket.extensions.cutensornet.structured_state.TTNxGate() + + .. automethod:: __init__ + +.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxGate() + + .. automethod:: __init__ + .. automethod:: add_qubit + +.. autoclass:: pytket.extensions.cutensornet.structured_state.MPSxMPO() + + .. automethod:: __init__ + .. automethod:: add_qubit + + +Miscellaneous +~~~~~~~~~~~~~ + +.. autofunction:: pytket.extensions.cutensornet.structured_state.prepare_circuit_mps diff --git a/examples/README.md b/examples/README.md index fd569338..5dfd93ee 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,10 +1,10 @@ -# Contents - -Available tutorials for users: -* `mps_tutorial.ipynb`: Use of MPS simulation and features. -* `ttn_tutorial.ipynb`: Use of TTN simulation and features. -* `mpi/`: Example on how to use MPS for embarrasingly parallel tasks with `mpi4py` see the `mpi` folder. - -Developers: -* `check-examples`: The script to check that the Jupyter notebooks are generated correctly from the files in `python/`. To generate the `.ipynb` from these run the `p2j` command in this script. +# Contents + +Available tutorials for users: +* `mps_tutorial.ipynb`: Use of MPS simulation and features. +* `ttn_tutorial.ipynb`: Use of TTN simulation and features. +* `mpi/`: Example on how to use MPS for embarrasingly parallel tasks with `mpi4py` see the `mpi` folder. + +Developers: +* `check-examples`: The script to check that the Jupyter notebooks are generated correctly from the files in `python/`. To generate the `.ipynb` from these run the `p2j` command in this script. * `python/`: The `.py` files that generate the `.ipynb` files. As a developer, you are expected to update these files instead of the `.ipynb` files. Remember to generate the latter using the `p2j` command before opening a pull request that changes these examples. \ No newline at end of file diff --git a/pytket/extensions/cutensornet/general.py b/pytket/extensions/cutensornet/general.py index 3f33882e..0f5d3af7 100644 --- a/pytket/extensions/cutensornet/general.py +++ b/pytket/extensions/cutensornet/general.py @@ -1,118 +1,118 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations # type: ignore -import warnings -import logging -from logging import Logger - -from typing import Any, Optional - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum.cutensornet as cutn # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - - -class CuTensorNetHandle: - """Initialise the cuTensorNet library with automatic workspace memory - management. - - Note: - Always use as ``with CuTensorNetHandle() as libhandle:`` so that cuTensorNet - handles are automatically destroyed at the end of execution. - - Attributes: - handle (int): The cuTensorNet library handle created by this initialisation. - device_id (int): The ID of the device (GPU) where cuTensorNet is initialised. - If not provided, defaults to ``cp.cuda.Device()``. - """ - - def __init__(self, device_id: Optional[int] = None): - self._is_destroyed = False - - # Make sure CuPy uses the specified device - dev = cp.cuda.Device(device_id) - dev.use() - - self.dev = dev - self.device_id = dev.id - - self._handle = cutn.create() - - @property - def handle(self) -> Any: - if self._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of CuTensorNetHandle.", - ) - return self._handle - - def destroy(self) -> None: - """Destroys the memory handle, releasing memory. - - Only call this method if you are initialising a ``CuTensorNetHandle`` outside - a ``with CuTensorNetHandle() as libhandle`` statement. - """ - cutn.destroy(self._handle) - self._is_destroyed = True - - def __enter__(self) -> CuTensorNetHandle: - return self - - def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None: - self.destroy() - - def print_device_properties(self, logger: Logger) -> None: - """Prints local GPU properties.""" - device_props = cp.cuda.runtime.getDeviceProperties(self.dev.id) - logger.debug("===== device info ======") - logger.debug("GPU-name:", device_props["name"].decode()) - logger.debug("GPU-clock:", device_props["clockRate"]) - logger.debug("GPU-memoryClock:", device_props["memoryClockRate"]) - logger.debug("GPU-nSM:", device_props["multiProcessorCount"]) - logger.debug("GPU-major:", device_props["major"]) - logger.debug("GPU-minor:", device_props["minor"]) - logger.debug("========================") - - -def set_logger( - logger_name: str, - level: int = logging.WARNING, - fmt: str = "[%(asctime)s.%(msecs)03d] %(name)s (%(levelname)s) - %(message)s", -) -> Logger: - """Initialises and configures a logger object. - - Args: - logger_name: Name for the logger object. - level: Logger output level. - fmt: Logger output format. - - Returns: - New configured logger object. - """ - logger = logging.getLogger(logger_name) - logger.setLevel(level) - logger.propagate = False - if not logger.handlers: - handler = logging.StreamHandler() - handler.setLevel(level) - formatter = logging.Formatter(fmt, datefmt="%H:%M:%S") - handler.setFormatter(formatter) - logger.addHandler(handler) - return logger +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings +import logging +from logging import Logger + +from typing import Any, Optional + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum.cutensornet as cutn # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + + +class CuTensorNetHandle: + """Initialise the cuTensorNet library with automatic workspace memory + management. + + Note: + Always use as ``with CuTensorNetHandle() as libhandle:`` so that cuTensorNet + handles are automatically destroyed at the end of execution. + + Attributes: + handle (int): The cuTensorNet library handle created by this initialisation. + device_id (int): The ID of the device (GPU) where cuTensorNet is initialised. + If not provided, defaults to ``cp.cuda.Device()``. + """ + + def __init__(self, device_id: Optional[int] = None): + self._is_destroyed = False + + # Make sure CuPy uses the specified device + dev = cp.cuda.Device(device_id) + dev.use() + + self.dev = dev + self.device_id = dev.id + + self._handle = cutn.create() + + @property + def handle(self) -> Any: + if self._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of CuTensorNetHandle.", + ) + return self._handle + + def destroy(self) -> None: + """Destroys the memory handle, releasing memory. + + Only call this method if you are initialising a ``CuTensorNetHandle`` outside + a ``with CuTensorNetHandle() as libhandle`` statement. + """ + cutn.destroy(self._handle) + self._is_destroyed = True + + def __enter__(self) -> CuTensorNetHandle: + return self + + def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) -> None: + self.destroy() + + def print_device_properties(self, logger: Logger) -> None: + """Prints local GPU properties.""" + device_props = cp.cuda.runtime.getDeviceProperties(self.dev.id) + logger.debug("===== device info ======") + logger.debug("GPU-name:", device_props["name"].decode()) + logger.debug("GPU-clock:", device_props["clockRate"]) + logger.debug("GPU-memoryClock:", device_props["memoryClockRate"]) + logger.debug("GPU-nSM:", device_props["multiProcessorCount"]) + logger.debug("GPU-major:", device_props["major"]) + logger.debug("GPU-minor:", device_props["minor"]) + logger.debug("========================") + + +def set_logger( + logger_name: str, + level: int = logging.WARNING, + fmt: str = "[%(asctime)s.%(msecs)03d] %(name)s (%(levelname)s) - %(message)s", +) -> Logger: + """Initialises and configures a logger object. + + Args: + logger_name: Name for the logger object. + level: Logger output level. + fmt: Logger output format. + + Returns: + New configured logger object. 
+ """ + logger = logging.getLogger(logger_name) + logger.setLevel(level) + logger.propagate = False + if not logger.handlers: + handler = logging.StreamHandler() + handler.setLevel(level) + formatter = logging.Formatter(fmt, datefmt="%H:%M:%S") + handler.setFormatter(formatter) + logger.addHandler(handler) + return logger diff --git a/pytket/extensions/cutensornet/structured_state/general.py b/pytket/extensions/cutensornet/structured_state/general.py index a458e723..1c8156af 100644 --- a/pytket/extensions/cutensornet/structured_state/general.py +++ b/pytket/extensions/cutensornet/structured_state/general.py @@ -1,427 +1,427 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations # type: ignore -from abc import ABC, abstractmethod -import warnings -import logging -from typing import Any, Optional, Type - -import numpy as np # type: ignore - -from pytket.circuit import Command, Qubit -from pytket.pauli import QubitPauliString - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) - -from pytket.extensions.cutensornet import CuTensorNetHandle - -# An alias for the CuPy type used for tensors -try: - Tensor = cp.ndarray -except NameError: - Tensor = Any - - -class Config: - """Configuration class for simulation using ``StructuredState``.""" - - def __init__( - self, - chi: Optional[int] = None, - truncation_fidelity: Optional[float] = None, - seed: Optional[int] = None, - float_precision: Type[Any] = np.float64, - value_of_zero: float = 1e-16, - leaf_size: int = 8, - use_kahypar: bool = False, - k: int = 4, - optim_delta: float = 1e-5, - loglevel: int = logging.WARNING, - ): - """Instantiate a configuration object for ``StructuredState`` simulation. - - Note: - Providing both a custom ``chi`` and ``truncation_fidelity`` will raise an - exception. Choose one or the other (or neither, for exact simulation). - - Args: - chi: The maximum value allowed for the dimension of the virtual - bonds. Higher implies better approximation but more - computational resources. If not provided, ``chi`` will be unbounded. - truncation_fidelity: Every time a two-qubit gate is applied, the virtual - bond will be truncated to the minimum dimension that satisfies - ``||^2 >= trucantion_fidelity``, where ``|psi>`` and ``|phi>`` - are the states before and after truncation (both normalised). - If not provided, it will default to its maximum value 1. - seed: Seed for the random number generator. Setting a seed provides - reproducibility across simulations using ``StructuredState``, in the - sense that they will produce the same sequence of measurement outcomes. - Crucially, consecutive samples taken from the same ``StructuredState`` - can still be different from each other. - float_precision: The floating point precision used in tensor calculations; - choose from ``numpy`` types: ``np.float64`` or ``np.float32``. 
- Complex numbers are represented using two of such - ``float`` numbers. Default is ``np.float64``. - value_of_zero: Any number below this value will be considered equal to zero. - Even when no ``chi`` or ``truncation_fidelity`` is provided, singular - values below this number will be truncated. - We suggest to use a value slightly below what your chosen - ``float_precision`` can reasonably achieve. For instance, ``1e-16`` for - ``np.float64`` precision (default) and ``1e-7`` for ``np.float32``. - leaf_size: For ``TTN`` simulation only. Sets the maximum number of - qubits in a leaf node when using ``TTN``. Default is 8. - use_kahypar: Use KaHyPar for graph partitioning (used in ``TTN``) if this - is True. Otherwise, use NetworkX (worse, but easy to setup). Defaults - to False. - k: For ``MPSxMPO`` simulation only. Sets the maximum number of layers - the MPO is allowed to have before being contracted. Increasing this - might increase fidelity, but it will also increase resource requirements - exponentially. Default value is 4. - optim_delta: For ``MPSxMPO`` simulation only. Sets the stopping criteria for - the optimisation when contracting the ``k`` layers of MPO. Stops when - the increase of fidelity between iterations is smaller than this value. - Default value is ``1e-5``. - loglevel: Internal logger output level. Use 30 for warnings only, 20 for - verbose and 10 for debug mode. - - Raises: - ValueError: If both ``chi`` and ``truncation_fidelity`` are fixed. - ValueError: If the value of ``chi`` is set below 2. - ValueError: If the value of ``truncation_fidelity`` is not in [0,1]. - """ - _CHI_LIMIT = 2**60 - if ( - chi is not None - and chi < _CHI_LIMIT - and truncation_fidelity is not None - and truncation_fidelity != 1.0 - ): - raise ValueError("Cannot fix both chi and truncation_fidelity.") - if chi is None: - chi = _CHI_LIMIT # In practice, this is like having it be unbounded - if truncation_fidelity is None: - truncation_fidelity = 1 - - if chi < 2: - raise ValueError("The max virtual bond dim (chi) must be >= 2.") - if truncation_fidelity < 0 or truncation_fidelity > 1: - raise ValueError("Provide a value of truncation_fidelity in [0,1].") - - self.chi = chi - self.truncation_fidelity = truncation_fidelity - - if float_precision is None or float_precision == np.float64: # Double precision - self._real_t = np.float64 # type: ignore - self._complex_t = np.complex128 # type: ignore - self._atol = 1e-12 - elif float_precision == np.float32: # Single precision - self._real_t = np.float32 # type: ignore - self._complex_t = np.complex64 # type: ignore - self._atol = 1e-4 - else: - allowed_precisions = [np.float64, np.float32] - raise TypeError( - f"Value of float_precision must be in {allowed_precisions}." - ) - self.zero = value_of_zero - - if value_of_zero > self._atol / 1000: - warnings.warn( - "Your chosen value_of_zero is relatively large. 
" - "Faithfulness of final fidelity estimate is not guaranteed.", - UserWarning, - ) - - self.seed = seed - - if leaf_size >= 65: # Imposed to avoid bond ID collisions - # More than 20 qubits is already unreasonable for a leaf anyway - raise ValueError("Maximum allowed leaf_size is 65.") - - self.leaf_size = leaf_size - self.use_kahypar = use_kahypar - self.k = k - self.optim_delta = 1e-5 - self.loglevel = loglevel - - def copy(self) -> Config: - """Standard copy of the contents.""" - return Config( - chi=self.chi, - truncation_fidelity=self.truncation_fidelity, - seed=self.seed, - float_precision=self._real_t, # type: ignore - value_of_zero=self.zero, - leaf_size=self.leaf_size, - k=self.k, - optim_delta=self.optim_delta, - loglevel=self.loglevel, - ) - - -class StructuredState(ABC): - """Class representing a Tensor Network state.""" - - @abstractmethod - def is_valid(self) -> bool: - """Verify that the tensor network state is valid. - - Returns: - False if a violation was detected or True otherwise. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def apply_gate(self, gate: Command) -> StructuredState: - """Applies the gate to the StructuredState. - - Args: - gate: The gate to be applied. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the command introduced is not a unitary gate. - ValueError: If gate acts on more than 2 qubits. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def apply_unitary( - self, unitary: cp.ndarray, qubits: list[Qubit] - ) -> StructuredState: - """Applies the unitary to the specified qubits of the StructuredState. - - Note: - It is assumed that the matrix provided by the user is unitary. If this is - not the case, the program will still run, but its behaviour is undefined. - - Args: - unitary: The matrix to be applied as a CuPy ndarray. It should either be - a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. - qubits: The qubits the unitary acts on. Only one qubit and two qubit - unitaries are supported. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the number of qubits provided is not one or two. - ValueError: If the size of the matrix does not match with the number of - qubits provided. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def apply_scalar(self, scalar: complex) -> StructuredState: - """Multiplies the state by a complex number. - - Args: - scalar: The complex number to be multiplied. - - Returns: - ``self``, to allow for method chaining. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> StructuredState: - """Relabels each qubit ``q`` as ``qubit_map[q]``. - - This does not apply any SWAP gate, nor it changes the internal structure of the - state. It simply changes the label of the physical bonds of the tensor network. - - Args: - qubit_map: Dictionary mapping each qubit to its new label. - - Returns: - ``self``, to allow for method chaining. - - Raises: - ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. 
- """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def vdot(self, other: StructuredState) -> complex: - """Obtain the inner product of the two states: ````. - - It can be used to compute the squared norm of a state ``state`` as - ``state.vdot(state)``. The tensors within the state are not modified. - - Note: - The state that is conjugated is ``self``. - - Args: - other: The other ``StructuredState``. - - Returns: - The resulting complex number. - - Raises: - RuntimeError: If the two states do not have the same qubits. - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def sample(self) -> dict[Qubit, int]: - """Returns a sample from a Z measurement applied on every qubit. - - Notes: - The contents of ``self`` are not updated. This is equivalent to applying - ``state = self.copy()`` then ``state.measure(state.get_qubits())``. - - Returns: - A dictionary mapping each qubit in the state to its 0 or 1 outcome. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: - """Applies a Z measurement on each of the ``qubits``. - - Notes: - After applying this function, ``self`` will contain the normalised - projected state. - - Args: - qubits: The subset of qubits to be measured. - destructive: If ``True``, the resulting state will not contain the - measured qubits. If ``False``, these qubits will appear on the - state corresponding to the measurement outcome. Defaults to ``True``. - - Returns: - A dictionary mapping the given ``qubits`` to their measurement outcome, - i.e. either ``0`` or ``1``. - - Raises: - ValueError: If an element in ``qubits`` is not a qubit in the state. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: - """Applies a postselection, updates the states and returns its probability. - - Notes: - After applying this function, ``self`` will contain the projected - state over the non-postselected qubits. - - The resulting state has been normalised. - - Args: - qubit_outcomes: A dictionary mapping a subset of qubits to their - desired outcome value (either ``0`` or ``1``). - - Returns: - The probability of this postselection to occur in a measurement. - - Raises: - ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. - ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. - ValueError: If all of the qubits in the state are being postselected. - Instead, you may wish to use ``get_amplitude()``. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def expectation_value(self, pauli_string: QubitPauliString) -> float: - """Obtains the expectation value of the Pauli string observable. - - Args: - pauli_string: A pytket object representing a tensor product of Paulis. - - Returns: - The expectation value. - - Raises: - ValueError: If a key in ``pauli_string`` is not a qubit in the state. 
- """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_fidelity(self) -> float: - """Returns the current fidelity of the state.""" - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_statevector(self) -> np.ndarray: - """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). - - Raises: - ValueError: If there are no qubits left in the state. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_amplitude(self, state: int) -> complex: - """Returns the amplitude of the chosen computational state. - - Notes: - The result is equivalent to ``state.get_statevector[b]``, but this method - is faster when querying a single amplitude (or just a few). - - Args: - state: The integer whose bitstring describes the computational state. - The qubits in the bitstring are in increasing lexicographic order. - - Returns: - The amplitude of the computational state in ``self``. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_qubits(self) -> set[Qubit]: - """Returns the set of qubits that ``self`` is defined on.""" - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_byte_size(self) -> int: - """Returns the number of bytes ``self`` currently occupies in GPU memory.""" - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def get_device_id(self) -> int: - """Returns the identifier of the device (GPU) where the tensors are stored.""" - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: - """Update the ``CuTensorNetHandle`` used by ``self``. Multiple - objects may use the same handle. - - Args: - libhandle: The new cuTensorNet library handle. - - Raises: - RuntimeError: If the device (GPU) where ``libhandle`` was initialised - does not match the one where the tensors of ``self`` are stored. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def copy(self) -> StructuredState: - """Returns a deep copy of ``self`` on the same device.""" - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - @abstractmethod - def _flush(self) -> None: - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations # type: ignore +from abc import ABC, abstractmethod +import warnings +import logging +from typing import Any, Optional, Type + +import numpy as np # type: ignore + +from pytket.circuit import Command, Qubit +from pytket.pauli import QubitPauliString + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) + +from pytket.extensions.cutensornet import CuTensorNetHandle + +# An alias for the CuPy type used for tensors +try: + Tensor = cp.ndarray +except NameError: + Tensor = Any + + +class Config: + """Configuration class for simulation using ``StructuredState``.""" + + def __init__( + self, + chi: Optional[int] = None, + truncation_fidelity: Optional[float] = None, + seed: Optional[int] = None, + float_precision: Type[Any] = np.float64, + value_of_zero: float = 1e-16, + leaf_size: int = 8, + use_kahypar: bool = False, + k: int = 4, + optim_delta: float = 1e-5, + loglevel: int = logging.WARNING, + ): + """Instantiate a configuration object for ``StructuredState`` simulation. + + Note: + Providing both a custom ``chi`` and ``truncation_fidelity`` will raise an + exception. Choose one or the other (or neither, for exact simulation). + + Args: + chi: The maximum value allowed for the dimension of the virtual + bonds. Higher implies better approximation but more + computational resources. If not provided, ``chi`` will be unbounded. + truncation_fidelity: Every time a two-qubit gate is applied, the virtual + bond will be truncated to the minimum dimension that satisfies + ``||^2 >= trucantion_fidelity``, where ``|psi>`` and ``|phi>`` + are the states before and after truncation (both normalised). + If not provided, it will default to its maximum value 1. + seed: Seed for the random number generator. Setting a seed provides + reproducibility across simulations using ``StructuredState``, in the + sense that they will produce the same sequence of measurement outcomes. + Crucially, consecutive samples taken from the same ``StructuredState`` + can still be different from each other. + float_precision: The floating point precision used in tensor calculations; + choose from ``numpy`` types: ``np.float64`` or ``np.float32``. + Complex numbers are represented using two of such + ``float`` numbers. Default is ``np.float64``. + value_of_zero: Any number below this value will be considered equal to zero. + Even when no ``chi`` or ``truncation_fidelity`` is provided, singular + values below this number will be truncated. + We suggest to use a value slightly below what your chosen + ``float_precision`` can reasonably achieve. For instance, ``1e-16`` for + ``np.float64`` precision (default) and ``1e-7`` for ``np.float32``. + leaf_size: For ``TTN`` simulation only. Sets the maximum number of + qubits in a leaf node when using ``TTN``. Default is 8. + use_kahypar: Use KaHyPar for graph partitioning (used in ``TTN``) if this + is True. Otherwise, use NetworkX (worse, but easy to setup). Defaults + to False. + k: For ``MPSxMPO`` simulation only. Sets the maximum number of layers + the MPO is allowed to have before being contracted. Increasing this + might increase fidelity, but it will also increase resource requirements + exponentially. Default value is 4. + optim_delta: For ``MPSxMPO`` simulation only. Sets the stopping criteria for + the optimisation when contracting the ``k`` layers of MPO. Stops when + the increase of fidelity between iterations is smaller than this value. 
+ Default value is ``1e-5``. + loglevel: Internal logger output level. Use 30 for warnings only, 20 for + verbose and 10 for debug mode. + + Raises: + ValueError: If both ``chi`` and ``truncation_fidelity`` are fixed. + ValueError: If the value of ``chi`` is set below 2. + ValueError: If the value of ``truncation_fidelity`` is not in [0,1]. + """ + _CHI_LIMIT = 2**60 + if ( + chi is not None + and chi < _CHI_LIMIT + and truncation_fidelity is not None + and truncation_fidelity != 1.0 + ): + raise ValueError("Cannot fix both chi and truncation_fidelity.") + if chi is None: + chi = _CHI_LIMIT # In practice, this is like having it be unbounded + if truncation_fidelity is None: + truncation_fidelity = 1 + + if chi < 2: + raise ValueError("The max virtual bond dim (chi) must be >= 2.") + if truncation_fidelity < 0 or truncation_fidelity > 1: + raise ValueError("Provide a value of truncation_fidelity in [0,1].") + + self.chi = chi + self.truncation_fidelity = truncation_fidelity + + if float_precision is None or float_precision == np.float64: # Double precision + self._real_t = np.float64 # type: ignore + self._complex_t = np.complex128 # type: ignore + self._atol = 1e-12 + elif float_precision == np.float32: # Single precision + self._real_t = np.float32 # type: ignore + self._complex_t = np.complex64 # type: ignore + self._atol = 1e-4 + else: + allowed_precisions = [np.float64, np.float32] + raise TypeError( + f"Value of float_precision must be in {allowed_precisions}." + ) + self.zero = value_of_zero + + if value_of_zero > self._atol / 1000: + warnings.warn( + "Your chosen value_of_zero is relatively large. " + "Faithfulness of final fidelity estimate is not guaranteed.", + UserWarning, + ) + + self.seed = seed + + if leaf_size >= 65: # Imposed to avoid bond ID collisions + # More than 20 qubits is already unreasonable for a leaf anyway + raise ValueError("Maximum allowed leaf_size is 65.") + + self.leaf_size = leaf_size + self.use_kahypar = use_kahypar + self.k = k + self.optim_delta = 1e-5 + self.loglevel = loglevel + + def copy(self) -> Config: + """Standard copy of the contents.""" + return Config( + chi=self.chi, + truncation_fidelity=self.truncation_fidelity, + seed=self.seed, + float_precision=self._real_t, # type: ignore + value_of_zero=self.zero, + leaf_size=self.leaf_size, + k=self.k, + optim_delta=self.optim_delta, + loglevel=self.loglevel, + ) + + +class StructuredState(ABC): + """Class representing a Tensor Network state.""" + + @abstractmethod + def is_valid(self) -> bool: + """Verify that the tensor network state is valid. + + Returns: + False if a violation was detected or True otherwise. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_gate(self, gate: Command) -> StructuredState: + """Applies the gate to the StructuredState. + + Args: + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the command introduced is not a unitary gate. + ValueError: If gate acts on more than 2 qubits. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_unitary( + self, unitary: cp.ndarray, qubits: list[Qubit] + ) -> StructuredState: + """Applies the unitary to the specified qubits of the StructuredState. + + Note: + It is assumed that the matrix provided by the user is unitary. 
If this is + not the case, the program will still run, but its behaviour is undefined. + + Args: + unitary: The matrix to be applied as a CuPy ndarray. It should either be + a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. + qubits: The qubits the unitary acts on. Only one qubit and two qubit + unitaries are supported. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the number of qubits provided is not one or two. + ValueError: If the size of the matrix does not match with the number of + qubits provided. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_scalar(self, scalar: complex) -> StructuredState: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> StructuredState: + """Relabels each qubit ``q`` as ``qubit_map[q]``. + + This does not apply any SWAP gate, nor it changes the internal structure of the + state. It simply changes the label of the physical bonds of the tensor network. + + Args: + qubit_map: Dictionary mapping each qubit to its new label. + + Returns: + ``self``, to allow for method chaining. + + Raises: + ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def vdot(self, other: StructuredState) -> complex: + """Obtain the inner product of the two states: ````. + + It can be used to compute the squared norm of a state ``state`` as + ``state.vdot(state)``. The tensors within the state are not modified. + + Note: + The state that is conjugated is ``self``. + + Args: + other: The other ``StructuredState``. + + Returns: + The resulting complex number. + + Raises: + RuntimeError: If the two states do not have the same qubits. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def sample(self) -> dict[Qubit, int]: + """Returns a sample from a Z measurement applied on every qubit. + + Notes: + The contents of ``self`` are not updated. This is equivalent to applying + ``state = self.copy()`` then ``state.measure(state.get_qubits())``. + + Returns: + A dictionary mapping each qubit in the state to its 0 or 1 outcome. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: + """Applies a Z measurement on each of the ``qubits``. + + Notes: + After applying this function, ``self`` will contain the normalised + projected state. + + Args: + qubits: The subset of qubits to be measured. + destructive: If ``True``, the resulting state will not contain the + measured qubits. If ``False``, these qubits will appear on the + state corresponding to the measurement outcome. Defaults to ``True``. + + Returns: + A dictionary mapping the given ``qubits`` to their measurement outcome, + i.e. either ``0`` or ``1``. + + Raises: + ValueError: If an element in ``qubits`` is not a qubit in the state. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: + """Applies a postselection, updates the states and returns its probability. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-postselected qubits. + + The resulting state has been normalised. + + Args: + qubit_outcomes: A dictionary mapping a subset of qubits to their + desired outcome value (either ``0`` or ``1``). + + Returns: + The probability of this postselection to occur in a measurement. + + Raises: + ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. + ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. + ValueError: If all of the qubits in the state are being postselected. + Instead, you may wish to use ``get_amplitude()``. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def expectation_value(self, pauli_string: QubitPauliString) -> float: + """Obtains the expectation value of the Pauli string observable. + + Args: + pauli_string: A pytket object representing a tensor product of Paulis. + + Returns: + The expectation value. + + Raises: + ValueError: If a key in ``pauli_string`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_statevector(self) -> np.ndarray: + """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). + + Raises: + ValueError: If there are no qubits left in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_amplitude(self, state: int) -> complex: + """Returns the amplitude of the chosen computational state. + + Notes: + The result is equivalent to ``state.get_statevector[b]``, but this method + is faster when querying a single amplitude (or just a few). + + Args: + state: The integer whose bitstring describes the computational state. + The qubits in the bitstring are in increasing lexicographic order. + + Returns: + The amplitude of the computational state in ``self``. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_qubits(self) -> set[Qubit]: + """Returns the set of qubits that ``self`` is defined on.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_byte_size(self) -> int: + """Returns the number of bytes ``self`` currently occupies in GPU memory.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def get_device_id(self) -> int: + """Returns the identifier of the device (GPU) where the tensors are stored.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Update the ``CuTensorNetHandle`` used by ``self``. Multiple + objects may use the same handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of ``self`` are stored. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def copy(self) -> StructuredState: + """Returns a deep copy of ``self`` on the same device.""" + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + @abstractmethod + def _flush(self) -> None: + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") diff --git a/pytket/extensions/cutensornet/structured_state/mps.py b/pytket/extensions/cutensornet/structured_state/mps.py index a28ea9eb..f62b5472 100644 --- a/pytket/extensions/cutensornet/structured_state/mps.py +++ b/pytket/extensions/cutensornet/structured_state/mps.py @@ -1,1066 +1,1066 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations # type: ignore -import warnings -from typing import Union -from enum import Enum - -from random import Random # type: ignore -import numpy as np # type: ignore - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum as cq # type: ignore - from cuquantum.cutensornet import tensor # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - -from pytket.circuit import Command, Op, OpType, Qubit -from pytket.pauli import Pauli, QubitPauliString - -from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger - -from .general import Config, StructuredState, Tensor - - -class DirMPS(Enum): - """An enum to refer to relative directions within the MPS.""" - - LEFT = 0 - RIGHT = 1 - - -class MPS(StructuredState): - """Represents a state as a Matrix Product State. - - Attributes: - tensors (list[Tensor]): A list of tensors in the MPS; ``tensors[0]`` is - the leftmost and ``tensors[len(self)-1]`` is the rightmost; ``tensors[i]`` - and ``tensors[i+1]`` are connected in the MPS via a bond. All of the - tensors are rank three, with the dimensions listed in ``.shape`` matching - the left, right and physical bonds, in that order. - canonical_form (dict[int, Optional[DirMPS]]): A dictionary mapping - positions to the canonical form direction of the corresponding tensor, - or ``None`` if it the tensor is not canonicalised. - qubit_position (dict[pytket.circuit.Qubit, int]): A dictionary mapping circuit - qubits to the position its tensor is at in the MPS. - fidelity (float): A lower bound of the fidelity, obtained by multiplying - the fidelities after each contraction. The fidelity of a contraction - corresponds to ``||^2`` where ``|psi>`` and ``|phi>`` are the - states before and after truncation (assuming both are normalised). - """ - - def __init__( - self, - libhandle: CuTensorNetHandle, - qubits: list[Qubit], - config: Config, - ): - """Initialise an MPS on the computational state ``|0>`` - - Note: - A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` - statement. 
The device where the MPS is stored will match the one specified - by the library handle. - - Args: - libhandle: The cuTensorNet library handle that will be used to carry out - tensor operations on the MPS. - qubits: The list of qubits in the circuit to be simulated. - config: The object describing the configuration for simulation. - - Raises: - ValueError: If less than two qubits are provided. - """ - self._lib = libhandle - self._cfg = config - self._logger = set_logger("MPS", level=config.loglevel) - self._rng = Random() - self._rng.seed(self._cfg.seed) - self.fidelity = 1.0 - - n_tensors = len(qubits) - if n_tensors == 0: # There's no initialisation to be done - pass - elif n_tensors == 1: - raise ValueError("Please, provide at least two qubits.") - else: - self.qubit_position = {q: i for i, q in enumerate(qubits)} - - # Create the list of tensors - self.tensors: list[Tensor] = [] - self.canonical_form = {i: None for i in range(n_tensors)} - - # Append each of the tensors initialised in state |0> - m_shape = (1, 1, 2) # Two virtual bonds (dim=1) and one physical - for i in range(n_tensors): - m_tensor = cp.empty(m_shape, dtype=self._cfg._complex_t) - # Initialise the tensor to ket 0 - m_tensor[0][0][0] = 1 - m_tensor[0][0][1] = 0 - self.tensors.append(m_tensor) - - def is_valid(self) -> bool: - """Verify that the MPS object is valid. - - Specifically, verify that the MPS does not exceed the dimension limit ``chi`` of - the virtual bonds, that physical bonds have dimension 2, that all tensors - are rank three and that the data structure sizes are consistent. - - Returns: - False if a violation was detected or True otherwise. - """ - self._flush() - - chi_ok = all( - all(dim <= self._cfg.chi for dim in self.get_virtual_dimensions(pos)) - for pos in range(len(self)) - ) - phys_ok = all(self.get_physical_dimension(pos) == 2 for pos in range(len(self))) - shape_ok = all(len(tensor.shape) == 3 for tensor in self.tensors) - - ds_ok = set(self.canonical_form.keys()) == set(range(len(self))) - ds_ok = ds_ok and set(self.qubit_position.values()) == set(range(len(self))) - - # Debugger logging - self._logger.debug( - "Checking validity of MPS... " - f"chi_ok={chi_ok}, " - f"phys_ok={phys_ok}, " - f"shape_ok={shape_ok}, " - f"ds_ok={ds_ok}" - ) - - return chi_ok and phys_ok and shape_ok and ds_ok - - def apply_gate(self, gate: Command) -> MPS: - """Apply the gate to the MPS. - - Note: - Only one-qubit gates and two-qubit gates are supported. - - Args: - gate: The gate to be applied. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the command introduced is not a unitary gate. - ValueError: If gate acts on more than 2 qubits. - """ - try: - unitary = gate.op.get_unitary() - except: - raise ValueError("The command introduced is not unitary.") - - # Load the gate's unitary to the GPU memory - unitary = unitary.astype(dtype=self._cfg._complex_t, copy=False) - unitary = cp.asarray(unitary, dtype=self._cfg._complex_t) - - self._logger.debug(f"Applying gate {gate}.") - if len(gate.qubits) not in [1, 2]: - raise ValueError( - "Gates must act on only 1 or 2 qubits! " - + f"This is not satisfied by {gate}." - ) - - self.apply_unitary(unitary, gate.qubits) - - return self - - def apply_unitary( - self, unitary: cp.ndarray, qubits: list[Qubit] - ) -> StructuredState: - """Applies the unitary to the specified qubits of the StructuredState. - - Note: - It is assumed that the matrix provided by the user is unitary. 
If this is - not the case, the program will still run, but its behaviour is undefined. - - Args: - unitary: The matrix to be applied as a CuPy ndarray. It should either be - a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. - qubits: The qubits the unitary acts on. Only one qubit and two qubit - unitaries are supported. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the number of qubits provided is not one or two. - ValueError: If the size of the matrix does not match with the number of - qubits provided. - """ - if self._lib._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of update_libhandle and CuTensorNetHandle.", - ) - - self._logger.debug(f"Applying unitary {unitary} on {qubits}.") - - if len(qubits) == 1: - if unitary.shape != (2, 2): - raise ValueError( - "The unitary introduced acts on one qubit but it is not 2x2." - ) - - self._apply_1q_unitary(unitary, qubits[0]) - # NOTE: if the tensor was in canonical form, it remains being so, - # since it is guaranteed that the gate is unitary. - - elif len(qubits) == 2: - if unitary.shape != (4, 4): - raise ValueError( - "The unitary introduced acts on two qubits but it is not 4x4." - ) - - self._apply_2q_unitary(unitary, qubits[0], qubits[1]) - # The tensors will in general no longer be in canonical form. - self.canonical_form[self.qubit_position[qubits[0]]] = None - self.canonical_form[self.qubit_position[qubits[1]]] = None - - else: - raise ValueError("Gates must act on only 1 or 2 qubits!") - - return self - - def apply_scalar(self, scalar: complex) -> MPS: - """Multiplies the state by a complex number. - - Args: - scalar: The complex number to be multiplied. - - Returns: - ``self``, to allow for method chaining. - """ - self.tensors[0] *= scalar - return self - - def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> MPS: - """Relabels each qubit ``q`` as ``qubit_map[q]``. - - This does not apply any SWAP gate, nor it changes the internal structure of the - state. It simply changes the label of the physical bonds of the tensor network. - - Args: - qubit_map: Dictionary mapping each qubit to its new label. - - Returns: - ``self``, to allow for method chaining. - - Raises: - ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. - """ - new_qubit_position = dict() - for q_orig, q_new in qubit_map.items(): - # Check the qubit is in the state - if q_orig not in self.qubit_position: - raise ValueError(f"Qubit {q_orig} is not in the state.") - # Apply the relabelling for this qubit - new_qubit_position[q_new] = self.qubit_position[q_orig] - - self.qubit_position = new_qubit_position - self._logger.debug(f"Relabelled qubits... {qubit_map}") - return self - - def add_qubit(self, new_qubit: Qubit, position: int, state: int = 0) -> MPS: - """Adds a qubit at the specified position. - - Args: - new_qubit: The identifier of the qubit to be added to the state. - position: The location the new qubit should be inserted at in the MPS. - Qubits on this and later indexed have their position shifted by 1. - state: Choose either ``0`` or ``1`` for the new qubit's state. - Defaults to ``0``. - - Returns: - ``self``, to allow for method chaining. - - Raises: - ValueError: If ``new_qubit`` already exists in the state. - ValueError: If ``position`` is negative or larger than ``len(self)``. - ValueError: If ``state`` is not ``0`` or ``1``. 
- """ - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - if new_qubit in self.qubit_position.keys(): - raise ValueError( - f"Qubit {new_qubit} cannot be added, it already is in the MPS." - ) - if position < 0 or position > len(self): - raise ValueError(f"Index {position} is not a valid position in the MPS.") - if state not in [0, 1]: - raise ValueError( - f"Cannot initialise qubit to state {state}. Only 0 or 1 are supported." - ) - - # Identify the dimension of the virtual bond where the new qubit will appear - if position == len(self): - dim = self.get_virtual_dimensions(len(self) - 1)[1] # Rightmost bond - else: # Otherwise, pick the left bond of the tensor currently in ``position`` - dim = self.get_virtual_dimensions(position)[0] - - # Create the tensor for I \otimes |state> - identity = cp.eye(dim, dtype=self._cfg._complex_t) - qubit_tensor = cp.zeros(2, dtype=self._cfg._complex_t) - qubit_tensor[state] = 1 - # Apply the tensor product - new_tensor = cq.contract( - "lr,p->lrp", - identity, - qubit_tensor, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # Place this ``new_tensor`` in the MPS at ``position``, - # the previous tensors at ``position`` onwards are shifted to the right - orig_mps_len = len(self) # Store it in variable, since this will change - self.tensors.insert(position, new_tensor) - - # Update the dictionary tracking the canonical form - for pos in reversed(range(position, orig_mps_len)): - self.canonical_form[pos + 1] = self.canonical_form[pos] - # The canonical form of the new tensor is both LEFT and RIGHT, just choose one - self.canonical_form[position] = DirMPS.LEFT # type: ignore - - # Finally, update the dictionary tracking the qubit position - for q, pos in self.qubit_position.items(): - if pos >= position: - self.qubit_position[q] += 1 - self.qubit_position[new_qubit] = position - - return self - - def canonicalise(self, l_pos: int, r_pos: int) -> None: - """Canonicalises the MPS object. - - Applies the necessary gauge transformations so that all MPS tensors - to the left of position ``l_pos`` are in left orthogonal form and - all MPS tensors to the right of ``r_pos`` in right orthogonal form. - - Args: - l_pos: The position of the leftmost tensor that is not to be - canonicalised. - r_pos: The position of the rightmost tensor that is not to be - canonicalised. - """ - self._logger.debug(f"Start canonicalisation... l_pos={l_pos}, r_pos={r_pos}") - - for pos in range(l_pos): - self.canonicalise_tensor(pos, form=DirMPS.LEFT) - for pos in reversed(range(r_pos + 1, len(self))): - self.canonicalise_tensor(pos, form=DirMPS.RIGHT) - - self._logger.debug(f"Finished canonicalisation.") - - def canonicalise_tensor(self, pos: int, form: DirMPS) -> None: - """Canonicalises a tensor from an MPS object. - - Applies the necessary gauge transformations so that the tensor at - position ``pos`` in the MPS is in the orthogonal form dictated by - ``form``. - - Args: - position: The position of the tensor to be canonicalised. - form: LEFT form means that its conjugate transpose is its inverse if - connected to its left bond and physical bond. Similarly for RIGHT. - - Raises: - ValueError: If ``form`` is not a value in ``DirMPS``. - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. 
- """ - if form == self.canonical_form[pos]: - # Tensor already in canonical form, nothing needs to be done - self._logger.debug(f"Position {pos} already in {form}.") - return None - - if self._lib._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of update_libhandle and CuTensorNetHandle.", - ) - - self._logger.debug(f"Canonicalising {pos} to {form}.") - # Glossary of bond IDs used here: - # s -> shared virtual bond between T and Tnext - # v -> the other virtual bond of T - # V -> the other virtual bond of Tnext - # p -> physical bond of T - # P -> physical bond of Tnext - - # Gather the details from the MPS tensors at this position - T = self.tensors[pos] - - # Assign the bond IDs - if form == DirMPS.LEFT: - next_pos = pos + 1 - Tnext = self.tensors[next_pos] - T_bonds = "vsp" - Q_bonds = "vap" - R_bonds = "as" - Tnext_bonds = "sVP" - result_bonds = "aVP" - elif form == DirMPS.RIGHT: - next_pos = pos - 1 - Tnext = self.tensors[next_pos] - T_bonds = "svp" - Q_bonds = "avp" - R_bonds = "as" - Tnext_bonds = "VsP" - result_bonds = "VaP" - else: - raise ValueError("Argument form must be a value in DirMPS.") - - # Apply QR decomposition - self._logger.debug(f"QR decompose a {T.nbytes / 2**20} MiB tensor.") - - subscripts = T_bonds + "->" + Q_bonds + "," + R_bonds - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - Q, R = tensor.decompose( - subscripts, T, method=tensor.QRMethod(), options=options - ) - self._logger.debug(f"QR decomposition finished.") - - # Contract R into Tnext - subscripts = R_bonds + "," + Tnext_bonds + "->" + result_bonds - result = cq.contract( - subscripts, - R, - Tnext, - options=options, - optimize={"path": [(0, 1)]}, - ) - self._logger.debug(f"Contraction with {next_pos} applied.") - - # Update self.tensors - self.tensors[pos] = Q - self.canonical_form[pos] = form # type: ignore - self.tensors[next_pos] = result - self.canonical_form[next_pos] = None - - def vdot(self, other: MPS) -> complex: # type: ignore - """Obtain the inner product of the two MPS: ````. - - It can be used to compute the squared norm of an MPS ``mps`` as - ``mps.vdot(mps)``. The tensors within the MPS are not modified. - - Note: - The state that is conjugated is ``self``. - - Args: - other: The other MPS. - - Returns: - The resulting complex number. - - Raises: - RuntimeError: If number of tensors, dimensions or positions do not match. - RuntimeError: If there are no tensors in the MPS. - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - """ - if self._lib._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of update_libhandle and CuTensorNetHandle.", - ) - - if len(self) != len(other): - raise RuntimeError("Number of tensors do not match.") - for i in range(len(self)): - if self.get_physical_dimension(i) != other.get_physical_dimension(i): - raise RuntimeError( - f"Physical bond dimension at position {i} do not match." - ) - if self.qubit_position != other.qubit_position: - raise RuntimeError( - "The qubit labels or their position on the MPS do not match." - ) - if len(self) == 0: - raise RuntimeError("There are no tensors in the MPS.") - - self._flush() - other._flush() - - self._logger.debug("Applying vdot between two MPS.") - - # We convert both MPS to their interleaved representation and - # contract them using cuQuantum. 
- mps1 = self._get_interleaved_representation(conj=True) - mps2 = other._get_interleaved_representation(conj=False) - interleaved_rep = mps1 + mps2 - interleaved_rep.append([]) # Discards dim=1 bonds with [] - - # We define the contraction path ourselves - end_mps1 = len(self) - 1 # Rightmost tensor of mps1 in interleaved_rep - end_mps2 = len(self) + len(other) - 1 # Rightmost tensor of mps2 - contraction_path = [(end_mps1, end_mps2)] # Contract ends of mps1 and mps2 - for _ in range(len(self) - 1): - # Update the position markers - end_mps1 -= 1 # One tensor was removed from mps1 - end_mps2 -= 2 # One tensor removed from mps1 and another from mps2 - # Contract the result from last iteration with the ends of mps1 and mps2 - contraction_path.append((end_mps2, end_mps2 + 1)) # End of mps2 and result - contraction_path.append((end_mps1, end_mps2)) # End of mps1 and ^ outcome - - # Apply the contraction - result = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": contraction_path}, - ) - - self._logger.debug(f"Result from vdot={result}") - return complex(result) - - def _get_interleaved_representation( - self, conj: bool = False - ) -> list[Union[cp.ndarray, str]]: - """Returns the interleaved representation of the MPS used by cuQuantum. - - Args: - conj: If True, all tensors are conjugated and bonds IDs are prefixed - with * (except physical bonds). Defaults to False. - """ - self._logger.debug("Creating interleaved representation...") - - # Auxiliar dictionary of physical bonds to qubit IDs - qubit_id = {location: qubit for qubit, location in self.qubit_position.items()} - - interleaved_rep = [] - for i, t in enumerate(self.tensors): - # Append the tensor - if conj: - interleaved_rep.append(t.conj()) - else: - interleaved_rep.append(t) - - # Create the ID for the bonds involved - bonds = [str(i), str(i + 1), str(qubit_id[i])] - if conj: - bonds[0] = "*" + bonds[0] - bonds[1] = "*" + bonds[1] - interleaved_rep.append(bonds) - self._logger.debug(f"Bond IDs: {bonds}") - - return interleaved_rep - - def sample(self) -> dict[Qubit, int]: - """Returns a sample from a Z measurement applied on every qubit. - - Notes: - The MPS ``self`` is not updated. This is equivalent to applying - ``mps = self.copy()`` then ``mps.measure(mps.get_qubits())``. - - Returns: - A dictionary mapping each of the qubits in the MPS to their 0 or 1 outcome. - """ - - # TODO: Copying is not strictly necessary, but to avoid it we would need to - # modify the algorithm in `measure`. This may be done eventually if `copy` - # is shown to be a bottleneck when sampling (which is likely). - mps = self.copy() - outcomes = mps.measure(mps.get_qubits()) - # If the user sets a seed for the MPS, we'd like that every copy of the MPS - # produces the same sequence of samples, but samples within a sequence may be - # different from each other. Achieved by updating the state of `self._rng`. - self._rng.setstate(mps._rng.getstate()) - - return outcomes - - def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: - """Applies a Z measurement on each of the ``qubits``. - - Notes: - After applying this function, ``self`` will contain the normalised - projected state. - - Args: - qubits: The subset of qubits to be measured. - destructive: If ``True``, the resulting state will not contain the - measured qubits. If ``False``, these qubits will remain in the - state. Defaults to ``True``. 
- - Returns: - A dictionary mapping the given ``qubits`` to their measurement outcome, - i.e. either ``0`` or ``1``. - - Raises: - ValueError: If an element in ``qubits`` is not a qubit in the state. - """ - result = dict() - - # Obtain the positions that need to be measured and build the reverse dict - position_qubit_map = dict() - for q in qubits: - if q not in self.qubit_position: - raise ValueError(f"Qubit {q} is not a qubit in the MPS.") - position_qubit_map[self.qubit_position[q]] = q - positions = sorted(position_qubit_map.keys()) - self._logger.debug(f"Measuring qubits={position_qubit_map}") - - # Tensor for postselection to |0> - zero_tensor = cp.zeros(2, dtype=self._cfg._complex_t) - zero_tensor[0] = 1 - - # Measure and postselect each of the positions, one by one - while positions: - pos = positions.pop() # The rightmost position to be measured - - # Convert to canonical form with center at this position - self.canonicalise(pos, pos) - - # Glossary of bond IDs: - # l -> left virtual bond of tensor in `pos` - # r -> right virtual bond of tensor in `pos` - # p -> physical bond of tensor in `pos` - # P -> physical bond of tensor in `pos` (copy) - - # Take the tensor in this position and obtain its prob for |0>. - # Since the MPS is in canonical form, this corresponds to the probability - # if we were to take all of the other tensors into account. - prob = cq.contract( - "lrp,p,lrP,P->", # No open bonds remain; this is just a scalar - self.tensors[pos].conj(), - zero_tensor, - self.tensors[pos], - zero_tensor, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1), (0, 1), (0, 1)]}, - ) - - # Throw a coin to decide measurement outcome - outcome = 0 if prob > self._rng.random() else 1 - result[position_qubit_map[pos]] = outcome - self._logger.debug(f"Outcome of qubit at {pos} is {outcome}.") - - # Postselect the MPS for this outcome, renormalising at the same time - postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) - postselection_tensor[outcome] = 1 / np.sqrt( - abs(outcome - prob) - ) # Normalise - - self._postselect_qubit(position_qubit_map[pos], postselection_tensor) - - # If the measurement is not destructive, we must add the qubit back again - if not destructive: - qubit = position_qubit_map[pos] - self.add_qubit(qubit, pos, state=outcome) - - return result - - def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: - """Applies a postselection, updates the MPS and returns its probability. - - Notes: - After applying this function, ``self`` will contain the MPS of the projected - state over the non-postselected qubits. - - The resulting state has been normalised. - - Args: - qubit_outcomes: A dictionary mapping a subset of qubits in the MPS to their - desired outcome value (either ``0`` or ``1``). - - Returns: - The probability of this postselection to occur in a measurement. - - Raises: - ValueError: If a key in ``qubit_outcomes`` is not a qubit in the MPS. - ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. - ValueError: If all of the qubits in the MPS are being postselected. Instead, - you may wish to use ``get_amplitude()``. - """ - for q, v in qubit_outcomes.items(): - if q not in self.qubit_position: - raise ValueError(f"Qubit {q} is not a qubit in the MPS.") - if v not in {0, 1}: - raise ValueError(f"Outcome of {q} cannot be {v}. Choose int 0 or 1.") - - if len(qubit_outcomes) == len(self): - raise ValueError( - "Cannot postselect all qubits. 
You may want to use get_amplitude()." - ) - self._logger.debug(f"Postselecting qubits={qubit_outcomes}") - - # Apply a postselection for each of the qubits - for qubit, outcome in qubit_outcomes.items(): - # Create the rank-1 postselection tensor - postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) - postselection_tensor[outcome] = 1 - # Apply postselection - self._postselect_qubit(qubit, postselection_tensor) - - # Calculate the squared norm of the postselected state; this is its probability - prob = self.vdot(self) - assert np.isclose(prob.imag, 0.0, atol=self._cfg._atol) - prob = prob.real - - # Renormalise; it suffices to update the first tensor - if len(self) > 0 and not np.isclose(prob, 0.0, atol=self._cfg._atol): - self.tensors[0] = self.tensors[0] / np.sqrt(prob) - self.canonical_form[0] = None - - self._logger.debug(f"Probability of this postselection is {prob}.") - return prob - - def _postselect_qubit(self, qubit: Qubit, postselection_tensor: cp.ndarray) -> None: - """Postselect the qubit with the given tensor.""" - - pos = self.qubit_position[qubit] - self.tensors[pos] = cq.contract( - "lrp,p->lr", - self.tensors[pos], - postselection_tensor, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - - # Glossary of bond IDs: - # s -> shared bond between tensor in `pos` and next - # v -> the other virtual bond of tensor in `pos` - # V -> the other virtual bond of tensor in next position - # p -> physical bond of tensor in `pos` - # P -> physical bond of tensor in next position - - if len(self) == 1: # This is the last tensor - pass - - elif pos != 0: # Contract with next tensor on the left - self.tensors[pos - 1] = cq.contract( - "sv,VsP->VvP", - self.tensors[pos], - self.tensors[pos - 1], - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - self.canonical_form[pos - 1] = None - else: # There are no tensors on the left, contract with the one on the right - self.tensors[pos + 1] = cq.contract( - "vs,sVP->vVP", - self.tensors[pos], - self.tensors[pos + 1], - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - self.canonical_form[pos + 1] = None - - # Shift all entries after `pos` to the left - for q, p in self.qubit_position.items(): - if pos < p: - self.qubit_position[q] = p - 1 - for p in range(pos, len(self) - 1): - self.canonical_form[p] = self.canonical_form[p + 1] - - # Remove the entry from the data structures - del self.qubit_position[qubit] - del self.canonical_form[len(self) - 1] - self.tensors.pop(pos) - - def expectation_value(self, pauli_string: QubitPauliString) -> float: - """Obtains the expectation value of the Pauli string observable. - - Args: - pauli_string: A pytket object representing a tensor product of Paulis. - - Returns: - The expectation value. - - Raises: - ValueError: If a key in ``pauli_string`` is not a qubit in the MPS. 
- """ - for q in pauli_string.map.keys(): - if q not in self.qubit_position: - raise ValueError(f"Qubit {q} is not a qubit in the MPS.") - - self._logger.debug(f"Calculating expectation value of {pauli_string}.") - mps_copy = self.copy() - pauli_optype = {Pauli.Z: OpType.Z, Pauli.X: OpType.X, Pauli.Y: OpType.Y} - - # Apply each of the Pauli operators to the MPS copy - for qubit, pauli in pauli_string.map.items(): - if pauli != Pauli.I: - pos = mps_copy.qubit_position[qubit] - pauli_unitary = Op.create(pauli_optype[pauli]).get_unitary() - pauli_tensor = cp.asarray( - pauli_unitary.astype(dtype=self._cfg._complex_t, copy=False), - dtype=self._cfg._complex_t, - ) - - # Contract the Pauli to the MPS tensor of the corresponding qubit - mps_copy.tensors[pos] = cq.contract( - "lrp,Pp->lrP", - mps_copy.tensors[pos], - pauli_tensor, - options={ - "handle": self._lib.handle, - "device_id": self._lib.device_id, - }, - optimize={"path": [(0, 1)]}, - ) - - # Obtain the inner product - value = self.vdot(mps_copy) - assert np.isclose(value.imag, 0.0, atol=self._cfg._atol) - - self._logger.debug(f"Expectation value is {value.real}.") - return value.real - - def get_fidelity(self) -> float: - """Returns the current fidelity of the state.""" - return self.fidelity - - def get_statevector(self) -> np.ndarray: - """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). - - Raises: - ValueError: If there are no qubits left in the MPS. - """ - if len(self) == 0: - raise ValueError("There are no qubits left in this MPS.") - - # If there is only one qubit left, it is trivial - if len(self) == 1: - result_tensor = self.tensors[0] - - else: - # Create the interleaved representation with all tensors - interleaved_rep = [] - for pos in range(len(self)): - interleaved_rep.append(self.tensors[pos]) - interleaved_rep.append( - ["v" + str(pos), "v" + str(pos + 1), "p" + str(pos)] - ) - - # Specify the output bond IDs in ILO order - output_bonds = [] - for q in sorted(self.qubit_position.keys()): - output_bonds.append("p" + str(self.qubit_position[q])) - interleaved_rep.append(output_bonds) - - # We define the contraction path ourselves - end_mps = len(self) - 1 - contraction_path = [(end_mps - 1, end_mps)] # Contract the last two tensors - end_mps -= 2 # Two tensors removed from the MPS - for _ in range(len(self) - 2): - # Contract the result from last iteration and the last tensor in the MPS - contraction_path.append((end_mps, end_mps + 1)) - # Update the position marker - end_mps -= 1 # One tensor was removed from the MPS - - # Contract - result_tensor = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": contraction_path}, - ) - - # Convert to numpy vector and flatten - statevector: np.ndarray = cp.asnumpy(result_tensor).flatten() - return statevector - - def get_amplitude(self, state: int) -> complex: - """Returns the amplitude of the chosen computational state. - - Notes: - The result is equivalent to ``mps.get_statevector[b]``, but this method - is faster when querying a single amplitude (or just a few). - - Args: - state: The integer whose bitstring describes the computational state. - The qubits in the bitstring are in increasing lexicographic order. - - Returns: - The amplitude of the computational state in the MPS. 
- """ - - # Auxiliar dictionary of physical bonds to qubit IDs - qubit_id = {location: qubit for qubit, location in self.qubit_position.items()} - - # Find out what the map MPS_position -> bit value is - ilo_qubits = sorted(self.qubit_position.keys()) - mps_pos_bitvalue = dict() - - for i, q in enumerate(ilo_qubits): - pos = self.qubit_position[q] - bitvalue = 1 if state & 2 ** (len(self) - i - 1) else 0 - mps_pos_bitvalue[pos] = bitvalue - - # Create the interleaved representation including all postselection tensors - interleaved_rep = self._get_interleaved_representation() - for pos in range(len(self)): - postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) - postselection_tensor[mps_pos_bitvalue[pos]] = 1 - interleaved_rep.append(postselection_tensor) - interleaved_rep.append([str(qubit_id[pos])]) - # Append [] so that all dim=1 bonds are ignored in the result of contract - interleaved_rep.append([]) - - # We define the contraction path ourselves - end_mps = len(self) - 1 # Rightmost tensor of MPS in interleaved_rep - end_rep = 2 * len(self) - 1 # Last position in the representation - contraction_path = [(end_mps, end_rep)] # Contract ends - for _ in range(len(self) - 1): - # Update the position markers - end_mps -= 1 # One tensor was removed from mps - end_rep -= 2 # One tensor removed from mps and another from postselect - # Contract the result from last iteration with the ends - contraction_path.append((end_mps, end_rep + 1)) # End of mps and result - contraction_path.append((end_rep - 1, end_rep)) # End of mps1 and ^ outcome - - # Apply the contraction - result = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 1}, - ) - - self._logger.debug(f"Amplitude of state {state} is {result}.") - return complex(result) - - def get_qubits(self) -> set[Qubit]: - """Returns the set of qubits that this MPS is defined on.""" - return set(self.qubit_position.keys()) - - def get_virtual_dimensions(self, position: int) -> tuple[int, int]: - """Returns the virtual bonds dimension of the tensor ``tensors[position]``. - - Args: - position: A position in the MPS. - - Returns: - A tuple where the first element is the dimensions of the left virtual bond - and the second elements is that of the right virtual bond. - - Raises: - RuntimeError: If ``position`` is out of bounds. - """ - if position < 0 or position >= len(self): - raise RuntimeError(f"Position {position} is out of bounds.") - - virtual_dims: tuple[int, int] = self.tensors[position].shape[:2] - return virtual_dims - - def get_physical_dimension(self, position: int) -> int: - """Returns the physical bond dimension of the tensor ``tensors[position]``. - - Args: - position: A position in the MPS. - - Returns: - The dimension of the physical bond. - - Raises: - RuntimeError: If ``position`` is out of bounds. - """ - if position < 0 or position >= len(self): - raise RuntimeError(f"Position {position} is out of bounds.") - - physical_dim: int = self.tensors[position].shape[2] - return physical_dim - - def get_byte_size(self) -> int: - """ - Returns: - The number of bytes the MPS currently occupies in GPU memory. - """ - return sum(t.nbytes for t in self.tensors) - - def get_device_id(self) -> int: - """ - Returns: - The identifier of the device (GPU) where the tensors are stored. - """ - return int(self.tensors[0].device) - - def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: - """Update the ``CuTensorNetHandle`` used by this ``MPS`` object. 
Multiple - objects may use the same handle. - - Args: - libhandle: The new cuTensorNet library handle. - - Raises: - RuntimeError: If the device (GPU) where ``libhandle`` was initialised - does not match the one where the tensors of the MPS are stored. - """ - if libhandle.device_id != self.get_device_id(): - raise RuntimeError( - "Device of libhandle is not the one where the MPS is stored.", - f"{libhandle.device_id} != {self.get_device_id()}", - ) - self._lib = libhandle - - def copy(self) -> MPS: - """ - Returns: - A deep copy of the MPS on the same device. - """ - self._flush() - - # Create a dummy object - new_mps = MPS(self._lib, qubits=[], config=self._cfg.copy()) - # Copy all data - new_mps.fidelity = self.fidelity - new_mps.tensors = [t.copy() for t in self.tensors] - new_mps.canonical_form = self.canonical_form.copy() - new_mps.qubit_position = self.qubit_position.copy() - - # If the user has set a seed, assume that they'd want every copy - # to behave in the same way, so we copy the RNG state - if self._cfg.seed is not None: - # Setting state (rather than just copying the seed) allows for the - # copy to continue from the same point in the sequence of random - # numbers as the original copy - new_mps._rng.setstate(self._rng.getstate()) - # Otherwise, samples will be different between copies, since their - # self._rng will be initialised from system randomnes when seed=None. - - self._logger.debug( - "Successfully copied an MPS " - f"of size {new_mps.get_byte_size() / 2**20} MiB." - ) - return new_mps - - def __len__(self) -> int: - """ - Returns: - The number of tensors in the MPS. - """ - return len(self.tensors) - - def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPS: - raise NotImplementedError( - "MPS is a base class with no contraction algorithm implemented." - + " You must use a subclass of MPS, such as MPSxGate or MPSxMPO." - ) - - def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPS: - raise NotImplementedError( - "MPS is a base class with no contraction algorithm implemented." - + " You must use a subclass of MPS, such as MPSxGate or MPSxMPO." - ) - - def _flush(self) -> None: - # Does nothing in the general MPS case; but children classes with batched - # gate contraction will redefine this method so that the last batch of - # gates is applied. - return None +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations # type: ignore +import warnings +from typing import Union +from enum import Enum + +from random import Random # type: ignore +import numpy as np # type: ignore + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Command, Op, OpType, Qubit +from pytket.pauli import Pauli, QubitPauliString + +from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger + +from .general import Config, StructuredState, Tensor + + +class DirMPS(Enum): + """An enum to refer to relative directions within the MPS.""" + + LEFT = 0 + RIGHT = 1 + + +class MPS(StructuredState): + """Represents a state as a Matrix Product State. + + Attributes: + tensors (list[Tensor]): A list of tensors in the MPS; ``tensors[0]`` is + the leftmost and ``tensors[len(self)-1]`` is the rightmost; ``tensors[i]`` + and ``tensors[i+1]`` are connected in the MPS via a bond. All of the + tensors are rank three, with the dimensions listed in ``.shape`` matching + the left, right and physical bonds, in that order. + canonical_form (dict[int, Optional[DirMPS]]): A dictionary mapping + positions to the canonical form direction of the corresponding tensor, + or ``None`` if it the tensor is not canonicalised. + qubit_position (dict[pytket.circuit.Qubit, int]): A dictionary mapping circuit + qubits to the position its tensor is at in the MPS. + fidelity (float): A lower bound of the fidelity, obtained by multiplying + the fidelities after each contraction. The fidelity of a contraction + corresponds to ``||^2`` where ``|psi>`` and ``|phi>`` are the + states before and after truncation (assuming both are normalised). + """ + + def __init__( + self, + libhandle: CuTensorNetHandle, + qubits: list[Qubit], + config: Config, + ): + """Initialise an MPS on the computational state ``|0>`` + + Note: + A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` + statement. The device where the MPS is stored will match the one specified + by the library handle. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations on the MPS. + qubits: The list of qubits in the circuit to be simulated. + config: The object describing the configuration for simulation. + + Raises: + ValueError: If less than two qubits are provided. 
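# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] The class docstring above fixes the
# bond ordering of every site tensor as (left virtual, right virtual,
# physical). A standalone sketch, assuming only cupy, of the |0> site tensor
# that ``__init__`` builds below.
# ---------------------------------------------------------------------------
import cupy as cp

site = cp.zeros((1, 1, 2), dtype=cp.complex128)  # two dim-1 virtual bonds
site[0, 0, 0] = 1.0                              # amplitude of |0>
site[0, 0, 1] = 0.0                              # amplitude of |1>
assert site.shape[2] == 2                        # the physical bond comes last
# ---------------------------------------------------------------------------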
+ """ + self._lib = libhandle + self._cfg = config + self._logger = set_logger("MPS", level=config.loglevel) + self._rng = Random() + self._rng.seed(self._cfg.seed) + self.fidelity = 1.0 + + n_tensors = len(qubits) + if n_tensors == 0: # There's no initialisation to be done + pass + elif n_tensors == 1: + raise ValueError("Please, provide at least two qubits.") + else: + self.qubit_position = {q: i for i, q in enumerate(qubits)} + + # Create the list of tensors + self.tensors: list[Tensor] = [] + self.canonical_form = {i: None for i in range(n_tensors)} + + # Append each of the tensors initialised in state |0> + m_shape = (1, 1, 2) # Two virtual bonds (dim=1) and one physical + for i in range(n_tensors): + m_tensor = cp.empty(m_shape, dtype=self._cfg._complex_t) + # Initialise the tensor to ket 0 + m_tensor[0][0][0] = 1 + m_tensor[0][0][1] = 0 + self.tensors.append(m_tensor) + + def is_valid(self) -> bool: + """Verify that the MPS object is valid. + + Specifically, verify that the MPS does not exceed the dimension limit ``chi`` of + the virtual bonds, that physical bonds have dimension 2, that all tensors + are rank three and that the data structure sizes are consistent. + + Returns: + False if a violation was detected or True otherwise. + """ + self._flush() + + chi_ok = all( + all(dim <= self._cfg.chi for dim in self.get_virtual_dimensions(pos)) + for pos in range(len(self)) + ) + phys_ok = all(self.get_physical_dimension(pos) == 2 for pos in range(len(self))) + shape_ok = all(len(tensor.shape) == 3 for tensor in self.tensors) + + ds_ok = set(self.canonical_form.keys()) == set(range(len(self))) + ds_ok = ds_ok and set(self.qubit_position.values()) == set(range(len(self))) + + # Debugger logging + self._logger.debug( + "Checking validity of MPS... " + f"chi_ok={chi_ok}, " + f"phys_ok={phys_ok}, " + f"shape_ok={shape_ok}, " + f"ds_ok={ds_ok}" + ) + + return chi_ok and phys_ok and shape_ok and ds_ok + + def apply_gate(self, gate: Command) -> MPS: + """Apply the gate to the MPS. + + Note: + Only one-qubit gates and two-qubit gates are supported. + + Args: + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the command introduced is not a unitary gate. + ValueError: If gate acts on more than 2 qubits. + """ + try: + unitary = gate.op.get_unitary() + except: + raise ValueError("The command introduced is not unitary.") + + # Load the gate's unitary to the GPU memory + unitary = unitary.astype(dtype=self._cfg._complex_t, copy=False) + unitary = cp.asarray(unitary, dtype=self._cfg._complex_t) + + self._logger.debug(f"Applying gate {gate}.") + if len(gate.qubits) not in [1, 2]: + raise ValueError( + "Gates must act on only 1 or 2 qubits! " + + f"This is not satisfied by {gate}." + ) + + self.apply_unitary(unitary, gate.qubits) + + return self + + def apply_unitary( + self, unitary: cp.ndarray, qubits: list[Qubit] + ) -> StructuredState: + """Applies the unitary to the specified qubits of the StructuredState. + + Note: + It is assumed that the matrix provided by the user is unitary. If this is + not the case, the program will still run, but its behaviour is undefined. + + Args: + unitary: The matrix to be applied as a CuPy ndarray. It should either be + a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. + qubits: The qubits the unitary acts on. Only one qubit and two qubit + unitaries are supported. + + Returns: + ``self``, to allow for method chaining. 
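# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] How a caller might hand a raw
# two-qubit unitary to ``apply_unitary``, assuming the constructor and handle
# conventions shown above; ``Config()`` defaults are assumed, and ``MPSxGate``
# is used because the base ``MPS`` raises ``NotImplementedError`` on
# contraction.
# ---------------------------------------------------------------------------
import cupy as cp
import numpy as np
from pytket.circuit import Qubit
from pytket.extensions.cutensornet.general import CuTensorNetHandle
from pytket.extensions.cutensornet.structured_state import Config, MPSxGate

with CuTensorNetHandle() as libhandle:
    mps = MPSxGate(libhandle, [Qubit(0), Qubit(1)], Config())
    cx = np.asarray(
        [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
        dtype=np.complex128,
    )
    # 4x4 matrices act on two qubits, 2x2 on one; both methods return ``self``.
    mps.apply_unitary(cp.asarray(cx), [Qubit(0), Qubit(1)]).apply_scalar(1j)
# ---------------------------------------------------------------------------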
+ + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the number of qubits provided is not one or two. + ValueError: If the size of the matrix does not match with the number of + qubits provided. + """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + self._logger.debug(f"Applying unitary {unitary} on {qubits}.") + + if len(qubits) == 1: + if unitary.shape != (2, 2): + raise ValueError( + "The unitary introduced acts on one qubit but it is not 2x2." + ) + + self._apply_1q_unitary(unitary, qubits[0]) + # NOTE: if the tensor was in canonical form, it remains being so, + # since it is guaranteed that the gate is unitary. + + elif len(qubits) == 2: + if unitary.shape != (4, 4): + raise ValueError( + "The unitary introduced acts on two qubits but it is not 4x4." + ) + + self._apply_2q_unitary(unitary, qubits[0], qubits[1]) + # The tensors will in general no longer be in canonical form. + self.canonical_form[self.qubit_position[qubits[0]]] = None + self.canonical_form[self.qubit_position[qubits[1]]] = None + + else: + raise ValueError("Gates must act on only 1 or 2 qubits!") + + return self + + def apply_scalar(self, scalar: complex) -> MPS: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + self.tensors[0] *= scalar + return self + + def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> MPS: + """Relabels each qubit ``q`` as ``qubit_map[q]``. + + This does not apply any SWAP gate, nor it changes the internal structure of the + state. It simply changes the label of the physical bonds of the tensor network. + + Args: + qubit_map: Dictionary mapping each qubit to its new label. + + Returns: + ``self``, to allow for method chaining. + + Raises: + ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. + """ + new_qubit_position = dict() + for q_orig, q_new in qubit_map.items(): + # Check the qubit is in the state + if q_orig not in self.qubit_position: + raise ValueError(f"Qubit {q_orig} is not in the state.") + # Apply the relabelling for this qubit + new_qubit_position[q_new] = self.qubit_position[q_orig] + + self.qubit_position = new_qubit_position + self._logger.debug(f"Relabelled qubits... {qubit_map}") + return self + + def add_qubit(self, new_qubit: Qubit, position: int, state: int = 0) -> MPS: + """Adds a qubit at the specified position. + + Args: + new_qubit: The identifier of the qubit to be added to the state. + position: The location the new qubit should be inserted at in the MPS. + Qubits on this and later indexed have their position shifted by 1. + state: Choose either ``0`` or ``1`` for the new qubit's state. + Defaults to ``0``. + + Returns: + ``self``, to allow for method chaining. + + Raises: + ValueError: If ``new_qubit`` already exists in the state. + ValueError: If ``position`` is negative or larger than ``len(self)``. + ValueError: If ``state`` is not ``0`` or ``1``. + """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + if new_qubit in self.qubit_position.keys(): + raise ValueError( + f"Qubit {new_qubit} cannot be added, it already is in the MPS." 
+ ) + if position < 0 or position > len(self): + raise ValueError(f"Index {position} is not a valid position in the MPS.") + if state not in [0, 1]: + raise ValueError( + f"Cannot initialise qubit to state {state}. Only 0 or 1 are supported." + ) + + # Identify the dimension of the virtual bond where the new qubit will appear + if position == len(self): + dim = self.get_virtual_dimensions(len(self) - 1)[1] # Rightmost bond + else: # Otherwise, pick the left bond of the tensor currently in ``position`` + dim = self.get_virtual_dimensions(position)[0] + + # Create the tensor for I \otimes |state> + identity = cp.eye(dim, dtype=self._cfg._complex_t) + qubit_tensor = cp.zeros(2, dtype=self._cfg._complex_t) + qubit_tensor[state] = 1 + # Apply the tensor product + new_tensor = cq.contract( + "lr,p->lrp", + identity, + qubit_tensor, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # Place this ``new_tensor`` in the MPS at ``position``, + # the previous tensors at ``position`` onwards are shifted to the right + orig_mps_len = len(self) # Store it in variable, since this will change + self.tensors.insert(position, new_tensor) + + # Update the dictionary tracking the canonical form + for pos in reversed(range(position, orig_mps_len)): + self.canonical_form[pos + 1] = self.canonical_form[pos] + # The canonical form of the new tensor is both LEFT and RIGHT, just choose one + self.canonical_form[position] = DirMPS.LEFT # type: ignore + + # Finally, update the dictionary tracking the qubit position + for q, pos in self.qubit_position.items(): + if pos >= position: + self.qubit_position[q] += 1 + self.qubit_position[new_qubit] = position + + return self + + def canonicalise(self, l_pos: int, r_pos: int) -> None: + """Canonicalises the MPS object. + + Applies the necessary gauge transformations so that all MPS tensors + to the left of position ``l_pos`` are in left orthogonal form and + all MPS tensors to the right of ``r_pos`` in right orthogonal form. + + Args: + l_pos: The position of the leftmost tensor that is not to be + canonicalised. + r_pos: The position of the rightmost tensor that is not to be + canonicalised. + """ + self._logger.debug(f"Start canonicalisation... l_pos={l_pos}, r_pos={r_pos}") + + for pos in range(l_pos): + self.canonicalise_tensor(pos, form=DirMPS.LEFT) + for pos in reversed(range(r_pos + 1, len(self))): + self.canonicalise_tensor(pos, form=DirMPS.RIGHT) + + self._logger.debug(f"Finished canonicalisation.") + + def canonicalise_tensor(self, pos: int, form: DirMPS) -> None: + """Canonicalises a tensor from an MPS object. + + Applies the necessary gauge transformations so that the tensor at + position ``pos`` in the MPS is in the orthogonal form dictated by + ``form``. + + Args: + position: The position of the tensor to be canonicalised. + form: LEFT form means that its conjugate transpose is its inverse if + connected to its left bond and physical bond. Similarly for RIGHT. + + Raises: + ValueError: If ``form`` is not a value in ``DirMPS``. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. 
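# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] What LEFT orthogonality means in
# practice: fuse the (left, physical) bonds of a site into matrix rows and the
# conjugate transpose acts as an inverse, i.e. M^dagger M = I. A standalone
# check assuming only cupy; ``site`` follows the (left, right, physical)
# ordering used throughout this file.
# ---------------------------------------------------------------------------
import cupy as cp

def is_left_canonical(site: cp.ndarray, atol: float = 1e-12) -> bool:
    l, r, p = site.shape
    m = site.transpose(0, 2, 1).reshape(l * p, r)  # rows: left+physical bonds
    return bool(cp.allclose(m.conj().T @ m, cp.eye(r), atol=atol))

trivial = cp.zeros((1, 1, 2), dtype=cp.complex128)
trivial[0, 0, 0] = 1.0                             # a |0> site is canonical
assert is_left_canonical(trivial)
# ---------------------------------------------------------------------------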
+ """ + if form == self.canonical_form[pos]: + # Tensor already in canonical form, nothing needs to be done + self._logger.debug(f"Position {pos} already in {form}.") + return None + + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + self._logger.debug(f"Canonicalising {pos} to {form}.") + # Glossary of bond IDs used here: + # s -> shared virtual bond between T and Tnext + # v -> the other virtual bond of T + # V -> the other virtual bond of Tnext + # p -> physical bond of T + # P -> physical bond of Tnext + + # Gather the details from the MPS tensors at this position + T = self.tensors[pos] + + # Assign the bond IDs + if form == DirMPS.LEFT: + next_pos = pos + 1 + Tnext = self.tensors[next_pos] + T_bonds = "vsp" + Q_bonds = "vap" + R_bonds = "as" + Tnext_bonds = "sVP" + result_bonds = "aVP" + elif form == DirMPS.RIGHT: + next_pos = pos - 1 + Tnext = self.tensors[next_pos] + T_bonds = "svp" + Q_bonds = "avp" + R_bonds = "as" + Tnext_bonds = "VsP" + result_bonds = "VaP" + else: + raise ValueError("Argument form must be a value in DirMPS.") + + # Apply QR decomposition + self._logger.debug(f"QR decompose a {T.nbytes / 2**20} MiB tensor.") + + subscripts = T_bonds + "->" + Q_bonds + "," + R_bonds + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + Q, R = tensor.decompose( + subscripts, T, method=tensor.QRMethod(), options=options + ) + self._logger.debug(f"QR decomposition finished.") + + # Contract R into Tnext + subscripts = R_bonds + "," + Tnext_bonds + "->" + result_bonds + result = cq.contract( + subscripts, + R, + Tnext, + options=options, + optimize={"path": [(0, 1)]}, + ) + self._logger.debug(f"Contraction with {next_pos} applied.") + + # Update self.tensors + self.tensors[pos] = Q + self.canonical_form[pos] = form # type: ignore + self.tensors[next_pos] = result + self.canonical_form[next_pos] = None + + def vdot(self, other: MPS) -> complex: # type: ignore + """Obtain the inner product of the two MPS: ````. + + It can be used to compute the squared norm of an MPS ``mps`` as + ``mps.vdot(mps)``. The tensors within the MPS are not modified. + + Note: + The state that is conjugated is ``self``. + + Args: + other: The other MPS. + + Returns: + The resulting complex number. + + Raises: + RuntimeError: If number of tensors, dimensions or positions do not match. + RuntimeError: If there are no tensors in the MPS. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + if len(self) != len(other): + raise RuntimeError("Number of tensors do not match.") + for i in range(len(self)): + if self.get_physical_dimension(i) != other.get_physical_dimension(i): + raise RuntimeError( + f"Physical bond dimension at position {i} do not match." + ) + if self.qubit_position != other.qubit_position: + raise RuntimeError( + "The qubit labels or their position on the MPS do not match." + ) + if len(self) == 0: + raise RuntimeError("There are no tensors in the MPS.") + + self._flush() + other._flush() + + self._logger.debug("Applying vdot between two MPS.") + + # We convert both MPS to their interleaved representation and + # contract them using cuQuantum. 
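# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] The interleaved format used below:
# tensor, its bond labels, tensor, its bond labels, ..., output labels, where
# ``[]`` discards the remaining dim-1 bonds. Labels are arbitrary strings; the
# "*" prefix only keeps the conjugated copy's virtual bonds distinct. A
# standalone norm computation for a two-site |00> product state, assuming
# cupy and cuquantum are importable.
# ---------------------------------------------------------------------------
import cupy as cp
import cuquantum as cq

a = cp.zeros((1, 1, 2), dtype=cp.complex128)
a[0, 0, 0] = 1.0
b = a.copy()
norm_sq = cq.contract(
    a.conj(), ["*v0", "*v1", "q0"], b.conj(), ["*v1", "*v2", "q1"],
    a, ["v0", "v1", "q0"], b, ["v1", "v2", "q1"],
    [],
)
assert abs(complex(norm_sq) - 1.0) < 1e-10
# ---------------------------------------------------------------------------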
+ mps1 = self._get_interleaved_representation(conj=True) + mps2 = other._get_interleaved_representation(conj=False) + interleaved_rep = mps1 + mps2 + interleaved_rep.append([]) # Discards dim=1 bonds with [] + + # We define the contraction path ourselves + end_mps1 = len(self) - 1 # Rightmost tensor of mps1 in interleaved_rep + end_mps2 = len(self) + len(other) - 1 # Rightmost tensor of mps2 + contraction_path = [(end_mps1, end_mps2)] # Contract ends of mps1 and mps2 + for _ in range(len(self) - 1): + # Update the position markers + end_mps1 -= 1 # One tensor was removed from mps1 + end_mps2 -= 2 # One tensor removed from mps1 and another from mps2 + # Contract the result from last iteration with the ends of mps1 and mps2 + contraction_path.append((end_mps2, end_mps2 + 1)) # End of mps2 and result + contraction_path.append((end_mps1, end_mps2)) # End of mps1 and ^ outcome + + # Apply the contraction + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": contraction_path}, + ) + + self._logger.debug(f"Result from vdot={result}") + return complex(result) + + def _get_interleaved_representation( + self, conj: bool = False + ) -> list[Union[cp.ndarray, str]]: + """Returns the interleaved representation of the MPS used by cuQuantum. + + Args: + conj: If True, all tensors are conjugated and bonds IDs are prefixed + with * (except physical bonds). Defaults to False. + """ + self._logger.debug("Creating interleaved representation...") + + # Auxiliar dictionary of physical bonds to qubit IDs + qubit_id = {location: qubit for qubit, location in self.qubit_position.items()} + + interleaved_rep = [] + for i, t in enumerate(self.tensors): + # Append the tensor + if conj: + interleaved_rep.append(t.conj()) + else: + interleaved_rep.append(t) + + # Create the ID for the bonds involved + bonds = [str(i), str(i + 1), str(qubit_id[i])] + if conj: + bonds[0] = "*" + bonds[0] + bonds[1] = "*" + bonds[1] + interleaved_rep.append(bonds) + self._logger.debug(f"Bond IDs: {bonds}") + + return interleaved_rep + + def sample(self) -> dict[Qubit, int]: + """Returns a sample from a Z measurement applied on every qubit. + + Notes: + The MPS ``self`` is not updated. This is equivalent to applying + ``mps = self.copy()`` then ``mps.measure(mps.get_qubits())``. + + Returns: + A dictionary mapping each of the qubits in the MPS to their 0 or 1 outcome. + """ + + # TODO: Copying is not strictly necessary, but to avoid it we would need to + # modify the algorithm in `measure`. This may be done eventually if `copy` + # is shown to be a bottleneck when sampling (which is likely). + mps = self.copy() + outcomes = mps.measure(mps.get_qubits()) + # If the user sets a seed for the MPS, we'd like that every copy of the MPS + # produces the same sequence of samples, but samples within a sequence may be + # different from each other. Achieved by updating the state of `self._rng`. + self._rng.setstate(mps._rng.getstate()) + + return outcomes + + def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: + """Applies a Z measurement on each of the ``qubits``. + + Notes: + After applying this function, ``self`` will contain the normalised + projected state. + + Args: + qubits: The subset of qubits to be measured. + destructive: If ``True``, the resulting state will not contain the + measured qubits. If ``False``, these qubits will remain in the + state. Defaults to ``True``. 
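# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] Sampling versus measuring on a Bell
# pair, assuming the ``simulate`` entry point documented for this module and
# ``Config()`` defaults.
# ---------------------------------------------------------------------------
from pytket.circuit import Circuit, Qubit
from pytket.extensions.cutensornet.general import CuTensorNetHandle
from pytket.extensions.cutensornet.structured_state import (
    Config, SimulationAlgorithm, simulate,
)

with CuTensorNetHandle() as libhandle:
    circ = Circuit(2).H(0).CX(0, 1)
    mps = simulate(libhandle, circ, SimulationAlgorithm.MPSxGate, Config())
    shot = mps.sample()                               # `mps` is left unchanged
    assert shot[Qubit(0)] == shot[Qubit(1)]           # Bell correlations
    out = mps.measure({Qubit(0)}, destructive=False)  # collapses, keeps q[0]
    assert out[Qubit(0)] in (0, 1)
    assert Qubit(0) in mps.get_qubits()
# ---------------------------------------------------------------------------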
+ + Returns: + A dictionary mapping the given ``qubits`` to their measurement outcome, + i.e. either ``0`` or ``1``. + + Raises: + ValueError: If an element in ``qubits`` is not a qubit in the state. + """ + result = dict() + + # Obtain the positions that need to be measured and build the reverse dict + position_qubit_map = dict() + for q in qubits: + if q not in self.qubit_position: + raise ValueError(f"Qubit {q} is not a qubit in the MPS.") + position_qubit_map[self.qubit_position[q]] = q + positions = sorted(position_qubit_map.keys()) + self._logger.debug(f"Measuring qubits={position_qubit_map}") + + # Tensor for postselection to |0> + zero_tensor = cp.zeros(2, dtype=self._cfg._complex_t) + zero_tensor[0] = 1 + + # Measure and postselect each of the positions, one by one + while positions: + pos = positions.pop() # The rightmost position to be measured + + # Convert to canonical form with center at this position + self.canonicalise(pos, pos) + + # Glossary of bond IDs: + # l -> left virtual bond of tensor in `pos` + # r -> right virtual bond of tensor in `pos` + # p -> physical bond of tensor in `pos` + # P -> physical bond of tensor in `pos` (copy) + + # Take the tensor in this position and obtain its prob for |0>. + # Since the MPS is in canonical form, this corresponds to the probability + # if we were to take all of the other tensors into account. + prob = cq.contract( + "lrp,p,lrP,P->", # No open bonds remain; this is just a scalar + self.tensors[pos].conj(), + zero_tensor, + self.tensors[pos], + zero_tensor, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1), (0, 1), (0, 1)]}, + ) + + # Throw a coin to decide measurement outcome + outcome = 0 if prob > self._rng.random() else 1 + result[position_qubit_map[pos]] = outcome + self._logger.debug(f"Outcome of qubit at {pos} is {outcome}.") + + # Postselect the MPS for this outcome, renormalising at the same time + postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) + postselection_tensor[outcome] = 1 / np.sqrt( + abs(outcome - prob) + ) # Normalise + + self._postselect_qubit(position_qubit_map[pos], postselection_tensor) + + # If the measurement is not destructive, we must add the qubit back again + if not destructive: + qubit = position_qubit_map[pos] + self.add_qubit(qubit, pos, state=outcome) + + return result + + def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: + """Applies a postselection, updates the MPS and returns its probability. + + Notes: + After applying this function, ``self`` will contain the MPS of the projected + state over the non-postselected qubits. + + The resulting state has been normalised. + + Args: + qubit_outcomes: A dictionary mapping a subset of qubits in the MPS to their + desired outcome value (either ``0`` or ``1``). + + Returns: + The probability of this postselection to occur in a measurement. + + Raises: + ValueError: If a key in ``qubit_outcomes`` is not a qubit in the MPS. + ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. + ValueError: If all of the qubits in the MPS are being postselected. Instead, + you may wish to use ``get_amplitude()``. + """ + for q, v in qubit_outcomes.items(): + if q not in self.qubit_position: + raise ValueError(f"Qubit {q} is not a qubit in the MPS.") + if v not in {0, 1}: + raise ValueError(f"Outcome of {q} cannot be {v}. Choose int 0 or 1.") + + if len(qubit_outcomes) == len(self): + raise ValueError( + "Cannot postselect all qubits. 
You may want to use get_amplitude()." + ) + self._logger.debug(f"Postselecting qubits={qubit_outcomes}") + + # Apply a postselection for each of the qubits + for qubit, outcome in qubit_outcomes.items(): + # Create the rank-1 postselection tensor + postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) + postselection_tensor[outcome] = 1 + # Apply postselection + self._postselect_qubit(qubit, postselection_tensor) + + # Calculate the squared norm of the postselected state; this is its probability + prob = self.vdot(self) + assert np.isclose(prob.imag, 0.0, atol=self._cfg._atol) + prob = prob.real + + # Renormalise; it suffices to update the first tensor + if len(self) > 0 and not np.isclose(prob, 0.0, atol=self._cfg._atol): + self.tensors[0] = self.tensors[0] / np.sqrt(prob) + self.canonical_form[0] = None + + self._logger.debug(f"Probability of this postselection is {prob}.") + return prob + + def _postselect_qubit(self, qubit: Qubit, postselection_tensor: cp.ndarray) -> None: + """Postselect the qubit with the given tensor.""" + + pos = self.qubit_position[qubit] + self.tensors[pos] = cq.contract( + "lrp,p->lr", + self.tensors[pos], + postselection_tensor, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + + # Glossary of bond IDs: + # s -> shared bond between tensor in `pos` and next + # v -> the other virtual bond of tensor in `pos` + # V -> the other virtual bond of tensor in next position + # p -> physical bond of tensor in `pos` + # P -> physical bond of tensor in next position + + if len(self) == 1: # This is the last tensor + pass + + elif pos != 0: # Contract with next tensor on the left + self.tensors[pos - 1] = cq.contract( + "sv,VsP->VvP", + self.tensors[pos], + self.tensors[pos - 1], + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + self.canonical_form[pos - 1] = None + else: # There are no tensors on the left, contract with the one on the right + self.tensors[pos + 1] = cq.contract( + "vs,sVP->vVP", + self.tensors[pos], + self.tensors[pos + 1], + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + self.canonical_form[pos + 1] = None + + # Shift all entries after `pos` to the left + for q, p in self.qubit_position.items(): + if pos < p: + self.qubit_position[q] = p - 1 + for p in range(pos, len(self) - 1): + self.canonical_form[p] = self.canonical_form[p + 1] + + # Remove the entry from the data structures + del self.qubit_position[qubit] + del self.canonical_form[len(self) - 1] + self.tensors.pop(pos) + + def expectation_value(self, pauli_string: QubitPauliString) -> float: + """Obtains the expectation value of the Pauli string observable. + + Args: + pauli_string: A pytket object representing a tensor product of Paulis. + + Returns: + The expectation value. + + Raises: + ValueError: If a key in ``pauli_string`` is not a qubit in the MPS. 
+ """ + for q in pauli_string.map.keys(): + if q not in self.qubit_position: + raise ValueError(f"Qubit {q} is not a qubit in the MPS.") + + self._logger.debug(f"Calculating expectation value of {pauli_string}.") + mps_copy = self.copy() + pauli_optype = {Pauli.Z: OpType.Z, Pauli.X: OpType.X, Pauli.Y: OpType.Y} + + # Apply each of the Pauli operators to the MPS copy + for qubit, pauli in pauli_string.map.items(): + if pauli != Pauli.I: + pos = mps_copy.qubit_position[qubit] + pauli_unitary = Op.create(pauli_optype[pauli]).get_unitary() + pauli_tensor = cp.asarray( + pauli_unitary.astype(dtype=self._cfg._complex_t, copy=False), + dtype=self._cfg._complex_t, + ) + + # Contract the Pauli to the MPS tensor of the corresponding qubit + mps_copy.tensors[pos] = cq.contract( + "lrp,Pp->lrP", + mps_copy.tensors[pos], + pauli_tensor, + options={ + "handle": self._lib.handle, + "device_id": self._lib.device_id, + }, + optimize={"path": [(0, 1)]}, + ) + + # Obtain the inner product + value = self.vdot(mps_copy) + assert np.isclose(value.imag, 0.0, atol=self._cfg._atol) + + self._logger.debug(f"Expectation value is {value.real}.") + return value.real + + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + return self.fidelity + + def get_statevector(self) -> np.ndarray: + """Returns the statevector with qubits in Increasing Lexicographic Order (ILO). + + Raises: + ValueError: If there are no qubits left in the MPS. + """ + if len(self) == 0: + raise ValueError("There are no qubits left in this MPS.") + + # If there is only one qubit left, it is trivial + if len(self) == 1: + result_tensor = self.tensors[0] + + else: + # Create the interleaved representation with all tensors + interleaved_rep = [] + for pos in range(len(self)): + interleaved_rep.append(self.tensors[pos]) + interleaved_rep.append( + ["v" + str(pos), "v" + str(pos + 1), "p" + str(pos)] + ) + + # Specify the output bond IDs in ILO order + output_bonds = [] + for q in sorted(self.qubit_position.keys()): + output_bonds.append("p" + str(self.qubit_position[q])) + interleaved_rep.append(output_bonds) + + # We define the contraction path ourselves + end_mps = len(self) - 1 + contraction_path = [(end_mps - 1, end_mps)] # Contract the last two tensors + end_mps -= 2 # Two tensors removed from the MPS + for _ in range(len(self) - 2): + # Contract the result from last iteration and the last tensor in the MPS + contraction_path.append((end_mps, end_mps + 1)) + # Update the position marker + end_mps -= 1 # One tensor was removed from the MPS + + # Contract + result_tensor = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": contraction_path}, + ) + + # Convert to numpy vector and flatten + statevector: np.ndarray = cp.asnumpy(result_tensor).flatten() + return statevector + + def get_amplitude(self, state: int) -> complex: + """Returns the amplitude of the chosen computational state. + + Notes: + The result is equivalent to ``mps.get_statevector[b]``, but this method + is faster when querying a single amplitude (or just a few). + + Args: + state: The integer whose bitstring describes the computational state. + The qubits in the bitstring are in increasing lexicographic order. + + Returns: + The amplitude of the computational state in the MPS. 
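# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the patch] The bit-ordering convention shared
# by ``get_statevector`` and ``get_amplitude``: increasing lexicographic order
# puts q[0] in the most significant bit. Assumes the ``simulate`` entry point
# and ``Config()`` defaults.
# ---------------------------------------------------------------------------
import numpy as np
from pytket.circuit import Circuit
from pytket.extensions.cutensornet.general import CuTensorNetHandle
from pytket.extensions.cutensornet.structured_state import (
    Config, SimulationAlgorithm, simulate,
)

with CuTensorNetHandle() as libhandle:
    mps = simulate(libhandle, Circuit(2).X(1), SimulationAlgorithm.MPSxGate, Config())
    sv = mps.get_statevector()
    assert np.isclose(sv[0b01], 1.0)                 # |01>: q[0]=0, q[1]=1
    assert np.isclose(mps.get_amplitude(0b01), 1.0)  # same indexing convention
# ---------------------------------------------------------------------------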
+ """ + + # Auxiliar dictionary of physical bonds to qubit IDs + qubit_id = {location: qubit for qubit, location in self.qubit_position.items()} + + # Find out what the map MPS_position -> bit value is + ilo_qubits = sorted(self.qubit_position.keys()) + mps_pos_bitvalue = dict() + + for i, q in enumerate(ilo_qubits): + pos = self.qubit_position[q] + bitvalue = 1 if state & 2 ** (len(self) - i - 1) else 0 + mps_pos_bitvalue[pos] = bitvalue + + # Create the interleaved representation including all postselection tensors + interleaved_rep = self._get_interleaved_representation() + for pos in range(len(self)): + postselection_tensor = cp.zeros(2, dtype=self._cfg._complex_t) + postselection_tensor[mps_pos_bitvalue[pos]] = 1 + interleaved_rep.append(postselection_tensor) + interleaved_rep.append([str(qubit_id[pos])]) + # Append [] so that all dim=1 bonds are ignored in the result of contract + interleaved_rep.append([]) + + # We define the contraction path ourselves + end_mps = len(self) - 1 # Rightmost tensor of MPS in interleaved_rep + end_rep = 2 * len(self) - 1 # Last position in the representation + contraction_path = [(end_mps, end_rep)] # Contract ends + for _ in range(len(self) - 1): + # Update the position markers + end_mps -= 1 # One tensor was removed from mps + end_rep -= 2 # One tensor removed from mps and another from postselect + # Contract the result from last iteration with the ends + contraction_path.append((end_mps, end_rep + 1)) # End of mps and result + contraction_path.append((end_rep - 1, end_rep)) # End of mps1 and ^ outcome + + # Apply the contraction + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 1}, + ) + + self._logger.debug(f"Amplitude of state {state} is {result}.") + return complex(result) + + def get_qubits(self) -> set[Qubit]: + """Returns the set of qubits that this MPS is defined on.""" + return set(self.qubit_position.keys()) + + def get_virtual_dimensions(self, position: int) -> tuple[int, int]: + """Returns the virtual bonds dimension of the tensor ``tensors[position]``. + + Args: + position: A position in the MPS. + + Returns: + A tuple where the first element is the dimensions of the left virtual bond + and the second elements is that of the right virtual bond. + + Raises: + RuntimeError: If ``position`` is out of bounds. + """ + if position < 0 or position >= len(self): + raise RuntimeError(f"Position {position} is out of bounds.") + + virtual_dims: tuple[int, int] = self.tensors[position].shape[:2] + return virtual_dims + + def get_physical_dimension(self, position: int) -> int: + """Returns the physical bond dimension of the tensor ``tensors[position]``. + + Args: + position: A position in the MPS. + + Returns: + The dimension of the physical bond. + + Raises: + RuntimeError: If ``position`` is out of bounds. + """ + if position < 0 or position >= len(self): + raise RuntimeError(f"Position {position} is out of bounds.") + + physical_dim: int = self.tensors[position].shape[2] + return physical_dim + + def get_byte_size(self) -> int: + """ + Returns: + The number of bytes the MPS currently occupies in GPU memory. + """ + return sum(t.nbytes for t in self.tensors) + + def get_device_id(self) -> int: + """ + Returns: + The identifier of the device (GPU) where the tensors are stored. + """ + return int(self.tensors[0].device) + + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Update the ``CuTensorNetHandle`` used by this ``MPS`` object. 
Multiple + objects may use the same handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of the MPS are stored. + """ + if libhandle.device_id != self.get_device_id(): + raise RuntimeError( + "Device of libhandle is not the one where the MPS is stored.", + f"{libhandle.device_id} != {self.get_device_id()}", + ) + self._lib = libhandle + + def copy(self) -> MPS: + """ + Returns: + A deep copy of the MPS on the same device. + """ + self._flush() + + # Create a dummy object + new_mps = MPS(self._lib, qubits=[], config=self._cfg.copy()) + # Copy all data + new_mps.fidelity = self.fidelity + new_mps.tensors = [t.copy() for t in self.tensors] + new_mps.canonical_form = self.canonical_form.copy() + new_mps.qubit_position = self.qubit_position.copy() + + # If the user has set a seed, assume that they'd want every copy + # to behave in the same way, so we copy the RNG state + if self._cfg.seed is not None: + # Setting state (rather than just copying the seed) allows for the + # copy to continue from the same point in the sequence of random + # numbers as the original copy + new_mps._rng.setstate(self._rng.getstate()) + # Otherwise, samples will be different between copies, since their + # self._rng will be initialised from system randomnes when seed=None. + + self._logger.debug( + "Successfully copied an MPS " + f"of size {new_mps.get_byte_size() / 2**20} MiB." + ) + return new_mps + + def __len__(self) -> int: + """ + Returns: + The number of tensors in the MPS. + """ + return len(self.tensors) + + def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPS: + raise NotImplementedError( + "MPS is a base class with no contraction algorithm implemented." + + " You must use a subclass of MPS, such as MPSxGate or MPSxMPO." + ) + + def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPS: + raise NotImplementedError( + "MPS is a base class with no contraction algorithm implemented." + + " You must use a subclass of MPS, such as MPSxGate or MPSxMPO." + ) + + def _flush(self) -> None: + # Does nothing in the general MPS case; but children classes with batched + # gate contraction will redefine this method so that the last batch of + # gates is applied. + return None diff --git a/pytket/extensions/cutensornet/structured_state/mps_gate.py b/pytket/extensions/cutensornet/structured_state/mps_gate.py index f308261c..4a7fa7f0 100644 --- a/pytket/extensions/cutensornet/structured_state/mps_gate.py +++ b/pytket/extensions/cutensornet/structured_state/mps_gate.py @@ -1,430 +1,430 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations # type: ignore -import warnings -import logging - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum as cq # type: ignore - from cuquantum.cutensornet import tensor # type: ignore - from cuquantum.cutensornet.experimental import contract_decompose # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - -from pytket.circuit import Qubit -from .mps import MPS, DirMPS - - -class MPSxGate(MPS): - """Implements a gate-by-gate contraction algorithm to calculate the output state - of a circuit as an ``MPS``. The algorithm is described in: - https://arxiv.org/abs/2002.07730 - """ - - def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPSxGate: - """Applies the 1-qubit unitary to the MPS. - - This does not increase the dimension of any bond. - - Args: - unitary: The unitary to be applied. - qubit: The qubit the unitary acts on. - - Returns: - ``self``, to allow for method chaining. - """ - position = self.qubit_position[qubit] - - # Glossary of bond IDs - # p -> physical bond of the MPS tensor - # v -> one of the virtual bonds of the MPS tensor - # V -> the other virtual bond of the MPS tensor - # o -> the output bond of the gate - - T_bonds = "vVp" - result_bonds = "vVo" - gate_bonds = "op" - - # Contract - new_tensor = cq.contract( - gate_bonds + "," + T_bonds + "->" + result_bonds, - unitary, - self.tensors[position], - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - - # Update ``self.tensors`` - self.tensors[position] = new_tensor - return self - - def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPSxGate: - """Applies the 2-qubit unitary to the MPS. - - The MPS is converted to canonical and truncation is applied if necessary. - - Args: - unitary: The unitary to be applied. - q0: The first qubit in the tuple |q0>|q1> the unitary acts on. - q1: The second qubit in the tuple |q0>|q1> the unitary acts on. - - Returns: - ``self``, to allow for method chaining. - """ - - # If qubits are not adjacent, use an alternative approach - if abs(self.qubit_position[q0] - self.qubit_position[q1]) != 1: - return self._apply_2q_unitary_nonadjacent(unitary, q0, q1) - # Otherwise, proceed as normal - - positions = [self.qubit_position[q0], self.qubit_position[q1]] - l_pos = min(positions) - r_pos = max(positions) - - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - # Always canonicalise. Even in the case of exact simulation (no truncation) - # canonicalisation may reduce the bond dimension (thanks to reduced QR). 
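For reference, the single-qubit update above is a single einsum over the physical leg, so neither virtual bond can grow. A small NumPy stand-in follows; the apply_1q helper and the Hadamard example are illustrative only, not part of the patch.

import numpy as np

def apply_1q(site, unitary):
    """site: (v, V, p) tensor, unitary: (o, p) matrix -> (v, V, o) tensor."""
    # Same contraction pattern as above: only the physical leg is touched
    return np.einsum("op,vVp->vVo", unitary, site)

# Example: apply a Hadamard to the |0> site of a product-state MPS
H = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)
site = np.zeros((1, 1, 2)); site[0, 0, 0] = 1.0
print(apply_1q(site, H)[0, 0])   # [0.7071..., 0.7071...]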
- self.canonicalise(l_pos, r_pos) - - # Reshape into a rank-4 tensor - gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) - - # Glossary of bond IDs - # l -> physical bond of the left tensor in the MPS - # r -> physical bond of the right tensor in the MPS - # L -> left bond of the outcome of the gate - # R -> right bond of the outcome of the gate - # S -> shared bond of the gate tensor's SVD - # a,b,c -> the virtual bonds of the tensors - - if l_pos == positions[0]: - gate_bonds = "LRlr" - else: # Implicit swap - gate_bonds = "RLrl" - - # Apply SVD on the gate tensor to remove any zero singular values ASAP - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - partition="U", # Contract S directly into U - ) - # Apply the SVD decomposition using the configuration defined above - U, S, V = tensor.decompose( - f"{gate_bonds}->SLl,SRr", gate_tensor, method=svd_method, options=options - ) - assert S is None # Due to "partition" option in SVDMethod - - # Contract - self._logger.debug("Contracting the two-qubit gate with its site tensors...") - T = cq.contract( - f"SLl,abl,SRr,bcr->acLR", - U, - self.tensors[l_pos], - V, - self.tensors[r_pos], - options=options, - optimize={"path": [(0, 1), (0, 1), (0, 1)]}, - ) - self._logger.debug(f"Intermediate tensor of size (MiB)={T.nbytes / 2**20}") - - if self._cfg.truncation_fidelity < 1: - # Apply SVD decomposition to truncate as much as possible before exceeding - # a `discarded_weight_cutoff` of `1 - self._cfg.truncation_fidelity`. - self._logger.debug( - f"Truncating to target fidelity={self._cfg.truncation_fidelity}" - ) - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - discarded_weight_cutoff=1 - self._cfg.truncation_fidelity, - partition="U", # Contract S directly into U (named L in our case) - normalization="L2", # Sum of squares singular values must equal 1 - ) - - else: - # Apply SVD decomposition and truncate up to a `max_extent` (for the shared - # bond) of `self._cfg.chi`. - # If user did not provide a value for `chi`, this is still given a - # default value that is so large that it causes no truncation at all. - # Nevertheless, we apply SVD so that singular values below `self._cfg.zero` - # are truncated. - self._logger.debug(f"Truncating to (or below) chosen chi={self._cfg.chi}") - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - max_extent=self._cfg.chi, - partition="U", # Contract S directly into U (named L in our case) - normalization="L2", # Sum of squares singular values must equal 1 - ) - - # Apply the SVD decomposition using the configuration defined above - L, S, R, svd_info = tensor.decompose( - "acLR->asL,scR", T, method=svd_method, options=options, return_info=True - ) - assert S is None # Due to "partition" option in SVDMethod - - # Update fidelity if there was some truncation - # - # discarded_weight is calculated within cuTensorNet as: - # sum([s**2 for s in S']) - # discarded_weight = 1 - ------------------------- - # sum([s**2 for s in S]) - # where S is the list of original singular values and S' is the set of - # singular values that remain after truncation (before normalisation). - # It can be shown that the fidelity ||^2 (for |phi> and |psi> - # unit vectors before and after truncation) is equal to 1 - disc_weight. - # - # We multiply the fidelity of the current step to the overall fidelity - # to keep track of a lower bound for the fidelity. - this_fidelity = 1.0 - svd_info.discarded_weight - self.fidelity *= this_fidelity - # Report to logger - self._logger.debug(f"Truncation done. 
Truncation fidelity={this_fidelity}") - self._logger.debug( - "Reduced virtual bond dimension from " - f"{svd_info.full_extent} to {svd_info.reduced_extent}." - ) - - self.tensors[l_pos] = L - self.tensors[r_pos] = R - - # If requested, provide info about memory usage. - if self._logger.isEnabledFor(logging.INFO): - # If-statetement used so that we only call `get_byte_size` if needed. - self._logger.info(f"MPS size (MiB)={self.get_byte_size() / 2**20}") - self._logger.info(f"MPS fidelity={self.fidelity}") - - return self - - def _apply_2q_unitary_nonadjacent( - self, unitary: cp.ndarray, q0: Qubit, q1: Qubit - ) -> MPSxGate: - """Applies the 2-qubit unitary to the MPS between non-adjacent qubits. - - The MPS is converted to canonical and truncation is applied if necessary. - - Args: - unitary: The unitary to be applied. - q0: The first qubit in the tuple |q0>|q1> the unitary acts on. - q1: The second qubit in the tuple |q0>|q1> the unitary acts on. - - Returns: - ``self``, to allow for method chaining. - """ - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - positions = [self.qubit_position[q0], self.qubit_position[q1]] - l_pos = min(positions) - r_pos = max(positions) - - # Always canonicalise. Even in the case of exact simulation (no truncation) - # canonicalisation may reduce the bond dimension (thanks to reduced QR). - self.canonicalise(l_pos, l_pos) - - # Reshape into a rank-4 tensor - gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) - - # Glossary of bond IDs - # p -> some physical bond of the MPS - # l -> physical bond of the left tensor in the MPS - # r -> physical bond of the right tensor in the MPS - # L -> left bond of the outcome of the gate - # R -> right bond of the outcome of the gate - # s -> shared bond of the gate tensor's SVD - # a,b -> virtual bonds of the MPS - # m,M -> virtual bonds connected to the "message tensor" - - if l_pos == positions[0]: - gate_bonds = "LRlr" - else: # Implicit swap - gate_bonds = "RLrl" - - # Apply SVD on the gate tensor to remove any zero singular values ASAP - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - partition="U", # Contract S directly into U (i.e. l_gate_tensor) - ) - # Apply the SVD decomposition using the configuration defined above - l_gate_tensor, S, r_gate_tensor = tensor.decompose( - f"{gate_bonds}->sLl,sRr", gate_tensor, method=svd_method, options=options - ) - assert S is None # Due to "partition" option in SVDMethod - - ################################# - ### Apply the gate to the MPS ### - ################################# - - orig_fidelity = self.fidelity - - # First, contract `l_gate_tensor` with the - # MPS site tensor on the left position. Then, decompose the new tensor so - # that a "message tensor" carrying the bond `s` is sent through the right - # virtual bond. We do these two steps in a single `contract_decompose` - self.tensors[l_pos], msg_tensor = contract_decompose( - "sLl,abl->amL,smb", - l_gate_tensor, - self.tensors[l_pos], - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # The site tensor is now in canonical form (since S is contracted to the right) - self.canonical_form[l_pos] = DirMPS.RIGHT # type: ignore - - # Next, "push" the `msg_tensor` through all site tensors between `l_pos` - # and `r_pos`. Once again, this is just contract_decompose on each. 
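Each step of the message passing described above contracts the message tensor with the next site over their shared virtual bond and then splits the result with a reduced QR, so the gate bond keeps travelling towards the right qubit. A rough NumPy sketch of one such step; push_message and the toy shapes are illustrative only, not the cuQuantum contract_decompose call.

import numpy as np

def push_message(msg, site):
    """msg: (s, a, m), site: (m, b, p) -> new_site: (a, M, p), new_msg: (s, M, b)."""
    s, a, m = msg.shape
    _, b, p = site.shape
    # Contract the shared virtual bond m, as in "sam,mbp->..."
    theta = np.einsum("sam,mbp->sabp", msg, site)
    # Keep (a, p) on the site; let (s, b) travel on with the new message
    mat = theta.transpose(1, 3, 0, 2).reshape(a * p, s * b)
    q, r = np.linalg.qr(mat)                          # reduced QR
    M = q.shape[1]
    new_site = q.reshape(a, p, M).transpose(0, 2, 1)  # (a, M, p)
    new_msg = r.reshape(M, s, b).transpose(1, 0, 2)   # (s, M, b)
    return new_site, new_msg

rng = np.random.default_rng(0)
msg, site = rng.normal(size=(2, 3, 4)), rng.normal(size=(4, 3, 2))
new_site, new_msg = push_message(msg, site)
# The pair reproduces the contraction of the original message and site
print(np.allclose(np.einsum("aMp,sMb->sabp", new_site, new_msg),
                  np.einsum("sam,mbp->sabp", msg, site)))   # True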
- for pos in range(l_pos + 1, r_pos): - # Report to logger - self._logger.debug( - f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through site " - f"tensor ({self.tensors[pos].nbytes // 2**20} MiB) in position {pos}." - ) - - self.tensors[pos], msg_tensor = contract_decompose( - "sam,mbp->aMp,sMb", - msg_tensor, - self.tensors[pos], - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # The site tensor is now in canonical form - self.canonical_form[pos] = DirMPS.RIGHT # type: ignore - - # Finally, contract the `msg_tensor` with the site tensor in `r_pos` and the - # `r_gate_tensor` from the decomposition of `gate_tensor` - self.tensors[r_pos] = cq.contract( - "sam,mbr,sRr->abR", - msg_tensor, - self.tensors[r_pos], - r_gate_tensor, - options=options, - optimize={"path": [(0, 2), (0, 1)]}, - ) - - # The site tensor is not in canonical form anymore - self.canonical_form[r_pos] = None - - ############################################################ - ### Setup SVD configuration depending on user's settings ### - ############################################################ - - if self._cfg.truncation_fidelity < 1: - # Apply SVD decomposition to truncate as much as possible before exceeding - # a `discarded_weight_cutoff` of `1 - self._cfg.truncation_fidelity`. - self._logger.debug( - f"Truncating to target fidelity={self._cfg.truncation_fidelity}" - ) - - # When there are multiple virtual bonds between the two MPS tensors where - # the gate is applied (i.e. non-adjacent qubits) we need to distributed the - # allowed truncation error between the different bonds. - # Our target is to assign a local truncation fidelity `f_i` to each bond - # `i` in the input lists so that the lower bound of the fidelity satisfies: - # - # real_fidelity > self.fidelity*prod(f_i) > self.fidelity*trunc_fidelity - # - # Let e_i = 1 - f_i, where we refer to `e_i` as the "truncation error at i". - # We can use that when 0 < e_i < 1, it holds that: - # - # prod(1 - e_i) > 1 - sum(e_i) - # - # Hence, as long as we satisfy - # - # 1 - sum(e_i) > truncation_fidelity - # - # the target inquality at the top will be satisfied for our chosen f_i. - # We achieve this by defining e_i = (1 - trunc_fid) / k, where k is the - # number of bonds between the two tensors. - distance = r_pos - l_pos - local_truncation_error = (1 - self._cfg.truncation_fidelity) / distance - self._logger.debug( - f"The are {distance} bond between the qubits. Each of these will " - f"be truncated to target fidelity={1 - local_truncation_error}" - ) - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - discarded_weight_cutoff=local_truncation_error, - partition="U", # Contract S directly into U (to the "left") - normalization="L2", # Sum of squares singular values must equal 1 - ) - - else: - # Apply SVD decomposition and truncate up to a `max_extent` (for the shared - # bond) of `self._cfg.chi`. - # If the user did not explicitly ask for truncation, `self._cfg.chi` will be - # set to a very large default number, so it's like no `max_extent` was set. - # Still, we remove any singular values below ``self._cfg.zero``. 
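As a quick numeric check of the error-budget argument above (with arbitrarily chosen numbers): splitting 1 - truncation_fidelity evenly over the k bonds keeps the product of the local fidelities at or above the requested bound.

import numpy as np

F_target = 0.99          # requested truncation fidelity for this two-qubit gate
k = 4                    # number of virtual bonds between the two qubits
e = (1 - F_target) / k   # local truncation error budgeted to each bond

local_fidelities = [1 - e] * k
print(np.prod(local_fidelities))                 # ~0.990037, at or above F_target
print(np.prod(local_fidelities) >= 1 - k * e)    # True: prod(1 - e_i) >= 1 - sum(e_i)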
- self._logger.debug(f"Truncating to (or below) chosen chi={self._cfg.chi}") - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - max_extent=self._cfg.chi, - partition="U", # Contract S directly into U (to the "left") - normalization="L2", # Sum of squares singular values must equal 1 - ) - - ############################################################ - ### Apply truncation to all bonds between the two qubits ### - ############################################################ - - # From right to left, so that we can use the current canonical form. - for pos in reversed(range(l_pos, r_pos)): - self.tensors[pos], S, self.tensors[pos + 1], info = contract_decompose( - "abl,bcr->abl,bcr", # Note: doesn't follow the glossary above. - self.tensors[pos], - self.tensors[pos + 1], - algorithm={"svd_method": svd_method, "qr_method": False}, - options=options, - optimize={"path": [(0, 1)]}, - return_info=True, - ) - assert S is None # Due to "partition" option in SVDMethod - - # Since we are contracting S to the "left" in `svd_method`, the site tensor - # at `pos+1` is canonicalised, whereas the site tensor at `pos` is the one - # where S has been contracted to and, hence, is not in canonical form - self.canonical_form[pos + 1] = DirMPS.LEFT # type: ignore - self.canonical_form[pos] = None - # Update fidelity lower bound - this_fidelity = 1.0 - info.svd_info.discarded_weight - self.fidelity *= this_fidelity - # Report to logger - self._logger.debug( - f"Truncation done between positions {pos} and {pos+1}. " - f"Truncation fidelity={this_fidelity}" - ) - self._logger.debug( - "Reduced virtual bond dimension from " - f"{info.svd_info.full_extent} to {info.svd_info.reduced_extent}." - ) - - if self._cfg.truncation_fidelity < 1: - # Sanity check: user's requested lower bound of fidelity satisfied - assert self.fidelity > orig_fidelity * self._cfg.truncation_fidelity - - # If requested, provide info about memory usage. - if self._logger.isEnabledFor(logging.INFO): - # If-statetement used so that we only call `get_byte_size` if needed. - self._logger.info(f"MPS size (MiB)={self.get_byte_size() / 2**20}") - self._logger.info(f"MPS fidelity={self.fidelity}") - - return self +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings +import logging + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore + from cuquantum.cutensornet.experimental import contract_decompose # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Qubit +from .mps import MPS, DirMPS + + +class MPSxGate(MPS): + """Implements a gate-by-gate contraction algorithm to calculate the output state + of a circuit as an ``MPS``. 
The algorithm is described in: + https://arxiv.org/abs/2002.07730 + """ + + def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPSxGate: + """Applies the 1-qubit unitary to the MPS. + + This does not increase the dimension of any bond. + + Args: + unitary: The unitary to be applied. + qubit: The qubit the unitary acts on. + + Returns: + ``self``, to allow for method chaining. + """ + position = self.qubit_position[qubit] + + # Glossary of bond IDs + # p -> physical bond of the MPS tensor + # v -> one of the virtual bonds of the MPS tensor + # V -> the other virtual bond of the MPS tensor + # o -> the output bond of the gate + + T_bonds = "vVp" + result_bonds = "vVo" + gate_bonds = "op" + + # Contract + new_tensor = cq.contract( + gate_bonds + "," + T_bonds + "->" + result_bonds, + unitary, + self.tensors[position], + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + + # Update ``self.tensors`` + self.tensors[position] = new_tensor + return self + + def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPSxGate: + """Applies the 2-qubit unitary to the MPS. + + The MPS is converted to canonical and truncation is applied if necessary. + + Args: + unitary: The unitary to be applied. + q0: The first qubit in the tuple |q0>|q1> the unitary acts on. + q1: The second qubit in the tuple |q0>|q1> the unitary acts on. + + Returns: + ``self``, to allow for method chaining. + """ + + # If qubits are not adjacent, use an alternative approach + if abs(self.qubit_position[q0] - self.qubit_position[q1]) != 1: + return self._apply_2q_unitary_nonadjacent(unitary, q0, q1) + # Otherwise, proceed as normal + + positions = [self.qubit_position[q0], self.qubit_position[q1]] + l_pos = min(positions) + r_pos = max(positions) + + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Always canonicalise. Even in the case of exact simulation (no truncation) + # canonicalisation may reduce the bond dimension (thanks to reduced QR). + self.canonicalise(l_pos, r_pos) + + # Reshape into a rank-4 tensor + gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) + + # Glossary of bond IDs + # l -> physical bond of the left tensor in the MPS + # r -> physical bond of the right tensor in the MPS + # L -> left bond of the outcome of the gate + # R -> right bond of the outcome of the gate + # S -> shared bond of the gate tensor's SVD + # a,b,c -> the virtual bonds of the tensors + + if l_pos == positions[0]: + gate_bonds = "LRlr" + else: # Implicit swap + gate_bonds = "RLrl" + + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U + ) + # Apply the SVD decomposition using the configuration defined above + U, S, V = tensor.decompose( + f"{gate_bonds}->SLl,SRr", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod + + # Contract + self._logger.debug("Contracting the two-qubit gate with its site tensors...") + T = cq.contract( + f"SLl,abl,SRr,bcr->acLR", + U, + self.tensors[l_pos], + V, + self.tensors[r_pos], + options=options, + optimize={"path": [(0, 1), (0, 1), (0, 1)]}, + ) + self._logger.debug(f"Intermediate tensor of size (MiB)={T.nbytes / 2**20}") + + if self._cfg.truncation_fidelity < 1: + # Apply SVD decomposition to truncate as much as possible before exceeding + # a `discarded_weight_cutoff` of `1 - self._cfg.truncation_fidelity`. 
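The discarded-weight bookkeeping described here can be mimicked on a plain list of singular values: keep the fewest values whose kept squared weight reaches the target fidelity, then multiply the running fidelity estimate by one minus the discarded weight. A NumPy sketch of that behaviour; truncate_to_fidelity and the sample values are illustrative, not the cuTensorNet implementation.

import numpy as np

def truncate_to_fidelity(singular_values, truncation_fidelity):
    """Keep the fewest singular values whose kept squared weight reaches the target."""
    w = np.sort(np.asarray(singular_values, dtype=float))[::-1] ** 2
    kept_weight = np.cumsum(w) / w.sum()     # fraction of weight kept after i+1 values
    keep = int(np.searchsorted(kept_weight, truncation_fidelity)) + 1
    discarded_weight = 1.0 - kept_weight[keep - 1]
    return keep, discarded_weight

fidelity = 1.0                               # running lower bound, as in this class
keep, dw = truncate_to_fidelity([0.9, 0.3, 0.2, 0.1, 0.05], truncation_fidelity=0.99)
fidelity *= 1.0 - dw
print(keep, round(dw, 5), fidelity)          # 4 values kept, discarded weight ~0.0026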
+ self._logger.debug( + f"Truncating to target fidelity={self._cfg.truncation_fidelity}" + ) + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + discarded_weight_cutoff=1 - self._cfg.truncation_fidelity, + partition="U", # Contract S directly into U (named L in our case) + normalization="L2", # Sum of squares singular values must equal 1 + ) + + else: + # Apply SVD decomposition and truncate up to a `max_extent` (for the shared + # bond) of `self._cfg.chi`. + # If user did not provide a value for `chi`, this is still given a + # default value that is so large that it causes no truncation at all. + # Nevertheless, we apply SVD so that singular values below `self._cfg.zero` + # are truncated. + self._logger.debug(f"Truncating to (or below) chosen chi={self._cfg.chi}") + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + max_extent=self._cfg.chi, + partition="U", # Contract S directly into U (named L in our case) + normalization="L2", # Sum of squares singular values must equal 1 + ) + + # Apply the SVD decomposition using the configuration defined above + L, S, R, svd_info = tensor.decompose( + "acLR->asL,scR", T, method=svd_method, options=options, return_info=True + ) + assert S is None # Due to "partition" option in SVDMethod + + # Update fidelity if there was some truncation + # + # discarded_weight is calculated within cuTensorNet as: + # sum([s**2 for s in S']) + # discarded_weight = 1 - ------------------------- + # sum([s**2 for s in S]) + # where S is the list of original singular values and S' is the set of + # singular values that remain after truncation (before normalisation). + # It can be shown that the fidelity ||^2 (for |phi> and |psi> + # unit vectors before and after truncation) is equal to 1 - disc_weight. + # + # We multiply the fidelity of the current step to the overall fidelity + # to keep track of a lower bound for the fidelity. + this_fidelity = 1.0 - svd_info.discarded_weight + self.fidelity *= this_fidelity + # Report to logger + self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}") + self._logger.debug( + "Reduced virtual bond dimension from " + f"{svd_info.full_extent} to {svd_info.reduced_extent}." + ) + + self.tensors[l_pos] = L + self.tensors[r_pos] = R + + # If requested, provide info about memory usage. + if self._logger.isEnabledFor(logging.INFO): + # If-statetement used so that we only call `get_byte_size` if needed. + self._logger.info(f"MPS size (MiB)={self.get_byte_size() / 2**20}") + self._logger.info(f"MPS fidelity={self.fidelity}") + + return self + + def _apply_2q_unitary_nonadjacent( + self, unitary: cp.ndarray, q0: Qubit, q1: Qubit + ) -> MPSxGate: + """Applies the 2-qubit unitary to the MPS between non-adjacent qubits. + + The MPS is converted to canonical and truncation is applied if necessary. + + Args: + unitary: The unitary to be applied. + q0: The first qubit in the tuple |q0>|q1> the unitary acts on. + q1: The second qubit in the tuple |q0>|q1> the unitary acts on. + + Returns: + ``self``, to allow for method chaining. + """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + positions = [self.qubit_position[q0], self.qubit_position[q1]] + l_pos = min(positions) + r_pos = max(positions) + + # Always canonicalise. Even in the case of exact simulation (no truncation) + # canonicalisation may reduce the bond dimension (thanks to reduced QR). 
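The canonicalisation step above boils down to a reduced QR on a site tensor, with the R factor absorbed into its neighbour; when the combined left and physical legs span fewer dimensions than the right virtual bond, the bond dimension drops for free. A NumPy sketch under one common convention; left_canonicalise_pair is an illustrative helper, not the method used in this module.

import numpy as np

def left_canonicalise_pair(site, right_site):
    """Make ``site`` (l, r, p) an isometry over (l, p) via reduced QR and
    absorb the R factor into ``right_site`` (r, c, p)."""
    l, r, p = site.shape
    q, rmat = np.linalg.qr(site.transpose(0, 2, 1).reshape(l * p, r))
    new_r = q.shape[1]                                        # min(l*p, r)
    new_site = q.reshape(l, p, new_r).transpose(0, 2, 1)      # (l, new_r, p)
    new_right = np.einsum("ar,rcp->acp", rmat, right_site)    # (new_r, c, p)
    return new_site, new_right

rng = np.random.default_rng(1)
A, B = rng.normal(size=(2, 8, 2)), rng.normal(size=(8, 2, 2))
A2, B2 = left_canonicalise_pair(A, B)
print(A2.shape)   # (2, 4, 2): the shared bond shrinks from 8 to 4
print(np.allclose(np.einsum("lrp,lsp->rs", A2, A2), np.eye(A2.shape[1])))    # True
print(np.allclose(np.einsum("lrp,rcq->lpcq", A, B),
                  np.einsum("lrp,rcq->lpcq", A2, B2)))                       # True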
+ self.canonicalise(l_pos, l_pos) + + # Reshape into a rank-4 tensor + gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) + + # Glossary of bond IDs + # p -> some physical bond of the MPS + # l -> physical bond of the left tensor in the MPS + # r -> physical bond of the right tensor in the MPS + # L -> left bond of the outcome of the gate + # R -> right bond of the outcome of the gate + # s -> shared bond of the gate tensor's SVD + # a,b -> virtual bonds of the MPS + # m,M -> virtual bonds connected to the "message tensor" + + if l_pos == positions[0]: + gate_bonds = "LRlr" + else: # Implicit swap + gate_bonds = "RLrl" + + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U (i.e. l_gate_tensor) + ) + # Apply the SVD decomposition using the configuration defined above + l_gate_tensor, S, r_gate_tensor = tensor.decompose( + f"{gate_bonds}->sLl,sRr", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod + + ################################# + ### Apply the gate to the MPS ### + ################################# + + orig_fidelity = self.fidelity + + # First, contract `l_gate_tensor` with the + # MPS site tensor on the left position. Then, decompose the new tensor so + # that a "message tensor" carrying the bond `s` is sent through the right + # virtual bond. We do these two steps in a single `contract_decompose` + self.tensors[l_pos], msg_tensor = contract_decompose( + "sLl,abl->amL,smb", + l_gate_tensor, + self.tensors[l_pos], + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The site tensor is now in canonical form (since S is contracted to the right) + self.canonical_form[l_pos] = DirMPS.RIGHT # type: ignore + + # Next, "push" the `msg_tensor` through all site tensors between `l_pos` + # and `r_pos`. Once again, this is just contract_decompose on each. + for pos in range(l_pos + 1, r_pos): + # Report to logger + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through site " + f"tensor ({self.tensors[pos].nbytes // 2**20} MiB) in position {pos}." + ) + + self.tensors[pos], msg_tensor = contract_decompose( + "sam,mbp->aMp,sMb", + msg_tensor, + self.tensors[pos], + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The site tensor is now in canonical form + self.canonical_form[pos] = DirMPS.RIGHT # type: ignore + + # Finally, contract the `msg_tensor` with the site tensor in `r_pos` and the + # `r_gate_tensor` from the decomposition of `gate_tensor` + self.tensors[r_pos] = cq.contract( + "sam,mbr,sRr->abR", + msg_tensor, + self.tensors[r_pos], + r_gate_tensor, + options=options, + optimize={"path": [(0, 2), (0, 1)]}, + ) + + # The site tensor is not in canonical form anymore + self.canonical_form[r_pos] = None + + ############################################################ + ### Setup SVD configuration depending on user's settings ### + ############################################################ + + if self._cfg.truncation_fidelity < 1: + # Apply SVD decomposition to truncate as much as possible before exceeding + # a `discarded_weight_cutoff` of `1 - self._cfg.truncation_fidelity`. 
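In the chi-capped mode, truncation amounts to an SVD of the two-site tensor that keeps at most chi singular values (dropping any below the zero cutoff) and folds S into the left factor. A NumPy approximation of that behaviour; truncate_bond and the shapes are illustrative, not the cuTensorNet call.

import numpy as np

def truncate_bond(theta, chi, zero=1e-16):
    """Split ``theta`` (a, c, L, R) into (a, s, L) and (s, c, R) with s <= chi."""
    a, c, L, R = theta.shape
    mat = theta.transpose(0, 2, 1, 3).reshape(a * L, c * R)
    u, sing, vh = np.linalg.svd(mat, full_matrices=False)
    keep = min(chi, int(np.sum(sing > zero)))      # cap the bond, drop ~zero values
    discarded_weight = 1.0 - np.sum(sing[:keep] ** 2) / np.sum(sing ** 2)
    left = (u[:, :keep] * sing[:keep]).reshape(a, L, keep).transpose(0, 2, 1)
    right = vh[:keep].reshape(keep, c, R)
    return left, right, discarded_weight

rng = np.random.default_rng(2)
theta = rng.normal(size=(4, 4, 2, 2))
left, right, dw = truncate_bond(theta, chi=3)
print(left.shape, right.shape)   # (4, 3, 2) (3, 4, 2)
print(0.0 <= dw < 1.0)           # the discarded weight feeds the fidelity lower bound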
+ self._logger.debug( + f"Truncating to target fidelity={self._cfg.truncation_fidelity}" + ) + + # When there are multiple virtual bonds between the two MPS tensors where + # the gate is applied (i.e. non-adjacent qubits) we need to distributed the + # allowed truncation error between the different bonds. + # Our target is to assign a local truncation fidelity `f_i` to each bond + # `i` in the input lists so that the lower bound of the fidelity satisfies: + # + # real_fidelity > self.fidelity*prod(f_i) > self.fidelity*trunc_fidelity + # + # Let e_i = 1 - f_i, where we refer to `e_i` as the "truncation error at i". + # We can use that when 0 < e_i < 1, it holds that: + # + # prod(1 - e_i) > 1 - sum(e_i) + # + # Hence, as long as we satisfy + # + # 1 - sum(e_i) > truncation_fidelity + # + # the target inquality at the top will be satisfied for our chosen f_i. + # We achieve this by defining e_i = (1 - trunc_fid) / k, where k is the + # number of bonds between the two tensors. + distance = r_pos - l_pos + local_truncation_error = (1 - self._cfg.truncation_fidelity) / distance + self._logger.debug( + f"The are {distance} bond between the qubits. Each of these will " + f"be truncated to target fidelity={1 - local_truncation_error}" + ) + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + discarded_weight_cutoff=local_truncation_error, + partition="U", # Contract S directly into U (to the "left") + normalization="L2", # Sum of squares singular values must equal 1 + ) + + else: + # Apply SVD decomposition and truncate up to a `max_extent` (for the shared + # bond) of `self._cfg.chi`. + # If the user did not explicitly ask for truncation, `self._cfg.chi` will be + # set to a very large default number, so it's like no `max_extent` was set. + # Still, we remove any singular values below ``self._cfg.zero``. + self._logger.debug(f"Truncating to (or below) chosen chi={self._cfg.chi}") + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + max_extent=self._cfg.chi, + partition="U", # Contract S directly into U (to the "left") + normalization="L2", # Sum of squares singular values must equal 1 + ) + + ############################################################ + ### Apply truncation to all bonds between the two qubits ### + ############################################################ + + # From right to left, so that we can use the current canonical form. + for pos in reversed(range(l_pos, r_pos)): + self.tensors[pos], S, self.tensors[pos + 1], info = contract_decompose( + "abl,bcr->abl,bcr", # Note: doesn't follow the glossary above. + self.tensors[pos], + self.tensors[pos + 1], + algorithm={"svd_method": svd_method, "qr_method": False}, + options=options, + optimize={"path": [(0, 1)]}, + return_info=True, + ) + assert S is None # Due to "partition" option in SVDMethod + + # Since we are contracting S to the "left" in `svd_method`, the site tensor + # at `pos+1` is canonicalised, whereas the site tensor at `pos` is the one + # where S has been contracted to and, hence, is not in canonical form + self.canonical_form[pos + 1] = DirMPS.LEFT # type: ignore + self.canonical_form[pos] = None + # Update fidelity lower bound + this_fidelity = 1.0 - info.svd_info.discarded_weight + self.fidelity *= this_fidelity + # Report to logger + self._logger.debug( + f"Truncation done between positions {pos} and {pos+1}. " + f"Truncation fidelity={this_fidelity}" + ) + self._logger.debug( + "Reduced virtual bond dimension from " + f"{info.svd_info.full_extent} to {info.svd_info.reduced_extent}." 
+ ) + + if self._cfg.truncation_fidelity < 1: + # Sanity check: user's requested lower bound of fidelity satisfied + assert self.fidelity > orig_fidelity * self._cfg.truncation_fidelity + + # If requested, provide info about memory usage. + if self._logger.isEnabledFor(logging.INFO): + # If-statetement used so that we only call `get_byte_size` if needed. + self._logger.info(f"MPS size (MiB)={self.get_byte_size() / 2**20}") + self._logger.info(f"MPS fidelity={self.fidelity}") + + return self diff --git a/pytket/extensions/cutensornet/structured_state/mps_mpo.py b/pytket/extensions/cutensornet/structured_state/mps_mpo.py index 7514040d..3908b085 100644 --- a/pytket/extensions/cutensornet/structured_state/mps_mpo.py +++ b/pytket/extensions/cutensornet/structured_state/mps_mpo.py @@ -1,582 +1,582 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations # type: ignore -import warnings - -from typing import Optional, Union - -import numpy as np # type: ignore - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum as cq # type: ignore - from cuquantum.cutensornet import tensor # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - -from pytket.circuit import Qubit -from pytket.extensions.cutensornet import CuTensorNetHandle -from .general import Tensor, Config -from .mps import ( - DirMPS, - MPS, -) -from .mps_gate import MPSxGate - - -class MPSxMPO(MPS): - """Implements a batched--gate contraction algorithm (DMRG-like) to calculate - the output state of a circuit as an ``MPS``. The algorithm is described - in: https://arxiv.org/abs/2207.05612. - """ - - def __init__( - self, - libhandle: CuTensorNetHandle, - qubits: list[Qubit], - config: Config, - ): - """Initialise an MPS on the computational state ``|0>``. - - Note: - A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` - statement. The device where the MPS is stored will match the one specified - by the library handle. - - Args: - libhandle: The cuTensorNet library handle that will be used to carry out - tensor operations on the MPS. - qubits: The list of qubits in the circuit to be simulated. - config: The object describing the configuration for simulation. - """ - super().__init__(libhandle, qubits, config) - - # Initialise the MPO data structure. This will keep a list of the gates - # batched for application to the MPS; all of them will be applied at - # once when deemed appropriate or when calling ._flush(), removing them - # from here. The gates are kept in a list of lists. - # - # One list per MPS position, containing all the tensors of the gates - # acting on the corresponding position. These lists are originally empty. - # The last element of each list corresponds to the last gate applied. 
- # - # Each of the tensors will have four bonds ordered as follows: - # [input, left, right, output] - self._mpo: list[list[Tensor]] = [list() for _ in qubits] - # This ``_bond_ids`` store global bond IDs of MPO tensors, used by ``_flush()`` - self._bond_ids: list[list[tuple[int, int, int, int]]] = [list() for _ in qubits] - - # Initialise the MPS that we will use as first approximation of the - # variational algorithm. - self._aux_mps = MPSxGate(libhandle, qubits, config) - - self._mpo_bond_counter = 0 - - def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: - """Set the library handle used by this ``MPS`` object. Multiple objects - may use the same library handle. - - Args: - libhandle: The new cuTensorNet library handle. - - Raises: - RuntimeError: If the device (GPU) where ``libhandle`` was initialised - does not match the one where the tensors of the MPS are stored. - """ - super().update_libhandle(libhandle) - self._aux_mps.update_libhandle(libhandle) - - def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPSxMPO: - """Applies the 1-qubit unitary to the MPS. - - This does not increase the dimension of any bond. - - Args: - unitary: The unitary to be applied. - qubit: The qubit the unitary acts on. - - Returns: - ``self``, to allow for method chaining. - """ - position = self.qubit_position[qubit] - - # Apply the gate to the MPS with eager approximation - self._aux_mps._apply_1q_unitary(unitary, qubit) - - # Glossary of bond IDs - # i -> input to the MPO tensor - # o -> output of the MPO tensor - # l -> left virtual bond of the MPO tensor - # r -> right virtual bond of the MPO tensor - # g -> output bond of the gate tensor - - # Identify the tensor to contract the gate with - if self._mpo[position]: # Not empty - last_tensor = self._mpo[position][-1] - last_bonds = "ilro" - new_bonds = "ilrg" - else: # Use the MPS tensor - last_tensor = self.tensors[position] - last_bonds = "lro" - new_bonds = "lrg" - - # Contract - new_tensor = cq.contract( - "go," + last_bonds + "->" + new_bonds, - unitary, - last_tensor, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - - # Update the tensor - if self._mpo[position]: # Not empty - self._mpo[position][-1] = new_tensor - else: # Update the MPS tensor - self.tensors[position] = new_tensor - - return self - - def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPSxMPO: - """Applies the 2-qubit unitary to the MPS. - - The MPS is converted to canonical and truncation is applied if necessary. - - Args: - unitary: The unitary to be applied. - q0: The first qubit in the tuple |q0>|q1> the unitary acts on. - q1: The second qubit in the tuple |q0>|q1> the unitary acts on. - - Returns: - ``self``, to allow for method chaining. 
- """ - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - positions = [self.qubit_position[q0], self.qubit_position[q1]] - l_pos = min(positions) - r_pos = max(positions) - - # Check whether the MPO is large enough to flush it - if any(len(self._mpo[pos]) >= self._cfg.k for pos in [l_pos, r_pos]): - self._flush() - - # Apply the gate to the MPS with eager approximation - self._aux_mps._apply_2q_unitary(unitary, q0, q1) - - # Reshape into a rank-4 tensor - gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) - - # Glossary of bond IDs - # l -> gate's left input bond - # r -> gate's right input bond - # L -> gate's left output bond - # R -> gate's right output bond - # s -> virtual bond after QR decomposition - - # Assign the bond IDs for the gate - if l_pos == positions[0]: - gate_bonds = "LRlr" - else: # Implicit swap - gate_bonds = "RLrl" - - # Apply SVD on the gate tensor to remove any zero singular values ASAP - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - partition="U", # Contract S directly into U (named L in this case) - ) - # Apply the SVD decomposition using the configuration defined above - L, S, R = tensor.decompose( - f"{gate_bonds}->lsL,rsR", gate_tensor, method=svd_method, options=options - ) - assert S is None # Due to "partition" option in SVDMethod - dim = L.shape[1] # Dimension of the shared bond `s` - - # Add dummy bonds of dimension 1 to L and R so that they have the right shape - L = cp.reshape(L, (2, 1, dim, 2)) - R = cp.reshape(R, (2, dim, 1, 2)) - - # Store L and R - self._mpo[l_pos].append(L) - self._mpo[r_pos].append(R) - # If `l_pos` and `r_pos` are distant, add identity tensor on all - # intermediate positions - if r_pos - l_pos != 1: - # Identity between input/output at physical bonds - p_identity = cp.eye(2, dtype=self._cfg._complex_t) - # Identity between left/right virtual bonds - v_identity = cp.eye(dim, dtype=self._cfg._complex_t) - # Create a "crossing" tensor by applying tensor product of these - crossing = cq.contract( - "io,lr->ilro", - p_identity, - v_identity, - options=options, - optimize={"path": [(0, 1)]}, - ) - # Store the intermediate tensors - for pos in range(l_pos + 1, r_pos): - self._mpo[pos].append(crossing.copy()) - - # And assign their global bonds - shared_bond_id = self._new_bond_id() - self._bond_ids[l_pos].append( - ( - self._get_physical_bond(l_pos), - self._new_bond_id(), - shared_bond_id, - self._new_bond_id(), - ) - ) - for pos in range(l_pos + 1, r_pos): - next_shared_bond_id = self._new_bond_id() - self._bond_ids[pos].append( - ( - self._get_physical_bond(pos), - shared_bond_id, - next_shared_bond_id, - self._new_bond_id(), - ) - ) - shared_bond_id = next_shared_bond_id - self._bond_ids[r_pos].append( - ( - self._get_physical_bond(r_pos), - shared_bond_id, - self._new_bond_id(), - self._new_bond_id(), - ) - ) - - return self - - def get_physical_dimension(self, position: int) -> int: - """Returns the dimension of the physical bond at ``position``. - - Args: - position: A position in the MPS. - - Returns: - The dimension of the physical bond. - - Raises: - RuntimeError: If ``position`` is out of bounds. 
- """ - if position < 0 or position >= len(self): - raise RuntimeError(f"Position {position} is out of bounds.") - - # Identify the tensor last tensor in the MPO - if self._mpo[position]: # Not empty - last_tensor = self._mpo[position][-1] - else: # Use the MPS tensor - last_tensor = self.tensors[position] - - # By construction, the open bond is the last one - return int(last_tensor.shape[-1]) - - def _get_physical_bond(self, position: int) -> int: - """Returns the unique identifier of the physical bond at ``position``. - - Args - position: A position in the MPS. - - Returns: - The identifier of the physical bond. - - Raises: - RuntimeError: If ``position`` is out of bounds. - """ - if position < 0 or position >= len(self): - raise RuntimeError(f"Position {position} is out of bounds.") - - if self._bond_ids[position]: - return self._bond_ids[position][-1][-1] - else: - return self._new_bond_id() - - def _get_column_bonds(self, position: int, direction: DirMPS) -> list[int]: - """Returns the unique identifier of all the left (right) virtual bonds of - MPO tensors at ``position`` if ``direction`` is ``LEFT`` (``RIGHT``). - - Notes: - It does not return the corresponding bonds of the MPS tensors. - - Raises: - RuntimeError: If ``position`` is out of bounds. - ValueError: If ``direction`` is not a value in ``DirMPS``. - """ - if position < 0 or position >= len(self): - raise RuntimeError(f"Position {position} is out of bounds.") - - if direction == DirMPS.LEFT: - index = 1 # By convention, left bond at index 1 - elif direction == DirMPS.RIGHT: - index = 2 # By convention, right bond at index 2 - else: - raise ValueError("Argument form must be a value in DirMPS.") - - return [b_ids[index] for b_ids in self._bond_ids[position]] - - def _flush(self) -> None: - """Applies all batched operations within ``self._mpo`` to the MPS. - - The method applies variational optimisation of the MPS until it - converges. Based on https://arxiv.org/abs/2207.05612. - """ - self._logger.info("Applying variational optimisation.") - self._logger.info(f"Fidelity before optimisation={self._aux_mps.fidelity}") - - l_cached_tensors: list[Tensor] = [] - r_cached_tensors: list[Tensor] = [] - - def update_sweep_cache(pos: int, direction: DirMPS) -> None: - """Given a position in the MPS and a sweeping direction (see - ``DirMPS``), calculate the tensor of the partial contraction - of all MPS-MPO-vMPS* columns from ``pos`` towards ``direction``. - Update the cache accordingly. Applies canonicalisation on the vMPS - tensor before contracting. 
- """ - self._logger.debug("Updating the sweep cache...") - - # Canonicalise the tensor at ``pos`` - if direction == DirMPS.LEFT: - self._aux_mps.canonicalise_tensor(pos, form=DirMPS.RIGHT) - elif direction == DirMPS.RIGHT: - self._aux_mps.canonicalise_tensor(pos, form=DirMPS.LEFT) - - # Glossary of bond IDs - # p -> the physical bond of the MPS tensor - # l,r -> the virtual bonds of the MPS tensor - # L,R -> the virtual bonds of the variational MPS tensor - # P -> the physical bond of the variational MPS tensor - # MPO tensors will use ``self._bond_ids`` - - # Get the interleaved representation - interleaved_rep = [ - # The tensor of the MPS - self.tensors[pos], - ["l", "r", "p"], - # The (conjugated) tensor of the variational MPS - self._aux_mps.tensors[pos].conj(), - ["L", "R", "P" if self._mpo[pos] else "p"], - ] - for i, mpo_tensor in enumerate(self._mpo[pos]): - # The MPO tensor at this position - interleaved_rep.append(mpo_tensor) - - mpo_bonds: list[Union[int, str]] = list(self._bond_ids[pos][i]) - if i == 0: - # The input bond of the first MPO tensor must connect to the - # physical bond of the correspondong ``self.tensors`` tensor - mpo_bonds[0] = "p" - if i == len(self._mpo[pos]) - 1: - # The output bond of the last MPO tensor must connect to the - # physical bond of the corresponding ``self._aux_mps`` tensor - mpo_bonds[-1] = "P" - interleaved_rep.append(mpo_bonds) - - # Also contract the previous (cached) tensor during the sweep - if direction == DirMPS.LEFT: - if pos != len(self) - 1: # Otherwise, there is nothing cached yet - interleaved_rep.append(r_cached_tensors[-1]) - r_cached_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) - interleaved_rep.append(["r", "R"] + r_cached_bonds) - elif direction == DirMPS.RIGHT: - if pos != 0: # Otherwise, there is nothing cached yet - interleaved_rep.append(l_cached_tensors[-1]) - l_cached_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) - interleaved_rep.append(["l", "L"] + l_cached_bonds) - - # Figure out the ID of the bonds of the contracted tensor - if direction == DirMPS.LEFT: - # Take the left bond of each of the MPO tensors - result_bonds = self._get_column_bonds(pos, DirMPS.LEFT) - # Take the left virtual bond of both of the MPS - interleaved_rep.append(["l", "L"] + result_bonds) - elif direction == DirMPS.RIGHT: - # Take the right bond of each of the MPO tensors - result_bonds = self._get_column_bonds(pos, DirMPS.RIGHT) - # Take the right virtual bond of both of the MPS - interleaved_rep.append(["r", "R"] + result_bonds) - - # Contract and store - T = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 0}, - ) - if direction == DirMPS.LEFT: - r_cached_tensors.append(T) - elif direction == DirMPS.RIGHT: - l_cached_tensors.append(T) - - self._logger.debug("Completed update of the sweep cache.") - - def update_variational_tensor( - pos: int, left_tensor: Optional[Tensor], right_tensor: Optional[Tensor] - ) -> float: - """Update the tensor at ``pos`` of the variational MPS using ``left_tensor`` - (and ``right_tensor``) which is meant to contain the contraction of all - the left (and right) columns of the MPS-MPO-vMPS* network from ``pos``. - Contract these with the MPS-MPO column at ``pos``. - Return the current fidelity of this sweep. 
- """ - self._logger.debug(f"Optimising tensor at position={pos}") - - interleaved_rep = [ - # The tensor of the MPS - self.tensors[pos], - ["l", "r", "p"], - ] - result_bonds = ["l", "r", "p"] - - # The MPO tensors at position ``pos`` - for i, mpo_tensor in enumerate(self._mpo[pos]): - # The MPO tensor at this position - interleaved_rep.append(mpo_tensor) - - mpo_bonds: list[Union[int, str]] = list(self._bond_ids[pos][i]) - if i == 0: - # The input bond of the first MPO tensor must connect to the - # physical bond of the correspondong ``self.tensors`` tensor - mpo_bonds[0] = "p" - if i == len(self._mpo[pos]) - 1: - # The output bond of the last MPO tensor corresponds to the - # physical bond of the corresponding ``self._aux_mps`` tensor - mpo_bonds[-1] = "P" - result_bonds[-1] = "P" - interleaved_rep.append(mpo_bonds) - - if left_tensor is not None: - interleaved_rep.append(left_tensor) - left_tensor_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) - interleaved_rep.append(["l", "L"] + left_tensor_bonds) - result_bonds[0] = "L" - if right_tensor is not None: - interleaved_rep.append(right_tensor) - right_tensor_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) - interleaved_rep.append(["r", "R"] + right_tensor_bonds) - result_bonds[1] = "R" - - # Append the bond IDs of the resulting tensor - interleaved_rep.append(result_bonds) - - # Contract and store tensor - F = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 0}, - ) - - # Get the fidelity - optim_fidelity = complex( - cq.contract( - "LRP,LRP->", - F.conj(), - F, - options={ - "handle": self._lib.handle, - "device_id": self._lib.device_id, - }, - optimize={"path": [(0, 1)]}, - ) - ) - assert np.isclose(optim_fidelity.imag, 0.0, atol=self._cfg._atol) - optim_fidelity = float(optim_fidelity.real) - - # Normalise F and update the variational MPS - self._aux_mps.tensors[pos] = F / np.sqrt(optim_fidelity) - - return optim_fidelity - - ################################## - # Variational sweeping algorithm # - ################################## - - # Begin by doing a sweep towards the left that does not update - # the variational tensors, but simply loads up the ``r_cached_tensors`` - for pos in reversed(range(1, len(self))): - update_sweep_cache(pos, direction=DirMPS.LEFT) - - prev_fidelity = -1.0 # Dummy value - sweep_fidelity = 0.0 # Dummy value - - # Repeat sweeps until the fidelity converges - sweep_direction = DirMPS.RIGHT - while not np.isclose(prev_fidelity, sweep_fidelity, atol=self._cfg.optim_delta): - self._logger.info(f"Doing another optimisation sweep...") - prev_fidelity = sweep_fidelity - - if sweep_direction == DirMPS.RIGHT: - sweep_fidelity = update_variational_tensor( - pos=0, left_tensor=None, right_tensor=r_cached_tensors.pop() - ) - update_sweep_cache(pos=0, direction=DirMPS.RIGHT) - - for pos in range(1, len(self) - 1): - sweep_fidelity = update_variational_tensor( - pos=pos, - left_tensor=l_cached_tensors[-1], - right_tensor=r_cached_tensors.pop(), - ) - update_sweep_cache(pos, direction=DirMPS.RIGHT) - # The last variational tensor is not updated; - # it'll be the first in the next sweep - - sweep_direction = DirMPS.LEFT - - elif sweep_direction == DirMPS.LEFT: - sweep_fidelity = update_variational_tensor( - pos=len(self) - 1, - left_tensor=l_cached_tensors.pop(), - right_tensor=None, - ) - update_sweep_cache(pos=len(self) - 1, direction=DirMPS.LEFT) - - for pos in reversed(range(1, len(self) - 1)): - sweep_fidelity = 
update_variational_tensor( - pos=pos, - left_tensor=l_cached_tensors.pop(), - right_tensor=r_cached_tensors[-1], - ) - update_sweep_cache(pos, direction=DirMPS.LEFT) - # The last variational tensor is not updated; - # it'll be the first in the next sweep - - sweep_direction = DirMPS.RIGHT - - self._logger.info( - "Optimisation sweep completed. " - f"Current fidelity={self.fidelity*sweep_fidelity}" - ) - - # Clear out the MPO - self._mpo = [list() for _ in range(len(self))] - self._bond_ids = [list() for _ in range(len(self))] - self._mpo_bond_counter = 0 - - # Update the MPS tensors - self.tensors = [t.copy() for t in self._aux_mps.tensors] - - # Update the fidelity estimate - self.fidelity *= sweep_fidelity - self._aux_mps.fidelity = self.fidelity - - self._logger.info(f"Final fidelity after optimisation={self.fidelity}") - - def _new_bond_id(self) -> int: - self._mpo_bond_counter += 1 - return self._mpo_bond_counter +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings + +from typing import Optional, Union + +import numpy as np # type: ignore + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Qubit +from pytket.extensions.cutensornet import CuTensorNetHandle +from .general import Tensor, Config +from .mps import ( + DirMPS, + MPS, +) +from .mps_gate import MPSxGate + + +class MPSxMPO(MPS): + """Implements a batched--gate contraction algorithm (DMRG-like) to calculate + the output state of a circuit as an ``MPS``. The algorithm is described + in: https://arxiv.org/abs/2207.05612. + """ + + def __init__( + self, + libhandle: CuTensorNetHandle, + qubits: list[Qubit], + config: Config, + ): + """Initialise an MPS on the computational state ``|0>``. + + Note: + A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` + statement. The device where the MPS is stored will match the one specified + by the library handle. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations on the MPS. + qubits: The list of qubits in the circuit to be simulated. + config: The object describing the configuration for simulation. + """ + super().__init__(libhandle, qubits, config) + + # Initialise the MPO data structure. This will keep a list of the gates + # batched for application to the MPS; all of them will be applied at + # once when deemed appropriate or when calling ._flush(), removing them + # from here. The gates are kept in a list of lists. + # + # One list per MPS position, containing all the tensors of the gates + # acting on the corresponding position. These lists are originally empty. 
+ # The last element of each list corresponds to the last gate applied. + # + # Each of the tensors will have four bonds ordered as follows: + # [input, left, right, output] + self._mpo: list[list[Tensor]] = [list() for _ in qubits] + # This ``_bond_ids`` store global bond IDs of MPO tensors, used by ``_flush()`` + self._bond_ids: list[list[tuple[int, int, int, int]]] = [list() for _ in qubits] + + # Initialise the MPS that we will use as first approximation of the + # variational algorithm. + self._aux_mps = MPSxGate(libhandle, qubits, config) + + self._mpo_bond_counter = 0 + + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Set the library handle used by this ``MPS`` object. Multiple objects + may use the same library handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of the MPS are stored. + """ + super().update_libhandle(libhandle) + self._aux_mps.update_libhandle(libhandle) + + def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> MPSxMPO: + """Applies the 1-qubit unitary to the MPS. + + This does not increase the dimension of any bond. + + Args: + unitary: The unitary to be applied. + qubit: The qubit the unitary acts on. + + Returns: + ``self``, to allow for method chaining. + """ + position = self.qubit_position[qubit] + + # Apply the gate to the MPS with eager approximation + self._aux_mps._apply_1q_unitary(unitary, qubit) + + # Glossary of bond IDs + # i -> input to the MPO tensor + # o -> output of the MPO tensor + # l -> left virtual bond of the MPO tensor + # r -> right virtual bond of the MPO tensor + # g -> output bond of the gate tensor + + # Identify the tensor to contract the gate with + if self._mpo[position]: # Not empty + last_tensor = self._mpo[position][-1] + last_bonds = "ilro" + new_bonds = "ilrg" + else: # Use the MPS tensor + last_tensor = self.tensors[position] + last_bonds = "lro" + new_bonds = "lrg" + + # Contract + new_tensor = cq.contract( + "go," + last_bonds + "->" + new_bonds, + unitary, + last_tensor, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + + # Update the tensor + if self._mpo[position]: # Not empty + self._mpo[position][-1] = new_tensor + else: # Update the MPS tensor + self.tensors[position] = new_tensor + + return self + + def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> MPSxMPO: + """Applies the 2-qubit unitary to the MPS. + + The MPS is converted to canonical and truncation is applied if necessary. + + Args: + unitary: The unitary to be applied. + q0: The first qubit in the tuple |q0>|q1> the unitary acts on. + q1: The second qubit in the tuple |q0>|q1> the unitary acts on. + + Returns: + ``self``, to allow for method chaining. 
+ """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + positions = [self.qubit_position[q0], self.qubit_position[q1]] + l_pos = min(positions) + r_pos = max(positions) + + # Check whether the MPO is large enough to flush it + if any(len(self._mpo[pos]) >= self._cfg.k for pos in [l_pos, r_pos]): + self._flush() + + # Apply the gate to the MPS with eager approximation + self._aux_mps._apply_2q_unitary(unitary, q0, q1) + + # Reshape into a rank-4 tensor + gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) + + # Glossary of bond IDs + # l -> gate's left input bond + # r -> gate's right input bond + # L -> gate's left output bond + # R -> gate's right output bond + # s -> virtual bond after QR decomposition + + # Assign the bond IDs for the gate + if l_pos == positions[0]: + gate_bonds = "LRlr" + else: # Implicit swap + gate_bonds = "RLrl" + + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U (named L in this case) + ) + # Apply the SVD decomposition using the configuration defined above + L, S, R = tensor.decompose( + f"{gate_bonds}->lsL,rsR", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod + dim = L.shape[1] # Dimension of the shared bond `s` + + # Add dummy bonds of dimension 1 to L and R so that they have the right shape + L = cp.reshape(L, (2, 1, dim, 2)) + R = cp.reshape(R, (2, dim, 1, 2)) + + # Store L and R + self._mpo[l_pos].append(L) + self._mpo[r_pos].append(R) + # If `l_pos` and `r_pos` are distant, add identity tensor on all + # intermediate positions + if r_pos - l_pos != 1: + # Identity between input/output at physical bonds + p_identity = cp.eye(2, dtype=self._cfg._complex_t) + # Identity between left/right virtual bonds + v_identity = cp.eye(dim, dtype=self._cfg._complex_t) + # Create a "crossing" tensor by applying tensor product of these + crossing = cq.contract( + "io,lr->ilro", + p_identity, + v_identity, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Store the intermediate tensors + for pos in range(l_pos + 1, r_pos): + self._mpo[pos].append(crossing.copy()) + + # And assign their global bonds + shared_bond_id = self._new_bond_id() + self._bond_ids[l_pos].append( + ( + self._get_physical_bond(l_pos), + self._new_bond_id(), + shared_bond_id, + self._new_bond_id(), + ) + ) + for pos in range(l_pos + 1, r_pos): + next_shared_bond_id = self._new_bond_id() + self._bond_ids[pos].append( + ( + self._get_physical_bond(pos), + shared_bond_id, + next_shared_bond_id, + self._new_bond_id(), + ) + ) + shared_bond_id = next_shared_bond_id + self._bond_ids[r_pos].append( + ( + self._get_physical_bond(r_pos), + shared_bond_id, + self._new_bond_id(), + self._new_bond_id(), + ) + ) + + return self + + def get_physical_dimension(self, position: int) -> int: + """Returns the dimension of the physical bond at ``position``. + + Args: + position: A position in the MPS. + + Returns: + The dimension of the physical bond. + + Raises: + RuntimeError: If ``position`` is out of bounds. 
+ """ + if position < 0 or position >= len(self): + raise RuntimeError(f"Position {position} is out of bounds.") + + # Identify the tensor last tensor in the MPO + if self._mpo[position]: # Not empty + last_tensor = self._mpo[position][-1] + else: # Use the MPS tensor + last_tensor = self.tensors[position] + + # By construction, the open bond is the last one + return int(last_tensor.shape[-1]) + + def _get_physical_bond(self, position: int) -> int: + """Returns the unique identifier of the physical bond at ``position``. + + Args + position: A position in the MPS. + + Returns: + The identifier of the physical bond. + + Raises: + RuntimeError: If ``position`` is out of bounds. + """ + if position < 0 or position >= len(self): + raise RuntimeError(f"Position {position} is out of bounds.") + + if self._bond_ids[position]: + return self._bond_ids[position][-1][-1] + else: + return self._new_bond_id() + + def _get_column_bonds(self, position: int, direction: DirMPS) -> list[int]: + """Returns the unique identifier of all the left (right) virtual bonds of + MPO tensors at ``position`` if ``direction`` is ``LEFT`` (``RIGHT``). + + Notes: + It does not return the corresponding bonds of the MPS tensors. + + Raises: + RuntimeError: If ``position`` is out of bounds. + ValueError: If ``direction`` is not a value in ``DirMPS``. + """ + if position < 0 or position >= len(self): + raise RuntimeError(f"Position {position} is out of bounds.") + + if direction == DirMPS.LEFT: + index = 1 # By convention, left bond at index 1 + elif direction == DirMPS.RIGHT: + index = 2 # By convention, right bond at index 2 + else: + raise ValueError("Argument form must be a value in DirMPS.") + + return [b_ids[index] for b_ids in self._bond_ids[position]] + + def _flush(self) -> None: + """Applies all batched operations within ``self._mpo`` to the MPS. + + The method applies variational optimisation of the MPS until it + converges. Based on https://arxiv.org/abs/2207.05612. + """ + self._logger.info("Applying variational optimisation.") + self._logger.info(f"Fidelity before optimisation={self._aux_mps.fidelity}") + + l_cached_tensors: list[Tensor] = [] + r_cached_tensors: list[Tensor] = [] + + def update_sweep_cache(pos: int, direction: DirMPS) -> None: + """Given a position in the MPS and a sweeping direction (see + ``DirMPS``), calculate the tensor of the partial contraction + of all MPS-MPO-vMPS* columns from ``pos`` towards ``direction``. + Update the cache accordingly. Applies canonicalisation on the vMPS + tensor before contracting. 
+ """ + self._logger.debug("Updating the sweep cache...") + + # Canonicalise the tensor at ``pos`` + if direction == DirMPS.LEFT: + self._aux_mps.canonicalise_tensor(pos, form=DirMPS.RIGHT) + elif direction == DirMPS.RIGHT: + self._aux_mps.canonicalise_tensor(pos, form=DirMPS.LEFT) + + # Glossary of bond IDs + # p -> the physical bond of the MPS tensor + # l,r -> the virtual bonds of the MPS tensor + # L,R -> the virtual bonds of the variational MPS tensor + # P -> the physical bond of the variational MPS tensor + # MPO tensors will use ``self._bond_ids`` + + # Get the interleaved representation + interleaved_rep = [ + # The tensor of the MPS + self.tensors[pos], + ["l", "r", "p"], + # The (conjugated) tensor of the variational MPS + self._aux_mps.tensors[pos].conj(), + ["L", "R", "P" if self._mpo[pos] else "p"], + ] + for i, mpo_tensor in enumerate(self._mpo[pos]): + # The MPO tensor at this position + interleaved_rep.append(mpo_tensor) + + mpo_bonds: list[Union[int, str]] = list(self._bond_ids[pos][i]) + if i == 0: + # The input bond of the first MPO tensor must connect to the + # physical bond of the correspondong ``self.tensors`` tensor + mpo_bonds[0] = "p" + if i == len(self._mpo[pos]) - 1: + # The output bond of the last MPO tensor must connect to the + # physical bond of the corresponding ``self._aux_mps`` tensor + mpo_bonds[-1] = "P" + interleaved_rep.append(mpo_bonds) + + # Also contract the previous (cached) tensor during the sweep + if direction == DirMPS.LEFT: + if pos != len(self) - 1: # Otherwise, there is nothing cached yet + interleaved_rep.append(r_cached_tensors[-1]) + r_cached_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) + interleaved_rep.append(["r", "R"] + r_cached_bonds) + elif direction == DirMPS.RIGHT: + if pos != 0: # Otherwise, there is nothing cached yet + interleaved_rep.append(l_cached_tensors[-1]) + l_cached_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) + interleaved_rep.append(["l", "L"] + l_cached_bonds) + + # Figure out the ID of the bonds of the contracted tensor + if direction == DirMPS.LEFT: + # Take the left bond of each of the MPO tensors + result_bonds = self._get_column_bonds(pos, DirMPS.LEFT) + # Take the left virtual bond of both of the MPS + interleaved_rep.append(["l", "L"] + result_bonds) + elif direction == DirMPS.RIGHT: + # Take the right bond of each of the MPO tensors + result_bonds = self._get_column_bonds(pos, DirMPS.RIGHT) + # Take the right virtual bond of both of the MPS + interleaved_rep.append(["r", "R"] + result_bonds) + + # Contract and store + T = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, + ) + if direction == DirMPS.LEFT: + r_cached_tensors.append(T) + elif direction == DirMPS.RIGHT: + l_cached_tensors.append(T) + + self._logger.debug("Completed update of the sweep cache.") + + def update_variational_tensor( + pos: int, left_tensor: Optional[Tensor], right_tensor: Optional[Tensor] + ) -> float: + """Update the tensor at ``pos`` of the variational MPS using ``left_tensor`` + (and ``right_tensor``) which is meant to contain the contraction of all + the left (and right) columns of the MPS-MPO-vMPS* network from ``pos``. + Contract these with the MPS-MPO column at ``pos``. + Return the current fidelity of this sweep. 
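# Illustrative numpy analogue (not part of this module) of the interleaved format
# assembled below for ``cq.contract``: operands alternate with lists naming their
# bonds, and a final list selects the bonds of the result. numpy.einsum accepts the
# same calling style with integer labels where the code below uses strings and the
# global MPO bond IDs.
import numpy as np
mps_site = np.random.rand(3, 3, 2)    # bonds (l, r, p) -> labels (0, 1, 2)
vmps_site = np.random.rand(4, 4, 2)   # bonds (L, R, p) -> labels (3, 4, 2); shares "p"
cached = np.einsum(mps_site, [0, 1, 2], vmps_site.conj(), [3, 4, 2], [0, 3, 1, 4])
# ``cached`` keeps the (l, L, r, R) bonds, analogous to one column of the sweep cache.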
+ """ + self._logger.debug(f"Optimising tensor at position={pos}") + + interleaved_rep = [ + # The tensor of the MPS + self.tensors[pos], + ["l", "r", "p"], + ] + result_bonds = ["l", "r", "p"] + + # The MPO tensors at position ``pos`` + for i, mpo_tensor in enumerate(self._mpo[pos]): + # The MPO tensor at this position + interleaved_rep.append(mpo_tensor) + + mpo_bonds: list[Union[int, str]] = list(self._bond_ids[pos][i]) + if i == 0: + # The input bond of the first MPO tensor must connect to the + # physical bond of the correspondong ``self.tensors`` tensor + mpo_bonds[0] = "p" + if i == len(self._mpo[pos]) - 1: + # The output bond of the last MPO tensor corresponds to the + # physical bond of the corresponding ``self._aux_mps`` tensor + mpo_bonds[-1] = "P" + result_bonds[-1] = "P" + interleaved_rep.append(mpo_bonds) + + if left_tensor is not None: + interleaved_rep.append(left_tensor) + left_tensor_bonds = self._get_column_bonds(pos - 1, DirMPS.RIGHT) + interleaved_rep.append(["l", "L"] + left_tensor_bonds) + result_bonds[0] = "L" + if right_tensor is not None: + interleaved_rep.append(right_tensor) + right_tensor_bonds = self._get_column_bonds(pos + 1, DirMPS.LEFT) + interleaved_rep.append(["r", "R"] + right_tensor_bonds) + result_bonds[1] = "R" + + # Append the bond IDs of the resulting tensor + interleaved_rep.append(result_bonds) + + # Contract and store tensor + F = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, + ) + + # Get the fidelity + optim_fidelity = complex( + cq.contract( + "LRP,LRP->", + F.conj(), + F, + options={ + "handle": self._lib.handle, + "device_id": self._lib.device_id, + }, + optimize={"path": [(0, 1)]}, + ) + ) + assert np.isclose(optim_fidelity.imag, 0.0, atol=self._cfg._atol) + optim_fidelity = float(optim_fidelity.real) + + # Normalise F and update the variational MPS + self._aux_mps.tensors[pos] = F / np.sqrt(optim_fidelity) + + return optim_fidelity + + ################################## + # Variational sweeping algorithm # + ################################## + + # Begin by doing a sweep towards the left that does not update + # the variational tensors, but simply loads up the ``r_cached_tensors`` + for pos in reversed(range(1, len(self))): + update_sweep_cache(pos, direction=DirMPS.LEFT) + + prev_fidelity = -1.0 # Dummy value + sweep_fidelity = 0.0 # Dummy value + + # Repeat sweeps until the fidelity converges + sweep_direction = DirMPS.RIGHT + while not np.isclose(prev_fidelity, sweep_fidelity, atol=self._cfg.optim_delta): + self._logger.info(f"Doing another optimisation sweep...") + prev_fidelity = sweep_fidelity + + if sweep_direction == DirMPS.RIGHT: + sweep_fidelity = update_variational_tensor( + pos=0, left_tensor=None, right_tensor=r_cached_tensors.pop() + ) + update_sweep_cache(pos=0, direction=DirMPS.RIGHT) + + for pos in range(1, len(self) - 1): + sweep_fidelity = update_variational_tensor( + pos=pos, + left_tensor=l_cached_tensors[-1], + right_tensor=r_cached_tensors.pop(), + ) + update_sweep_cache(pos, direction=DirMPS.RIGHT) + # The last variational tensor is not updated; + # it'll be the first in the next sweep + + sweep_direction = DirMPS.LEFT + + elif sweep_direction == DirMPS.LEFT: + sweep_fidelity = update_variational_tensor( + pos=len(self) - 1, + left_tensor=l_cached_tensors.pop(), + right_tensor=None, + ) + update_sweep_cache(pos=len(self) - 1, direction=DirMPS.LEFT) + + for pos in reversed(range(1, len(self) - 1)): + sweep_fidelity = 
update_variational_tensor( + pos=pos, + left_tensor=l_cached_tensors.pop(), + right_tensor=r_cached_tensors[-1], + ) + update_sweep_cache(pos, direction=DirMPS.LEFT) + # The last variational tensor is not updated; + # it'll be the first in the next sweep + + sweep_direction = DirMPS.RIGHT + + self._logger.info( + "Optimisation sweep completed. " + f"Current fidelity={self.fidelity*sweep_fidelity}" + ) + + # Clear out the MPO + self._mpo = [list() for _ in range(len(self))] + self._bond_ids = [list() for _ in range(len(self))] + self._mpo_bond_counter = 0 + + # Update the MPS tensors + self.tensors = [t.copy() for t in self._aux_mps.tensors] + + # Update the fidelity estimate + self.fidelity *= sweep_fidelity + self._aux_mps.fidelity = self.fidelity + + self._logger.info(f"Final fidelity after optimisation={self.fidelity}") + + def _new_bond_id(self) -> int: + self._mpo_bond_counter += 1 + return self._mpo_bond_counter diff --git a/pytket/extensions/cutensornet/structured_state/simulation.py b/pytket/extensions/cutensornet/structured_state/simulation.py index f2bace65..4f061877 100644 --- a/pytket/extensions/cutensornet/structured_state/simulation.py +++ b/pytket/extensions/cutensornet/structured_state/simulation.py @@ -1,422 +1,422 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional -import warnings -from enum import Enum - -from pathlib import Path -from collections import defaultdict # type: ignore -import numpy as np # type: ignore - -import networkx as nx # type: ignore - -try: - import kahypar # type: ignore -except ImportError: - warnings.warn("local settings failed to import kahypar", ImportWarning) - -from pytket.circuit import Circuit, Command, Qubit -from pytket.transform import Transform -from pytket.architecture import Architecture -from pytket.passes import DefaultMappingPass -from pytket.predicates import CompilationUnit - -from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger -from .general import Config, StructuredState -from .mps_gate import MPSxGate -from .mps_mpo import MPSxMPO -from .ttn_gate import TTNxGate - - -class SimulationAlgorithm(Enum): - """An enum to refer to the StructuredState contraction algorithm. - - Each enum value corresponds to the class with the same name; see its docs for - information about the algorithm. - """ - - TTNxGate = 0 - MPSxGate = 1 - MPSxMPO = 2 - - -def simulate( - libhandle: CuTensorNetHandle, - circuit: Circuit, - algorithm: SimulationAlgorithm, - config: Config, -) -> StructuredState: - """Simulates the circuit and returns the ``StructuredState`` of the final state. - - Note: - A ``libhandle`` is created via a ``with CuTensorNetHandle() as libhandle:`` - statement. The device where the ``StructuredState`` is stored will match the one - specified by the library handle. - - The input ``circuit`` must be composed of one-qubit and two-qubit gates only. - Any gateset supported by ``pytket`` can be used. 
- - Args: - libhandle: The cuTensorNet library handle that will be used to carry out - tensor operations. - circuit: The pytket circuit to be simulated. - algorithm: Choose between the values of the ``SimulationAlgorithm`` enum. - config: The configuration object for simulation. - - Returns: - An instance of ``StructuredState`` for (an approximation of) the final state - of the circuit. The instance be of the class matching ``algorithm``. - """ - logger = set_logger("Simulation", level=config.loglevel) - - logger.info( - "Ordering the gates in the circuit to reduce canonicalisation overhead." - ) - if algorithm == SimulationAlgorithm.MPSxGate: - state = MPSxGate( # type: ignore - libhandle, - circuit.qubits, - config, - ) - sorted_gates = _get_sorted_gates(circuit, algorithm) - - elif algorithm == SimulationAlgorithm.MPSxMPO: - state = MPSxMPO( # type: ignore - libhandle, - circuit.qubits, - config, - ) - sorted_gates = _get_sorted_gates(circuit, algorithm) - - elif algorithm == SimulationAlgorithm.TTNxGate: - qubit_partition = _get_qubit_partition( - circuit, config.leaf_size, config.use_kahypar - ) - state = TTNxGate( # type: ignore - libhandle, - qubit_partition, - config, - ) - sorted_gates = _get_sorted_gates(circuit, algorithm, qubit_partition) - - logger.info("Running simulation...") - # Apply the gates - for i, g in enumerate(sorted_gates): - state.apply_gate(g) - logger.info(f"Progress... {(100*i) // len(sorted_gates)}%") - - # Apply the batched operations that are left (if any) - state._flush() - - # Apply the circuit's phase to the state - state.apply_scalar(np.exp(1j * np.pi * circuit.phase)) - - # Relabel qubits according to the implicit swaps (if any) - state.apply_qubit_relabelling(circuit.implicit_qubit_permutation()) - - logger.info("Simulation completed.") - logger.info(f"Final StructuredState size={state.get_byte_size() / 2**20} MiB") - logger.info(f"Final StructuredState fidelity={state.fidelity}") - return state - - -def prepare_circuit_mps(circuit: Circuit) -> tuple[Circuit, dict[Qubit, Qubit]]: - """Adds SWAP gates to the circuit so that all gates act on adjacent qubits. - - The qubits in the output circuit will be renamed. Implicit SWAPs may be added - to the circuit, meaning that the logical qubit held at the ``node[i]`` qubit - at the beginning of the circuit may differ from the one it holds at the end. - Consider applying ``apply_qubit_relabelling`` on the MPS after simulation. - - Note: - This preprocessing is *not* required by the MPS algorithms we provide. - Shallow circuits tend to run faster if this preprocessing is *not* used. - In occassions, it has been shown to improve runtime for deep circuits. - - Args: - circuit: The circuit to be simulated. - - Returns: - A tuple with an equivalent circuit with the appropriate structure and a - map of qubit names at the end of the circuit to their corresponding - original names. 
- """ - - # Implement it in a line architecture - cu = CompilationUnit(circuit) - architecture = Architecture([(i, i + 1) for i in range(circuit.n_qubits - 1)]) - DefaultMappingPass(architecture).apply(cu) - prep_circ = cu.circuit - Transform.DecomposeBRIDGE().apply(prep_circ) - - qubit_map: dict[Qubit, Qubit] = {} - for orig_q, arch_q in cu.final_map.items(): - assert isinstance(orig_q, Qubit) - assert isinstance(arch_q, Qubit) - qubit_map[arch_q] = orig_q - - return (prep_circ, qubit_map) - - -def _get_qubit_partition( - circuit: Circuit, max_q_per_leaf: int, use_kahypar: bool -) -> dict[int, list[Qubit]]: - """Returns a qubit partition for a TTN. - - Proceeds by recursive bisection of the qubit connectivity graph, so that - qubits that interact with each other less are connected by a common ancestor - closer to the root. - - Args: - circuit: The circuit to be simulated. - max_q_per_leaf: The maximum allowed number of qubits per node leaf - use_kahypar: Use KaHyPar for graph partitioning if this is True. - Otherwise, use NetworkX (worse, but easy to setup). - - Returns: - A dictionary describing the partition in the format expected by TTN. - - Raises: - RuntimeError: If gate acts on more than 2 qubits. - """ - - # Scan the circuit and generate the edges of the connectivity graph - edge_weights: dict[tuple[Qubit, Qubit], int] = dict() - for cmd in circuit.get_commands(): - if cmd.op.is_gate(): - if cmd.op.n_qubits == 2: - edge = (min(cmd.qubits), max(cmd.qubits)) - - if edge in edge_weights: - edge_weights[edge] += 1 - else: - edge_weights[edge] = 1 - - elif cmd.op.n_qubits > 2: - raise RuntimeError( - "Gates must act on only 1 or 2 qubits! " - + f"This is not satisfied by {cmd}." - ) - - # Create the connectivity graph in NetworkX - connectivity_graph = nx.Graph() - connectivity_graph.add_nodes_from(circuit.qubits) - for (u, v), weight in edge_weights.items(): - connectivity_graph.add_edge(u, v, weight=weight) - - # Apply balanced bisections until each qubit group is small enough - partition = {0: circuit.qubits} - stop_bisec = False # Do at least one bisection (TTN reqs >1 leaf nodes) - - while not stop_bisec: - old_partition = partition.copy() - for key, group in old_partition.items(): - # Apply the balanced bisection on this group - if use_kahypar: # Using KaHyPar - (groupA, groupB) = _apply_kahypar_bisection( - connectivity_graph.subgraph(group), - ) - else: # Using NetworkX - (groupA, groupB) = nx.community.kernighan_lin_bisection( - connectivity_graph.subgraph(group), - ) - # Groups A and B are on the same subtree (key separated by +1) - partition[2 * key] = groupA - partition[2 * key + 1] = groupB - - # Stop if all groups have less than ``max_q_per_leaf`` qubits in them - stop_bisec = all(len(group) <= max_q_per_leaf for group in partition.values()) - - qubit_partition = {key: list(leaf_qubits) for key, leaf_qubits in partition.items()} - return qubit_partition - - -def _apply_kahypar_bisection( - graph: nx.Graph, -) -> tuple[list[Qubit], list[Qubit]]: - """Use KaHyPar to obtain a bisection of the graph. - - Returns: - Two lists, each containing the vertices in either group of the bisection. 
- """ - vertices = list(graph.nodes) - edges = list(graph.edges) - weight_dict = nx.get_edge_attributes(graph, "weight") - qubit_dict = {q: i for i, q in enumerate(vertices)} - - num_vertices = len(vertices) - num_edges = len(edges) - k = 2 # Number of groups in the partition - epsilon = 0.03 # Imbalance tolerance - - # Special case where the graph has no edges; KaHyPar cannot deal with it - if num_edges == 0: - # Just split the list of vertices in half - return (vertices[: num_vertices // 2], vertices[num_vertices // 2 :]) - - # KaHyPar expects the list of edges to be provided as a continuous set of vertices - # ``edge_stream`` where ``edge_indices`` indicates where each new edge begins - # (the latter is necessary because KaHyPar can accept hyperedges) - edge_stream = [qubit_dict[vertex] for edge in edges for vertex in edge] - edge_indices = [0] + [2 * (i + 1) for i in range(num_edges)] - edge_weights = [weight_dict[edge] for edge in edges] - vertex_weights = [1 for _ in range(num_vertices)] - - hypergraph = kahypar.Hypergraph( - num_vertices, - num_edges, - edge_indices, - edge_stream, - k, - edge_weights, - vertex_weights, - ) - - # Set up the configuration for KaHyPar - context = kahypar.Context() - context.setK(k) - context.setEpsilon(epsilon) - context.suppressOutput(True) - - # Load the default configuration file provided by the KaHyPar devs - ini_file = str(Path(__file__).parent / "cut_rKaHyPar_sea20.ini") - context.loadINIconfiguration(ini_file) - - # Run the partitioner - kahypar.partition(hypergraph, context) - partition_dict = {i: hypergraph.blockID(i) for i in range(hypergraph.numNodes())} - - # Obtain the two groups of qubits from ``partition_dict`` - groupA = [vertices[i] for i, block in partition_dict.items() if block == 0] - groupB = [vertices[i] for i, block in partition_dict.items() if block == 1] - - return (groupA, groupB) - - -def _get_sorted_gates( - circuit: Circuit, - algorithm: SimulationAlgorithm, - qubit_partition: Optional[dict[int, list[Qubit]]] = None, -) -> list[Command]: - """Sorts the list of gates so that there's less canonicalisation during simulation. - - Returns an equivalent list of commands fixing the order of parallel gates so that - 2-qubit gates that are close together are applied one after the other. This reduces - the overhead of canonicalisation during simulation. - - Args: - circuit: The original circuit. - algorithm: The simulation algorithm that will be used on this circuit. - qubit_partition: For TTN simulation algorithms only. A partition of the - qubits in the circuit into disjoint groups, describing the hierarchical - structure of the TTN. - - Returns: - The same gates, ordered in a beneficial way for the given algorithm. 
- """ - all_gates = circuit.get_commands() - sorted_gates = [] - # Entries from `all_gates` that are not yet in `sorted_gates` - remaining = set(range(len(all_gates))) - - # Do some precomputation depending on the algorithm - if algorithm in [SimulationAlgorithm.TTNxGate]: - if qubit_partition is None: - raise RuntimeError("You must provide a qubit partition!") - - leaf_of_qubit: dict[Qubit, int] = dict() - for leaf, qubits in qubit_partition.items(): - for q in qubits: - leaf_of_qubit[q] = leaf - - elif algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: - idx_of_qubit = {q: i for i, q in enumerate(circuit.qubits)} - - else: - raise RuntimeError(f"Sorting gates for {algorithm} not supported.") - - # Create the list of indices of gates acting on each qubit - gate_indices: dict[Qubit, list[int]] = defaultdict(list) - for i, g in enumerate(all_gates): - for q in g.qubits: - gate_indices[q].append(i) - # Schedule all 1-qubit gates at the beginning of the circuit - for q, indices in gate_indices.items(): - while indices and len(all_gates[indices[0]].qubits) == 1: - i = indices.pop(0) - sorted_gates.append(all_gates[i]) - remaining.remove(i) - - # Decide which 2-qubit gate to apply next - last_qubits = [circuit.qubits[0], circuit.qubits[0]] # Arbitrary choice at start - while remaining: - # Gather all gates that have nothing in front of them at one of its qubits - reachable_gates = [gates[0] for gates in gate_indices.values() if gates] - # Among them, find those that are available in both qubits - available_gates: list[int] = [] - for gate_idx in reachable_gates: - gate_qubits = all_gates[gate_idx].qubits - assert len(gate_qubits) == 2 # Sanity check: all of them are 2-qubit gates - # If the first gate in both qubits coincides, then this gate is available - if gate_indices[gate_qubits[0]][0] == gate_indices[gate_qubits[1]][0]: - assert gate_indices[gate_qubits[0]][0] == gate_idx - available_gates.append(gate_idx) - # Sanity check: there is at least one available 2-qubit gate - assert available_gates - - # Find distance from last_qubits to current applicable 2-qubit gates - gate_distance: dict[int, int] = dict() - for gate_idx in available_gates: - gate_qubits = all_gates[gate_idx].qubits - - # Criterion for distance depends on the simulation algorithm - if algorithm in [SimulationAlgorithm.TTNxGate]: - gate_distance[gate_idx] = max( # Max common ancestor distance - leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[0]], - leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[1]], - leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[0]], - leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[1]], - ) - elif algorithm in [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - ]: - gate_distance[gate_idx] = max( # Max linear distance between qubits - abs(idx_of_qubit[last_qubits[0]] - idx_of_qubit[gate_qubits[0]]), - abs(idx_of_qubit[last_qubits[0]] - idx_of_qubit[gate_qubits[1]]), - abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[0]]), - abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[1]]), - ) - else: - raise RuntimeError(f"Sorting gates for {algorithm} not supported.") - - # Choose the gate with shortest distance - chosen_gate_idx = min(gate_distance, key=gate_distance.get) # type: ignore - chosen_gate = all_gates[chosen_gate_idx] - - # Schedule the gate - last_qubits = chosen_gate.qubits - sorted_gates.append(chosen_gate) - remaining.remove(chosen_gate_idx) - # Schedule all 1-qubit gates after this gate - for q in 
last_qubits: - gate_indices[q].pop(0) # Remove the 2-qubit `chosen_gate` - indices = gate_indices[q] - while indices and len(all_gates[indices[0]].qubits) == 1: - i = indices.pop(0) - sorted_gates.append(all_gates[i]) - remaining.remove(i) - - assert len(all_gates) == len(sorted_gates) - return sorted_gates +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional +import warnings +from enum import Enum + +from pathlib import Path +from collections import defaultdict # type: ignore +import numpy as np # type: ignore + +import networkx as nx # type: ignore + +try: + import kahypar # type: ignore +except ImportError: + warnings.warn("local settings failed to import kahypar", ImportWarning) + +from pytket.circuit import Circuit, Command, Qubit +from pytket.transform import Transform +from pytket.architecture import Architecture +from pytket.passes import DefaultMappingPass +from pytket.predicates import CompilationUnit + +from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger +from .general import Config, StructuredState +from .mps_gate import MPSxGate +from .mps_mpo import MPSxMPO +from .ttn_gate import TTNxGate + + +class SimulationAlgorithm(Enum): + """An enum to refer to the StructuredState contraction algorithm. + + Each enum value corresponds to the class with the same name; see its docs for + information about the algorithm. + """ + + TTNxGate = 0 + MPSxGate = 1 + MPSxMPO = 2 + + +def simulate( + libhandle: CuTensorNetHandle, + circuit: Circuit, + algorithm: SimulationAlgorithm, + config: Config, +) -> StructuredState: + """Simulates the circuit and returns the ``StructuredState`` of the final state. + + Note: + A ``libhandle`` is created via a ``with CuTensorNetHandle() as libhandle:`` + statement. The device where the ``StructuredState`` is stored will match the one + specified by the library handle. + + The input ``circuit`` must be composed of one-qubit and two-qubit gates only. + Any gateset supported by ``pytket`` can be used. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations. + circuit: The pytket circuit to be simulated. + algorithm: Choose between the values of the ``SimulationAlgorithm`` enum. + config: The configuration object for simulation. + + Returns: + An instance of ``StructuredState`` for (an approximation of) the final state + of the circuit. The instance be of the class matching ``algorithm``. + """ + logger = set_logger("Simulation", level=config.loglevel) + + logger.info( + "Ordering the gates in the circuit to reduce canonicalisation overhead." 
+ ) + if algorithm == SimulationAlgorithm.MPSxGate: + state = MPSxGate( # type: ignore + libhandle, + circuit.qubits, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm) + + elif algorithm == SimulationAlgorithm.MPSxMPO: + state = MPSxMPO( # type: ignore + libhandle, + circuit.qubits, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm) + + elif algorithm == SimulationAlgorithm.TTNxGate: + qubit_partition = _get_qubit_partition( + circuit, config.leaf_size, config.use_kahypar + ) + state = TTNxGate( # type: ignore + libhandle, + qubit_partition, + config, + ) + sorted_gates = _get_sorted_gates(circuit, algorithm, qubit_partition) + + logger.info("Running simulation...") + # Apply the gates + for i, g in enumerate(sorted_gates): + state.apply_gate(g) + logger.info(f"Progress... {(100*i) // len(sorted_gates)}%") + + # Apply the batched operations that are left (if any) + state._flush() + + # Apply the circuit's phase to the state + state.apply_scalar(np.exp(1j * np.pi * circuit.phase)) + + # Relabel qubits according to the implicit swaps (if any) + state.apply_qubit_relabelling(circuit.implicit_qubit_permutation()) + + logger.info("Simulation completed.") + logger.info(f"Final StructuredState size={state.get_byte_size() / 2**20} MiB") + logger.info(f"Final StructuredState fidelity={state.fidelity}") + return state + + +def prepare_circuit_mps(circuit: Circuit) -> tuple[Circuit, dict[Qubit, Qubit]]: + """Adds SWAP gates to the circuit so that all gates act on adjacent qubits. + + The qubits in the output circuit will be renamed. Implicit SWAPs may be added + to the circuit, meaning that the logical qubit held at the ``node[i]`` qubit + at the beginning of the circuit may differ from the one it holds at the end. + Consider applying ``apply_qubit_relabelling`` on the MPS after simulation. + + Note: + This preprocessing is *not* required by the MPS algorithms we provide. + Shallow circuits tend to run faster if this preprocessing is *not* used. + In occassions, it has been shown to improve runtime for deep circuits. + + Args: + circuit: The circuit to be simulated. + + Returns: + A tuple with an equivalent circuit with the appropriate structure and a + map of qubit names at the end of the circuit to their corresponding + original names. + """ + + # Implement it in a line architecture + cu = CompilationUnit(circuit) + architecture = Architecture([(i, i + 1) for i in range(circuit.n_qubits - 1)]) + DefaultMappingPass(architecture).apply(cu) + prep_circ = cu.circuit + Transform.DecomposeBRIDGE().apply(prep_circ) + + qubit_map: dict[Qubit, Qubit] = {} + for orig_q, arch_q in cu.final_map.items(): + assert isinstance(orig_q, Qubit) + assert isinstance(arch_q, Qubit) + qubit_map[arch_q] = orig_q + + return (prep_circ, qubit_map) + + +def _get_qubit_partition( + circuit: Circuit, max_q_per_leaf: int, use_kahypar: bool +) -> dict[int, list[Qubit]]: + """Returns a qubit partition for a TTN. + + Proceeds by recursive bisection of the qubit connectivity graph, so that + qubits that interact with each other less are connected by a common ancestor + closer to the root. + + Args: + circuit: The circuit to be simulated. + max_q_per_leaf: The maximum allowed number of qubits per node leaf + use_kahypar: Use KaHyPar for graph partitioning if this is True. + Otherwise, use NetworkX (worse, but easy to setup). + + Returns: + A dictionary describing the partition in the format expected by TTN. + + Raises: + RuntimeError: If gate acts on more than 2 qubits. 
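# Illustrative usage sketch of the ``simulate`` entry point defined above (assumptions:
# default ``Config()`` values and the import paths shown in this file; not taken
# verbatim from the package documentation).
from pytket.circuit import Circuit
from pytket.extensions.cutensornet.general import CuTensorNetHandle
from pytket.extensions.cutensornet.structured_state import (
    Config,
    SimulationAlgorithm,
    simulate,
)

circ = Circuit(2).H(0).CX(0, 1)          # a small example circuit
with CuTensorNetHandle() as libhandle:   # the handle fixes the GPU device used
    mps = simulate(libhandle, circ, SimulationAlgorithm.MPSxGate, Config())
    print(mps.get_fidelity())            # lower bound on the fidelity of the state
    print(mps.get_amplitude(3))          # amplitude of the |11> state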
+ """ + + # Scan the circuit and generate the edges of the connectivity graph + edge_weights: dict[tuple[Qubit, Qubit], int] = dict() + for cmd in circuit.get_commands(): + if cmd.op.is_gate(): + if cmd.op.n_qubits == 2: + edge = (min(cmd.qubits), max(cmd.qubits)) + + if edge in edge_weights: + edge_weights[edge] += 1 + else: + edge_weights[edge] = 1 + + elif cmd.op.n_qubits > 2: + raise RuntimeError( + "Gates must act on only 1 or 2 qubits! " + + f"This is not satisfied by {cmd}." + ) + + # Create the connectivity graph in NetworkX + connectivity_graph = nx.Graph() + connectivity_graph.add_nodes_from(circuit.qubits) + for (u, v), weight in edge_weights.items(): + connectivity_graph.add_edge(u, v, weight=weight) + + # Apply balanced bisections until each qubit group is small enough + partition = {0: circuit.qubits} + stop_bisec = False # Do at least one bisection (TTN reqs >1 leaf nodes) + + while not stop_bisec: + old_partition = partition.copy() + for key, group in old_partition.items(): + # Apply the balanced bisection on this group + if use_kahypar: # Using KaHyPar + (groupA, groupB) = _apply_kahypar_bisection( + connectivity_graph.subgraph(group), + ) + else: # Using NetworkX + (groupA, groupB) = nx.community.kernighan_lin_bisection( + connectivity_graph.subgraph(group), + ) + # Groups A and B are on the same subtree (key separated by +1) + partition[2 * key] = groupA + partition[2 * key + 1] = groupB + + # Stop if all groups have less than ``max_q_per_leaf`` qubits in them + stop_bisec = all(len(group) <= max_q_per_leaf for group in partition.values()) + + qubit_partition = {key: list(leaf_qubits) for key, leaf_qubits in partition.items()} + return qubit_partition + + +def _apply_kahypar_bisection( + graph: nx.Graph, +) -> tuple[list[Qubit], list[Qubit]]: + """Use KaHyPar to obtain a bisection of the graph. + + Returns: + Two lists, each containing the vertices in either group of the bisection. 
+ """ + vertices = list(graph.nodes) + edges = list(graph.edges) + weight_dict = nx.get_edge_attributes(graph, "weight") + qubit_dict = {q: i for i, q in enumerate(vertices)} + + num_vertices = len(vertices) + num_edges = len(edges) + k = 2 # Number of groups in the partition + epsilon = 0.03 # Imbalance tolerance + + # Special case where the graph has no edges; KaHyPar cannot deal with it + if num_edges == 0: + # Just split the list of vertices in half + return (vertices[: num_vertices // 2], vertices[num_vertices // 2 :]) + + # KaHyPar expects the list of edges to be provided as a continuous set of vertices + # ``edge_stream`` where ``edge_indices`` indicates where each new edge begins + # (the latter is necessary because KaHyPar can accept hyperedges) + edge_stream = [qubit_dict[vertex] for edge in edges for vertex in edge] + edge_indices = [0] + [2 * (i + 1) for i in range(num_edges)] + edge_weights = [weight_dict[edge] for edge in edges] + vertex_weights = [1 for _ in range(num_vertices)] + + hypergraph = kahypar.Hypergraph( + num_vertices, + num_edges, + edge_indices, + edge_stream, + k, + edge_weights, + vertex_weights, + ) + + # Set up the configuration for KaHyPar + context = kahypar.Context() + context.setK(k) + context.setEpsilon(epsilon) + context.suppressOutput(True) + + # Load the default configuration file provided by the KaHyPar devs + ini_file = str(Path(__file__).parent / "cut_rKaHyPar_sea20.ini") + context.loadINIconfiguration(ini_file) + + # Run the partitioner + kahypar.partition(hypergraph, context) + partition_dict = {i: hypergraph.blockID(i) for i in range(hypergraph.numNodes())} + + # Obtain the two groups of qubits from ``partition_dict`` + groupA = [vertices[i] for i, block in partition_dict.items() if block == 0] + groupB = [vertices[i] for i, block in partition_dict.items() if block == 1] + + return (groupA, groupB) + + +def _get_sorted_gates( + circuit: Circuit, + algorithm: SimulationAlgorithm, + qubit_partition: Optional[dict[int, list[Qubit]]] = None, +) -> list[Command]: + """Sorts the list of gates so that there's less canonicalisation during simulation. + + Returns an equivalent list of commands fixing the order of parallel gates so that + 2-qubit gates that are close together are applied one after the other. This reduces + the overhead of canonicalisation during simulation. + + Args: + circuit: The original circuit. + algorithm: The simulation algorithm that will be used on this circuit. + qubit_partition: For TTN simulation algorithms only. A partition of the + qubits in the circuit into disjoint groups, describing the hierarchical + structure of the TTN. + + Returns: + The same gates, ordered in a beneficial way for the given algorithm. 
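# Toy illustration (not part of this module) of the MPS scheduling heuristic used in
# the implementation below: among the currently available 2-qubit gates, pick the one
# whose qubits lie closest, in line position, to the qubits of the last applied gate.
idx_of_qubit = {"q0": 0, "q1": 1, "q2": 2, "q3": 3}  # hypothetical linear qubit ordering
last_qubits = ("q0", "q1")
candidates = [("q1", "q2"), ("q2", "q3")]            # both have no pending predecessors

def linear_distance(pair: tuple[str, str]) -> int:
    return max(abs(idx_of_qubit[a] - idx_of_qubit[b]) for a in last_qubits for b in pair)

print(min(candidates, key=linear_distance))          # ('q1', 'q2'): distance 2 beats 3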
+ """ + all_gates = circuit.get_commands() + sorted_gates = [] + # Entries from `all_gates` that are not yet in `sorted_gates` + remaining = set(range(len(all_gates))) + + # Do some precomputation depending on the algorithm + if algorithm in [SimulationAlgorithm.TTNxGate]: + if qubit_partition is None: + raise RuntimeError("You must provide a qubit partition!") + + leaf_of_qubit: dict[Qubit, int] = dict() + for leaf, qubits in qubit_partition.items(): + for q in qubits: + leaf_of_qubit[q] = leaf + + elif algorithm in [SimulationAlgorithm.MPSxGate, SimulationAlgorithm.MPSxMPO]: + idx_of_qubit = {q: i for i, q in enumerate(circuit.qubits)} + + else: + raise RuntimeError(f"Sorting gates for {algorithm} not supported.") + + # Create the list of indices of gates acting on each qubit + gate_indices: dict[Qubit, list[int]] = defaultdict(list) + for i, g in enumerate(all_gates): + for q in g.qubits: + gate_indices[q].append(i) + # Schedule all 1-qubit gates at the beginning of the circuit + for q, indices in gate_indices.items(): + while indices and len(all_gates[indices[0]].qubits) == 1: + i = indices.pop(0) + sorted_gates.append(all_gates[i]) + remaining.remove(i) + + # Decide which 2-qubit gate to apply next + last_qubits = [circuit.qubits[0], circuit.qubits[0]] # Arbitrary choice at start + while remaining: + # Gather all gates that have nothing in front of them at one of its qubits + reachable_gates = [gates[0] for gates in gate_indices.values() if gates] + # Among them, find those that are available in both qubits + available_gates: list[int] = [] + for gate_idx in reachable_gates: + gate_qubits = all_gates[gate_idx].qubits + assert len(gate_qubits) == 2 # Sanity check: all of them are 2-qubit gates + # If the first gate in both qubits coincides, then this gate is available + if gate_indices[gate_qubits[0]][0] == gate_indices[gate_qubits[1]][0]: + assert gate_indices[gate_qubits[0]][0] == gate_idx + available_gates.append(gate_idx) + # Sanity check: there is at least one available 2-qubit gate + assert available_gates + + # Find distance from last_qubits to current applicable 2-qubit gates + gate_distance: dict[int, int] = dict() + for gate_idx in available_gates: + gate_qubits = all_gates[gate_idx].qubits + + # Criterion for distance depends on the simulation algorithm + if algorithm in [SimulationAlgorithm.TTNxGate]: + gate_distance[gate_idx] = max( # Max common ancestor distance + leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[0]], + leaf_of_qubit[last_qubits[0]] ^ leaf_of_qubit[gate_qubits[1]], + leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[0]], + leaf_of_qubit[last_qubits[1]] ^ leaf_of_qubit[gate_qubits[1]], + ) + elif algorithm in [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + ]: + gate_distance[gate_idx] = max( # Max linear distance between qubits + abs(idx_of_qubit[last_qubits[0]] - idx_of_qubit[gate_qubits[0]]), + abs(idx_of_qubit[last_qubits[0]] - idx_of_qubit[gate_qubits[1]]), + abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[0]]), + abs(idx_of_qubit[last_qubits[1]] - idx_of_qubit[gate_qubits[1]]), + ) + else: + raise RuntimeError(f"Sorting gates for {algorithm} not supported.") + + # Choose the gate with shortest distance + chosen_gate_idx = min(gate_distance, key=gate_distance.get) # type: ignore + chosen_gate = all_gates[chosen_gate_idx] + + # Schedule the gate + last_qubits = chosen_gate.qubits + sorted_gates.append(chosen_gate) + remaining.remove(chosen_gate_idx) + # Schedule all 1-qubit gates after this gate + for q in 
last_qubits: + gate_indices[q].pop(0) # Remove the 2-qubit `chosen_gate` + indices = gate_indices[q] + while indices and len(all_gates[indices[0]].qubits) == 1: + i = indices.pop(0) + sorted_gates.append(all_gates[i]) + remaining.remove(i) + + assert len(all_gates) == len(sorted_gates) + return sorted_gates diff --git a/pytket/extensions/cutensornet/structured_state/ttn.py b/pytket/extensions/cutensornet/structured_state/ttn.py index 9b36c6fa..75dc531e 100644 --- a/pytket/extensions/cutensornet/structured_state/ttn.py +++ b/pytket/extensions/cutensornet/structured_state/ttn.py @@ -1,932 +1,932 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations # type: ignore -import warnings -from typing import Optional, Union -from enum import IntEnum - -from random import Random # type: ignore -import math # type: ignore -import numpy as np # type: ignore - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum as cq # type: ignore - from cuquantum.cutensornet import tensor # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - -from pytket.circuit import Command, Qubit -from pytket.pauli import QubitPauliString - -from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger - -from .general import Config, StructuredState, Tensor - - -class DirTTN(IntEnum): - """An enum to refer to relative directions within the TTN.""" - - PARENT = -1 - LEFT = 0 - RIGHT = 1 - - -# An alias for the TTN path from root to a TreeNode -RootPath = tuple[DirTTN, ...] - - -class TreeNode: - """Represents a single tensor in the TTN. - - The shape of the tensor agrees with the convention set in ``DirTTN``, meaning - that ``tensor.shape[DirTTN.PARENT]`` corresponds to the dimension of the bond - connecting this tree node with its parent. Notice that, since DirTTN.PARENT is - -1, this is always the last entry. - - In the case the TreeNode is a leaf, it will contain only one virtual bond - (the parent) and as many physical bonds as qubits in the group it represents. - These qubits will correspond to bonds from ``0`` to ``len(tensor.shape)-2``. - """ - - def __init__(self, tensor: Tensor, is_leaf: bool = False): - self.tensor = tensor - self.is_leaf = is_leaf - self.canonical_form: Optional[DirTTN] = None - - def copy(self) -> TreeNode: - new_node = TreeNode( - self.tensor.copy(), - is_leaf=self.is_leaf, - ) - new_node.canonical_form = self.canonical_form - return new_node - - -class TTN(StructuredState): - """Represents a state as a Tree Tensor Network. - - Attributes: - nodes (dict[RootPath, TreeNode]): A dictionary providing the tree node - of the given root path in the TTN. - qubit_position (dict[pytket.circuit.Qubit, tuple[RootPath, int]]): A dictionary - mapping circuit qubits to their address in the TTN. 
- fidelity (float): A lower bound of the fidelity, obtained by multiplying - the fidelities after each contraction. The fidelity of a contraction - corresponds to ``||^2`` where ``|psi>`` and ``|phi>`` are the - states before and after truncation (assuming both are normalised). - """ - - def __init__( - self, - libhandle: CuTensorNetHandle, - qubit_partition: dict[int, list[Qubit]], - config: Config, - ): - """Initialise a TTN on the computational state ``|0>``. - - Note: - A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` - statement. The device where the TTN is stored will match the one specified - by the library handle. - - The current implementation requires the keys of ``qubit_partition`` to be - integers from ``0`` to ``2^l - 1`` for some ``l``. - - Args: - libhandle: The cuTensorNet library handle that will be used to carry out - tensor operations on the TTN. - qubit_partition: A partition of the qubits in the circuit into disjoint - groups, describing the hierarchical structure of the TTN. Each key - identifies a leaf of the TTN, with its corresponding value indicating - the list of qubits represented by the leaf. The leaves are numbered - from left to right on a planar representation of the tree. Hence, the - smaller half of the keys correspond to leaves in the left subtree and - the rest are in the right subtree; providing recursive bipartitions. - config: The object describing the configuration for simulation. - - Raises: - ValueError: If the keys of ``qubit_partition`` do not range from ``0`` to - ``2^l - 1`` for some ``l``. - ValueError: If a ``Qubit`` is repeated in ``qubit_partition``. - ValueError: If there is only one entry in ``qubit_partition``. - """ - self._lib = libhandle - self._cfg = config - self._logger = set_logger("TTN", level=config.loglevel) - self._rng = Random() - self._rng.seed(self._cfg.seed) - - self.fidelity = 1.0 - self.nodes: dict[RootPath, TreeNode] = dict() - self.qubit_position: dict[Qubit, tuple[RootPath, int]] = dict() - - n_groups = len(qubit_partition) - if n_groups == 0: # There's no initialisation to be done - pass - elif n_groups == 1: - raise ValueError( - "Only one entry to qubit_partition provided." - "Introduce a finer partition of qubits." - ) - else: - n_levels = math.floor(math.log2(n_groups)) - if n_groups != 2**n_levels: - raise ValueError( - "The number of entries in qubit_partition must be a power of two." - ) - - # Create the TreeNodes of the different groups of qubits - for k, qubits in qubit_partition.items(): - if k < 0 or k >= n_groups: - raise ValueError( - f"Keys of qubit_partition must range from 0 to {n_groups-1}." - ) - - # Calculate the root path of this group - path = [] - for l in reversed(range(n_levels)): - if k < 2**l: - path.append(DirTTN.LEFT) - else: - path.append(DirTTN.RIGHT) - k -= 2**l - - # Add each qubit to the qubit_position dictionary - for i, q in enumerate(qubits): - if q in self.qubit_position: - raise ValueError( - f"Qubit {q} appears more than once in qubit_partition." 
- ) - self.qubit_position[q] = (tuple(path), i) - - # This tensor has a physical bond per qubit and one virtual bond at the - # end for the parent (dim=1) - shape = tuple([2] * len(qubits) + [1]) - # Initialise the tensor of this group of qubits to |0> - tensor = cp.zeros(shape=shape, dtype=self._cfg._complex_t) - ket_zero_entry = tuple(0 for _ in shape) # Index 0 on all bonds - tensor[ket_zero_entry] = 1 # Amplitude of |0> set to 1 - - # Create the TreeNode - node = TreeNode(tensor, is_leaf=True) - self.nodes[tuple(path)] = node - - # Create the internal TreeNodes - paths: list[list[DirTTN]] = [[]] - for _ in range(n_levels): - # Create the TreeNode at this path - for p in paths: - tensor = cp.ones(shape=(1, 1, 1), dtype=self._cfg._complex_t) - self.nodes[tuple(p)] = TreeNode(tensor) - # Generate the paths for the next level - paths = [ - p + [direction] - for p in paths - for direction in [DirTTN.LEFT, DirTTN.RIGHT] - ] - self._logger.debug(f"qubit_position={self.qubit_position}") - self._logger.debug(f"All root paths: {list(self.nodes.keys())}") - - def is_valid(self) -> bool: - """Verify that the TTN object is valid. - - Specifically, verify that the TTN does not exceed the dimension limit ``chi`` - specified in the ``Config`` object, that physical bonds have dimension 2, - that all tensors except the leaves are rank three and that tensors have shapes - consistent with the bond dimensions. - - Returns: - False if a violation was detected or True otherwise. - """ - chi_ok = all( - self.get_dimension(path, DirTTN.PARENT) <= self._cfg.chi - for path in self.nodes.keys() - ) - phys_ok = all( - self.nodes[path].tensor.shape[bond] == 2 - for path, bond in self.qubit_position.values() - ) - rank_ok = all( - node.is_leaf or len(node.tensor.shape) == 3 for node in self.nodes.values() - ) - shape_ok = all( - self.get_dimension(path, DirTTN.PARENT) - == self.get_dimension(path[:-1], path[-1]) - for path in self.nodes.keys() - if len(path) != 0 - ) - shape_ok = shape_ok and self.get_dimension((), DirTTN.PARENT) == 1 - - # Debugger logging - self._logger.debug( - "Checking validity of TTN... " - f"chi_ok={chi_ok}, " - f"phys_ok={phys_ok}, " - f"rank_ok={rank_ok}, " - f"shape_ok={shape_ok}" - ) - return chi_ok and phys_ok and rank_ok and shape_ok - - def apply_gate(self, gate: Command) -> TTN: - """Apply the gate to the TTN. - - Note: - Only single-qubit gates and two-qubit gates are supported. - - Args: - gate: The gate to be applied. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the command introduced is not a unitary gate. - ValueError: If gate acts on more than 2 qubits. - """ - try: - unitary = gate.op.get_unitary() - except: - raise ValueError("The command introduced is not unitary.") - - # Load the gate's unitary to the GPU memory - unitary = unitary.astype(dtype=self._cfg._complex_t, copy=False) - unitary = cp.asarray(unitary, dtype=self._cfg._complex_t) - - self._logger.debug(f"Applying gate {gate}.") - self.apply_unitary(unitary, gate.qubits) - - return self - - def apply_unitary( - self, unitary: cp.ndarray, qubits: list[Qubit] - ) -> StructuredState: - """Applies the unitary to the specified qubits of the StructuredState. - - Note: - It is assumed that the matrix provided by the user is unitary. If this is - not the case, the program will still run, but its behaviour is undefined. - - Args: - unitary: The matrix to be applied as a CuPy ndarray. 
It should either be - a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. - qubits: The qubits the unitary acts on. Only one qubit and two qubit - unitaries are supported. - - Returns: - ``self``, to allow for method chaining. - - Raises: - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - ValueError: If the number of qubits provided is not one or two. - ValueError: If the size of the matrix does not match with the number of - qubits provided. - """ - if self._lib._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of update_libhandle and CuTensorNetHandle.", - ) - - self._logger.debug(f"Applying unitary {unitary} on {qubits}.") - - if len(qubits) == 1: - if unitary.shape != (2, 2): - raise ValueError( - "The unitary introduced acts on one qubit but it is not 2x2." - ) - self._apply_1q_unitary(unitary, qubits[0]) - - elif len(qubits) == 2: - if unitary.shape != (4, 4): - raise ValueError( - "The unitary introduced acts on two qubits but it is not 4x4." - ) - self._apply_2q_unitary(unitary, qubits[0], qubits[1]) - - else: - raise ValueError("Gates must act on only 1 or 2 qubits!") - - return self - - def apply_scalar(self, scalar: complex) -> TTN: - """Multiplies the state by a complex number. - - Args: - scalar: The complex number to be multiplied. - - Returns: - ``self``, to allow for method chaining. - """ - self.nodes[()].tensor *= scalar - return self - - def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> TTN: - """Relabels each qubit ``q`` as ``qubit_map[q]``. - - This does not apply any SWAP gate, nor it changes the internal structure of the - state. It simply changes the label of the physical bonds of the tensor network. - - Args: - qubit_map: Dictionary mapping each qubit to its new label. - - Returns: - ``self``, to allow for method chaining. - - Raises: - ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. - """ - new_qubit_position = dict() - for q_orig, q_new in qubit_map.items(): - # Check the qubit is in the state - if q_orig not in self.qubit_position: - raise ValueError(f"Qubit {q_orig} is not in the state.") - # Apply the relabelling for this qubit - new_qubit_position[q_new] = self.qubit_position[q_orig] - - self.qubit_position = new_qubit_position - self._logger.debug(f"Relabelled qubits... {qubit_map}") - return self - - def canonicalise( - self, center: Union[RootPath, Qubit], unsafe: bool = False - ) -> Tensor: - """Canonicalise the TTN so that all tensors are isometries from ``center``. - - Args: - center: Identifies the bond that is to become the center of the canonical - form. If it is a ``RootPath`` it refers to the parent bond of - ``self.nodes[center]``. If it is a ``Qubit`` it refers to its physical - bond. - unsafe: If True, the final state will be different than the starting one. - Specifically, the information in the returned bond tensor at ``center`` - is removed from the TTN. It is expected that the caller will reintroduce - the bond tensor after some processing (e.g. after SVD truncation). - - Returns: - The bond tensor created at ``center`` when canonicalisation is complete. - Applying SVD to this tensor yields the global SVD of the TTN. - - Raises: - ValueError: If the ``center`` is ``tuple()``. 
- """ - self._logger.debug(f"Canonicalising to {str(center)}") - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - if isinstance(center, Qubit): - target_path = self.qubit_position[center][0] - assert not unsafe # Unsafe disallowed when ``center`` is a qubit - elif center == (): - raise ValueError("There is no bond at path ().") - else: - target_path = center - - # Separate nodes to be canonicalised towards children from those towards parent - towards_child = [] - towards_parent = [] - for path in self.nodes.keys(): - # Nodes towards children are closer to the root and coincide in the path - if len(path) < len(target_path) and all( - path[l] == target_path[l] for l in range(len(path)) - ): - towards_child.append(path) - # If the center is a physical bond (qubit), its node is skipped - elif path == target_path and isinstance(center, Qubit): - continue - # All other nodes are canonicalised towards their parent - else: - towards_parent.append(path) - # Sanity checks - assert len(towards_child) != 0 - assert len(towards_parent) != 0 - - # Glossary of bond IDs - # chr(x) -> bond of the x-th qubit in the node (if it is a leaf) - # l -> left child bond of the TTN node - # r -> right child bond of the TTN node - # p -> parent bond of the TTN node - # s -> bond between Q and R after decomposition - - # Canonicalise nodes towards parent, start from the furthest away from root - for path in sorted(towards_parent, key=len, reverse=True): - self._logger.debug(f"Canonicalising node at {path} towards parent.") - - # If already in desired canonical form, do nothing - if self.nodes[path].canonical_form == DirTTN.PARENT: - self._logger.debug("Skipping, already in canonical form.") - continue - - # Otherwise, apply QR decomposition - if self.nodes[path].is_leaf: - n_qbonds = len(self.nodes[path].tensor.shape) - 1 # Num of qubit bonds - q_bonds = "".join(chr(x) for x in range(n_qbonds)) - node_bonds = q_bonds + "p" - Q_bonds = q_bonds + "s" - else: - node_bonds = "lrp" - Q_bonds = "lrs" - R_bonds = "sp" - - Q, R = tensor.decompose( - node_bonds + "->" + Q_bonds + "," + R_bonds, - self.nodes[path].tensor, - method=tensor.QRMethod(), - options=options, - ) - - # Update the tensor - self.nodes[path].tensor = Q - self.nodes[path].canonical_form = DirTTN.PARENT - - # Contract R with the parent node - if path[-1] == DirTTN.LEFT: - R_bonds = "sl" - result_bonds = "srp" - else: - R_bonds = "sr" - result_bonds = "lsp" - node_bonds = "lrp" - - parent_node = self.nodes[path[:-1]] - parent_node.tensor = cq.contract( - R_bonds + "," + node_bonds + "->" + result_bonds, - R, - parent_node.tensor, - options=options, - optimize={"path": [(0, 1)]}, - ) - # The canonical form of the parent node is lost - parent_node.canonical_form = None - - self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") - - # Canonicalise the rest of the nodes, from the root up to the center - for path in sorted(towards_child, key=len): - # Identify the direction of the canonicalisation - target_direction = target_path[len(path)] - # Sanity checks - assert not self.nodes[path].is_leaf - assert target_direction != DirTTN.PARENT - - self._logger.debug( - f"Canonicalising node at {path} towards {str(target_direction)}." 
- ) - - # If already in the desired canonical form, do nothing - if self.nodes[path].canonical_form == target_direction: - self._logger.debug("Skipping, already in canonical form.") - continue - - # Otherwise, apply QR decomposition - if target_direction == DirTTN.LEFT: - Q_bonds = "srp" - R_bonds = "ls" - else: - Q_bonds = "lsp" - R_bonds = "rs" - node_bonds = "lrp" - - Q, R = tensor.decompose( - node_bonds + "->" + Q_bonds + "," + R_bonds, - self.nodes[path].tensor, - method=tensor.QRMethod(), - options=options, - ) - - # If the child bond is not the center yet, contract R with child node - child_path = tuple(list(path) + [target_direction]) - if child_path != target_path: - child_node = self.nodes[child_path] - - # Contract R with the child node - child_node.tensor = cq.contract( - "lrp,ps->lrs", - child_node.tensor, - R, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # The canonical form of the child node is lost - child_node.canonical_form = None - # Update the tensor - self.nodes[path].tensor = Q - self.nodes[path].canonical_form = target_direction - - self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") - - # If ``center`` is not a physical bond, we are done canonicalising and R is - # the tensor to return. Otherwise, we need to do a final contraction and QR - # decomposition on the leaf node corresponding to ``target_path``. - if isinstance(center, Qubit): - self._logger.debug( - f"Applying QR decomposition on leaf node at {target_path}." - ) - - leaf_node = self.nodes[target_path] - n_qbonds = len(leaf_node.tensor.shape) - 1 # Number of qubit bonds - q_bonds = "".join(chr(x) for x in range(n_qbonds)) - node_bonds = q_bonds + "p" - new_bonds = q_bonds + "s" - R_bonds = "ps" - - # Contract R with the leaf node - leaf_node.tensor = cq.contract( - node_bonds + "," + R_bonds + "->" + new_bonds, - leaf_node.tensor, - R, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # The canonical form of the leaf node is lost - leaf_node.canonical_form = None - # Update the parent tensor - parent_path = target_path[:-1] - self.nodes[parent_path].tensor = Q - self.nodes[parent_path].canonical_form = target_path[-1] - self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") - - # Finally, apply QR decomposition on the leaf_node to obtain the R - # tensor to be returned - target_bond = self.qubit_position[center][1] - Q_bonds = node_bonds[:target_bond] + "s" + node_bonds[target_bond + 1 :] - R_bonds = chr(target_bond) + "s" - - Q, R = tensor.decompose( - node_bonds + "->" + Q_bonds + "," + R_bonds, - leaf_node.tensor, - method=tensor.QRMethod(), - options=options, - ) - # Note: Since R is not contracted with any other tensor, we cannot update - # the leaf node to Q. That'd change the state represented by the TTN. - - # Otherwise, if ``unsafe`` is enabled, update the last tensor to Q - elif unsafe: - self.nodes[target_path[:-1]].tensor = Q - self.nodes[target_path[:-1]].canonical_form = target_path[-1] - - self._logger.debug(f"Node canonicalised (unsafe!). Shape: {Q.shape}") - - self._logger.debug( - f"Finished canonicalisation. Returning R tensor of shape {R.shape}" - ) - return R - - def vdot(self, other: TTN) -> complex: # type: ignore - """Obtain the inner product of the two TTN: ````. - - It can be used to compute the squared norm of a TTN ``ttn`` as - ``ttn.vdot(ttn)``. The tensors within the TTN are not modified. - - Note: - The state that is conjugated is ``self``. - - Args: - other: The other TTN. - - Returns: - The resulting complex number. 
- - Raises: - RuntimeError: If the two TTNs do not have the same qubits. - RuntimeError: If the ``CuTensorNetHandle`` is out of scope. - """ - if self._lib._is_destroyed: - raise RuntimeError( - "The cuTensorNet library handle is out of scope.", - "See the documentation of update_libhandle and CuTensorNetHandle.", - ) - - if len(self.qubit_position) != len(other.qubit_position): - raise RuntimeError("Number of qubits do not match.") - if self.get_qubits() != other.get_qubits(): - raise RuntimeError( - "The sets of qubits are not the same." - "\n\tself has {self.get_qubits()}" - "\n\tother has {other.get_qubits()}" - ) - if len(self.qubit_position) == 0: - raise RuntimeError("There are no qubits in the TTN.") - - self._logger.debug("Applying vdot between two TTNs.") - - # We convert both TTNs to their interleaved representation and - # contract them using cuQuantum. A single sample is enough for - # contraction path optimisation, since there is little to optimise. - ttn1 = self.get_interleaved_representation(conj=True) - ttn2 = other.get_interleaved_representation(conj=False) - interleaved_rep = ttn1 + ttn2 + [[]] # Discards dim=1 bonds with [] - result = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 0}, # There is little to no optimisation to be done - ) - - self._logger.debug(f"Result from vdot={result}") - return complex(result) - - def sample(self) -> dict[Qubit, int]: - """Returns a sample from a Z measurement applied on every qubit. - - Notes: - The contents of ``self`` are not updated. This is equivalent to applying - ``state = self.copy()`` then ``state.measure(state.get_qubits())``. - - Returns: - A dictionary mapping each qubit in the state to its 0 or 1 outcome. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: - """Applies a Z measurement on each of the ``qubits``. - - Notes: - After applying this function, ``self`` will contain the normalised - projected state. - - Args: - qubits: The subset of qubits to be measured. - destructive: If ``True``, the resulting state will not contain the - measured qubits. If ``False``, these qubits will appear on the - state corresponding to the measurement outcome. Defaults to ``True``. - - Returns: - A dictionary mapping the given ``qubits`` to their measurement outcome, - i.e. either ``0`` or ``1``. - - Raises: - ValueError: If an element in ``qubits`` is not a qubit in the state. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: - """Applies a postselection, updates the states and returns its probability. - - Notes: - After applying this function, ``self`` will contain the projected - state over the non-postselected qubits. - - The resulting state has been normalised. - - Args: - qubit_outcomes: A dictionary mapping a subset of qubits to their - desired outcome value (either ``0`` or ``1``). - - Returns: - The probability of this postselection to occur in a measurement. - - Raises: - ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. - ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. - ValueError: If all of the qubits in the state are being postselected. - Instead, you may wish to use ``get_amplitude()``. 
- """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - def expectation_value(self, pauli_string: QubitPauliString) -> float: - """Obtains the expectation value of the Pauli string observable. - - Args: - pauli_string: A pytket object representing a tensor product of Paulis. - - Returns: - The expectation value. - - Raises: - ValueError: If a key in ``pauli_string`` is not a qubit in the state. - """ - raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") - - def get_fidelity(self) -> float: - """Returns the current fidelity of the state.""" - return self.fidelity - - def get_statevector(self) -> np.ndarray: - """Returns the statevector represented by the TTN, with qubits ordered - in Increasing Lexicographic Order (ILO). - Raises: - ValueError: If there are no qubits left in the TTN. - """ - if len(self.get_qubits()) == 0: - raise ValueError("There are no qubits left in this TTN.") - - # Create the interleaved representation with all tensors - interleaved_rep = self.get_interleaved_representation() - - # Specify the output bond IDs in ILO order - output_bonds = [] - for q in sorted(self.get_qubits()): - output_bonds.append(str(q)) - interleaved_rep.append(output_bonds) - - # Contract - result_tensor = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 0}, # There is little to no optimisation to be done - ) - - # Convert to numpy vector and flatten - statevector: np.ndarray = cp.asnumpy(result_tensor).flatten() - return statevector - - def get_amplitude(self, state: int) -> complex: - """Returns the amplitude of the chosen computational state. - - Notes: - The result is equivalent to ``self.get_statevector[b]``, but this method - is faster when querying a single amplitude. - - Args: - state: The integer whose bitstring describes the computational state. - The qubits in the bitstring are in increasing lexicographic order. - - Returns: - The amplitude of the computational state in the TTN. - """ - - interleaved_rep = self.get_interleaved_representation() - ilo_qubits = sorted(self.get_qubits()) - - for i, q in enumerate(ilo_qubits): - # Create the tensors for each qubit in ``state`` - bitvalue = 1 if state & 2 ** (len(ilo_qubits) - i - 1) else 0 - tensor = cp.zeros(shape=(2,), dtype=self._cfg._complex_t) - tensor[bitvalue] = 1 - # Append it to the interleaved representation - interleaved_rep.append(tensor) - interleaved_rep.append([str(q)]) # The bond - # Ignore the dim=1 tensors in the output - interleaved_rep.append([]) - - # Contract - result = cq.contract( - *interleaved_rep, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"samples": 0}, # There is little to no optimisation to be done - ) - - self._logger.debug(f"Amplitude of state {state} is {result}.") - return complex(result) - - def get_qubits(self) -> set[Qubit]: - """Returns the set of qubits that this TTN is defined on.""" - return set(self.qubit_position.keys()) - - def get_interleaved_representation( - self, conj: bool = False - ) -> list[Union[Tensor, str]]: - """Returns the interleaved representation of the TTN used by cuQuantum. - - Args: - conj: If True, all tensors are conjugated and bonds IDs are prefixed - with * (except physical bonds). Defaults to False. 
- """ - self._logger.debug("Creating interleaved representation...") - - # Auxiliar dictionary of physical bonds to qubit IDs - qubit_id = { - location: str(qubit) for qubit, location in self.qubit_position.items() - } - - interleaved_rep = [] - for path, node in self.nodes.items(): - # Append the tensor - if conj: - interleaved_rep.append(node.tensor.conj()) - else: - interleaved_rep.append(node.tensor) - - # Create the ID for the parent bond - parentID = "".join(str(int(d)) for d in path) - if conj: - parentID = "*" + parentID - - # Append the bonds - if node.is_leaf: - bonds = [] - for b in range(len(node.tensor.shape) - 1): - bonds.append(qubit_id[(path, b)]) - bonds.append(parentID) - else: - bonds = [parentID + "0", parentID + "1", parentID] - - interleaved_rep.append(bonds) - self._logger.debug(f"Bond IDs: {bonds}") - - return interleaved_rep - - def get_dimension(self, path: RootPath, direction: DirTTN) -> int: - """Returns the dimension of bond ``dir`` of the node at ``path``. - - Args: - path: The path to a node in the TTN. - direction: The direction of the bond. - - Returns: - The dimension of the specified bond. - - Raises: - ValueError: If ``path`` is not in the TTN. - """ - if path not in self.nodes: - raise ValueError(f"The path {path} is not in the TTN.") - - dim: int = self.nodes[path].tensor.shape[direction] - return dim - - def get_byte_size(self) -> int: - """ - Returns: - The number of bytes the TTN currently occupies in GPU memory. - """ - return sum(node.tensor.nbytes for node in self.nodes.values()) - - def get_device_id(self) -> int: - """ - Returns: - The identifier of the device (GPU) where the tensors are stored. - """ - return int(self.nodes[tuple()].tensor.device) - - def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: - """Update the ``CuTensorNetHandle`` used by this ``TTN`` object. Multiple - objects may use the same handle. - - Args: - libhandle: The new cuTensorNet library handle. - - Raises: - RuntimeError: If the device (GPU) where ``libhandle`` was initialised - does not match the one where the tensors of the TTN are stored. - """ - if libhandle.device_id != self.get_device_id(): - raise RuntimeError( - "Device of libhandle is not the one where the TTN is stored.", - f"{libhandle.device_id} != {self.get_device_id()}", - ) - self._lib = libhandle - - def copy(self) -> TTN: - """ - Returns: - A deep copy of the TTN on the same device. - """ - - # Create a dummy object - new_ttn = TTN(self._lib, qubit_partition=dict(), config=self._cfg.copy()) - # Copy all data - new_ttn.fidelity = self.fidelity - new_ttn.nodes = {path: node.copy() for path, node in self.nodes.items()} - new_ttn.qubit_position = self.qubit_position.copy() - - # If the user has set a seed, assume that they'd want every copy - # to behave in the same way, so we copy the RNG state - if self._cfg.seed is not None: - # Setting state (rather than just copying the seed) allows for the - # copy to continue from the same point in the sequence of random - # numbers as the original copy - new_ttn._rng.setstate(self._rng.getstate()) - # Otherwise, samples will be different between copies, since their - # self._rng will be initialised from system randomnes when seed=None. - - self._logger.debug( - "Successfully copied a TTN " - f"of size {new_ttn.get_byte_size() / 2**20} MiB." - ) - return new_ttn - - def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> TTN: - raise NotImplementedError( - "TTN is a base class with no contraction algorithm implemented." 
- + " You must use a subclass of TTN, such as TTNxGate." - ) - - def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> TTN: - raise NotImplementedError( - "TTN is a base class with no contraction algorithm implemented." - + " You must use a subclass of TTN, such as TTNxGate." - ) - - def _flush(self) -> None: - # Does nothing in the general MPS case; but children classes with batched - # gate contraction will redefine this method so that the last batch of - # gates is applied. - return None +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings +from typing import Optional, Union +from enum import IntEnum + +from random import Random # type: ignore +import math # type: ignore +import numpy as np # type: ignore + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Command, Qubit +from pytket.pauli import QubitPauliString + +from pytket.extensions.cutensornet.general import CuTensorNetHandle, set_logger + +from .general import Config, StructuredState, Tensor + + +class DirTTN(IntEnum): + """An enum to refer to relative directions within the TTN.""" + + PARENT = -1 + LEFT = 0 + RIGHT = 1 + + +# An alias for the TTN path from root to a TreeNode +RootPath = tuple[DirTTN, ...] + + +class TreeNode: + """Represents a single tensor in the TTN. + + The shape of the tensor agrees with the convention set in ``DirTTN``, meaning + that ``tensor.shape[DirTTN.PARENT]`` corresponds to the dimension of the bond + connecting this tree node with its parent. Notice that, since DirTTN.PARENT is + -1, this is always the last entry. + + In the case the TreeNode is a leaf, it will contain only one virtual bond + (the parent) and as many physical bonds as qubits in the group it represents. + These qubits will correspond to bonds from ``0`` to ``len(tensor.shape)-2``. + """ + + def __init__(self, tensor: Tensor, is_leaf: bool = False): + self.tensor = tensor + self.is_leaf = is_leaf + self.canonical_form: Optional[DirTTN] = None + + def copy(self) -> TreeNode: + new_node = TreeNode( + self.tensor.copy(), + is_leaf=self.is_leaf, + ) + new_node.canonical_form = self.canonical_form + return new_node + + +class TTN(StructuredState): + """Represents a state as a Tree Tensor Network. + + Attributes: + nodes (dict[RootPath, TreeNode]): A dictionary providing the tree node + of the given root path in the TTN. + qubit_position (dict[pytket.circuit.Qubit, tuple[RootPath, int]]): A dictionary + mapping circuit qubits to their address in the TTN. + fidelity (float): A lower bound of the fidelity, obtained by multiplying + the fidelities after each contraction. 
The fidelity of a contraction + corresponds to ``||^2`` where ``|psi>`` and ``|phi>`` are the + states before and after truncation (assuming both are normalised). + """ + + def __init__( + self, + libhandle: CuTensorNetHandle, + qubit_partition: dict[int, list[Qubit]], + config: Config, + ): + """Initialise a TTN on the computational state ``|0>``. + + Note: + A ``libhandle`` should be created via a ``with CuTensorNet() as libhandle:`` + statement. The device where the TTN is stored will match the one specified + by the library handle. + + The current implementation requires the keys of ``qubit_partition`` to be + integers from ``0`` to ``2^l - 1`` for some ``l``. + + Args: + libhandle: The cuTensorNet library handle that will be used to carry out + tensor operations on the TTN. + qubit_partition: A partition of the qubits in the circuit into disjoint + groups, describing the hierarchical structure of the TTN. Each key + identifies a leaf of the TTN, with its corresponding value indicating + the list of qubits represented by the leaf. The leaves are numbered + from left to right on a planar representation of the tree. Hence, the + smaller half of the keys correspond to leaves in the left subtree and + the rest are in the right subtree; providing recursive bipartitions. + config: The object describing the configuration for simulation. + + Raises: + ValueError: If the keys of ``qubit_partition`` do not range from ``0`` to + ``2^l - 1`` for some ``l``. + ValueError: If a ``Qubit`` is repeated in ``qubit_partition``. + ValueError: If there is only one entry in ``qubit_partition``. + """ + self._lib = libhandle + self._cfg = config + self._logger = set_logger("TTN", level=config.loglevel) + self._rng = Random() + self._rng.seed(self._cfg.seed) + + self.fidelity = 1.0 + self.nodes: dict[RootPath, TreeNode] = dict() + self.qubit_position: dict[Qubit, tuple[RootPath, int]] = dict() + + n_groups = len(qubit_partition) + if n_groups == 0: # There's no initialisation to be done + pass + elif n_groups == 1: + raise ValueError( + "Only one entry to qubit_partition provided." + "Introduce a finer partition of qubits." + ) + else: + n_levels = math.floor(math.log2(n_groups)) + if n_groups != 2**n_levels: + raise ValueError( + "The number of entries in qubit_partition must be a power of two." + ) + + # Create the TreeNodes of the different groups of qubits + for k, qubits in qubit_partition.items(): + if k < 0 or k >= n_groups: + raise ValueError( + f"Keys of qubit_partition must range from 0 to {n_groups-1}." + ) + + # Calculate the root path of this group + path = [] + for l in reversed(range(n_levels)): + if k < 2**l: + path.append(DirTTN.LEFT) + else: + path.append(DirTTN.RIGHT) + k -= 2**l + + # Add each qubit to the qubit_position dictionary + for i, q in enumerate(qubits): + if q in self.qubit_position: + raise ValueError( + f"Qubit {q} appears more than once in qubit_partition." 
+ ) + self.qubit_position[q] = (tuple(path), i) + + # This tensor has a physical bond per qubit and one virtual bond at the + # end for the parent (dim=1) + shape = tuple([2] * len(qubits) + [1]) + # Initialise the tensor of this group of qubits to |0> + tensor = cp.zeros(shape=shape, dtype=self._cfg._complex_t) + ket_zero_entry = tuple(0 for _ in shape) # Index 0 on all bonds + tensor[ket_zero_entry] = 1 # Amplitude of |0> set to 1 + + # Create the TreeNode + node = TreeNode(tensor, is_leaf=True) + self.nodes[tuple(path)] = node + + # Create the internal TreeNodes + paths: list[list[DirTTN]] = [[]] + for _ in range(n_levels): + # Create the TreeNode at this path + for p in paths: + tensor = cp.ones(shape=(1, 1, 1), dtype=self._cfg._complex_t) + self.nodes[tuple(p)] = TreeNode(tensor) + # Generate the paths for the next level + paths = [ + p + [direction] + for p in paths + for direction in [DirTTN.LEFT, DirTTN.RIGHT] + ] + self._logger.debug(f"qubit_position={self.qubit_position}") + self._logger.debug(f"All root paths: {list(self.nodes.keys())}") + + def is_valid(self) -> bool: + """Verify that the TTN object is valid. + + Specifically, verify that the TTN does not exceed the dimension limit ``chi`` + specified in the ``Config`` object, that physical bonds have dimension 2, + that all tensors except the leaves are rank three and that tensors have shapes + consistent with the bond dimensions. + + Returns: + False if a violation was detected or True otherwise. + """ + chi_ok = all( + self.get_dimension(path, DirTTN.PARENT) <= self._cfg.chi + for path in self.nodes.keys() + ) + phys_ok = all( + self.nodes[path].tensor.shape[bond] == 2 + for path, bond in self.qubit_position.values() + ) + rank_ok = all( + node.is_leaf or len(node.tensor.shape) == 3 for node in self.nodes.values() + ) + shape_ok = all( + self.get_dimension(path, DirTTN.PARENT) + == self.get_dimension(path[:-1], path[-1]) + for path in self.nodes.keys() + if len(path) != 0 + ) + shape_ok = shape_ok and self.get_dimension((), DirTTN.PARENT) == 1 + + # Debugger logging + self._logger.debug( + "Checking validity of TTN... " + f"chi_ok={chi_ok}, " + f"phys_ok={phys_ok}, " + f"rank_ok={rank_ok}, " + f"shape_ok={shape_ok}" + ) + return chi_ok and phys_ok and rank_ok and shape_ok + + def apply_gate(self, gate: Command) -> TTN: + """Apply the gate to the TTN. + + Note: + Only single-qubit gates and two-qubit gates are supported. + + Args: + gate: The gate to be applied. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the command introduced is not a unitary gate. + ValueError: If gate acts on more than 2 qubits. + """ + try: + unitary = gate.op.get_unitary() + except: + raise ValueError("The command introduced is not unitary.") + + # Load the gate's unitary to the GPU memory + unitary = unitary.astype(dtype=self._cfg._complex_t, copy=False) + unitary = cp.asarray(unitary, dtype=self._cfg._complex_t) + + self._logger.debug(f"Applying gate {gate}.") + self.apply_unitary(unitary, gate.qubits) + + return self + + def apply_unitary( + self, unitary: cp.ndarray, qubits: list[Qubit] + ) -> StructuredState: + """Applies the unitary to the specified qubits of the StructuredState. + + Note: + It is assumed that the matrix provided by the user is unitary. If this is + not the case, the program will still run, but its behaviour is undefined. + + Args: + unitary: The matrix to be applied as a CuPy ndarray. 
It should either be + a 2x2 matrix if acting on one qubit or a 4x4 matrix if acting on two. + qubits: The qubits the unitary acts on. Only one qubit and two qubit + unitaries are supported. + + Returns: + ``self``, to allow for method chaining. + + Raises: + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + ValueError: If the number of qubits provided is not one or two. + ValueError: If the size of the matrix does not match with the number of + qubits provided. + """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + self._logger.debug(f"Applying unitary {unitary} on {qubits}.") + + if len(qubits) == 1: + if unitary.shape != (2, 2): + raise ValueError( + "The unitary introduced acts on one qubit but it is not 2x2." + ) + self._apply_1q_unitary(unitary, qubits[0]) + + elif len(qubits) == 2: + if unitary.shape != (4, 4): + raise ValueError( + "The unitary introduced acts on two qubits but it is not 4x4." + ) + self._apply_2q_unitary(unitary, qubits[0], qubits[1]) + + else: + raise ValueError("Gates must act on only 1 or 2 qubits!") + + return self + + def apply_scalar(self, scalar: complex) -> TTN: + """Multiplies the state by a complex number. + + Args: + scalar: The complex number to be multiplied. + + Returns: + ``self``, to allow for method chaining. + """ + self.nodes[()].tensor *= scalar + return self + + def apply_qubit_relabelling(self, qubit_map: dict[Qubit, Qubit]) -> TTN: + """Relabels each qubit ``q`` as ``qubit_map[q]``. + + This does not apply any SWAP gate, nor it changes the internal structure of the + state. It simply changes the label of the physical bonds of the tensor network. + + Args: + qubit_map: Dictionary mapping each qubit to its new label. + + Returns: + ``self``, to allow for method chaining. + + Raises: + ValueError: If any of the keys in ``qubit_map`` are not qubits in the state. + """ + new_qubit_position = dict() + for q_orig, q_new in qubit_map.items(): + # Check the qubit is in the state + if q_orig not in self.qubit_position: + raise ValueError(f"Qubit {q_orig} is not in the state.") + # Apply the relabelling for this qubit + new_qubit_position[q_new] = self.qubit_position[q_orig] + + self.qubit_position = new_qubit_position + self._logger.debug(f"Relabelled qubits... {qubit_map}") + return self + + def canonicalise( + self, center: Union[RootPath, Qubit], unsafe: bool = False + ) -> Tensor: + """Canonicalise the TTN so that all tensors are isometries from ``center``. + + Args: + center: Identifies the bond that is to become the center of the canonical + form. If it is a ``RootPath`` it refers to the parent bond of + ``self.nodes[center]``. If it is a ``Qubit`` it refers to its physical + bond. + unsafe: If True, the final state will be different than the starting one. + Specifically, the information in the returned bond tensor at ``center`` + is removed from the TTN. It is expected that the caller will reintroduce + the bond tensor after some processing (e.g. after SVD truncation). + + Returns: + The bond tensor created at ``center`` when canonicalisation is complete. + Applying SVD to this tensor yields the global SVD of the TTN. + + Raises: + ValueError: If the ``center`` is ``tuple()``. 
+ """ + self._logger.debug(f"Canonicalising to {str(center)}") + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + if isinstance(center, Qubit): + target_path = self.qubit_position[center][0] + assert not unsafe # Unsafe disallowed when ``center`` is a qubit + elif center == (): + raise ValueError("There is no bond at path ().") + else: + target_path = center + + # Separate nodes to be canonicalised towards children from those towards parent + towards_child = [] + towards_parent = [] + for path in self.nodes.keys(): + # Nodes towards children are closer to the root and coincide in the path + if len(path) < len(target_path) and all( + path[l] == target_path[l] for l in range(len(path)) + ): + towards_child.append(path) + # If the center is a physical bond (qubit), its node is skipped + elif path == target_path and isinstance(center, Qubit): + continue + # All other nodes are canonicalised towards their parent + else: + towards_parent.append(path) + # Sanity checks + assert len(towards_child) != 0 + assert len(towards_parent) != 0 + + # Glossary of bond IDs + # chr(x) -> bond of the x-th qubit in the node (if it is a leaf) + # l -> left child bond of the TTN node + # r -> right child bond of the TTN node + # p -> parent bond of the TTN node + # s -> bond between Q and R after decomposition + + # Canonicalise nodes towards parent, start from the furthest away from root + for path in sorted(towards_parent, key=len, reverse=True): + self._logger.debug(f"Canonicalising node at {path} towards parent.") + + # If already in desired canonical form, do nothing + if self.nodes[path].canonical_form == DirTTN.PARENT: + self._logger.debug("Skipping, already in canonical form.") + continue + + # Otherwise, apply QR decomposition + if self.nodes[path].is_leaf: + n_qbonds = len(self.nodes[path].tensor.shape) - 1 # Num of qubit bonds + q_bonds = "".join(chr(x) for x in range(n_qbonds)) + node_bonds = q_bonds + "p" + Q_bonds = q_bonds + "s" + else: + node_bonds = "lrp" + Q_bonds = "lrs" + R_bonds = "sp" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + self.nodes[path].tensor, + method=tensor.QRMethod(), + options=options, + ) + + # Update the tensor + self.nodes[path].tensor = Q + self.nodes[path].canonical_form = DirTTN.PARENT + + # Contract R with the parent node + if path[-1] == DirTTN.LEFT: + R_bonds = "sl" + result_bonds = "srp" + else: + R_bonds = "sr" + result_bonds = "lsp" + node_bonds = "lrp" + + parent_node = self.nodes[path[:-1]] + parent_node.tensor = cq.contract( + R_bonds + "," + node_bonds + "->" + result_bonds, + R, + parent_node.tensor, + options=options, + optimize={"path": [(0, 1)]}, + ) + # The canonical form of the parent node is lost + parent_node.canonical_form = None + + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # Canonicalise the rest of the nodes, from the root up to the center + for path in sorted(towards_child, key=len): + # Identify the direction of the canonicalisation + target_direction = target_path[len(path)] + # Sanity checks + assert not self.nodes[path].is_leaf + assert target_direction != DirTTN.PARENT + + self._logger.debug( + f"Canonicalising node at {path} towards {str(target_direction)}." 
+ ) + + # If already in the desired canonical form, do nothing + if self.nodes[path].canonical_form == target_direction: + self._logger.debug("Skipping, already in canonical form.") + continue + + # Otherwise, apply QR decomposition + if target_direction == DirTTN.LEFT: + Q_bonds = "srp" + R_bonds = "ls" + else: + Q_bonds = "lsp" + R_bonds = "rs" + node_bonds = "lrp" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + self.nodes[path].tensor, + method=tensor.QRMethod(), + options=options, + ) + + # If the child bond is not the center yet, contract R with child node + child_path = tuple(list(path) + [target_direction]) + if child_path != target_path: + child_node = self.nodes[child_path] + + # Contract R with the child node + child_node.tensor = cq.contract( + "lrp,ps->lrs", + child_node.tensor, + R, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The canonical form of the child node is lost + child_node.canonical_form = None + # Update the tensor + self.nodes[path].tensor = Q + self.nodes[path].canonical_form = target_direction + + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # If ``center`` is not a physical bond, we are done canonicalising and R is + # the tensor to return. Otherwise, we need to do a final contraction and QR + # decomposition on the leaf node corresponding to ``target_path``. + if isinstance(center, Qubit): + self._logger.debug( + f"Applying QR decomposition on leaf node at {target_path}." + ) + + leaf_node = self.nodes[target_path] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Number of qubit bonds + q_bonds = "".join(chr(x) for x in range(n_qbonds)) + node_bonds = q_bonds + "p" + new_bonds = q_bonds + "s" + R_bonds = "ps" + + # Contract R with the leaf node + leaf_node.tensor = cq.contract( + node_bonds + "," + R_bonds + "->" + new_bonds, + leaf_node.tensor, + R, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # The canonical form of the leaf node is lost + leaf_node.canonical_form = None + # Update the parent tensor + parent_path = target_path[:-1] + self.nodes[parent_path].tensor = Q + self.nodes[parent_path].canonical_form = target_path[-1] + self._logger.debug(f"Node canonicalised. Shape: {Q.shape}") + + # Finally, apply QR decomposition on the leaf_node to obtain the R + # tensor to be returned + target_bond = self.qubit_position[center][1] + Q_bonds = node_bonds[:target_bond] + "s" + node_bonds[target_bond + 1 :] + R_bonds = chr(target_bond) + "s" + + Q, R = tensor.decompose( + node_bonds + "->" + Q_bonds + "," + R_bonds, + leaf_node.tensor, + method=tensor.QRMethod(), + options=options, + ) + # Note: Since R is not contracted with any other tensor, we cannot update + # the leaf node to Q. That'd change the state represented by the TTN. + + # Otherwise, if ``unsafe`` is enabled, update the last tensor to Q + elif unsafe: + self.nodes[target_path[:-1]].tensor = Q + self.nodes[target_path[:-1]].canonical_form = target_path[-1] + + self._logger.debug(f"Node canonicalised (unsafe!). Shape: {Q.shape}") + + self._logger.debug( + f"Finished canonicalisation. Returning R tensor of shape {R.shape}" + ) + return R + + def vdot(self, other: TTN) -> complex: # type: ignore + """Obtain the inner product of the two TTN: ````. + + It can be used to compute the squared norm of a TTN ``ttn`` as + ``ttn.vdot(ttn)``. The tensors within the TTN are not modified. + + Note: + The state that is conjugated is ``self``. + + Args: + other: The other TTN. + + Returns: + The resulting complex number. 
+ + Raises: + RuntimeError: If the two TTNs do not have the same qubits. + RuntimeError: If the ``CuTensorNetHandle`` is out of scope. + """ + if self._lib._is_destroyed: + raise RuntimeError( + "The cuTensorNet library handle is out of scope.", + "See the documentation of update_libhandle and CuTensorNetHandle.", + ) + + if len(self.qubit_position) != len(other.qubit_position): + raise RuntimeError("Number of qubits do not match.") + if self.get_qubits() != other.get_qubits(): + raise RuntimeError( + "The sets of qubits are not the same." + "\n\tself has {self.get_qubits()}" + "\n\tother has {other.get_qubits()}" + ) + if len(self.qubit_position) == 0: + raise RuntimeError("There are no qubits in the TTN.") + + self._logger.debug("Applying vdot between two TTNs.") + + # We convert both TTNs to their interleaved representation and + # contract them using cuQuantum. A single sample is enough for + # contraction path optimisation, since there is little to optimise. + ttn1 = self.get_interleaved_representation(conj=True) + ttn2 = other.get_interleaved_representation(conj=False) + interleaved_rep = ttn1 + ttn2 + [[]] # Discards dim=1 bonds with [] + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + self._logger.debug(f"Result from vdot={result}") + return complex(result) + + def sample(self) -> dict[Qubit, int]: + """Returns a sample from a Z measurement applied on every qubit. + + Notes: + The contents of ``self`` are not updated. This is equivalent to applying + ``state = self.copy()`` then ``state.measure(state.get_qubits())``. + + Returns: + A dictionary mapping each qubit in the state to its 0 or 1 outcome. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def measure(self, qubits: set[Qubit], destructive: bool = True) -> dict[Qubit, int]: + """Applies a Z measurement on each of the ``qubits``. + + Notes: + After applying this function, ``self`` will contain the normalised + projected state. + + Args: + qubits: The subset of qubits to be measured. + destructive: If ``True``, the resulting state will not contain the + measured qubits. If ``False``, these qubits will appear on the + state corresponding to the measurement outcome. Defaults to ``True``. + + Returns: + A dictionary mapping the given ``qubits`` to their measurement outcome, + i.e. either ``0`` or ``1``. + + Raises: + ValueError: If an element in ``qubits`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def postselect(self, qubit_outcomes: dict[Qubit, int]) -> float: + """Applies a postselection, updates the states and returns its probability. + + Notes: + After applying this function, ``self`` will contain the projected + state over the non-postselected qubits. + + The resulting state has been normalised. + + Args: + qubit_outcomes: A dictionary mapping a subset of qubits to their + desired outcome value (either ``0`` or ``1``). + + Returns: + The probability of this postselection to occur in a measurement. + + Raises: + ValueError: If a key in ``qubit_outcomes`` is not a qubit in the state. + ValueError: If a value in ``qubit_outcomes`` is other than ``0`` or ``1``. + ValueError: If all of the qubits in the state are being postselected. + Instead, you may wish to use ``get_amplitude()``. 
+ """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def expectation_value(self, pauli_string: QubitPauliString) -> float: + """Obtains the expectation value of the Pauli string observable. + + Args: + pauli_string: A pytket object representing a tensor product of Paulis. + + Returns: + The expectation value. + + Raises: + ValueError: If a key in ``pauli_string`` is not a qubit in the state. + """ + raise NotImplementedError(f"Method not implemented in {type(self).__name__}.") + + def get_fidelity(self) -> float: + """Returns the current fidelity of the state.""" + return self.fidelity + + def get_statevector(self) -> np.ndarray: + """Returns the statevector represented by the TTN, with qubits ordered + in Increasing Lexicographic Order (ILO). + Raises: + ValueError: If there are no qubits left in the TTN. + """ + if len(self.get_qubits()) == 0: + raise ValueError("There are no qubits left in this TTN.") + + # Create the interleaved representation with all tensors + interleaved_rep = self.get_interleaved_representation() + + # Specify the output bond IDs in ILO order + output_bonds = [] + for q in sorted(self.get_qubits()): + output_bonds.append(str(q)) + interleaved_rep.append(output_bonds) + + # Contract + result_tensor = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + # Convert to numpy vector and flatten + statevector: np.ndarray = cp.asnumpy(result_tensor).flatten() + return statevector + + def get_amplitude(self, state: int) -> complex: + """Returns the amplitude of the chosen computational state. + + Notes: + The result is equivalent to ``self.get_statevector[b]``, but this method + is faster when querying a single amplitude. + + Args: + state: The integer whose bitstring describes the computational state. + The qubits in the bitstring are in increasing lexicographic order. + + Returns: + The amplitude of the computational state in the TTN. + """ + + interleaved_rep = self.get_interleaved_representation() + ilo_qubits = sorted(self.get_qubits()) + + for i, q in enumerate(ilo_qubits): + # Create the tensors for each qubit in ``state`` + bitvalue = 1 if state & 2 ** (len(ilo_qubits) - i - 1) else 0 + tensor = cp.zeros(shape=(2,), dtype=self._cfg._complex_t) + tensor[bitvalue] = 1 + # Append it to the interleaved representation + interleaved_rep.append(tensor) + interleaved_rep.append([str(q)]) # The bond + # Ignore the dim=1 tensors in the output + interleaved_rep.append([]) + + # Contract + result = cq.contract( + *interleaved_rep, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"samples": 0}, # There is little to no optimisation to be done + ) + + self._logger.debug(f"Amplitude of state {state} is {result}.") + return complex(result) + + def get_qubits(self) -> set[Qubit]: + """Returns the set of qubits that this TTN is defined on.""" + return set(self.qubit_position.keys()) + + def get_interleaved_representation( + self, conj: bool = False + ) -> list[Union[Tensor, str]]: + """Returns the interleaved representation of the TTN used by cuQuantum. + + Args: + conj: If True, all tensors are conjugated and bonds IDs are prefixed + with * (except physical bonds). Defaults to False. 
+ """ + self._logger.debug("Creating interleaved representation...") + + # Auxiliar dictionary of physical bonds to qubit IDs + qubit_id = { + location: str(qubit) for qubit, location in self.qubit_position.items() + } + + interleaved_rep = [] + for path, node in self.nodes.items(): + # Append the tensor + if conj: + interleaved_rep.append(node.tensor.conj()) + else: + interleaved_rep.append(node.tensor) + + # Create the ID for the parent bond + parentID = "".join(str(int(d)) for d in path) + if conj: + parentID = "*" + parentID + + # Append the bonds + if node.is_leaf: + bonds = [] + for b in range(len(node.tensor.shape) - 1): + bonds.append(qubit_id[(path, b)]) + bonds.append(parentID) + else: + bonds = [parentID + "0", parentID + "1", parentID] + + interleaved_rep.append(bonds) + self._logger.debug(f"Bond IDs: {bonds}") + + return interleaved_rep + + def get_dimension(self, path: RootPath, direction: DirTTN) -> int: + """Returns the dimension of bond ``dir`` of the node at ``path``. + + Args: + path: The path to a node in the TTN. + direction: The direction of the bond. + + Returns: + The dimension of the specified bond. + + Raises: + ValueError: If ``path`` is not in the TTN. + """ + if path not in self.nodes: + raise ValueError(f"The path {path} is not in the TTN.") + + dim: int = self.nodes[path].tensor.shape[direction] + return dim + + def get_byte_size(self) -> int: + """ + Returns: + The number of bytes the TTN currently occupies in GPU memory. + """ + return sum(node.tensor.nbytes for node in self.nodes.values()) + + def get_device_id(self) -> int: + """ + Returns: + The identifier of the device (GPU) where the tensors are stored. + """ + return int(self.nodes[tuple()].tensor.device) + + def update_libhandle(self, libhandle: CuTensorNetHandle) -> None: + """Update the ``CuTensorNetHandle`` used by this ``TTN`` object. Multiple + objects may use the same handle. + + Args: + libhandle: The new cuTensorNet library handle. + + Raises: + RuntimeError: If the device (GPU) where ``libhandle`` was initialised + does not match the one where the tensors of the TTN are stored. + """ + if libhandle.device_id != self.get_device_id(): + raise RuntimeError( + "Device of libhandle is not the one where the TTN is stored.", + f"{libhandle.device_id} != {self.get_device_id()}", + ) + self._lib = libhandle + + def copy(self) -> TTN: + """ + Returns: + A deep copy of the TTN on the same device. + """ + + # Create a dummy object + new_ttn = TTN(self._lib, qubit_partition=dict(), config=self._cfg.copy()) + # Copy all data + new_ttn.fidelity = self.fidelity + new_ttn.nodes = {path: node.copy() for path, node in self.nodes.items()} + new_ttn.qubit_position = self.qubit_position.copy() + + # If the user has set a seed, assume that they'd want every copy + # to behave in the same way, so we copy the RNG state + if self._cfg.seed is not None: + # Setting state (rather than just copying the seed) allows for the + # copy to continue from the same point in the sequence of random + # numbers as the original copy + new_ttn._rng.setstate(self._rng.getstate()) + # Otherwise, samples will be different between copies, since their + # self._rng will be initialised from system randomnes when seed=None. + + self._logger.debug( + "Successfully copied a TTN " + f"of size {new_ttn.get_byte_size() / 2**20} MiB." + ) + return new_ttn + + def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> TTN: + raise NotImplementedError( + "TTN is a base class with no contraction algorithm implemented." 
+ + " You must use a subclass of TTN, such as TTNxGate." + ) + + def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> TTN: + raise NotImplementedError( + "TTN is a base class with no contraction algorithm implemented." + + " You must use a subclass of TTN, such as TTNxGate." + ) + + def _flush(self) -> None: + # Does nothing in the general MPS case; but children classes with batched + # gate contraction will redefine this method so that the last batch of + # gates is applied. + return None diff --git a/pytket/extensions/cutensornet/structured_state/ttn_gate.py b/pytket/extensions/cutensornet/structured_state/ttn_gate.py index 12e0058a..27e8531e 100644 --- a/pytket/extensions/cutensornet/structured_state/ttn_gate.py +++ b/pytket/extensions/cutensornet/structured_state/ttn_gate.py @@ -1,675 +1,675 @@ -# Copyright 2019-2024 Quantinuum -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -## -# http://www.apache.org/licenses/LICENSE-2.0 -## -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations # type: ignore -import warnings - -try: - import cupy as cp # type: ignore -except ImportError: - warnings.warn("local settings failed to import cupy", ImportWarning) -try: - import cuquantum as cq # type: ignore - from cuquantum.cutensornet import tensor # type: ignore - from cuquantum.cutensornet.experimental import contract_decompose # type: ignore -except ImportError: - warnings.warn("local settings failed to import cutensornet", ImportWarning) - -from pytket.circuit import Qubit -from .ttn import TTN, DirTTN, RootPath - - -class TTNxGate(TTN): - """Implements a gate-by-gate contraction algorithm to calculate the output state - of a circuit as a ``TTN``. - """ - - def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> TTNxGate: - """Applies the 1-qubit gate to the TTN. - - This does not increase the dimension of any bond. - - Args: - unitary: The unitary to be applied. - qubit: The qubit the unitary acts on. - - Returns: - ``self``, to allow for method chaining. 
- """ - path, target = self.qubit_position[qubit] - node_tensor = self.nodes[path].tensor - n_qbonds = ( - len(node_tensor.shape) - 1 - ) # Total number of physical bonds in this node - - # Glossary of bond IDs - # qX -> where X is the X-th physical bond (qubit) in the TTN node - # p -> the parent bond of the TTN node - # i -> the input bond of the gate - # o -> the output bond of the gate - - node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] - result_bonds = node_bonds.copy() - node_bonds[target] = "i" # Target bond must match with the gate input bond - result_bonds[target] = "o" # After contraction it matches the output bond - - # Contract - new_tensor = cq.contract( - node_tensor, - node_bonds, - unitary, - ["o", "i"], - result_bonds, - options={"handle": self._lib.handle, "device_id": self._lib.device_id}, - optimize={"path": [(0, 1)]}, - ) - - # Update ``self.nodes`` - # NOTE: Canonicalisation of the node does not change - self.nodes[path].tensor = new_tensor - return self - - def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> TTNxGate: - """Applies the 2-qubit gate to the TTN. - - The TTN is converted to canonical and truncation is applied if necessary. - - Args: - unitary: The unitary to be applied. - q0: The first qubit in the tuple |q0>|q1> the unitary acts on. - q1: The second qubit in the tuple |q0>|q1> the unitary acts on. - - Returns: - ``self``, to allow for method chaining. - """ - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - # Reshape into a rank-4 tensor - gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) - - (path_q0, bond_q0) = self.qubit_position[q0] - (path_q1, bond_q1) = self.qubit_position[q1] - - # Glossary of bond IDs - # a -> the input bond of the gate on q0 - # b -> the input bond of the gate on q1 - # A -> the output bond of the gate on q0 - # B -> the output bond of the gate on q1 - # S -> the shared bond of the gate tensor's SVD - # l -> left child bond of the TTN node - # r -> right child bond of the TTN node - # p -> the parent bond of the TTN node - # s -> the shared bond resulting from a decomposition - # chr(x) -> bond of the x-th qubit in a leaf node - gate_bonds = "ABab" - - # If the two qubits are in the same leaf node, contract the gate with it. - # No truncation is required. - if path_q0 == path_q1: - leaf_node = self.nodes[path_q0] - n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds - aux_bonds = [chr(x) for x in range(n_qbonds)] - aux_bonds[bond_q0] = "a" - aux_bonds[bond_q1] = "b" - leaf_bonds = "".join(aux_bonds) + "p" - aux_bonds[bond_q0] = "A" - aux_bonds[bond_q1] = "B" - result_bonds = "".join(aux_bonds) + "p" - - self.nodes[path_q0].tensor = cq.contract( - f"{leaf_bonds},{gate_bonds}->{result_bonds}", - leaf_node.tensor, - gate_tensor, - options=options, - optimize={"path": [(0, 1)]}, - ) - - self._logger.debug( - "The qubits the gate acts on are on the same group. " - "Gate trivially applied, no dimensions changed." - ) - return self - - # Otherwise, we must include the gate in the common ancestor tensor and - # rewire the inputs and outputs. First, identify common path and direction - common_dirs = [] - for d0, d1 in zip(path_q0, path_q1): - if d0 == d1: - common_dirs.append(d0) - else: - break - common_path = tuple(common_dirs) - - # We begin by canonicalising to the left child bond of the common ancestor. 
- # This canonicalisation could be done later (just before truncation), but - # doing it now will prevent the need to recanonicalise the tensors that have - # grown (by a factor of x16) when introducing this gate. - # The choice of the left child bond is arbitrary, any bond in the TTN that - # is in the arc connecting qL to qR would have worked. - # - # NOTE: In fact, none of the tensors that are affected by the gate need to - # be canonicalised ahead of time, but I don't expect the saving would be - # particularly noticeable, and it'd require some non-trivial refactoring - # of `canonicalise()`. - self.canonicalise(center=(*common_path, DirTTN.LEFT)) - - # Apply SVD on the gate tensor to remove any zero singular values ASAP - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - partition="U", # Contract S directly into U - ) - # Apply the SVD decomposition using the configuration defined above - U, S, V = tensor.decompose( - f"{gate_bonds}->SAa,SBb", gate_tensor, method=svd_method, options=options - ) - assert S is None # Due to "partition" option in SVDMethod - - # The overall strategy is to connect the `U` tensor above with the physical bond - # for `q0` in the TTN, so that its bond `A` becomes the new physical bond and - # the bond `S` is left dangling (open). We combine this `gate_tensor` with the - # leaf node of `q0` and QR-decompose the result; where the Q tensor will be the - # new (canonicalised) leaf node and R becomes our `msg_tensor`. The latter - # contains the open bond `S` and our objective is to "push" this `msg_tensor` - # through the TTN towards the leaf node of `q1`. Here, "push through" means - # contract with the next tensor, and apply QR decomposition, so that the - # `msg_tensor` carrying `b` and `B` ends up one bond closer to `q1`. - # Once `msg_tensor` is directly connected to the leaf node containing `q1`, we - # just need to apply the `V` tensor above to `q1` and connect its `S` bond with - # that of the `msg_tensor`. - bonds_to_q0 = [ # Bonds in the "arc" from the common ancestor to `q0` - path_q0[:i] for i in range(len(common_path) + 1, len(path_q0) + 1) - ] - # Sanity checks: - assert all( - len(bond_address) != len(common_path) for bond_address in bonds_to_q0 - ) - assert len(bonds_to_q0) == 1 or len(bonds_to_q0[0]) < len(bonds_to_q0[1]) - assert len(bonds_to_q0[-1]) == len(path_q0) - - bonds_to_q1 = [ # Bonds in the "arc" from the common ancestor to `q1` - path_q1[:i] for i in range(len(common_path) + 1, len(path_q1) + 1) - ] - # Sanity checks: - assert all( - len(bond_address) != len(common_path) for bond_address in bonds_to_q1 - ) - assert len(bonds_to_q1) == 1 or len(bonds_to_q1[0]) < len(bonds_to_q1[1]) - assert len(bonds_to_q1[-1]) == len(path_q1) - - # The `msg_tensor` has three bonds. Our convention will be that the first bond - # always corresponds to `S`, the second bond connects the `msg_tensor` - # to the TTN in the child direction and the third connects it to the TTN - # in the `DirTTN.PARENT` direction. If we label the second bond with `l`, then - # the third bond will be labelled `L` (and vice versa). Same for `r` and `p`. - - # We begin applying the gate to the TTN by contracting `U` into the - # leaf node containing `q0`, with the `S` bond of the former left open. - # We immediately QR-decompose the resulting tensor, so that Q becomes the new - # (canonicalised) leaf node and R becomes the `msg_tensor` that we will be - # "pushing" through the rest of the arc towards `q1`. 
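# A minimal NumPy sketch of the operator-SVD idea described above: split a
# two-qubit gate across the (q0, q1) cut so that the two halves U and V share
# only a small bond `S`, dropping numerically-zero singular values. This is not
# the cuTensorNet call used by the class; the CNOT and the tolerance below are
# illustrative assumptions.
import numpy as np

cnot = np.eye(4)[:, [0, 1, 3, 2]]                    # CNOT as a 4x4 unitary
gate = cnot.reshape(2, 2, 2, 2)                      # bonds (A, B, a, b)

# Group (A, a) against (B, b) and apply an SVD across that cut.
matrix = gate.transpose(0, 2, 1, 3).reshape(4, 4)    # rows = (A, a), cols = (B, b)
u, s, vh = np.linalg.svd(matrix)
rank = int(np.sum(s > 1e-12))                        # discard zero singular values

half_q0 = (u[:, :rank] * s[:rank]).reshape(2, 2, rank)  # bonds (A, a, S); S absorbed here
half_q1 = vh[:rank].reshape(rank, 2, 2)                 # bonds (S, B, b)

# Recombining the halves over the shared bond S recovers the original gate,
# and for a CNOT the shared bond has dimension 2 rather than 4.
recombined = np.einsum("axs,sby->abxy", half_q0, half_q1)
assert np.allclose(recombined, gate) and rank == 2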
- leaf_node = self.nodes[path_q0] - n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds - aux_bonds = [chr(x) for x in range(n_qbonds)] - aux_bonds[bond_q0] = "a" - leaf_bonds = "".join(aux_bonds) + "p" - aux_bonds[bond_q0] = "A" - Q_bonds = "".join(aux_bonds) + "s" - R_bonds = "Ssp" # The `msg_tensor` - U_bonds = "SAa" - - # Apply the contraction followed by a QR decomposition - leaf_node.tensor, msg_tensor = contract_decompose( - f"{leaf_bonds},{U_bonds}->{Q_bonds},{R_bonds}", - leaf_node.tensor, - U, - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - # Update the canonical form of the leaf node - leaf_node.canonical_form = DirTTN.PARENT - - # We must push the `msg_tensor` all the way to the common ancestor - # of `q0` and `q1`. - bond_addresses = list(reversed(bonds_to_q0)) # From `q0` to the ancestor - - # For all of these nodes; push `msg_tensor` through to their parent bond - for child_bond in bond_addresses[:-1]: # Doesn't do it on common ancestor! - child_dir = child_bond[-1] - parent_bond = child_bond[:-1] - node = self.nodes[parent_bond] - - node_bonds = "lrp" - msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" - Q_bonds = "Lrs" if child_dir == DirTTN.LEFT else "lRs" - R_bonds = "Ssp" # The new `msg_tensor` - - self._logger.debug( - f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " - f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." - ) - - # Apply the contraction followed by a QR decomposition - node.tensor, msg_tensor = contract_decompose( - f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", - node.tensor, - msg_tensor, - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - # Update the canonical form of the node - node.canonical_form = DirTTN.PARENT - - # The `msg_tensor` is now on a child bond of the common ancestor. - # We must push it through to the other child node. - child_bond = bond_addresses[-1] # This is where msg_tensor currently is - child_dir = child_bond[-1] - parent_bond = child_bond[:-1] - common_ancestor_node = self.nodes[parent_bond] - - node_bonds = "lrp" - msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" - Q_bonds = "Lsp" if child_dir == DirTTN.LEFT else "sRp" - R_bonds = "Srs" if child_dir == DirTTN.LEFT else "Sls" # The new `msg_tensor` - - self._logger.debug( - f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " - f"({common_ancestor_node.tensor.nbytes // 2**20} MiB) at {parent_bond}." - ) - - # Apply the contraction followed by a QR decomposition - common_ancestor_node.tensor, msg_tensor = contract_decompose( - f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", - common_ancestor_node.tensor, - msg_tensor, - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - # Update the canonical form of the node - if child_dir == DirTTN.LEFT: - common_ancestor_node.canonical_form = DirTTN.RIGHT - else: - common_ancestor_node.canonical_form = DirTTN.LEFT - - # We must push the `msg_tensor` from the common ancestor to the leaf node - # containing `q1`. 
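# A standalone NumPy sketch (not the library call) of the single "push" step
# used repeatedly above: contract the incoming message tensor into a rank-3
# node over one child bond, then QR-factorise so that Q becomes the new,
# isometric node and R becomes the message carried one bond further. All
# shapes below are assumed, chosen only for illustration.
import numpy as np

rng = np.random.default_rng(0)
node = rng.normal(size=(3, 4, 5))      # TTN node with bonds (l, r, p)
msg = rng.normal(size=(2, 6, 3))       # message with bonds (S, L, l)

# Contract the message into the node over the old child bond `l`.
combined = np.einsum("lrp,sLl->sLrp", node, msg)     # bonds (S, L, r, p)

# QR-split: Q keeps (L, r) plus a fresh bond `s`; R carries (S, s, p) onwards.
matrix = combined.transpose(1, 2, 0, 3).reshape(6 * 4, 2 * 5)
Q, R = np.linalg.qr(matrix)
new_node = Q.reshape(6, 4, 10)                       # bonds (L, r, s)
new_msg = R.reshape(10, 2, 5).transpose(1, 0, 2)     # bonds (S, s, p)

# Q is canonicalised towards its parent: contracting over (L, r) is an identity.
assert np.allclose(np.einsum("Lrs,Lrt->st", new_node, new_node), np.eye(10))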
- bond_addresses = bonds_to_q1 # From ancestor to `q1` - - # For all of these nodes; push `msg_tensor` through to their child bond - for child_bond in bond_addresses[1:]: # Skip common ancestor: already pushed - child_dir = child_bond[-1] - parent_bond = child_bond[:-1] - node = self.nodes[parent_bond] - - node_bonds = "lrp" - msg_bonds = "SpP" - Q_bonds = "srP" if child_dir == DirTTN.LEFT else "lsP" - R_bonds = "Sls" if child_dir == DirTTN.LEFT else "Srs" # New `msg_tensor` - - self._logger.debug( - f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " - f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." - ) - - # Apply the contraction followed by a QR decomposition - node.tensor, msg_tensor = contract_decompose( - f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", - node.tensor, - msg_tensor, - algorithm={"qr_method": tensor.QRMethod()}, - options=options, - optimize={"path": [(0, 1)]}, - ) - # Update the canonical form of the node - node.canonical_form = child_dir - - # Finally, the `msg_tensor` is in the parent bond of the leaf node of `q1`. - # All we need to do is contract the `msg_tensor` and `V` into the leaf. - leaf_node = self.nodes[path_q1] - n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds - aux_bonds = [chr(x) for x in range(n_qbonds)] - aux_bonds[bond_q1] = "b" # Connect `b` to `q1` - leaf_bonds = "".join(aux_bonds) + "p" - msg_bonds = "SpP" - V_bonds = "SBb" - aux_bonds[bond_q1] = "B" # `B` becomes the new physical bond `q1` - result_bonds = "".join(aux_bonds) + "P" - - # Apply the contraction - leaf_node.tensor = cq.contract( - f"{leaf_bonds},{V_bonds},{msg_bonds}->{result_bonds}", - leaf_node.tensor, - V, - msg_tensor, - options=options, - optimize={"path": [(0, 1), (0, 1)]}, - ) - # The leaf node lost its canonical form - leaf_node.canonical_form = None - - # Truncate (if needed) bonds along the arc from `q1` to `q0`. - # We truncate in this direction to take advantage of the canonicalisation - # of the TTN we achieved while pushing the `msg_tensor` from `q0` to `q1`. - if self._cfg.truncation_fidelity < 1: - # Truncate as much as possible before violating the truncation fidelity - self._fidelity_bound_sequential_weighted_truncation( - list(reversed(bonds_to_q1)), bonds_to_q0 - ) - - else: - # Truncate so that all bonds have dimension less or equal to chi - self._chi_sequential_truncation(list(reversed(bonds_to_q1)), bonds_to_q0) - - return self - - def _fidelity_bound_sequential_weighted_truncation( - self, - bonds_from_q1_to_ancestor: list[RootPath], - bonds_from_ancestor_to_q0: list[RootPath], - ) -> None: - """Truncate as much as possible up to the truncation fidelity. - - Our target is to assign a local truncation fidelity `f_i` to each bond `i` in - the input lists so that the lower bound of the fidelity satisfies: - - real_fidelity > self.fidelity * prod(f_i) > self.fidelity * trunc_fidelity (A) - - Let e_i = 1 - f_i, where we refer to `e_i` as the "truncation error at bond i". - We can use that when 0 < e_i < 1, the bound: - - prod(1 - e_i) > 1 - sum(e_i) (B) - - is fairly tight, with an inaccuracy of an additive O(e_i^2) term. Hence, as long - as we satisfy - - 1 - sum(e_i) > truncation_fidelity (C) - - the inqualities at (A) will be satisfied for our chosen f_i. Let - - admissible_error = 1 - truncation_fidelity (D) - - and assign each e_i = w_i * admissible_error where 0 < w_i < 1 is a weight - factor such that sum(w_i) = 1. With these choice, inequality (C) holds and, - consequently, so does (A). 
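# A quick numerical check (plain Python, values assumed) of the bound derived
# above: with weights w_i summing to one and e_i = w_i * admissible_error, the
# product of the per-bond fidelities f_i = 1 - e_i stays above the requested
# truncation fidelity, so inequality (A) is respected.
truncation_fidelity = 0.99
admissible_error = 1 - truncation_fidelity           # as in (D)

dims = [16, 8, 32, 4]                                # hypothetical bond dimensions
weights = [d / sum(dims) for d in dims]              # w_i = dim_i / sum(dim_i)
bond_fidelities = [1 - w * admissible_error for w in weights]

product = 1.0
for f in bond_fidelities:
    product *= f

# prod(f_i) >= 1 - sum(e_i) = truncation_fidelity, matching (B) and (C).
assert product >= truncation_fidelity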
- - Args: - bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address). - These bonds will be truncated. The list must be ordered in such a way - that consecutive bonds share a common tensor and such that the first - bond in the list corresponds to the leaf node that `q0` is assigned to - and the last bond in the list corresponds to child bond of the common - ancestor between the leaves of `q0` and `q1`. - bonds_from_ancestor_q1: Same as above, but the list starts from the other - child bond of the common ancestor and ends at the leaf node that `q1` - is assigned to. Together, these two lists provide a path in the TTN - from the leaf node of `q0` to the leaf node of `q1`. - """ - self._logger.debug("Starting sequential weighted truncation (fidelity bound).") - initial_fidelity = self.fidelity - - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - admissible_error = 1 - self._cfg.truncation_fidelity - - # Combine the two lists of bonds, but remember at which entry the direction - # of the path is switched from going towards root to going towards leaves. - truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0 - switch_direction_at = len(bonds_from_q1_to_ancestor) - towards_root = True # First half of truncation_bonds is path towards ancestor - - # Obtain the dimension of each bond - dimensions = [ - self.get_dimension(bond, DirTTN.PARENT) for bond in truncation_bonds - ] - # Assign the weight `w_i` of each bond. - # NOTE: currently uses w_i = dim_i / sum(dim_i), for no other reason that it is - # simple. Better weight functions may exist and research on this is desirable. - weights = [dim / sum(dimensions) for dim in dimensions] - # Assign a fidelity `f_i` to each bond. - bond_fidelities = [1 - w * admissible_error for w in weights] - - # Truncate each bond as much as possible up to its assigned bond fidelity - for i, bond_address in enumerate(truncation_bonds): - dimension_before = self.get_dimension(bond_address, DirTTN.PARENT) - - # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor) - bond_tensor = self.canonicalise(bond_address, unsafe=True) - - # Flip ``towards_root`` if we have reached the common ancestor - # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather - # than towards the parent - if switch_direction_at == i: - towards_root = False - - # Apply SVD decomposition to truncate as much as possible before exceeding - # a `discarded_weight_cutoff` of `1 - f_i`. Contract S directly into U/V and - # normalise the singular values so that the sum of its squares is equal - # to one (i.e. the TTN is a normalised state after truncation). 
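# A NumPy illustration (assumed singular values, not the cuTensorNet call) of
# what the truncation step below computes: keep the largest singular values
# while the discarded weight stays within 1 - f_i, record 1 - discarded_weight
# as this bond's fidelity contribution, and L2-normalise what is kept.
import numpy as np

s = np.array([0.9, 0.4, 0.15, 0.05, 0.01])       # hypothetical singular values (sorted)
target_fidelity = 0.995                           # the f_i assigned to this bond

total = np.sum(s**2)
cum_kept = np.cumsum(s**2)
discarded = 1.0 - cum_kept / total                # discarded weight after keeping k values

k = int(np.argmax(discarded <= 1 - target_fidelity)) + 1   # smallest admissible k
this_fidelity = float(cum_kept[k - 1] / total)    # equals 1 - discarded_weight

kept = s[:k] / np.sqrt(cum_kept[k - 1])           # sum of squares renormalised to one
assert this_fidelity >= target_fidelity and np.isclose(np.sum(kept**2), 1.0)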
- self._logger.debug( - f"Truncating at {bond_address} to target fidelity={bond_fidelities[i]}" - ) - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - discarded_weight_cutoff=1 - bond_fidelities[i], - partition="V" if towards_root else "U", # Contract S to parent or child - normalization="L2", # Sum of squares singular values must equal 1 - ) - - # Apply the SVD decomposition using the configuration defined above - U, S, V, svd_info = tensor.decompose( - "cp->cs,sp", - bond_tensor, - method=svd_method, - options=options, - return_info=True, - ) - assert S is None # Due to "partition" option in SVDMethod - - # discarded_weight is calculated within cuTensorNet as: - # sum([s**2 for s in S']) - # discarded_weight = 1 - ------------------------- - # sum([s**2 for s in S]) - # where S is the list of original singular values and S' is the set of - # singular values that remain after truncation (before normalisation). - # It can be shown that the fidelity ||^2 (for |phi> and |psi> - # unit vectors before and after truncation) is equal to 1 - disc_weight. - # - # We multiply the fidelity of the current step to the overall fidelity - # to keep track of a lower bound for the fidelity. - this_fidelity = 1.0 - svd_info.discarded_weight - self.fidelity *= this_fidelity - dimension_after = V.shape[0] - - # Contract U and V into the TTN. This reintroduces the data of bond_tensor - # back into the TTN, as required by ``canonicalise(.., unsafe=True)``. - self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address) - - # The next node in the path towards qR loses its canonical form, since - # S was contracted to it (either via U or V) - if towards_root: - self.nodes[bond_address[:-1]].canonical_form = None - else: - self.nodes[bond_address].canonical_form = None - - # Report to logger - self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}") - self._logger.debug( - f"Reduced bond dimension from {dimension_before} to {dimension_after}." - ) - - self._logger.debug( - "Finished sequential weighted truncation (fidelity bound). " - f"Fidelity factor = {self.fidelity / initial_fidelity}" - ) - - # Sanity check: reached the common ancestor and changed direction - assert not towards_root - - def _chi_sequential_truncation( - self, - bonds_from_q1_to_ancestor: list[RootPath], - bonds_from_ancestor_to_q0: list[RootPath], - ) -> None: - """Truncate all bonds in the input lists to have a dimension of chi or lower. - - The lists of bonds are explored sequentially, truncating the bonds - one by one. - - Args: - bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address). - These bonds will be truncated. The list must be ordered in such a way - that consecutive bonds share a common tensor and such that the first - bond in the list corresponds to the leaf node that `q0` is assigned to - and the last bond in the list corresponds to child bond of the common - ancestor between the leaves of `q0` and `q1`. - bonds_from_ancestor_q1: Same as above, but the list starts from the other - child bond of the common ancestor and ends at the leaf node that `q1` - is assigned to. Together, these two lists provide a path in the TTN - from the leaf node of `q0` to the leaf node of `q1`. 
- """ - self._logger.debug("Starting sequential truncation (chi bound).") - initial_fidelity = self.fidelity - - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - # Combine the two lists of bonds, but remember at which entry the direction - # of the path is switched from going towards root to going towards leaves. - truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0 - switch_direction_at = len(bonds_from_q1_to_ancestor) - towards_root = True # First half of truncation_bonds is path towards ancestor - - for i, bond_address in enumerate(truncation_bonds): - dimension_before = self.get_dimension(bond_address, DirTTN.PARENT) - - # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor) - bond_tensor = self.canonicalise(bond_address, unsafe=True) - - # Flip ``towards_root`` if we have reached the common ancestor - # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather - # than towards the parent - if switch_direction_at == i: - towards_root = False - - # Apply SVD decomposition on bond_tensor and truncate up to - # `self._cfg.chi`. Ask cuTensorNet to contract S directly into U/V and - # normalise the singular values so that the sum of its squares is equal - # to one (i.e. the TTN is a normalised state after truncation). - self._logger.debug( - f"Truncating at {bond_address} to (or below) chosen chi={self._cfg.chi}" - ) - - svd_method = tensor.SVDMethod( - abs_cutoff=self._cfg.zero, - max_extent=self._cfg.chi, - partition="V" if towards_root else "U", # Contract S to parent or child - normalization="L2", # Sum of squares equal 1 - ) - - U, S, V, svd_info = tensor.decompose( - "cp->cs,sp", - bond_tensor, - method=svd_method, - options=options, - return_info=True, - ) - assert S is None # Due to "partition" option in SVDMethod - - # discarded_weight is calculated within cuTensorNet as: - # sum([s**2 for s in S']) - # discarded_weight = 1 - ------------------------- - # sum([s**2 for s in S]) - # where S is the list of original singular values and S' is the set of - # singular values that remain after truncation (before normalisation). - # It can be shown that the fidelity ||^2 (for |phi> and |psi> - # unit vectors before and after truncation) is equal to 1 - disc_weight. - # - # We multiply the fidelity of the current step to the overall fidelity - # to keep track of a lower bound for the fidelity. - this_fidelity = 1.0 - svd_info.discarded_weight - self.fidelity *= this_fidelity - dimension_after = V.shape[0] - - # Contract U and V into the TTN. This reintroduces the data of bond_tensor - # back into the TTN, as required by ``canonicalise(.., unsafe=True)``. - self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address) - - # The next node in the path towards qR loses its canonical form, since - # S was contracted to it (either via U or V) - if towards_root: - self.nodes[bond_address[:-1]].canonical_form = None - else: - self.nodes[bond_address].canonical_form = None - - # Report to logger - self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}") - self._logger.debug( - f"Reduced bond dimension from {dimension_before} to {dimension_after}." - ) - - self._logger.debug( - "Finished sequential truncation (chi bound). 
" - f"Fidelity factor = {self.fidelity / initial_fidelity}" - ) - - # Sanity check: reached the common ancestor and changed direction - assert not towards_root - - def _contract_decomp_bond_tensor_into_ttn( - self, U: cp.ndarray, V: cp.ndarray, bond_address: RootPath - ) -> None: - """Contracts a decomposed bond_tensor back into the TTN. - - Args: - U: The tensor of the decomposition adjacent to the child node of the bond. - V: The tensor of the decomposition adjacent to the parent node of the bond. - bond_address: The address to the bond that was decomposed; explicitly, the - DirTTN.PARENT bond of the corresponding child node. - """ - options = {"handle": self._lib.handle, "device_id": self._lib.device_id} - - # Contract V to the parent node of the bond - direction = bond_address[-1] - if direction == DirTTN.LEFT: - indices = "lrp,sl->srp" - else: - indices = "lrp,sr->lsp" - self.nodes[bond_address[:-1]].tensor = cq.contract( - indices, - self.nodes[bond_address[:-1]].tensor, - V, - options=options, - optimize={"path": [(0, 1)]}, - ) - - # Contract U to the child node of the bond - if self.nodes[bond_address].is_leaf: - n_qbonds = ( - len(self.nodes[bond_address].tensor.shape) - 1 - ) # Total number of physical bonds in this node - node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] - else: - node_bonds = ["l", "r", "p"] - result_bonds = node_bonds.copy() - result_bonds[-1] = "s" - - self.nodes[bond_address].tensor = cq.contract( - self.nodes[bond_address].tensor, - node_bonds, - U, - ["p", "s"], - result_bonds, - options=options, - optimize={"path": [(0, 1)]}, - ) +# Copyright 2019-2024 Quantinuum +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +## +# http://www.apache.org/licenses/LICENSE-2.0 +## +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations # type: ignore +import warnings + +try: + import cupy as cp # type: ignore +except ImportError: + warnings.warn("local settings failed to import cupy", ImportWarning) +try: + import cuquantum as cq # type: ignore + from cuquantum.cutensornet import tensor # type: ignore + from cuquantum.cutensornet.experimental import contract_decompose # type: ignore +except ImportError: + warnings.warn("local settings failed to import cutensornet", ImportWarning) + +from pytket.circuit import Qubit +from .ttn import TTN, DirTTN, RootPath + + +class TTNxGate(TTN): + """Implements a gate-by-gate contraction algorithm to calculate the output state + of a circuit as a ``TTN``. + """ + + def _apply_1q_unitary(self, unitary: cp.ndarray, qubit: Qubit) -> TTNxGate: + """Applies the 1-qubit gate to the TTN. + + This does not increase the dimension of any bond. + + Args: + unitary: The unitary to be applied. + qubit: The qubit the unitary acts on. + + Returns: + ``self``, to allow for method chaining. 
+ """ + path, target = self.qubit_position[qubit] + node_tensor = self.nodes[path].tensor + n_qbonds = ( + len(node_tensor.shape) - 1 + ) # Total number of physical bonds in this node + + # Glossary of bond IDs + # qX -> where X is the X-th physical bond (qubit) in the TTN node + # p -> the parent bond of the TTN node + # i -> the input bond of the gate + # o -> the output bond of the gate + + node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] + result_bonds = node_bonds.copy() + node_bonds[target] = "i" # Target bond must match with the gate input bond + result_bonds[target] = "o" # After contraction it matches the output bond + + # Contract + new_tensor = cq.contract( + node_tensor, + node_bonds, + unitary, + ["o", "i"], + result_bonds, + options={"handle": self._lib.handle, "device_id": self._lib.device_id}, + optimize={"path": [(0, 1)]}, + ) + + # Update ``self.nodes`` + # NOTE: Canonicalisation of the node does not change + self.nodes[path].tensor = new_tensor + return self + + def _apply_2q_unitary(self, unitary: cp.ndarray, q0: Qubit, q1: Qubit) -> TTNxGate: + """Applies the 2-qubit gate to the TTN. + + The TTN is converted to canonical and truncation is applied if necessary. + + Args: + unitary: The unitary to be applied. + q0: The first qubit in the tuple |q0>|q1> the unitary acts on. + q1: The second qubit in the tuple |q0>|q1> the unitary acts on. + + Returns: + ``self``, to allow for method chaining. + """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Reshape into a rank-4 tensor + gate_tensor = cp.reshape(unitary, (2, 2, 2, 2)) + + (path_q0, bond_q0) = self.qubit_position[q0] + (path_q1, bond_q1) = self.qubit_position[q1] + + # Glossary of bond IDs + # a -> the input bond of the gate on q0 + # b -> the input bond of the gate on q1 + # A -> the output bond of the gate on q0 + # B -> the output bond of the gate on q1 + # S -> the shared bond of the gate tensor's SVD + # l -> left child bond of the TTN node + # r -> right child bond of the TTN node + # p -> the parent bond of the TTN node + # s -> the shared bond resulting from a decomposition + # chr(x) -> bond of the x-th qubit in a leaf node + gate_bonds = "ABab" + + # If the two qubits are in the same leaf node, contract the gate with it. + # No truncation is required. + if path_q0 == path_q1: + leaf_node = self.nodes[path_q0] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds + aux_bonds = [chr(x) for x in range(n_qbonds)] + aux_bonds[bond_q0] = "a" + aux_bonds[bond_q1] = "b" + leaf_bonds = "".join(aux_bonds) + "p" + aux_bonds[bond_q0] = "A" + aux_bonds[bond_q1] = "B" + result_bonds = "".join(aux_bonds) + "p" + + self.nodes[path_q0].tensor = cq.contract( + f"{leaf_bonds},{gate_bonds}->{result_bonds}", + leaf_node.tensor, + gate_tensor, + options=options, + optimize={"path": [(0, 1)]}, + ) + + self._logger.debug( + "The qubits the gate acts on are on the same group. " + "Gate trivially applied, no dimensions changed." + ) + return self + + # Otherwise, we must include the gate in the common ancestor tensor and + # rewire the inputs and outputs. First, identify common path and direction + common_dirs = [] + for d0, d1 in zip(path_q0, path_q1): + if d0 == d1: + common_dirs.append(d0) + else: + break + common_path = tuple(common_dirs) + + # We begin by canonicalising to the left child bond of the common ancestor. 
+ # This canonicalisation could be done later (just before truncation), but + # doing it now will prevent the need to recanonicalise the tensors that have + # grown (by a factor of x16) when introducing this gate. + # The choice of the left child bond is arbitrary, any bond in the TTN that + # is in the arc connecting qL to qR would have worked. + # + # NOTE: In fact, none of the tensors that are affected by the gate need to + # be canonicalised ahead of time, but I don't expect the saving would be + # particularly noticeable, and it'd require some non-trivial refactoring + # of `canonicalise()`. + self.canonicalise(center=(*common_path, DirTTN.LEFT)) + + # Apply SVD on the gate tensor to remove any zero singular values ASAP + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + partition="U", # Contract S directly into U + ) + # Apply the SVD decomposition using the configuration defined above + U, S, V = tensor.decompose( + f"{gate_bonds}->SAa,SBb", gate_tensor, method=svd_method, options=options + ) + assert S is None # Due to "partition" option in SVDMethod + + # The overall strategy is to connect the `U` tensor above with the physical bond + # for `q0` in the TTN, so that its bond `A` becomes the new physical bond and + # the bond `S` is left dangling (open). We combine this `gate_tensor` with the + # leaf node of `q0` and QR-decompose the result; where the Q tensor will be the + # new (canonicalised) leaf node and R becomes our `msg_tensor`. The latter + # contains the open bond `S` and our objective is to "push" this `msg_tensor` + # through the TTN towards the leaf node of `q1`. Here, "push through" means + # contract with the next tensor, and apply QR decomposition, so that the + # `msg_tensor` carrying `b` and `B` ends up one bond closer to `q1`. + # Once `msg_tensor` is directly connected to the leaf node containing `q1`, we + # just need to apply the `V` tensor above to `q1` and connect its `S` bond with + # that of the `msg_tensor`. + bonds_to_q0 = [ # Bonds in the "arc" from the common ancestor to `q0` + path_q0[:i] for i in range(len(common_path) + 1, len(path_q0) + 1) + ] + # Sanity checks: + assert all( + len(bond_address) != len(common_path) for bond_address in bonds_to_q0 + ) + assert len(bonds_to_q0) == 1 or len(bonds_to_q0[0]) < len(bonds_to_q0[1]) + assert len(bonds_to_q0[-1]) == len(path_q0) + + bonds_to_q1 = [ # Bonds in the "arc" from the common ancestor to `q1` + path_q1[:i] for i in range(len(common_path) + 1, len(path_q1) + 1) + ] + # Sanity checks: + assert all( + len(bond_address) != len(common_path) for bond_address in bonds_to_q1 + ) + assert len(bonds_to_q1) == 1 or len(bonds_to_q1[0]) < len(bonds_to_q1[1]) + assert len(bonds_to_q1[-1]) == len(path_q1) + + # The `msg_tensor` has three bonds. Our convention will be that the first bond + # always corresponds to `S`, the second bond connects the `msg_tensor` + # to the TTN in the child direction and the third connects it to the TTN + # in the `DirTTN.PARENT` direction. If we label the second bond with `l`, then + # the third bond will be labelled `L` (and vice versa). Same for `r` and `p`. + + # We begin applying the gate to the TTN by contracting `U` into the + # leaf node containing `q0`, with the `S` bond of the former left open. + # We immediately QR-decompose the resulting tensor, so that Q becomes the new + # (canonicalised) leaf node and R becomes the `msg_tensor` that we will be + # "pushing" through the rest of the arc towards `q1`. 
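# Minimal numpy sketch (not part of the module) of one "push through" step
# described above. The real code fuses the contraction and the QR split with
# cuquantum's ``contract_decompose``; here the bond dimensions and the use of
# ``numpy.linalg.qr`` are purely illustrative.
import numpy as np

chi, s_dim = 4, 2
node = np.random.rand(chi, chi, chi)     # bonds: (l, r, p)
msg = np.random.rand(s_dim, chi, chi)    # bonds: (S, new left bond L, old left bond l)

# Contract the message into the node's left child bond (cf. "lrp,SLl->SLrp")
combined = np.einsum("lrp,SLl->SLrp", node, msg)
# QR-split so that (L, r) stay in the canonicalised node Q and (S, p) move
# on to the new message R (cf. Q_bonds="Lrs", R_bonds="Ssp" below)
mat = combined.transpose(1, 2, 0, 3).reshape(chi * chi, s_dim * chi)
q, r = np.linalg.qr(mat)
new_node = q.reshape(chi, chi, q.shape[1])                       # bonds: (L, r, s)
new_msg = r.reshape(q.shape[1], s_dim, chi).transpose(1, 0, 2)   # bonds: (S, s, p)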
+ leaf_node = self.nodes[path_q0] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds + aux_bonds = [chr(x) for x in range(n_qbonds)] + aux_bonds[bond_q0] = "a" + leaf_bonds = "".join(aux_bonds) + "p" + aux_bonds[bond_q0] = "A" + Q_bonds = "".join(aux_bonds) + "s" + R_bonds = "Ssp" # The `msg_tensor` + U_bonds = "SAa" + + # Apply the contraction followed by a QR decomposition + leaf_node.tensor, msg_tensor = contract_decompose( + f"{leaf_bonds},{U_bonds}->{Q_bonds},{R_bonds}", + leaf_node.tensor, + U, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the leaf node + leaf_node.canonical_form = DirTTN.PARENT + + # We must push the `msg_tensor` all the way to the common ancestor + # of `q0` and `q1`. + bond_addresses = list(reversed(bonds_to_q0)) # From `q0` to the ancestor + + # For all of these nodes; push `msg_tensor` through to their parent bond + for child_bond in bond_addresses[:-1]: # Doesn't do it on common ancestor! + child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" + Q_bonds = "Lrs" if child_dir == DirTTN.LEFT else "lRs" + R_bonds = "Ssp" # The new `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + node.canonical_form = DirTTN.PARENT + + # The `msg_tensor` is now on a child bond of the common ancestor. + # We must push it through to the other child node. + child_bond = bond_addresses[-1] # This is where msg_tensor currently is + child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + common_ancestor_node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SLl" if child_dir == DirTTN.LEFT else "SRr" + Q_bonds = "Lsp" if child_dir == DirTTN.LEFT else "sRp" + R_bonds = "Srs" if child_dir == DirTTN.LEFT else "Sls" # The new `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({common_ancestor_node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + common_ancestor_node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + common_ancestor_node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + if child_dir == DirTTN.LEFT: + common_ancestor_node.canonical_form = DirTTN.RIGHT + else: + common_ancestor_node.canonical_form = DirTTN.LEFT + + # We must push the `msg_tensor` from the common ancestor to the leaf node + # containing `q1`. 
+ bond_addresses = bonds_to_q1 # From ancestor to `q1` + + # For all of these nodes; push `msg_tensor` through to their child bond + for child_bond in bond_addresses[1:]: # Skip common ancestor: already pushed + child_dir = child_bond[-1] + parent_bond = child_bond[:-1] + node = self.nodes[parent_bond] + + node_bonds = "lrp" + msg_bonds = "SpP" + Q_bonds = "srP" if child_dir == DirTTN.LEFT else "lsP" + R_bonds = "Sls" if child_dir == DirTTN.LEFT else "Srs" # New `msg_tensor` + + self._logger.debug( + f"Pushing msg_tensor ({msg_tensor.nbytes // 2**20} MiB) through node " + f"({node.tensor.nbytes // 2**20} MiB) at {parent_bond}." + ) + + # Apply the contraction followed by a QR decomposition + node.tensor, msg_tensor = contract_decompose( + f"{node_bonds},{msg_bonds}->{Q_bonds},{R_bonds}", + node.tensor, + msg_tensor, + algorithm={"qr_method": tensor.QRMethod()}, + options=options, + optimize={"path": [(0, 1)]}, + ) + # Update the canonical form of the node + node.canonical_form = child_dir + + # Finally, the `msg_tensor` is in the parent bond of the leaf node of `q1`. + # All we need to do is contract the `msg_tensor` and `V` into the leaf. + leaf_node = self.nodes[path_q1] + n_qbonds = len(leaf_node.tensor.shape) - 1 # Num of qubit bonds + aux_bonds = [chr(x) for x in range(n_qbonds)] + aux_bonds[bond_q1] = "b" # Connect `b` to `q1` + leaf_bonds = "".join(aux_bonds) + "p" + msg_bonds = "SpP" + V_bonds = "SBb" + aux_bonds[bond_q1] = "B" # `B` becomes the new physical bond `q1` + result_bonds = "".join(aux_bonds) + "P" + + # Apply the contraction + leaf_node.tensor = cq.contract( + f"{leaf_bonds},{V_bonds},{msg_bonds}->{result_bonds}", + leaf_node.tensor, + V, + msg_tensor, + options=options, + optimize={"path": [(0, 1), (0, 1)]}, + ) + # The leaf node lost its canonical form + leaf_node.canonical_form = None + + # Truncate (if needed) bonds along the arc from `q1` to `q0`. + # We truncate in this direction to take advantage of the canonicalisation + # of the TTN we achieved while pushing the `msg_tensor` from `q0` to `q1`. + if self._cfg.truncation_fidelity < 1: + # Truncate as much as possible before violating the truncation fidelity + self._fidelity_bound_sequential_weighted_truncation( + list(reversed(bonds_to_q1)), bonds_to_q0 + ) + + else: + # Truncate so that all bonds have dimension less or equal to chi + self._chi_sequential_truncation(list(reversed(bonds_to_q1)), bonds_to_q0) + + return self + + def _fidelity_bound_sequential_weighted_truncation( + self, + bonds_from_q1_to_ancestor: list[RootPath], + bonds_from_ancestor_to_q0: list[RootPath], + ) -> None: + """Truncate as much as possible up to the truncation fidelity. + + Our target is to assign a local truncation fidelity `f_i` to each bond `i` in + the input lists so that the lower bound of the fidelity satisfies: + + real_fidelity > self.fidelity * prod(f_i) > self.fidelity * trunc_fidelity (A) + + Let e_i = 1 - f_i, where we refer to `e_i` as the "truncation error at bond i". + We can use that when 0 < e_i < 1, the bound: + + prod(1 - e_i) > 1 - sum(e_i) (B) + + is fairly tight, with an inaccuracy of an additive O(e_i^2) term. Hence, as long + as we satisfy + + 1 - sum(e_i) > truncation_fidelity (C) + + the inqualities at (A) will be satisfied for our chosen f_i. Let + + admissible_error = 1 - truncation_fidelity (D) + + and assign each e_i = w_i * admissible_error where 0 < w_i < 1 is a weight + factor such that sum(w_i) = 1. With these choice, inequality (C) holds and, + consequently, so does (A). 
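# Illustrative sketch (made-up bond dimensions) of the dimension-weighted
# split of the error budget used below: larger bonds get a larger share of
# the admissible error, and the product of the per-bond targets still clears
# the requested truncation fidelity.
import math

truncation_fidelity = 0.999
admissible_error = 1 - truncation_fidelity
dimensions = [16, 64, 64, 8]                          # hypothetical bond dimensions
weights = [d / sum(dimensions) for d in dimensions]   # w_i = dim_i / sum(dim_i)
bond_fidelities = [1 - w * admissible_error for w in weights]
assert math.prod(bond_fidelities) >= truncation_fidelity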
+ + Args: + bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address). + These bonds will be truncated. The list must be ordered in such a way + that consecutive bonds share a common tensor and such that the first + bond in the list corresponds to the leaf node that `q0` is assigned to + and the last bond in the list corresponds to child bond of the common + ancestor between the leaves of `q0` and `q1`. + bonds_from_ancestor_q1: Same as above, but the list starts from the other + child bond of the common ancestor and ends at the leaf node that `q1` + is assigned to. Together, these two lists provide a path in the TTN + from the leaf node of `q0` to the leaf node of `q1`. + """ + self._logger.debug("Starting sequential weighted truncation (fidelity bound).") + initial_fidelity = self.fidelity + + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + admissible_error = 1 - self._cfg.truncation_fidelity + + # Combine the two lists of bonds, but remember at which entry the direction + # of the path is switched from going towards root to going towards leaves. + truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0 + switch_direction_at = len(bonds_from_q1_to_ancestor) + towards_root = True # First half of truncation_bonds is path towards ancestor + + # Obtain the dimension of each bond + dimensions = [ + self.get_dimension(bond, DirTTN.PARENT) for bond in truncation_bonds + ] + # Assign the weight `w_i` of each bond. + # NOTE: currently uses w_i = dim_i / sum(dim_i), for no other reason that it is + # simple. Better weight functions may exist and research on this is desirable. + weights = [dim / sum(dimensions) for dim in dimensions] + # Assign a fidelity `f_i` to each bond. + bond_fidelities = [1 - w * admissible_error for w in weights] + + # Truncate each bond as much as possible up to its assigned bond fidelity + for i, bond_address in enumerate(truncation_bonds): + dimension_before = self.get_dimension(bond_address, DirTTN.PARENT) + + # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor) + bond_tensor = self.canonicalise(bond_address, unsafe=True) + + # Flip ``towards_root`` if we have reached the common ancestor + # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather + # than towards the parent + if switch_direction_at == i: + towards_root = False + + # Apply SVD decomposition to truncate as much as possible before exceeding + # a `discarded_weight_cutoff` of `1 - f_i`. Contract S directly into U/V and + # normalise the singular values so that the sum of its squares is equal + # to one (i.e. the TTN is a normalised state after truncation). 
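# Illustrative numpy sketch (not part of the module) of the chi-bound
# truncation performed below with ``SVDMethod(max_extent=chi, ...)``: keep the
# ``chi`` largest singular values, record the discarded weight, and
# renormalise so the state remains a unit vector. The bond matrix is made up.
import numpy as np

chi = 4
rng = np.random.default_rng(0)
bond_tensor = rng.standard_normal((8, 8))

u, s, vh = np.linalg.svd(bond_tensor, full_matrices=False)
total = np.sum(s**2)
s_kept = s[:chi]                                      # singular values are sorted
discarded_weight = 1 - np.sum(s_kept**2) / total
this_fidelity = 1.0 - discarded_weight                # fidelity of this truncation
s_kept = s_kept / np.linalg.norm(s_kept)              # L2-normalisation, as in the code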
+ self._logger.debug( + f"Truncating at {bond_address} to target fidelity={bond_fidelities[i]}" + ) + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + discarded_weight_cutoff=1 - bond_fidelities[i], + partition="V" if towards_root else "U", # Contract S to parent or child + normalization="L2", # Sum of squares singular values must equal 1 + ) + + # Apply the SVD decomposition using the configuration defined above + U, S, V, svd_info = tensor.decompose( + "cp->cs,sp", + bond_tensor, + method=svd_method, + options=options, + return_info=True, + ) + assert S is None # Due to "partition" option in SVDMethod + + # discarded_weight is calculated within cuTensorNet as: + # sum([s**2 for s in S']) + # discarded_weight = 1 - ------------------------- + # sum([s**2 for s in S]) + # where S is the list of original singular values and S' is the set of + # singular values that remain after truncation (before normalisation). + # It can be shown that the fidelity ||^2 (for |phi> and |psi> + # unit vectors before and after truncation) is equal to 1 - disc_weight. + # + # We multiply the fidelity of the current step to the overall fidelity + # to keep track of a lower bound for the fidelity. + this_fidelity = 1.0 - svd_info.discarded_weight + self.fidelity *= this_fidelity + dimension_after = V.shape[0] + + # Contract U and V into the TTN. This reintroduces the data of bond_tensor + # back into the TTN, as required by ``canonicalise(.., unsafe=True)``. + self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address) + + # The next node in the path towards qR loses its canonical form, since + # S was contracted to it (either via U or V) + if towards_root: + self.nodes[bond_address[:-1]].canonical_form = None + else: + self.nodes[bond_address].canonical_form = None + + # Report to logger + self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}") + self._logger.debug( + f"Reduced bond dimension from {dimension_before} to {dimension_after}." + ) + + self._logger.debug( + "Finished sequential weighted truncation (fidelity bound). " + f"Fidelity factor = {self.fidelity / initial_fidelity}" + ) + + # Sanity check: reached the common ancestor and changed direction + assert not towards_root + + def _chi_sequential_truncation( + self, + bonds_from_q1_to_ancestor: list[RootPath], + bonds_from_ancestor_to_q0: list[RootPath], + ) -> None: + """Truncate all bonds in the input lists to have a dimension of chi or lower. + + The lists of bonds are explored sequentially, truncating the bonds + one by one. + + Args: + bonds_from_q1_to_ancestor: A list of bonds (each as their RootPath address). + These bonds will be truncated. The list must be ordered in such a way + that consecutive bonds share a common tensor and such that the first + bond in the list corresponds to the leaf node that `q0` is assigned to + and the last bond in the list corresponds to child bond of the common + ancestor between the leaves of `q0` and `q1`. + bonds_from_ancestor_q1: Same as above, but the list starts from the other + child bond of the common ancestor and ends at the leaf node that `q1` + is assigned to. Together, these two lists provide a path in the TTN + from the leaf node of `q0` to the leaf node of `q1`. 
+ """ + self._logger.debug("Starting sequential truncation (chi bound).") + initial_fidelity = self.fidelity + + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Combine the two lists of bonds, but remember at which entry the direction + # of the path is switched from going towards root to going towards leaves. + truncation_bonds = bonds_from_q1_to_ancestor + bonds_from_ancestor_to_q0 + switch_direction_at = len(bonds_from_q1_to_ancestor) + towards_root = True # First half of truncation_bonds is path towards ancestor + + for i, bond_address in enumerate(truncation_bonds): + dimension_before = self.get_dimension(bond_address, DirTTN.PARENT) + + # Canonicalise to this bond (unsafely, so we must reintroduce bond_tensor) + bond_tensor = self.canonicalise(bond_address, unsafe=True) + + # Flip ``towards_root`` if we have reached the common ancestor + # i.e. if the ``bond_tensor`` needs to go towards a child tensor rather + # than towards the parent + if switch_direction_at == i: + towards_root = False + + # Apply SVD decomposition on bond_tensor and truncate up to + # `self._cfg.chi`. Ask cuTensorNet to contract S directly into U/V and + # normalise the singular values so that the sum of its squares is equal + # to one (i.e. the TTN is a normalised state after truncation). + self._logger.debug( + f"Truncating at {bond_address} to (or below) chosen chi={self._cfg.chi}" + ) + + svd_method = tensor.SVDMethod( + abs_cutoff=self._cfg.zero, + max_extent=self._cfg.chi, + partition="V" if towards_root else "U", # Contract S to parent or child + normalization="L2", # Sum of squares equal 1 + ) + + U, S, V, svd_info = tensor.decompose( + "cp->cs,sp", + bond_tensor, + method=svd_method, + options=options, + return_info=True, + ) + assert S is None # Due to "partition" option in SVDMethod + + # discarded_weight is calculated within cuTensorNet as: + # sum([s**2 for s in S']) + # discarded_weight = 1 - ------------------------- + # sum([s**2 for s in S]) + # where S is the list of original singular values and S' is the set of + # singular values that remain after truncation (before normalisation). + # It can be shown that the fidelity ||^2 (for |phi> and |psi> + # unit vectors before and after truncation) is equal to 1 - disc_weight. + # + # We multiply the fidelity of the current step to the overall fidelity + # to keep track of a lower bound for the fidelity. + this_fidelity = 1.0 - svd_info.discarded_weight + self.fidelity *= this_fidelity + dimension_after = V.shape[0] + + # Contract U and V into the TTN. This reintroduces the data of bond_tensor + # back into the TTN, as required by ``canonicalise(.., unsafe=True)``. + self._contract_decomp_bond_tensor_into_ttn(U, V, bond_address) + + # The next node in the path towards qR loses its canonical form, since + # S was contracted to it (either via U or V) + if towards_root: + self.nodes[bond_address[:-1]].canonical_form = None + else: + self.nodes[bond_address].canonical_form = None + + # Report to logger + self._logger.debug(f"Truncation done. Truncation fidelity={this_fidelity}") + self._logger.debug( + f"Reduced bond dimension from {dimension_before} to {dimension_after}." + ) + + self._logger.debug( + "Finished sequential truncation (chi bound). 
" + f"Fidelity factor = {self.fidelity / initial_fidelity}" + ) + + # Sanity check: reached the common ancestor and changed direction + assert not towards_root + + def _contract_decomp_bond_tensor_into_ttn( + self, U: cp.ndarray, V: cp.ndarray, bond_address: RootPath + ) -> None: + """Contracts a decomposed bond_tensor back into the TTN. + + Args: + U: The tensor of the decomposition adjacent to the child node of the bond. + V: The tensor of the decomposition adjacent to the parent node of the bond. + bond_address: The address to the bond that was decomposed; explicitly, the + DirTTN.PARENT bond of the corresponding child node. + """ + options = {"handle": self._lib.handle, "device_id": self._lib.device_id} + + # Contract V to the parent node of the bond + direction = bond_address[-1] + if direction == DirTTN.LEFT: + indices = "lrp,sl->srp" + else: + indices = "lrp,sr->lsp" + self.nodes[bond_address[:-1]].tensor = cq.contract( + indices, + self.nodes[bond_address[:-1]].tensor, + V, + options=options, + optimize={"path": [(0, 1)]}, + ) + + # Contract U to the child node of the bond + if self.nodes[bond_address].is_leaf: + n_qbonds = ( + len(self.nodes[bond_address].tensor.shape) - 1 + ) # Total number of physical bonds in this node + node_bonds = [f"q{x}" for x in range(n_qbonds)] + ["p"] + else: + node_bonds = ["l", "r", "p"] + result_bonds = node_bonds.copy() + result_bonds[-1] = "s" + + self.nodes[bond_address].tensor = cq.contract( + self.nodes[bond_address].tensor, + node_bonds, + U, + ["p", "s"], + result_bonds, + options=options, + optimize={"path": [(0, 1)]}, + ) diff --git a/tests/test_structured_state.py b/tests/test_structured_state.py index 849b5e2a..307db888 100644 --- a/tests/test_structured_state.py +++ b/tests/test_structured_state.py @@ -1,909 +1,909 @@ -from typing import Any, Union -import random # type: ignore -import pytest - -import cuquantum as cq # type: ignore -import cupy as cp # type: ignore -import numpy as np # type: ignore - -from pytket.circuit import Circuit, Qubit, OpType # type: ignore -from pytket.pauli import Pauli, QubitPauliString # type: ignore -from pytket.extensions.cutensornet.structured_state import ( - CuTensorNetHandle, - Config, - MPS, - MPSxGate, - MPSxMPO, - TTNxGate, - DirTTN, - simulate, - prepare_circuit_mps, - SimulationAlgorithm, -) -from pytket.extensions.cutensornet.structured_state.ttn import RootPath -from pytket.extensions.cutensornet.general_state.utils import ( - circuit_statevector_postselect, -) - - -def test_libhandle_manager() -> None: - circ = Circuit(5) - - # Proper use of library handle - with CuTensorNetHandle() as libhandle: - cfg = Config() - mps = MPS(libhandle, circ.qubits, cfg) - assert np.isclose(mps.vdot(mps), 1, atol=cfg._atol) - - # Catch exception due to library handle out of scope - with pytest.raises(RuntimeError): - mps.vdot(mps) - - -def test_init() -> None: - circ = Circuit(8) - qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} - - with CuTensorNetHandle() as libhandle: - mps_gate = MPSxGate(libhandle, circ.qubits, Config()) - assert mps_gate.is_valid() - mps_mpo = MPSxMPO(libhandle, circ.qubits, Config()) - assert mps_mpo.is_valid() - ttn_gate = TTNxGate(libhandle, qubit_partition, Config()) - assert ttn_gate.is_valid() - - -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - SimulationAlgorithm.TTNxGate, - ], -) -def test_copy(algorithm: SimulationAlgorithm) -> None: - simple_circ = Circuit(2).H(0).H(1).CX(0, 1) - - with 
CuTensorNetHandle() as libhandle: - # Default config - cfg = Config() - state = simulate(libhandle, simple_circ, algorithm, cfg) - assert state.is_valid() - copy_state = state.copy() - assert copy_state.is_valid() - assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) - - # Bounded chi - cfg = Config(chi=8) - state = simulate(libhandle, simple_circ, algorithm, cfg) - assert state.is_valid() - copy_state = state.copy() - assert copy_state.is_valid() - assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) - - # Bounded truncation_fidelity - cfg = Config(truncation_fidelity=0.9999) - state = simulate(libhandle, simple_circ, algorithm, cfg) - assert state.is_valid() - copy_state = state.copy() - assert copy_state.is_valid() - assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) - - -def test_canonicalise_mps() -> None: - cp.random.seed(1) - circ = Circuit(5) - - with CuTensorNetHandle() as libhandle: - cfg = Config() - mps_gate = MPSxGate(libhandle, circ.qubits, cfg) - # Fill up the tensors with random entries - - # Leftmost tensor - T_d = cp.empty(shape=(1, 4, 2), dtype=cfg._complex_t) - for i1 in range(T_d.shape[1]): - for i2 in range(T_d.shape[2]): - T_d[0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() - mps_gate.tensors[0] = T_d - - # Middle tensors - for pos in range(1, len(mps_gate) - 1): - T_d = cp.empty(shape=(4, 4, 2), dtype=cfg._complex_t) - for i0 in range(T_d.shape[0]): - for i1 in range(T_d.shape[1]): - for i2 in range(T_d.shape[2]): - T_d[i0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() - mps_gate.tensors[pos] = T_d - - # Rightmost tensor - T_d = cp.empty(shape=(4, 1, 2), dtype=cfg._complex_t) - for i0 in range(T_d.shape[0]): - for i2 in range(T_d.shape[2]): - T_d[i0][0][i2] = cp.random.rand() + 1j * cp.random.rand() - mps_gate.tensors[len(mps_gate) - 1] = T_d - - assert mps_gate.is_valid() - - # Calculate the norm of the MPS - norm_sq = mps_gate.vdot(mps_gate) - - # Keep a copy of the non-canonicalised MPS - mps_copy = mps_gate.copy() - - # Canonicalise around center_pos - center_pos = 2 - mps_gate.canonicalise(l_pos=center_pos, r_pos=center_pos) - - # Check that canonicalisation did not change the vector - overlap = mps_gate.vdot(mps_copy) - assert np.isclose(overlap, norm_sq, atol=cfg._atol) - - # Check that the corresponding tensors are in orthogonal form - for pos in range(len(mps_gate)): - if pos == center_pos: # This needs not be in orthogonal form - continue - - T_d = mps_gate.tensors[pos] - - if pos < 2: # Should be in left orthogonal form - result = cq.contract("lrp,lRp->rR", T_d, T_d.conj()) - elif pos > 2: # Should be in right orthogonal form - result = cq.contract("lrp,Lrp->lL", T_d, T_d.conj()) - - # Check that the result is the identity - assert cp.allclose(result, cp.eye(result.shape[0])) - - -@pytest.mark.parametrize( - "center", - [ - (DirTTN.RIGHT,), - (DirTTN.LEFT, DirTTN.RIGHT), - (DirTTN.LEFT, DirTTN.RIGHT, DirTTN.RIGHT), - Qubit("q", [2]), - ], -) -def test_canonicalise_ttn(center: Union[RootPath, Qubit]) -> None: - cp.random.seed(1) - n_levels = 3 - n_qubits = 2**n_levels - max_dim = 8 - - circ = Circuit(n_qubits) - qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} - - with CuTensorNetHandle() as libhandle: - ttn = TTNxGate(libhandle, qubit_partition, Config()) - - # Fill up the tensors with random entries - for path, node in ttn.nodes.items(): - if node.is_leaf: - T = cp.empty(shape=(2, max_dim), dtype=ttn._cfg._complex_t) - for i0 in range(T.shape[0]): - for i1 in range(T.shape[1]): - T[i0][i1] = 
cp.random.rand() + 1j * cp.random.rand() - else: - shape = (max_dim, max_dim, max_dim if len(path) != 0 else 1) - T = cp.empty(shape=shape, dtype=ttn._cfg._complex_t) - for i0 in range(shape[0]): - for i1 in range(shape[1]): - for i2 in range(shape[2]): - T[i0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() - node.tensor = T - - assert ttn.is_valid() - - # Calculate the norm of the TTN - norm_sq = ttn.vdot(ttn) - - # Keep a copy of the non-canonicalised TTN - ttn_copy = ttn.copy() - - # Canonicalise at target path - R = ttn.canonicalise(center) - assert ttn.is_valid() - - # Check that canonicalisation did not change the vector - overlap = ttn.vdot(ttn_copy) - assert np.isclose(overlap / norm_sq, 1.0, atol=ttn._cfg._atol) - - # Check that the tensor R returned agrees with the norm - overlap_R = cq.contract("ud,ud->", R, R.conj()) - assert np.isclose(overlap_R / norm_sq, 1.0, atol=ttn._cfg._atol) - - # Check that the corresponding tensors are in orthogonal form - for path, node in ttn.nodes.items(): - # If it's the node just below the center of canonicalisation, it - # cannot be in orthogonal form - if isinstance(center, Qubit): - if path == ttn.qubit_position[center][0]: - assert node.canonical_form is None - continue - else: - if path == center[:-1]: - assert node.canonical_form is None - continue - # Otherwise, it should be in orthogonal form - assert node.canonical_form is not None - - T = node.tensor - - if node.is_leaf: - assert node.canonical_form == DirTTN.PARENT - result = cq.contract("qp,qP->pP", T, T.conj()) - - elif node.canonical_form == DirTTN.PARENT: - result = cq.contract("lrp,lrP->pP", T, T.conj()) - - elif node.canonical_form == DirTTN.LEFT: - result = cq.contract("lrp,Lrp->lL", T, T.conj()) - - elif node.canonical_form == DirTTN.RIGHT: - result = cq.contract("lrp,lRp->rR", T, T.conj()) - - # Check that the result is the identity - assert cp.allclose(result, cp.eye(result.shape[0])) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q5_empty"), # type: ignore - pytest.lazy_fixture("q8_empty"), # type: ignore - pytest.lazy_fixture("q2_x0"), # type: ignore - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_v0"), # type: ignore - pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore - pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - pytest.lazy_fixture("q3_v0cx02"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - pytest.lazy_fixture("q6_qvol"), # type: ignore - pytest.lazy_fixture("q8_qvol"), # type: ignore - ], -) -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - SimulationAlgorithm.TTNxGate, - ], -) -def test_exact_circ_sim(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: - n_qubits = len(circuit.qubits) - state_vec = circuit.get_statevector() - - with CuTensorNetHandle() as libhandle: - cfg = Config(leaf_size=2) - 
state = simulate(libhandle, circuit, algorithm, cfg) - assert state.is_valid() - # Check that there was no approximation - assert np.isclose(state.get_fidelity(), 1.0, atol=cfg._atol) - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - # Check that all of the amplitudes are correct - for b in range(2**n_qubits): - assert np.isclose( - state.get_amplitude(b), - state_vec[b], - atol=cfg._atol, - ) - - # Check that the statevector is correct - assert np.allclose(state.get_statevector(), state_vec, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q5_empty"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q6_qvol"), # type: ignore - pytest.lazy_fixture("q8_qvol"), # type: ignore - ], -) -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - ], -) -def test_prepare_circuit_mps(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: - state_vec = circuit.get_statevector() - n_qubits = len(circuit.qubits) - - # Prepare the circuit (i.e. add SWAPs so that all gates act on adjacent qubits) - circuit, qubit_map = prepare_circuit_mps(circuit) - # Check that the qubit adjacency is satisfied - for cmd in circuit.get_commands(): - qs = cmd.qubits - assert len(qs) in {1, 2} - if len(qs) == 2: - assert abs(qs[0].index[0] - qs[1].index[0]) == 1 - - with CuTensorNetHandle() as libhandle: - cfg = Config(leaf_size=2) - state = simulate(libhandle, circuit, algorithm, cfg) - state.apply_qubit_relabelling(qubit_map) - assert state.is_valid() - # Check that there was no approximation - assert np.isclose(state.get_fidelity(), 1.0, atol=cfg._atol) - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - # Check that all of the amplitudes are correct - for b in range(2**n_qubits): - assert np.isclose( - state.get_amplitude(b), - state_vec[b], - atol=cfg._atol, - ) - - # Check that the statevector is correct - assert np.allclose(state.get_statevector(), state_vec, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q5_empty"), # type: ignore - pytest.lazy_fixture("q8_empty"), # type: ignore - pytest.lazy_fixture("q2_x0"), # type: ignore - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_v0"), # type: ignore - pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore - pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - pytest.lazy_fixture("q3_v0cx02"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - pytest.lazy_fixture("q6_qvol"), # type: ignore - pytest.lazy_fixture("q8_qvol"), # type: ignore 
- ], -) -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - SimulationAlgorithm.TTNxGate, - ], -) -def test_approx_circ_sim_gate_fid( - circuit: Circuit, algorithm: SimulationAlgorithm -) -> None: - with CuTensorNetHandle() as libhandle: - cfg = Config(truncation_fidelity=0.99, leaf_size=2) - state = simulate(libhandle, circuit, algorithm, cfg) - assert state.is_valid() - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q5_empty"), # type: ignore - pytest.lazy_fixture("q8_empty"), # type: ignore - pytest.lazy_fixture("q2_x0"), # type: ignore - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_v0"), # type: ignore - pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore - pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - pytest.lazy_fixture("q3_v0cx02"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - pytest.lazy_fixture("q6_qvol"), # type: ignore - pytest.lazy_fixture("q8_qvol"), # type: ignore - ], -) -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - SimulationAlgorithm.TTNxGate, - ], -) -def test_approx_circ_sim_chi(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: - with CuTensorNetHandle() as libhandle: - cfg = Config(chi=4, leaf_size=2) - state = simulate(libhandle, circuit, algorithm, cfg) - assert state.is_valid() - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q5_empty"), # type: ignore - pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - pytest.lazy_fixture("q6_qvol"), # type: ignore - ], -) -@pytest.mark.parametrize( - "algorithm", - [ - SimulationAlgorithm.MPSxGate, - SimulationAlgorithm.MPSxMPO, - SimulationAlgorithm.TTNxGate, - ], -) -@pytest.mark.parametrize( - "fp_precision", - [ - np.float32, - np.float64, - ], -) -def test_float_point_options( - circuit: Circuit, algorithm: SimulationAlgorithm, fp_precision: Any -) -> None: - with CuTensorNetHandle() as libhandle: - # Exact - cfg = Config(float_precision=fp_precision, leaf_size=2) - state = simulate(libhandle, circuit, algorithm, cfg) - assert state.is_valid() - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - # Approximate, bound truncation fidelity - cfg = Config( - truncation_fidelity=0.99, float_precision=fp_precision, leaf_size=2 - ) - state = simulate( - libhandle, - circuit, - algorithm, - cfg, - ) - assert state.is_valid() - # Check that overlap 
is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - # Approximate, bound chi - cfg = Config(chi=4, float_precision=fp_precision, leaf_size=2) - state = simulate( - libhandle, - circuit, - algorithm, - cfg, - ) - assert state.is_valid() - # Check that overlap is 1 - assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q20_line_circ_20_layers"), # type: ignore - ], -) -def test_circ_approx_explicit_mps(circuit: Circuit) -> None: - random.seed(1) - - with CuTensorNetHandle() as libhandle: - # Finite gate fidelity - # Check for MPSxGate - cfg = Config(truncation_fidelity=0.99, leaf_size=4, float_precision=np.float32) - mps_gate = simulate( - libhandle, - circuit, - SimulationAlgorithm.MPSxGate, - cfg, - ) - assert np.isclose(mps_gate.get_fidelity(), 0.4, atol=1e-1) - assert mps_gate.is_valid() - assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) - - # Check for MPSxMPO - mps_mpo = simulate( - libhandle, - circuit, - SimulationAlgorithm.MPSxMPO, - cfg, - ) - assert np.isclose(mps_mpo.get_fidelity(), 0.6, atol=1e-1) - assert mps_mpo.is_valid() - assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) - - # Fixed virtual bond dimension - # Check for MPSxGate - cfg = Config(chi=8, leaf_size=4, float_precision=np.float32) - mps_gate = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) - assert np.isclose(mps_gate.get_fidelity(), 0.03, atol=1e-2) - assert mps_gate.is_valid() - assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) - - # Check for MPSxMPO - mps_mpo = simulate(libhandle, circuit, SimulationAlgorithm.MPSxMPO, cfg) - assert np.isclose(mps_mpo.get_fidelity(), 0.05, atol=1e-2) - assert mps_mpo.is_valid() - assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q15_qvol"), # type: ignore - ], -) -def test_circ_approx_explicit_ttn(circuit: Circuit) -> None: - random.seed(1) - - with CuTensorNetHandle() as libhandle: - # Finite gate fidelity - # Check for TTNxGate - cfg = Config(truncation_fidelity=0.99, leaf_size=3, float_precision=np.float32) - ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) - assert np.isclose(ttn_gate.get_fidelity(), 0.751, atol=1e-3) - assert ttn_gate.is_valid() - assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) - - # Fixed virtual bond dimension - # Check for TTNxGate - cfg = Config(chi=120, leaf_size=3, float_precision=np.float32) - ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) - assert np.isclose(ttn_gate.get_fidelity(), 0.854, atol=1e-3) - assert ttn_gate.is_valid() - assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q2_x0"), # type: ignore - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_v0"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore - pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - ], -) -@pytest.mark.parametrize( - "postselect_dict", - [ - {Qubit("q", 0): 0}, - {Qubit("q", 0): 1}, - {Qubit("q", 1): 0}, - {Qubit("q", 1): 1}, - ], -) -def 
test_postselect_2q_circ(circuit: Circuit, postselect_dict: dict) -> None: - sv = circuit_statevector_postselect(circuit, postselect_dict.copy()) - sv_prob = sv.conj() @ sv - if not np.isclose(sv_prob, 0.0): - sv = sv / np.sqrt(sv_prob) # Normalise - - with CuTensorNetHandle() as libhandle: - cfg = Config() - mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) - prob = mps.postselect(postselect_dict) - assert np.isclose(prob, sv_prob, atol=cfg._atol) - assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - ], -) -@pytest.mark.parametrize( - "postselect_dict", - [ - {Qubit("q", 0): 1}, - {Qubit("q", 1): 0}, - {Qubit("q", 0): 0, Qubit("q", 1): 0}, - {Qubit("q", 1): 1, Qubit("q", 2): 1}, - {Qubit("q", 0): 0, Qubit("q", 2): 1}, - ], -) -def test_postselect_circ(circuit: Circuit, postselect_dict: dict) -> None: - sv = circuit_statevector_postselect(circuit, postselect_dict.copy()) - sv_prob = sv.conj() @ sv - if not np.isclose(sv_prob, 0.0): - sv = sv / np.sqrt(sv_prob) # Normalise - - with CuTensorNetHandle() as libhandle: - cfg = Config() - - mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) - - prob = mps.postselect(postselect_dict) - assert np.isclose(prob, sv_prob, atol=cfg._atol) - assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q2_x0"), # type: ignore - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_v0"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu1"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q2_lcu3"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q4_with_creates"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - ], -) -@pytest.mark.parametrize( - "observable", - [ - QubitPauliString({Qubit(0): Pauli.Z}), - QubitPauliString({Qubit(1): Pauli.X}), - QubitPauliString({Qubit(0): Pauli.X, Qubit(1): Pauli.Z}), - ], -) -def test_expectation_value(circuit: Circuit, observable: QubitPauliString) -> None: - pauli_to_optype = {Pauli.Z: OpType.Z, Pauli.Y: OpType.Z, Pauli.X: OpType.X} - - # Use pytket to generate the expectation value of the observable - ket_circ = circuit.copy() - for q, o in observable.map.items(): - ket_circ.add_gate(pauli_to_optype[o], [q]) - ket_sv = ket_circ.get_statevector() - - bra_sv = circuit.get_statevector() - - expectation_value = bra_sv.conj() @ ket_sv - - # Simulate the circuit and obtain the expectation value - with CuTensorNetHandle() as libhandle: - cfg = Config() - mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) - assert np.isclose( - mps.expectation_value(observable), expectation_value, atol=cfg._atol - ) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - 
pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - ], -) -def test_sample_with_seed(circuit: Circuit) -> None: - n_samples = 10 - config = Config(seed=1234) - - with CuTensorNetHandle() as libhandle: - mps_0 = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config) - mps_1 = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config) - mps_2 = mps_0.copy() - - all_outcomes = [] - for _ in range(n_samples): - # Check that all copies of the MPS result in the same sample - outcomes_0 = mps_0.sample() - outcomes_1 = mps_1.sample() - outcomes_2 = mps_2.sample() - assert outcomes_0 == outcomes_1 and outcomes_0 == outcomes_2 - - all_outcomes.append(outcomes_0) - - # Check that the outcomes change between different samples - assert not all(outcome == outcomes_0 for outcome in all_outcomes) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q2_x1"), # type: ignore - pytest.lazy_fixture("q2_x0cx01"), # type: ignore - pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore - pytest.lazy_fixture("q2_hadamard_test"), # type: ignore - pytest.lazy_fixture("q2_lcu2"), # type: ignore - ], -) -def test_sample_circ_2q(circuit: Circuit) -> None: - n_samples = 200 - - q0 = circuit.qubits[0] - q1 = circuit.qubits[1] - - # Compute the probabilities of each outcome - p = dict() - for outcome in range(4): - p[outcome] = abs(circuit.get_statevector()[outcome]) ** 2 - - # Compute the samples - sample_dict = {0: 0, 1: 0, 2: 0, 3: 0} - with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) - - # Take samples measuring both qubits at once - for _ in range(n_samples): - outcome_dict = mps.sample() - outcome = outcome_dict[q0] * 2 + outcome_dict[q1] - sample_dict[outcome] += 1 - - # Check sample frequency consistent with theoretical probability - for outcome, count in sample_dict.items(): - assert np.isclose(count / n_samples, p[outcome], atol=0.1) - - -@pytest.mark.parametrize( - "circuit", - [ - pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore - pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore - pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore - ], -) -def test_measure_circ(circuit: Circuit) -> None: - n_samples = 200 - - qA = circuit.qubits[-1] # Least significant qubit - qB = circuit.qubits[-3] # Third list significant qubit - - with CuTensorNetHandle() as libhandle: - mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) - - # Compute the probabilities of each outcome - p = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 0.0, (1, 1): 0.0} - for outA in range(2): - for outB in range(2): - mps_copy = mps.copy() - p[(outA, outB)] = mps_copy.postselect({qA: outA, qB: outB}) - - # Compute the samples - sample_dict = {(0, 0): 0, (0, 1): 0, (1, 0): 0, (1, 1): 0} - for _ in range(n_samples): - mps_copy = mps.copy() - outcome_dict = mps_copy.measure({qA, qB}) - sample_dict[(outcome_dict[qA], outcome_dict[qB])] += 1 - - # Check sample frequency consistent with theoretical probability - for outcome, count in sample_dict.items(): - assert np.isclose(count / n_samples, p[outcome], atol=0.1) - - -def test_mps_qubit_addition_and_measure() -> None: - with CuTensorNetHandle() as libhandle: - config = Config() - mps = MPSxGate( - libhandle, - qubits=[Qubit(0), Qubit(1), Qubit(2), Qubit(3)], - config=config, - ) - - x = cp.asarray( - [ - [0, 1], - [1, 0], - ], - dtype=config._complex_t, - ) - cx = cp.asarray( - [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 0, 1], - [0, 0, 1, 0], 
- ], - dtype=config._complex_t, - ) - - # Apply some gates - mps.apply_unitary(x, [Qubit(1)]) # |0100> - mps.apply_unitary(cx, [Qubit(1), Qubit(2)]) # |0110> - mps.apply_unitary(cx, [Qubit(2), Qubit(3)]) # |0111> - # Add a qubit at the end of the MPS - mps.add_qubit(new_qubit=Qubit(4), position=len(mps)) # |01110> - # Apply some more gates acting on the new qubit - mps.apply_unitary(cx, [Qubit(3), Qubit(4)]) # |01111> - mps.apply_unitary(cx, [Qubit(4), Qubit(3)]) # |01101> - # Add a qubit at position 3 - mps.add_qubit(new_qubit=Qubit(6), position=3) # |011001> - # Apply some more gates acting on the new qubit - mps.apply_unitary(x, [Qubit(6)]) # |011101> - mps.apply_unitary(cx, [Qubit(6), Qubit(2)]) # |010101> - mps.apply_unitary(cx, [Qubit(6), Qubit(3)]) # |010111> - # Add another qubit at the end of the MPS - mps.add_qubit(new_qubit=Qubit(5), position=len(mps), state=1) # |0101111> - # Apply some more gates acting on the new qubit - mps.apply_unitary(cx, [Qubit(4), Qubit(5)]) # |0101110> - - # The resulting state should be |0101110> - sv = np.zeros(2**7) - sv[int("0101110", 2)] = 1 - - # However, since mps.get_statevector will sort qubits in ILO, the bits would - # change position. Instead, we can relabel the qubits. - mps.apply_qubit_relabelling( - {q: Qubit(i) for q, i in mps.qubit_position.items()} - ) - - # Compare the state vectors - assert np.allclose(mps.get_statevector(), sv) - - # Measure some of the qubits destructively - outcomes = mps.measure({Qubit(0), Qubit(2), Qubit(4)}, destructive=True) - # Since the state is |0101110>, the outcomes are deterministic - assert outcomes[Qubit(0)] == 0 - assert outcomes[Qubit(2)] == 0 - assert outcomes[Qubit(4)] == 1 - - # Note that the qubit identifiers have not been updated, - # so the qubits that were measured are no longer in the MPS. 
- with pytest.raises(ValueError, match="not a qubit in the MPS"): - mps.measure({Qubit(0)}) - - # Measure some of the remaining qubits non-destructively - outcomes = mps.measure({Qubit(1), Qubit(6)}, destructive=False) - assert outcomes[Qubit(1)] == 1 - assert outcomes[Qubit(6)] == 0 - - # The resulting state should be |1110>, verify it - sv = np.zeros(2**4) - sv[int("1110", 2)] = 1 - assert np.allclose(mps.get_statevector(), sv) - - # Apply a few more gates to check it works - mps.apply_unitary(x, [Qubit(1)]) # |0110> - mps.apply_unitary(cx, [Qubit(3), Qubit(5)]) # |0100> - - # The resulting state should be |0100>, verify it - sv = np.zeros(2**4) - sv[int("0100", 2)] = 1 - assert np.allclose(mps.get_statevector(), sv) +from typing import Any, Union +import random # type: ignore +import pytest + +import cuquantum as cq # type: ignore +import cupy as cp # type: ignore +import numpy as np # type: ignore + +from pytket.circuit import Circuit, Qubit, OpType # type: ignore +from pytket.pauli import Pauli, QubitPauliString # type: ignore +from pytket.extensions.cutensornet.structured_state import ( + CuTensorNetHandle, + Config, + MPS, + MPSxGate, + MPSxMPO, + TTNxGate, + DirTTN, + simulate, + prepare_circuit_mps, + SimulationAlgorithm, +) +from pytket.extensions.cutensornet.structured_state.ttn import RootPath +from pytket.extensions.cutensornet.general_state.utils import ( + circuit_statevector_postselect, +) + + +def test_libhandle_manager() -> None: + circ = Circuit(5) + + # Proper use of library handle + with CuTensorNetHandle() as libhandle: + cfg = Config() + mps = MPS(libhandle, circ.qubits, cfg) + assert np.isclose(mps.vdot(mps), 1, atol=cfg._atol) + + # Catch exception due to library handle out of scope + with pytest.raises(RuntimeError): + mps.vdot(mps) + + +def test_init() -> None: + circ = Circuit(8) + qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} + + with CuTensorNetHandle() as libhandle: + mps_gate = MPSxGate(libhandle, circ.qubits, Config()) + assert mps_gate.is_valid() + mps_mpo = MPSxMPO(libhandle, circ.qubits, Config()) + assert mps_mpo.is_valid() + ttn_gate = TTNxGate(libhandle, qubit_partition, Config()) + assert ttn_gate.is_valid() + + +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +def test_copy(algorithm: SimulationAlgorithm) -> None: + simple_circ = Circuit(2).H(0).H(1).CX(0, 1) + + with CuTensorNetHandle() as libhandle: + # Default config + cfg = Config() + state = simulate(libhandle, simple_circ, algorithm, cfg) + assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + # Bounded chi + cfg = Config(chi=8) + state = simulate(libhandle, simple_circ, algorithm, cfg) + assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + # Bounded truncation_fidelity + cfg = Config(truncation_fidelity=0.9999) + state = simulate(libhandle, simple_circ, algorithm, cfg) + assert state.is_valid() + copy_state = state.copy() + assert copy_state.is_valid() + assert np.isclose(copy_state.vdot(state), 1.0, atol=cfg._atol) + + +def test_canonicalise_mps() -> None: + cp.random.seed(1) + circ = Circuit(5) + + with CuTensorNetHandle() as libhandle: + cfg = Config() + mps_gate = MPSxGate(libhandle, circ.qubits, cfg) + # Fill up the tensors with random entries + + # Leftmost tensor + T_d = 
cp.empty(shape=(1, 4, 2), dtype=cfg._complex_t) + for i1 in range(T_d.shape[1]): + for i2 in range(T_d.shape[2]): + T_d[0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() + mps_gate.tensors[0] = T_d + + # Middle tensors + for pos in range(1, len(mps_gate) - 1): + T_d = cp.empty(shape=(4, 4, 2), dtype=cfg._complex_t) + for i0 in range(T_d.shape[0]): + for i1 in range(T_d.shape[1]): + for i2 in range(T_d.shape[2]): + T_d[i0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() + mps_gate.tensors[pos] = T_d + + # Rightmost tensor + T_d = cp.empty(shape=(4, 1, 2), dtype=cfg._complex_t) + for i0 in range(T_d.shape[0]): + for i2 in range(T_d.shape[2]): + T_d[i0][0][i2] = cp.random.rand() + 1j * cp.random.rand() + mps_gate.tensors[len(mps_gate) - 1] = T_d + + assert mps_gate.is_valid() + + # Calculate the norm of the MPS + norm_sq = mps_gate.vdot(mps_gate) + + # Keep a copy of the non-canonicalised MPS + mps_copy = mps_gate.copy() + + # Canonicalise around center_pos + center_pos = 2 + mps_gate.canonicalise(l_pos=center_pos, r_pos=center_pos) + + # Check that canonicalisation did not change the vector + overlap = mps_gate.vdot(mps_copy) + assert np.isclose(overlap, norm_sq, atol=cfg._atol) + + # Check that the corresponding tensors are in orthogonal form + for pos in range(len(mps_gate)): + if pos == center_pos: # This needs not be in orthogonal form + continue + + T_d = mps_gate.tensors[pos] + + if pos < 2: # Should be in left orthogonal form + result = cq.contract("lrp,lRp->rR", T_d, T_d.conj()) + elif pos > 2: # Should be in right orthogonal form + result = cq.contract("lrp,Lrp->lL", T_d, T_d.conj()) + + # Check that the result is the identity + assert cp.allclose(result, cp.eye(result.shape[0])) + + +@pytest.mark.parametrize( + "center", + [ + (DirTTN.RIGHT,), + (DirTTN.LEFT, DirTTN.RIGHT), + (DirTTN.LEFT, DirTTN.RIGHT, DirTTN.RIGHT), + Qubit("q", [2]), + ], +) +def test_canonicalise_ttn(center: Union[RootPath, Qubit]) -> None: + cp.random.seed(1) + n_levels = 3 + n_qubits = 2**n_levels + max_dim = 8 + + circ = Circuit(n_qubits) + qubit_partition = {i: [q] for i, q in enumerate(circ.qubits)} + + with CuTensorNetHandle() as libhandle: + ttn = TTNxGate(libhandle, qubit_partition, Config()) + + # Fill up the tensors with random entries + for path, node in ttn.nodes.items(): + if node.is_leaf: + T = cp.empty(shape=(2, max_dim), dtype=ttn._cfg._complex_t) + for i0 in range(T.shape[0]): + for i1 in range(T.shape[1]): + T[i0][i1] = cp.random.rand() + 1j * cp.random.rand() + else: + shape = (max_dim, max_dim, max_dim if len(path) != 0 else 1) + T = cp.empty(shape=shape, dtype=ttn._cfg._complex_t) + for i0 in range(shape[0]): + for i1 in range(shape[1]): + for i2 in range(shape[2]): + T[i0][i1][i2] = cp.random.rand() + 1j * cp.random.rand() + node.tensor = T + + assert ttn.is_valid() + + # Calculate the norm of the TTN + norm_sq = ttn.vdot(ttn) + + # Keep a copy of the non-canonicalised TTN + ttn_copy = ttn.copy() + + # Canonicalise at target path + R = ttn.canonicalise(center) + assert ttn.is_valid() + + # Check that canonicalisation did not change the vector + overlap = ttn.vdot(ttn_copy) + assert np.isclose(overlap / norm_sq, 1.0, atol=ttn._cfg._atol) + + # Check that the tensor R returned agrees with the norm + overlap_R = cq.contract("ud,ud->", R, R.conj()) + assert np.isclose(overlap_R / norm_sq, 1.0, atol=ttn._cfg._atol) + + # Check that the corresponding tensors are in orthogonal form + for path, node in ttn.nodes.items(): + # If it's the node just below the center of canonicalisation, it + # 
cannot be in orthogonal form + if isinstance(center, Qubit): + if path == ttn.qubit_position[center][0]: + assert node.canonical_form is None + continue + else: + if path == center[:-1]: + assert node.canonical_form is None + continue + # Otherwise, it should be in orthogonal form + assert node.canonical_form is not None + + T = node.tensor + + if node.is_leaf: + assert node.canonical_form == DirTTN.PARENT + result = cq.contract("qp,qP->pP", T, T.conj()) + + elif node.canonical_form == DirTTN.PARENT: + result = cq.contract("lrp,lrP->pP", T, T.conj()) + + elif node.canonical_form == DirTTN.LEFT: + result = cq.contract("lrp,Lrp->lL", T, T.conj()) + + elif node.canonical_form == DirTTN.RIGHT: + result = cq.contract("lrp,lRp->rR", T, T.conj()) + + # Check that the result is the identity + assert cp.allclose(result, cp.eye(result.shape[0])) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore + pytest.lazy_fixture("q2_x0"), # type: ignore + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore + pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + pytest.lazy_fixture("q3_v0cx02"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore + ], +) +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +def test_exact_circ_sim(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: + n_qubits = len(circuit.qubits) + state_vec = circuit.get_statevector() + + with CuTensorNetHandle() as libhandle: + cfg = Config(leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() + # Check that there was no approximation + assert np.isclose(state.get_fidelity(), 1.0, atol=cfg._atol) + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + # Check that all of the amplitudes are correct + for b in range(2**n_qubits): + assert np.isclose( + state.get_amplitude(b), + state_vec[b], + atol=cfg._atol, + ) + + # Check that the statevector is correct + assert np.allclose(state.get_statevector(), state_vec, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore + ], +) +@pytest.mark.parametrize( + "algorithm", + [ + 
SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + ], +) +def test_prepare_circuit_mps(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: + state_vec = circuit.get_statevector() + n_qubits = len(circuit.qubits) + + # Prepare the circuit (i.e. add SWAPs so that all gates act on adjacent qubits) + circuit, qubit_map = prepare_circuit_mps(circuit) + # Check that the qubit adjacency is satisfied + for cmd in circuit.get_commands(): + qs = cmd.qubits + assert len(qs) in {1, 2} + if len(qs) == 2: + assert abs(qs[0].index[0] - qs[1].index[0]) == 1 + + with CuTensorNetHandle() as libhandle: + cfg = Config(leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + state.apply_qubit_relabelling(qubit_map) + assert state.is_valid() + # Check that there was no approximation + assert np.isclose(state.get_fidelity(), 1.0, atol=cfg._atol) + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + # Check that all of the amplitudes are correct + for b in range(2**n_qubits): + assert np.isclose( + state.get_amplitude(b), + state_vec[b], + atol=cfg._atol, + ) + + # Check that the statevector is correct + assert np.allclose(state.get_statevector(), state_vec, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore + pytest.lazy_fixture("q2_x0"), # type: ignore + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore + pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + pytest.lazy_fixture("q3_v0cx02"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore + ], +) +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +def test_approx_circ_sim_gate_fid( + circuit: Circuit, algorithm: SimulationAlgorithm +) -> None: + with CuTensorNetHandle() as libhandle: + cfg = Config(truncation_fidelity=0.99, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q8_empty"), # type: ignore + pytest.lazy_fixture("q2_x0"), # type: ignore + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q8_x0h2v5z6"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore + pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + 
pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + pytest.lazy_fixture("q3_v0cx02"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q5_h0s1rz2ry3tk4tk13"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + pytest.lazy_fixture("q6_qvol"), # type: ignore + pytest.lazy_fixture("q8_qvol"), # type: ignore + ], +) +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +def test_approx_circ_sim_chi(circuit: Circuit, algorithm: SimulationAlgorithm) -> None: + with CuTensorNetHandle() as libhandle: + cfg = Config(chi=4, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q5_empty"), # type: ignore + pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + pytest.lazy_fixture("q6_qvol"), # type: ignore + ], +) +@pytest.mark.parametrize( + "algorithm", + [ + SimulationAlgorithm.MPSxGate, + SimulationAlgorithm.MPSxMPO, + SimulationAlgorithm.TTNxGate, + ], +) +@pytest.mark.parametrize( + "fp_precision", + [ + np.float32, + np.float64, + ], +) +def test_float_point_options( + circuit: Circuit, algorithm: SimulationAlgorithm, fp_precision: Any +) -> None: + with CuTensorNetHandle() as libhandle: + # Exact + cfg = Config(float_precision=fp_precision, leaf_size=2) + state = simulate(libhandle, circuit, algorithm, cfg) + assert state.is_valid() + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + # Approximate, bound truncation fidelity + cfg = Config( + truncation_fidelity=0.99, float_precision=fp_precision, leaf_size=2 + ) + state = simulate( + libhandle, + circuit, + algorithm, + cfg, + ) + assert state.is_valid() + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + # Approximate, bound chi + cfg = Config(chi=4, float_precision=fp_precision, leaf_size=2) + state = simulate( + libhandle, + circuit, + algorithm, + cfg, + ) + assert state.is_valid() + # Check that overlap is 1 + assert np.isclose(state.vdot(state), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q20_line_circ_20_layers"), # type: ignore + ], +) +def test_circ_approx_explicit_mps(circuit: Circuit) -> None: + random.seed(1) + + with CuTensorNetHandle() as libhandle: + # Finite gate fidelity + # Check for MPSxGate + cfg = Config(truncation_fidelity=0.99, leaf_size=4, float_precision=np.float32) + mps_gate = simulate( + libhandle, + circuit, + SimulationAlgorithm.MPSxGate, + cfg, + ) + assert np.isclose(mps_gate.get_fidelity(), 0.4, atol=1e-1) + assert mps_gate.is_valid() + assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) + + # Check for MPSxMPO + mps_mpo = simulate( + libhandle, + circuit, + SimulationAlgorithm.MPSxMPO, + cfg, + ) + 
assert np.isclose(mps_mpo.get_fidelity(), 0.6, atol=1e-1) + assert mps_mpo.is_valid() + assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) + + # Fixed virtual bond dimension + # Check for MPSxGate + cfg = Config(chi=8, leaf_size=4, float_precision=np.float32) + mps_gate = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) + assert np.isclose(mps_gate.get_fidelity(), 0.03, atol=1e-2) + assert mps_gate.is_valid() + assert np.isclose(mps_gate.vdot(mps_gate), 1.0, atol=cfg._atol) + + # Check for MPSxMPO + mps_mpo = simulate(libhandle, circuit, SimulationAlgorithm.MPSxMPO, cfg) + assert np.isclose(mps_mpo.get_fidelity(), 0.05, atol=1e-2) + assert mps_mpo.is_valid() + assert np.isclose(mps_mpo.vdot(mps_mpo), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q15_qvol"), # type: ignore + ], +) +def test_circ_approx_explicit_ttn(circuit: Circuit) -> None: + random.seed(1) + + with CuTensorNetHandle() as libhandle: + # Finite gate fidelity + # Check for TTNxGate + cfg = Config(truncation_fidelity=0.99, leaf_size=3, float_precision=np.float32) + ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) + assert np.isclose(ttn_gate.get_fidelity(), 0.751, atol=1e-3) + assert ttn_gate.is_valid() + assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) + + # Fixed virtual bond dimension + # Check for TTNxGate + cfg = Config(chi=120, leaf_size=3, float_precision=np.float32) + ttn_gate = simulate(libhandle, circuit, SimulationAlgorithm.TTNxGate, cfg) + assert np.isclose(ttn_gate.get_fidelity(), 0.854, atol=1e-3) + assert ttn_gate.is_valid() + assert np.isclose(ttn_gate.vdot(ttn_gate), 1.0, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q2_x0"), # type: ignore + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore + pytest.lazy_fixture("q2_x0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + ], +) +@pytest.mark.parametrize( + "postselect_dict", + [ + {Qubit("q", 0): 0}, + {Qubit("q", 0): 1}, + {Qubit("q", 1): 0}, + {Qubit("q", 1): 1}, + ], +) +def test_postselect_2q_circ(circuit: Circuit, postselect_dict: dict) -> None: + sv = circuit_statevector_postselect(circuit, postselect_dict.copy()) + sv_prob = sv.conj() @ sv + if not np.isclose(sv_prob, 0.0): + sv = sv / np.sqrt(sv_prob) # Normalise + + with CuTensorNetHandle() as libhandle: + cfg = Config() + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) + prob = mps.postselect(postselect_dict) + assert np.isclose(prob, sv_prob, atol=cfg._atol) + assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + ], +) +@pytest.mark.parametrize( + "postselect_dict", + [ + {Qubit("q", 0): 1}, + {Qubit("q", 1): 0}, + {Qubit("q", 0): 0, Qubit("q", 1): 0}, + {Qubit("q", 1): 1, Qubit("q", 2): 1}, + {Qubit("q", 0): 0, Qubit("q", 2): 1}, + ], +) +def test_postselect_circ(circuit: Circuit, postselect_dict: 
dict) -> None: + sv = circuit_statevector_postselect(circuit, postselect_dict.copy()) + sv_prob = sv.conj() @ sv + if not np.isclose(sv_prob, 0.0): + sv = sv / np.sqrt(sv_prob) # Normalise + + with CuTensorNetHandle() as libhandle: + cfg = Config() + + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) + + prob = mps.postselect(postselect_dict) + assert np.isclose(prob, sv_prob, atol=cfg._atol) + assert np.allclose(mps.get_statevector(), sv, atol=cfg._atol) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q2_x0"), # type: ignore + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_v0"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_x1cx10x1"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu1"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q2_lcu3"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q4_with_creates"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + ], +) +@pytest.mark.parametrize( + "observable", + [ + QubitPauliString({Qubit(0): Pauli.Z}), + QubitPauliString({Qubit(1): Pauli.X}), + QubitPauliString({Qubit(0): Pauli.X, Qubit(1): Pauli.Z}), + ], +) +def test_expectation_value(circuit: Circuit, observable: QubitPauliString) -> None: + pauli_to_optype = {Pauli.Z: OpType.Z, Pauli.Y: OpType.Z, Pauli.X: OpType.X} + + # Use pytket to generate the expectation value of the observable + ket_circ = circuit.copy() + for q, o in observable.map.items(): + ket_circ.add_gate(pauli_to_optype[o], [q]) + ket_sv = ket_circ.get_statevector() + + bra_sv = circuit.get_statevector() + + expectation_value = bra_sv.conj() @ ket_sv + + # Simulate the circuit and obtain the expectation value + with CuTensorNetHandle() as libhandle: + cfg = Config() + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, cfg) + assert np.isclose( + mps.expectation_value(observable), expectation_value, atol=cfg._atol + ) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + pytest.lazy_fixture("q2_lcu2"), # type: ignore + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + ], +) +def test_sample_with_seed(circuit: Circuit) -> None: + n_samples = 10 + config = Config(seed=1234) + + with CuTensorNetHandle() as libhandle: + mps_0 = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config) + mps_1 = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, config) + mps_2 = mps_0.copy() + + all_outcomes = [] + for _ in range(n_samples): + # Check that all copies of the MPS result in the same sample + outcomes_0 = mps_0.sample() + outcomes_1 = mps_1.sample() + outcomes_2 = mps_2.sample() + assert outcomes_0 == outcomes_1 and outcomes_0 == outcomes_2 + + all_outcomes.append(outcomes_0) + + # Check that the outcomes change between different samples + assert not all(outcome == outcomes_0 for outcome in all_outcomes) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q2_x1"), # type: ignore + pytest.lazy_fixture("q2_x0cx01"), # type: ignore + pytest.lazy_fixture("q2_v0cx01cx10"), # type: ignore + pytest.lazy_fixture("q2_hadamard_test"), # type: ignore + 
pytest.lazy_fixture("q2_lcu2"), # type: ignore + ], +) +def test_sample_circ_2q(circuit: Circuit) -> None: + n_samples = 200 + + q0 = circuit.qubits[0] + q1 = circuit.qubits[1] + + # Compute the probabilities of each outcome + p = dict() + for outcome in range(4): + p[outcome] = abs(circuit.get_statevector()[outcome]) ** 2 + + # Compute the samples + sample_dict = {0: 0, 1: 0, 2: 0, 3: 0} + with CuTensorNetHandle() as libhandle: + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) + + # Take samples measuring both qubits at once + for _ in range(n_samples): + outcome_dict = mps.sample() + outcome = outcome_dict[q0] * 2 + outcome_dict[q1] + sample_dict[outcome] += 1 + + # Check sample frequency consistent with theoretical probability + for outcome, count in sample_dict.items(): + assert np.isclose(count / n_samples, p[outcome], atol=0.1) + + +@pytest.mark.parametrize( + "circuit", + [ + pytest.lazy_fixture("q3_cx01cz12x1rx0"), # type: ignore + pytest.lazy_fixture("q3_toffoli_box_with_implicit_swaps"), # type: ignore + pytest.lazy_fixture("q5_line_circ_30_layers"), # type: ignore + ], +) +def test_measure_circ(circuit: Circuit) -> None: + n_samples = 200 + + qA = circuit.qubits[-1] # Least significant qubit + qB = circuit.qubits[-3] # Third list significant qubit + + with CuTensorNetHandle() as libhandle: + mps = simulate(libhandle, circuit, SimulationAlgorithm.MPSxGate, Config()) + + # Compute the probabilities of each outcome + p = {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 0.0, (1, 1): 0.0} + for outA in range(2): + for outB in range(2): + mps_copy = mps.copy() + p[(outA, outB)] = mps_copy.postselect({qA: outA, qB: outB}) + + # Compute the samples + sample_dict = {(0, 0): 0, (0, 1): 0, (1, 0): 0, (1, 1): 0} + for _ in range(n_samples): + mps_copy = mps.copy() + outcome_dict = mps_copy.measure({qA, qB}) + sample_dict[(outcome_dict[qA], outcome_dict[qB])] += 1 + + # Check sample frequency consistent with theoretical probability + for outcome, count in sample_dict.items(): + assert np.isclose(count / n_samples, p[outcome], atol=0.1) + + +def test_mps_qubit_addition_and_measure() -> None: + with CuTensorNetHandle() as libhandle: + config = Config() + mps = MPSxGate( + libhandle, + qubits=[Qubit(0), Qubit(1), Qubit(2), Qubit(3)], + config=config, + ) + + x = cp.asarray( + [ + [0, 1], + [1, 0], + ], + dtype=config._complex_t, + ) + cx = cp.asarray( + [ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + ], + dtype=config._complex_t, + ) + + # Apply some gates + mps.apply_unitary(x, [Qubit(1)]) # |0100> + mps.apply_unitary(cx, [Qubit(1), Qubit(2)]) # |0110> + mps.apply_unitary(cx, [Qubit(2), Qubit(3)]) # |0111> + # Add a qubit at the end of the MPS + mps.add_qubit(new_qubit=Qubit(4), position=len(mps)) # |01110> + # Apply some more gates acting on the new qubit + mps.apply_unitary(cx, [Qubit(3), Qubit(4)]) # |01111> + mps.apply_unitary(cx, [Qubit(4), Qubit(3)]) # |01101> + # Add a qubit at position 3 + mps.add_qubit(new_qubit=Qubit(6), position=3) # |011001> + # Apply some more gates acting on the new qubit + mps.apply_unitary(x, [Qubit(6)]) # |011101> + mps.apply_unitary(cx, [Qubit(6), Qubit(2)]) # |010101> + mps.apply_unitary(cx, [Qubit(6), Qubit(3)]) # |010111> + # Add another qubit at the end of the MPS + mps.add_qubit(new_qubit=Qubit(5), position=len(mps), state=1) # |0101111> + # Apply some more gates acting on the new qubit + mps.apply_unitary(cx, [Qubit(4), Qubit(5)]) # |0101110> + + # The resulting state should be |0101110> + sv = np.zeros(2**7) + 
sv[int("0101110", 2)] = 1 + + # However, since mps.get_statevector will sort qubits in ILO, the bits would + # change position. Instead, we can relabel the qubits. + mps.apply_qubit_relabelling( + {q: Qubit(i) for q, i in mps.qubit_position.items()} + ) + + # Compare the state vectors + assert np.allclose(mps.get_statevector(), sv) + + # Measure some of the qubits destructively + outcomes = mps.measure({Qubit(0), Qubit(2), Qubit(4)}, destructive=True) + # Since the state is |0101110>, the outcomes are deterministic + assert outcomes[Qubit(0)] == 0 + assert outcomes[Qubit(2)] == 0 + assert outcomes[Qubit(4)] == 1 + + # Note that the qubit identifiers have not been updated, + # so the qubits that were measured are no longer in the MPS. + with pytest.raises(ValueError, match="not a qubit in the MPS"): + mps.measure({Qubit(0)}) + + # Measure some of the remaining qubits non-destructively + outcomes = mps.measure({Qubit(1), Qubit(6)}, destructive=False) + assert outcomes[Qubit(1)] == 1 + assert outcomes[Qubit(6)] == 0 + + # The resulting state should be |1110>, verify it + sv = np.zeros(2**4) + sv[int("1110", 2)] = 1 + assert np.allclose(mps.get_statevector(), sv) + + # Apply a few more gates to check it works + mps.apply_unitary(x, [Qubit(1)]) # |0110> + mps.apply_unitary(cx, [Qubit(3), Qubit(5)]) # |0100> + + # The resulting state should be |0100>, verify it + sv = np.zeros(2**4) + sv[int("0100", 2)] = 1 + assert np.allclose(mps.get_statevector(), sv)