diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md index cb5f4a98005..d327f2829b3 100644 --- a/doc/releases/changelog-dev.md +++ b/doc/releases/changelog-dev.md @@ -63,6 +63,9 @@ * Transforms can be applied on devices following the new device API. [(#4667)](https://github.com/PennyLaneAI/pennylane/pull/4667) +* All gradient transforms are updated to the new transform program system. + [(#4595)](https://github.com/PennyLaneAI/pennylane/pull/4595) + * All quantum function transforms are updated to the new transform program system. [(#4439)](https://github.com/PennyLaneAI/pennylane/pull/4439) diff --git a/pennylane/_grad.py b/pennylane/_grad.py index cd93d8997d5..6769548d66a 100644 --- a/pennylane/_grad.py +++ b/pennylane/_grad.py @@ -327,7 +327,6 @@ def _jacobian_function(*args, **kwargs): "If this is unintended, please add trainable parameters via the " "'requires_grad' attribute or 'argnum' keyword." ) - jac = tuple(_jacobian(func, arg)(*args, **kwargs) for arg in _argnum) return jac[0] if unpack else jac diff --git a/pennylane/drawer/draw.py b/pennylane/drawer/draw.py index 812ced953d4..ce3939e2839 100644 --- a/pennylane/drawer/draw.py +++ b/pennylane/drawer/draw.py @@ -260,10 +260,13 @@ def wrapper(*args, **kwargs): _wire_order = wire_order or qnode.tape.wires else: original_expansion_strategy = getattr(qnode, "expansion_strategy", None) - try: qnode.expansion_strategy = expansion_strategy or original_expansion_strategy tapes = qnode.construct(args, kwargs) + if isinstance(qnode.device, qml.devices.Device): + program = qnode.transform_program + tapes = program([qnode.tape]) + finally: qnode.expansion_strategy = original_expansion_strategy diff --git a/pennylane/gradients/finite_difference.py b/pennylane/gradients/finite_difference.py index fa86886a4ce..d925c57853a 100644 --- a/pennylane/gradients/finite_difference.py +++ b/pennylane/gradients/finite_difference.py @@ -15,8 +15,10 @@ This module contains functions for computing the finite-difference gradient of a quantum tape. """ -# pylint: disable=protected-access,too-many-arguments,too-many-branches,too-many-statements +# pylint: disable=protected-access,too-many-arguments,too-many-branches,too-many-statements,unused-argument +from typing import Sequence, Callable import functools +from functools import partial from warnings import warn import numpy as np @@ -24,6 +26,10 @@ import pennylane as qml from pennylane.measurements import ProbabilityMP +from pennylane.transforms.core import transform +from pennylane.transforms.tape_expand import expand_invalid_trainable +from pennylane.gradients.gradient_transform import _contract_qjac_with_cjac + from .general_shift_rules import generate_shifted_tapes from .gradient_transform import ( @@ -31,7 +37,6 @@ assert_no_tape_batching, choose_grad_methods, gradient_analysis_and_validation, - gradient_transform, _no_trainable_grad, ) @@ -167,9 +172,36 @@ def _processing_fn(results, shots, single_shot_batch_fn): return tuple(grads_tuple) -@gradient_transform +def _expand_transform_finite_diff( + tape: qml.tape.QuantumTape, + argnum=None, + h=1e-7, + approx_order=1, + n=1, + strategy="forward", + f0=None, + validate_params=True, +) -> (Sequence[qml.tape.QuantumTape], Callable): + """Expand function to be applied before finite difference.""" + expanded_tape = expand_invalid_trainable(tape) + + def null_postprocessing(results): + """A postprocessing function returned by a transform that only converts the batch of results + into a result for a single ``QuantumTape``.
+ """ + return results[0] + + return [expanded_tape], null_postprocessing + + +@partial( + transform, + expand_transform=_expand_transform_finite_diff, + classical_cotransform=_contract_qjac_with_cjac, + final_transform=True, +) def finite_diff( - tape, + tape: qml.tape.QuantumTape, argnum=None, h=1e-7, approx_order=1, @@ -177,7 +209,7 @@ def finite_diff( strategy="forward", f0=None, validate_params=True, -): +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a QNode to compute the finite-difference gradient of all gate parameters with respect to its inputs. Args: @@ -318,6 +350,7 @@ def finite_diff( The outermost tuple contains results corresponding to each element of the shot vector. """ + transform_name = "finite difference" assert_no_tape_batching(tape, transform_name) diff --git a/pennylane/gradients/gradient_transform.py b/pennylane/gradients/gradient_transform.py index 2a128f92a16..218a9226087 100644 --- a/pennylane/gradients/gradient_transform.py +++ b/pennylane/gradients/gradient_transform.py @@ -391,13 +391,19 @@ def reorder_grads(grads, tape_specs): # pylint: disable=too-many-return-statements,too-many-branches -def _contract_qjac_with_cjac(qjac, cjac, num_measurements, has_partitioned_shots): +def _contract_qjac_with_cjac(qjac, cjac, tape): """Contract a quantum Jacobian with a classical preprocessing Jacobian. Essentially, this function computes the generalized version of ``tensordot(qjac, cjac)`` over the tape parameter axis, adapted to the new return type system. This function takes the measurement shapes and different QNode arguments into account. """ + num_measurements = len(tape.measurements) + has_partitioned_shots = tape.shots.has_partitioned_shots + + if isinstance(qjac, tuple) and len(qjac) == 1: + qjac = qjac[0] + if isinstance(cjac, tuple) and len(cjac) == 1: cjac = cjac[0] @@ -453,7 +459,7 @@ def _reshape(x): return tuple(tuple(tdot(qml.math.stack(q), c) for c in cjac if c is not None) for q in qjac) -class gradient_transform(qml.batch_transform): +class gradient_transform(qml.batch_transform): # pragma: no cover """Decorator for defining quantum gradient transforms. Quantum gradient transforms are a specific case of :class:`~.batch_transform`. @@ -601,8 +607,6 @@ def jacobian_wrapper( qnode, argnum=argnum_cjac, expand_fn=self.expand_fn )(*args, **kwargs) - num_measurements = len(qnode.tape.measurements) - has_partitioned_shots = qnode.tape.shots.has_partitioned_shots - return _contract_qjac_with_cjac(qjac, cjac, num_measurements, has_partitioned_shots) + return _contract_qjac_with_cjac(qjac, cjac, qnode.tape) # pragma: no cover return jacobian_wrapper diff --git a/pennylane/gradients/hadamard_gradient.py b/pennylane/gradients/hadamard_gradient.py index e37df1a393d..dbc99e35bb6 100644 --- a/pennylane/gradients/hadamard_gradient.py +++ b/pennylane/gradients/hadamard_gradient.py @@ -15,9 +15,14 @@ This module contains functions for computing the Hadamard-test gradient of a qubit-based quantum tape. 
""" +# pylint: disable=unused-argument +from typing import Sequence, Callable +from functools import partial import pennylane as qml import pennylane.numpy as np from pennylane.transforms.metric_tensor import _get_aux_wire +from pennylane.transforms.core import transform +from pennylane.gradients.gradient_transform import _contract_qjac_with_cjac from pennylane.transforms.tape_expand import expand_invalid_trainable_hadamard_gradient from .gradient_transform import ( @@ -27,17 +32,40 @@ assert_no_variance, choose_grad_methods, gradient_analysis_and_validation, - gradient_transform, _no_trainable_grad, ) -def _hadamard_grad( - tape, +def _expand_transform_hadamard( + tape: qml.tape.QuantumTape, argnum=None, aux_wire=None, device_wires=None, -): +) -> (Sequence[qml.tape.QuantumTape], Callable): + """Expand function to be applied before hadamard gradient.""" + expanded_tape = expand_invalid_trainable_hadamard_gradient(tape) + + def null_postprocessing(results): + """A postprocesing function returned by a transform that only converts the batch of results + into a result for a single ``QuantumTape``. + """ + return results[0] + + return [expanded_tape], null_postprocessing + + +@partial( + transform, + expand_transform=_expand_transform_hadamard, + classical_cotransform=_contract_qjac_with_cjac, + final_transform=True, +) +def hadamard_grad( + tape: qml.tape.QuantumTape, + argnum=None, + aux_wire=None, + device_wires=None, +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a QNode to compute the Hadamard test gradient of all gates with respect to their inputs. Args: @@ -174,6 +202,7 @@ def _hadamard_grad( The number of trainable parameters may increase due to the decomposition. """ + transform_name = "Hadamard test" assert_no_state_returns(tape.measurements, transform_name) assert_no_variance(tape.measurements, transform_name) @@ -421,8 +450,3 @@ def _get_generators(trainable_op): coeffs = trainable_op.generator().coeffs return coeffs, generators - - -hadamard_grad = gradient_transform( - _hadamard_grad, expand_fn=expand_invalid_trainable_hadamard_gradient -) diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py index 03dd00c3643..d7877bb890a 100644 --- a/pennylane/gradients/parameter_shift.py +++ b/pennylane/gradients/parameter_shift.py @@ -15,11 +15,17 @@ This module contains functions for computing the parameter-shift gradient of a qubit-based quantum tape. 
""" -# pylint: disable=protected-access,too-many-arguments,too-many-statements +# pylint: disable=protected-access,too-many-arguments,too-many-statements,unused-argument +from typing import Sequence, Callable +from functools import partial + import numpy as np import pennylane as qml from pennylane.measurements import VarianceMP +from pennylane.transforms.core import transform +from pennylane.transforms.tape_expand import expand_invalid_trainable +from pennylane.gradients.gradient_transform import _contract_qjac_with_cjac from .finite_difference import finite_diff from .general_shift_rules import ( @@ -35,7 +41,6 @@ assert_multimeasure_not_broadcasted, choose_grad_methods, gradient_analysis_and_validation, - gradient_transform, _no_trainable_grad, reorder_grads, ) @@ -155,7 +160,6 @@ def _single_meas_grad(result, coeffs, unshifted_coeff, r0): ) # pragma: no cover # return the unshifted term, which is the only contribution return qml.math.array(unshifted_coeff * r0) - result = qml.math.stack(result) coeffs = qml.math.convert_like(coeffs, result) g = qml.math.tensordot(result, coeffs, [[0], [0]]) @@ -719,16 +723,42 @@ def var_param_shift(tape, argnum, shifts=None, gradient_recipes=None, f0=None, b return gradient_tapes, processing_fn -@gradient_transform +def _expand_transform_param_shift( + tape: qml.tape.QuantumTape, + argnum=None, + shifts=None, + gradient_recipes=None, + fallback_fn=finite_diff, + f0=None, + broadcast=False, +) -> (Sequence[qml.tape.QuantumTape], Callable): + """Expand function to be applied before parameter shift.""" + expanded_tape = expand_invalid_trainable(tape) + + def null_postprocessing(results): + """A postprocesing function returned by a transform that only converts the batch of results + into a result for a single ``QuantumTape``. + """ + return results[0] + + return [expanded_tape], null_postprocessing + + +@partial( + transform, + expand_transform=_expand_transform_param_shift, + classical_cotransform=_contract_qjac_with_cjac, + final_transform=True, +) def param_shift( - tape, + tape: qml.tape.QuantumTape, argnum=None, shifts=None, gradient_recipes=None, fallback_fn=finite_diff, f0=None, broadcast=False, -): +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a QNode to compute the parameter-shift gradient of all gate parameters with respect to its inputs. @@ -1004,6 +1034,7 @@ def param_shift( Note that ``broadcast=True`` requires additional memory by a factor of the largest batch_size of the created tapes. """ + transform_name = "parameter-shift rule" assert_no_state_returns(tape.measurements, transform_name) assert_multimeasure_not_broadcasted(tape.measurements, broadcast) diff --git a/pennylane/gradients/parameter_shift_cv.py b/pennylane/gradients/parameter_shift_cv.py index 7aeec09ad4b..06b3282094d 100644 --- a/pennylane/gradients/parameter_shift_cv.py +++ b/pennylane/gradients/parameter_shift_cv.py @@ -15,21 +15,25 @@ This module contains functions for computing the parameter-shift gradient of a CV-based quantum tape. 
""" -# pylint: disable=protected-access,too-many-arguments,too-many-statements,too-many-branches +# pylint: disable=protected-access,too-many-arguments,too-many-statements,too-many-branches,unused-argument +from typing import Sequence, Callable import itertools +from functools import partial import warnings import numpy as np import pennylane as qml from pennylane.measurements import ExpectationMP, ProbabilityMP, StateMP, VarianceMP +from pennylane.transforms.core import transform +from pennylane.transforms.tape_expand import expand_invalid_trainable +from pennylane.gradients.gradient_transform import _contract_qjac_with_cjac from .finite_difference import finite_diff from .general_shift_rules import generate_shifted_tapes, process_shifts from .gradient_transform import ( choose_grad_methods, _grad_method_validation, - gradient_transform, _no_trainable_grad, ) from .parameter_shift import _get_operation_recipe, expval_param_shift @@ -478,11 +482,36 @@ def processing_fn(results): return gradient_tapes, processing_fn -# TODO: integration of CV devices with new return types -# pylint: disable=unused-argument -@gradient_transform +def _expand_transform_param_shift_cv( + tape: qml.tape.QuantumTape, + dev, + argnum=None, + shifts=None, + gradient_recipes=None, + fallback_fn=finite_diff, + f0=None, + force_order2=False, +) -> (Sequence[qml.tape.QuantumTape], Callable): + """Expand function to be applied before parameter shift CV.""" + expanded_tape = expand_invalid_trainable(tape) + + def null_postprocessing(results): + """A postprocesing function returned by a transform that only converts the batch of results + into a result for a single ``QuantumTape``. + """ + return results[0] + + return [expanded_tape], null_postprocessing + + +@partial( + transform, + expand_transform=_expand_transform_param_shift_cv, + classical_cotransform=_contract_qjac_with_cjac, + final_transform=True, +) def param_shift_cv( - tape, + tape: qml.tape.QuantumTape, dev, argnum=None, shifts=None, @@ -490,7 +519,7 @@ def param_shift_cv( fallback_fn=finite_diff, f0=None, force_order2=False, -): +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a continuous-variable QNode to compute the parameter-shift gradient of all gate parameters with respect to its inputs. diff --git a/pennylane/gradients/pulse_gradient.py b/pennylane/gradients/pulse_gradient.py index 47cc189fd32..2693d10b675 100644 --- a/pennylane/gradients/pulse_gradient.py +++ b/pennylane/gradients/pulse_gradient.py @@ -15,11 +15,14 @@ This module contains functions for computing the stochastic parameter-shift gradient of pulse sequences in a qubit-based quantum tape. 
""" +from typing import Sequence, Callable +from functools import partial import warnings import numpy as np import pennylane as qml from pennylane.pulse import ParametrizedEvolution, HardwareHamiltonian +from pennylane.transforms.core import transform from .parameter_shift import _make_zero_rep from .general_shift_rules import eigvals_to_frequencies, generate_shift_rule @@ -30,7 +33,6 @@ assert_no_variance, choose_grad_methods, gradient_analysis_and_validation, - gradient_transform, _no_trainable_grad, reorder_grads, ) @@ -281,9 +283,14 @@ def _psr_and_contract(res_list, cjacs, int_prefactor): # pylint: disable=too-many-arguments -def _stoch_pulse_grad( - tape, argnum=None, num_split_times=1, sampler_seed=None, use_broadcasting=False -): +@partial(transform, final_transform=True) +def stoch_pulse_grad( + tape: qml.tape.QuantumTape, + argnum=None, + num_split_times=1, + sampler_seed=None, + use_broadcasting=False, +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Compute the gradient of a quantum circuit composed of pulse sequences by applying the stochastic parameter shift rule. @@ -854,21 +861,7 @@ def processing_fn(results): return tapes, processing_fn -def expand_invalid_trainable_stoch_pulse_grad(x, *args, **kwargs): - r"""Do not expand any operation. We expect the ``stoch_pulse_grad`` to be used - on pulse programs and we do not expect decomposition pipelines between pulses - and gate-based circuits yet. - """ - # pylint:disable=unused-argument - return x - - -stoch_pulse_grad = gradient_transform( - _stoch_pulse_grad, expand_fn=expand_invalid_trainable_stoch_pulse_grad -) - - -@stoch_pulse_grad.custom_qnode_wrapper +@stoch_pulse_grad.custom_qnode_transform def stoch_pulse_grad_qnode_wrapper(self, qnode, targs, tkwargs): """A custom QNode wrapper for the gradient transform :func:`~.stoch_pulse_grad`. It raises an error, so that applying ``stoch_pulse_grad`` to a ``QNode`` directly diff --git a/pennylane/gradients/pulse_gradient_odegen.py b/pennylane/gradients/pulse_gradient_odegen.py index 927ec620d39..39dc815f566 100644 --- a/pennylane/gradients/pulse_gradient_odegen.py +++ b/pennylane/gradients/pulse_gradient_odegen.py @@ -15,6 +15,7 @@ This module contains functions for computing the pulse generator parameter-shift gradient of pulse sequences in a qubit-based quantum tape. """ +from typing import Callable, Sequence import warnings from functools import partial import numpy as np @@ -23,6 +24,7 @@ from pennylane.pulse import ParametrizedEvolution from pennylane.ops.qubit.special_unitary import pauli_basis_strings, _pauli_decompose +from pennylane.transforms.core import transform from .parameter_shift import _make_zero_rep from .pulse_gradient import _assert_has_jax, raise_pulse_diff_on_qnode @@ -33,7 +35,6 @@ assert_no_variance, choose_grad_methods, gradient_analysis_and_validation, - gradient_transform, _no_trainable_grad, reorder_grads, ) @@ -398,7 +399,10 @@ def processing_fn(results): return gradient_tapes, processing_fn -def _pulse_odegen(tape, argnum=None, atol=1e-7): +@partial(transform, final_transform=True) +def pulse_odegen( + tape: qml.tape.QuantumTape, argnum=None, atol=1e-7 +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a QNode to compute the pulse generator parameter-shift gradient of pulses in a pulse program with respect to their inputs. 
This method combines automatic differentiation of few-qubit operations with @@ -699,31 +703,19 @@ def circuit(params): return _expval_pulse_odegen(tape, argnum, atol) -def expand_invalid_trainable_pulse_odegen(x, *args, **kwargs): - r"""Do not expand any operation. We expect the ``pulse_odegen`` to be used - on pulse programs and we do not expect decomposition pipelines between pulses - and gate-based circuits yet. - """ - # pylint:disable=unused-argument - return x - - -pulse_odegen = gradient_transform(_pulse_odegen, expand_fn=expand_invalid_trainable_pulse_odegen) - - -def _legacy_pulse_generator_wrapper(*args, **kwargs): +def _legacy_pulse_generator_wrapper( + tape: qml.tape.QuantumTape, argnum=None, atol=1e-7 +) -> (Sequence[qml.tape.QuantumTape], Callable): warnings.warn( "pulse_generator for gradient computation has been renamed to pulse_odegen and will not be available in pennylane v0.34 onwards" ) - return _pulse_odegen(*args, **kwargs) + return pulse_odegen(tape, argnum, atol) -pulse_generator = gradient_transform( - _legacy_pulse_generator_wrapper, expand_fn=expand_invalid_trainable_pulse_odegen -) +pulse_generator = transform(_legacy_pulse_generator_wrapper, final_transform=True) -@pulse_odegen.custom_qnode_wrapper +@pulse_odegen.custom_qnode_transform def pulse_odegen_qnode_wrapper(self, qnode, targs, tkwargs): """A custom QNode wrapper for the gradient transform :func:`~.pulse_odegen`. It raises an error, so that applying ``pulse_odegen`` to a ``QNode`` directly diff --git a/pennylane/gradients/spsa_gradient.py b/pennylane/gradients/spsa_gradient.py index 70239178d1c..f49454fe4b7 100644 --- a/pennylane/gradients/spsa_gradient.py +++ b/pennylane/gradients/spsa_gradient.py @@ -15,18 +15,21 @@ This module contains functions for computing the SPSA gradient of a quantum tape. """ -# pylint: disable=protected-access,too-many-arguments,too-many-branches,too-many-statements +# pylint: disable=protected-access,too-many-arguments,too-many-branches,too-many-statements,unused-argument +from typing import Sequence, Callable from functools import partial import numpy as np import pennylane as qml +from pennylane.transforms.core import transform +from pennylane.gradients.gradient_transform import _contract_qjac_with_cjac +from pennylane.transforms.tape_expand import expand_invalid_trainable from .finite_difference import _processing_fn, finite_diff_coeffs from .gradient_transform import ( _all_zero_grad, assert_no_tape_batching, - gradient_transform, choose_grad_methods, gradient_analysis_and_validation, _no_trainable_grad, @@ -56,9 +59,39 @@ def _rademacher_sampler(indices, num_params, *args, rng): return direction -@gradient_transform +def _expand_transform_spsa( + tape: qml.tape.QuantumTape, + argnum=None, + h=1e-5, + approx_order=2, + n=1, + strategy="center", + f0=None, + validate_params=True, + num_directions=1, + sampler=_rademacher_sampler, + sampler_rng=None, +) -> (Sequence[qml.tape.QuantumTape], Callable): + """Expand function to be applied before the SPSA gradient.""" + expanded_tape = expand_invalid_trainable(tape) + + def null_postprocessing(results): + """A postprocessing function returned by a transform that only converts the batch of results + into a result for a single ``QuantumTape``.
+ """ + return results[0] + + return [expanded_tape], null_postprocessing + + +@partial( + transform, + expand_transform=_expand_transform_spsa, + classical_cotransform=_contract_qjac_with_cjac, + final_transform=True, +) def spsa_grad( - tape, + tape: qml.tape.QuantumTape, argnum=None, h=1e-5, approx_order=2, @@ -69,7 +102,7 @@ def spsa_grad( num_directions=1, sampler=_rademacher_sampler, sampler_rng=None, -): +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Transform a QNode to compute the SPSA gradient of all gate parameters with respect to its inputs. This estimator shifts all parameters simultaneously and approximates the gradient based on these shifts and a @@ -250,6 +283,7 @@ def spsa_grad( Note that the stochastic approximation and the fluctuations from the shot noise of the device accumulate, leading to a very coarse-grained estimate for the gradient. """ + transform_name = "SPSA" assert_no_tape_batching(tape, transform_name) diff --git a/pennylane/interfaces/autograd.py b/pennylane/interfaces/autograd.py index 62d7b384221..6ca0e2a7d76 100644 --- a/pennylane/interfaces/autograd.py +++ b/pennylane/interfaces/autograd.py @@ -241,7 +241,7 @@ def grad_fn(dy): else: # Need to compute the Jacobians on the backward pass (accumulation="backward") - if isinstance(gradient_fn, qml.gradients.gradient_transform): + if isinstance(gradient_fn, qml.transforms.core.TransformDispatcher): # Gradient function is a gradient transform. # Generate and execute the required gradient tapes diff --git a/pennylane/interfaces/execution.py b/pennylane/interfaces/execution.py index a5d07e979ae..202ed42488a 100644 --- a/pennylane/interfaces/execution.py +++ b/pennylane/interfaces/execution.py @@ -399,6 +399,7 @@ def execute( gradient_fn: Optional[Union[Callable, str]] = None, interface="auto", transform_program=None, + config=None, grad_on_execution="best", gradient_kwargs=None, cache: Union[bool, dict, Cache] = True, @@ -423,6 +424,8 @@ def execute( interface (str): The interface that will be used for classical autodifferentiation. This affects the types of parameters that can exist on the input tapes. Available options include ``autograd``, ``torch``, ``tf``, ``jax`` and ``auto``. + transform_program(qml.transforms.core.TransformProgram): A transform program to be applied to the initial tape. + config (qml.devices.ExecutionConfig): A datastructure describing the parameters needed to fully describe the execution. grad_on_execution (bool, str): Whether the gradients should be computed on the execution or not. 
Only applies if the device is queried for the gradient; gradient transform functions available in ``qml.gradients`` are only supported on the backward @@ -552,18 +555,8 @@ def cost_fn(params, x): interface = get_jax_interface_name(tapes) - if gradient_fn is None: - _gradient_method = None - elif isinstance(gradient_fn, str): - _gradient_method = gradient_fn - else: - _gradient_method = "gradient-transform" - config = qml.devices.ExecutionConfig( - interface=interface, - gradient_method=_gradient_method, - grad_on_execution=None if grad_on_execution == "best" else grad_on_execution, - ) gradient_kwargs = gradient_kwargs or {} + config = config or _get_execution_config(gradient_fn, grad_on_execution, interface, device) if isinstance(cache, bool) and cache: # cache=True: create a LRUCache object @@ -604,9 +597,7 @@ def inner_execute_with_empty_jac(tapes, **_): "device batch transforms cannot be turned off with the new device interface.", UserWarning, ) - device_transform_program, config = device.preprocess(config) - full_transform_program = transform_program + device_transform_program - tapes, post_processing = full_transform_program(tapes) + tapes, post_processing = transform_program(tapes) else: # TODO: Remove once old device are removed tapes, program_post_processing = transform_program(tapes) @@ -747,3 +738,21 @@ def device_gradient_fn(inner_tapes, **gradient_kwargs): ) return post_processing(results) + + +def _get_execution_config(gradient_fn, grad_on_execution, interface, device): + """Helper function to get the execution config.""" + if gradient_fn is None: + _gradient_method = None + elif isinstance(gradient_fn, str): + _gradient_method = gradient_fn + else: + _gradient_method = "gradient-transform" + config = qml.devices.ExecutionConfig( + interface=interface, + gradient_method=_gradient_method, + grad_on_execution=None if grad_on_execution == "best" else grad_on_execution, + ) + if isinstance(device, qml.devices.Device): + _, config = device.preprocess(config) + return config diff --git a/pennylane/interfaces/jax.py b/pennylane/interfaces/jax.py index 632be8a51a7..4e1325063cf 100644 --- a/pennylane/interfaces/jax.py +++ b/pennylane/interfaces/jax.py @@ -176,7 +176,7 @@ def execute_wrapper(params): @execute_wrapper.defjvp def execute_wrapper_jvp(primals, tangents): """Primals[0] are parameters as Jax tracers and tangents[0] is a list of tangent vectors as Jax tracers.""" - if isinstance(gradient_fn, qml.gradients.gradient_transform): + if isinstance(gradient_fn, qml.transforms.core.TransformDispatcher): at_max_diff = _n == max_diff new_tapes = set_parameters_on_copy_and_unwrap(tapes, primals[0], unwrap=False) _args = ( diff --git a/pennylane/interfaces/jax_jit.py b/pennylane/interfaces/jax_jit.py index 902128e787a..1b182a993f7 100644 --- a/pennylane/interfaces/jax_jit.py +++ b/pennylane/interfaces/jax_jit.py @@ -340,7 +340,7 @@ def execute_wrapper_jvp(primals, tangents): idx for idx, t in enumerate(tangent) if not isinstance(t, Zero) ) - if not isinstance(gradient_fn, qml.gradients.gradient_transform): + if not isinstance(gradient_fn, qml.transforms.core.TransformDispatcher): jacobians_func = _device_method_jac_via_callback elif _n == max_diff: jacobians_func = _grad_transform_jac_via_callback diff --git a/pennylane/interfaces/tensorflow.py b/pennylane/interfaces/tensorflow.py index 8947d84084b..a8e2b18388f 100644 --- a/pennylane/interfaces/tensorflow.py +++ b/pennylane/interfaces/tensorflow.py @@ -243,7 +243,7 @@ def grad_fn(*dy, **tfkwargs): else: # Need to compute the Jacobians on 
the backward pass (accumulation="backward") - if isinstance(gradient_fn, qml.gradients.gradient_transform): + if isinstance(gradient_fn, qml.transforms.core.TransformDispatcher): # Gradient function is a gradient transform. # Generate and execute the required gradient tapes diff --git a/pennylane/interfaces/tensorflow_autograph.py b/pennylane/interfaces/tensorflow_autograph.py index 0be8b64f33c..8ad073b0d55 100644 --- a/pennylane/interfaces/tensorflow_autograph.py +++ b/pennylane/interfaces/tensorflow_autograph.py @@ -197,7 +197,7 @@ def _backward(*args): else: # Need to compute the Jacobians on the backward pass (accumulation="backward") - if isinstance(gradient_fn, qml.gradients.gradient_transform): + if isinstance(gradient_fn, qml.transforms.core.TransformDispatcher): # Gradient function is a gradient transform. # Generate and execute the required gradient tapes diff --git a/pennylane/interfaces/torch.py b/pennylane/interfaces/torch.py index 0ba68c71de1..2a4172078b7 100644 --- a/pennylane/interfaces/torch.py +++ b/pennylane/interfaces/torch.py @@ -183,7 +183,7 @@ def backward(ctx, *dy): else: # Need to compute the Jacobians on the backward pass (accumulation="backward") - if isinstance(ctx.gradient_fn, qml.gradients.gradient_transform): + if isinstance(ctx.gradient_fn, qml.transforms.core.TransformDispatcher): # Gradient function is a gradient transform. # Generate and execute the required gradient tapes diff --git a/pennylane/optimize/qnspsa.py b/pennylane/optimize/qnspsa.py index 29a379d1e54..e1c08453172 100644 --- a/pennylane/optimize/qnspsa.py +++ b/pennylane/optimize/qnspsa.py @@ -218,7 +218,20 @@ def _step_core(self, cost, args, kwargs): all_grad_dirs.append(grad_dirs) all_tensor_dirs.append(tensor_dirs) - raw_results = qml.execute(all_grad_tapes + all_metric_tapes, cost.device, None) + if isinstance(cost.device, qml.devices.Device): + program, config = cost.device.preprocess() + + raw_results = qml.execute( + all_grad_tapes + all_metric_tapes, + cost.device, + None, + transform_program=program, + config=config, + ) + else: + raw_results = qml.execute( + all_grad_tapes + all_metric_tapes, cost.device, None + ) # pragma: no cover grads = [ self._post_process_grad(raw_results[2 * i : 2 * i + 2], all_grad_dirs[i]) for i in range(self.resamplings) @@ -425,8 +438,10 @@ def _apply_blocking(self, cost, args, kwargs, params_next): cost.construct(params_next, kwargs) tape_loss_next = cost.tape.copy(copy_operations=True) - - loss_curr, loss_next = qml.execute([tape_loss_curr, tape_loss_next], cost.device, None) + program, _ = cost.device.preprocess() + loss_curr, loss_next = qml.execute( + [tape_loss_curr, tape_loss_next], cost.device, None, transform_program=program + ) # self.k has been updated earlier ind = (self.k - 2) % self.last_n_steps.size diff --git a/pennylane/optimize/riemannian_gradient.py b/pennylane/optimize/riemannian_gradient.py index ac9371769c1..4f69c8fdca7 100644 --- a/pennylane/optimize/riemannian_gradient.py +++ b/pennylane/optimize/riemannian_gradient.py @@ -390,7 +390,24 @@ def get_omegas(self): self.lie_algebra_basis_names, self.nqubits, ) - circuits = qml.execute(circuits, self.circuit.device, gradient_fn=None) + + if isinstance(self.circuit.device, qml.devices.Device): + program, config = self.circuit.device.preprocess() + + circuits = qml.execute( + circuits, + self.circuit.device, + transform_program=program, + config=config, + gradient_fn=None, + ) + else: + circuits = qml.execute( + circuits, self.circuit.device, gradient_fn=None + ) # pragma: no cover + + 
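+            # On the new device API, ``device.preprocess()`` returns the transform
+            # program and execution config that ``qml.execute`` consumes; legacy
+            # devices accept neither keyword, hence the branch on the device type.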
circuits_plus = np.array(circuits[: len(circuits) // 2]).reshape( len(self.coeffs), len(self.lie_algebra_basis_names) ) diff --git a/pennylane/qinfo/transforms.py b/pennylane/qinfo/transforms.py index 1ab0c0a8c6a..3e6f83c0bbd 100644 --- a/pennylane/qinfo/transforms.py +++ b/pennylane/qinfo/transforms.py @@ -646,7 +646,10 @@ def wrapper(*args, **kwargs): return wrapper -def quantum_fisher(qnode, *args, **kwargs): +@partial(transform, is_informative=True) +def quantum_fisher( + tape: qml.tape.QuantumTape, device, *args, **kwargs +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Returns a function that computes the quantum Fisher information matrix (QFIM) of a given :class:`.QNode`. Given a parametrized quantum state :math:`|\psi(\bm{\theta})\rangle`, the quantum Fisher information matrix (QFIM) quantifies how changes to the parameters :math:`\bm{\theta}` @@ -731,17 +734,33 @@ def circ(params): """ - if qnode.device.shots and isinstance(qnode.device, (DefaultQubitLegacy, DefaultQubit)): + if device.shots and isinstance(device, (DefaultQubitLegacy, DefaultQubit)): + tapes, processing_fn = metric_tensor(tape, *args, **kwargs) - def wrapper(*args0, **kwargs0): - return 4 * metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) + def processing_fn_multiply(res): + res = qml.execute(res, device=device) + return 4 * processing_fn(res) - else: + return tapes, processing_fn_multiply - def wrapper(*args0, **kwargs0): - return 4 * adjoint_metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0) + res = adjoint_metric_tensor(tape, *args, **kwargs) - return wrapper + def processing_fn_multiply(r): # pylint: disable=function-redefined + r = qml.math.stack(r) + return 4 * r + + return res, processing_fn_multiply + + +@quantum_fisher.custom_qnode_transform +def qnode_execution_wrapper(self, qnode, targs, tkwargs): + """Here, we overwrite the QNode execution wrapper in order + to take into account that classical processing may be present + inside the QNode.""" + + tkwargs["device"] = qnode.device + + return self.default_qnode_transform(qnode, targs, tkwargs) def fidelity(qnode0, qnode1, wires0, wires1): diff --git a/pennylane/qnode.py b/pennylane/qnode.py index 3b79f2cce01..3cac738e5ee 100644 --- a/pennylane/qnode.py +++ b/pennylane/qnode.py @@ -615,7 +615,7 @@ def get_gradient_fn(device, interface, diff_method="best", shots=None): "'device', 'adjoint', 'spsa', 'hadamard')." ) - if isinstance(diff_method, qml.gradients.gradient_transform): + if isinstance(diff_method, qml.transforms.core.TransformDispatcher): return diff_method, {}, device raise qml.QuantumFunctionError( @@ -918,11 +918,6 @@ def construct(self, args, kwargs): # pylint: disable=too-many-branches else: self._tape = self.device.expand_fn(self.tape, max_expansion=self.max_expansion) - # If the gradient function is a transform, expand the tape so that - # all operations are supported by the transform.
- if isinstance(self.gradient_fn, qml.gradients.gradient_transform): - self._tape = self.gradient_fn.expand_fn(self._tape) - if old_interface == "auto": self.interface = "auto" @@ -969,13 +964,54 @@ def __call__(self, *args, **kwargs) -> qml.typing.Result: ) self._tape_cached = using_custom_cache and self.tape.hash in cache + config = None + # Add the device program to the QNode program + if isinstance(self.device, qml.devices.Device): + if self.gradient_fn is None: + _gradient_method = None + elif isinstance(self.gradient_fn, str): + _gradient_method = self.gradient_fn + else: + _gradient_method = "gradient-transform" + grad_on_execution = self.execute_kwargs.get("grad_on_execution") + config = qml.devices.ExecutionConfig( + interface=self.interface, + gradient_method=_gradient_method, + grad_on_execution=None if grad_on_execution == "best" else grad_on_execution, + ) + device_transform_program, config = self.device.preprocess(execution_config=config) + full_transform_program = self.transform_program + device_transform_program + else: + full_transform_program = self.transform_program + # Add the gradient expand to the program if necessary + if ( + isinstance(self.gradient_fn, qml.transforms.core.TransformDispatcher) + and self.gradient_fn.expand_transform + ): + full_transform_program.insert_front_transform( + qml.transforms.core.TransformDispatcher(self.gradient_fn.expand_transform), + **self.gradient_kwargs, + ) + # Calculate the classical jacobians if necessary + if full_transform_program.has_classical_cotransform(): + argnums = full_transform_program[-1]._kwargs.pop( + "argnums", None + ) # pylint: disable=protected-access + full_transform_program._set_all_classical_jacobians( + self, args, kwargs, argnums + ) # pylint: disable=protected-access + full_transform_program._set_all_argnums( + self, args, kwargs, argnums + ) # pylint: disable=protected-access + # pylint: disable=unexpected-keyword-arg res = qml.execute( (self._tape,), device=self.device, gradient_fn=self.gradient_fn, interface=self.interface, - transform_program=self.transform_program, + transform_program=full_transform_program, + config=config, gradient_kwargs=self.gradient_kwargs, override_shots=override_shots, **self.execute_kwargs, diff --git a/pennylane/transforms/adjoint_metric_tensor.py b/pennylane/transforms/adjoint_metric_tensor.py index 6eaf0189736..0339c4d112c 100644 --- a/pennylane/transforms/adjoint_metric_tensor.py +++ b/pennylane/transforms/adjoint_metric_tensor.py @@ -14,14 +14,16 @@ """ Contains the adjoint_metric_tensor. """ -import warnings +from typing import Sequence, Callable from itertools import chain +from functools import partial import pennylane as qml from pennylane import numpy as np -# pylint: disable=too-many-statements +# pylint: disable=too-many-statements,unused-argument from pennylane.transforms.metric_tensor import _contract_metric_tensor_with_cjac +from pennylane.transforms.core import transform def _reshape_real_imag(state, dim): @@ -52,7 +54,14 @@ def _group_operations(tape): return trainable_operations, group_after_trainable_op -def adjoint_metric_tensor(circuit, device=None, hybrid=True): +@partial( + transform, + classical_cotransform=_contract_metric_tensor_with_cjac, + is_informative=True, +) +def adjoint_metric_tensor( + tape: qml.tape.QuantumTape, +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Implements the adjoint method outlined in `Jones <https://arxiv.org/abs/2011.02991>`__ to compute the metric tensor.
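A minimal usage sketch of the decorated transform above (the device and circuit are illustrative): because the transform is registered with ``is_informative=True``, applying it to a QNode returns a callable that runs the processing function and hands back the metric tensor itself.

    import pennylane as qml
    from pennylane import numpy as np

    dev = qml.device("default.qubit", wires=2)

    @qml.qnode(dev)
    def circuit(weights):
        qml.RX(weights[0], wires=0)
        qml.CNOT(wires=[0, 1])
        qml.RY(weights[1], wires=1)
        return qml.expval(qml.PauliZ(1))

    weights = np.array([0.1, 0.2], requires_grad=True)
    # Returns the (2, 2) metric tensor; the classical cotransform contracts it
    # with the classical Jacobian when classical processing sits inside the QNode.
    mt = qml.adjoint_metric_tensor(circuit)(weights)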
@@ -71,10 +80,7 @@ def adjoint_metric_tensor(circuit, device=None, hybrid=True): Note also that this makes the metric tensor strictly real-valued. Args: - circuit (.QuantumTape or .QNode): Circuit to compute the metric tensor of - device (.Device): Device to use for the adjoint method - hybrid (bool): Whether to take classical preprocessing into account. Ignored if - ``circuit`` is a tape. + tape (.QuantumTape): Circuit to compute the metric tensor of Returns: array: the metric tensor of the tape with respect to its trainable parameters. @@ -128,147 +134,100 @@ def circuit(weights): The drawback of the adjoint method is that it is only available on simulators and without shot simulations. """ - if isinstance(circuit, qml.tape.QuantumScript): - return _adjoint_metric_tensor_tape(circuit) - if isinstance(circuit, (qml.QNode, qml.ExpvalCost)): - return _adjoint_metric_tensor_qnode(circuit, device, hybrid) - - raise qml.QuantumFunctionError("The passed object is not a QuantumTape or QNode.") - - -def _adjoint_metric_tensor_tape(tape): - """Computes the metric tensor of a tape using the adjoint method and a given device.""" - # pylint: disable=protected-access - if tape.shots: - raise ValueError( - "The adjoint method for the metric tensor is only implemented for shots=None" - ) - if set(tape.wires) != set(range(tape.num_wires)): - wire_map = {w: i for i, w in enumerate(tape.wires)} - tape = qml.map_wires(tape, wire_map) - tape = qml.transforms.expand_trainable_multipar(tape) - - # Divide all operations of a tape into trainable operations and blocks - # of untrainable operations after each trainable one. - trainable_operations, group_after_trainable_op = _group_operations(tape) - - dim = 2**tape.num_wires - # generate and extract initial state - prep = tape[0] if len(tape) > 0 and isinstance(tape[0], qml.operation.StatePrep) else None - - interface = qml.math.get_interface(*tape.get_parameters(trainable_only=False)) - psi = qml.devices.qubit.create_initial_state(tape.wires, prep, like=interface) - - # initialize metric tensor components (which all will be real-valued) - like_real = qml.math.real(psi[0]) - L = qml.math.convert_like(qml.math.zeros((tape.num_params, tape.num_params)), like_real) - T = qml.math.convert_like(qml.math.zeros((tape.num_params,)), like_real) - - for op in group_after_trainable_op[-1]: - psi = qml.devices.qubit.apply_operation(op, psi) - - for j, outer_op in enumerate(trainable_operations): - generator_1, prefactor_1 = qml.generator(outer_op) - - # the state vector phi is missing a factor of 1j * prefactor_1 - phi = qml.devices.qubit.apply_operation(generator_1, psi) - - phi_real, phi_imag = _reshape_real_imag(phi, dim) - diag_value = prefactor_1**2 * ( - qml.math.dot(phi_real, phi_real) + qml.math.dot(phi_imag, phi_imag) - ) - L = qml.math.scatter_element_add(L, (j, j), diag_value) - - lam = psi * 1.0 - lam_real, lam_imag = _reshape_real_imag(lam, dim) - - # this entry is missing a factor of 1j - value = prefactor_1 * (qml.math.dot(lam_real, phi_real) + qml.math.dot(lam_imag, phi_imag)) - T = qml.math.scatter_element_add(T, (j,), value) - - for i in range(j - 1, -1, -1): - # after first iteration of inner loop: apply U_{i+1}^\dagger - if i < j - 1: - phi = qml.devices.qubit.apply_operation( - qml.adjoint(trainable_operations[i + 1], lazy=False), phi - ) - # apply V_{i}^\dagger - for op in reversed(group_after_trainable_op[i]): - adj_op = qml.adjoint(op, lazy=False) - phi = qml.devices.qubit.apply_operation(adj_op, phi) - lam = qml.devices.qubit.apply_operation(adj_op, 
lam) - - inner_op = trainable_operations[i] - # extract and apply G_i - generator_2, prefactor_2 = qml.generator(inner_op) - # this state vector is missing a factor of 1j * prefactor_2 - mu = qml.devices.qubit.apply_operation(generator_2, lam) - phi_real, phi_imag = _reshape_real_imag(phi, dim) - mu_real, mu_imag = _reshape_real_imag(mu, dim) - # this entry is missing a factor of 1j * (-1j) = 1, i.e. none - value = ( - prefactor_1 - * prefactor_2 - * (qml.math.dot(mu_real, phi_real) + qml.math.dot(mu_imag, phi_imag)) + def processing_fn(tapes): + tape = tapes[0] + if tape.shots: + raise ValueError( + "The adjoint method for the metric tensor is only implemented for shots=None" ) - L = qml.math.scatter_element_add( - L, [(i, j), (j, i)], value * qml.math.convert_like(qml.math.ones((2,)), value) - ) - # apply U_i^\dagger - lam = qml.devices.qubit.apply_operation(qml.adjoint(inner_op, lazy=False), lam) + if set(tape.wires) != set(range(tape.num_wires)): + wire_map = {w: i for i, w in enumerate(tape.wires)} + tape = qml.map_wires(tape, wire_map) + tape = qml.transforms.expand_trainable_multipar(tape) - # apply U_j and V_j - psi = qml.devices.qubit.apply_operation(outer_op, psi) - for op in group_after_trainable_op[j]: - psi = qml.devices.qubit.apply_operation(op, psi) + # Divide all operations of a tape into trainable operations and blocks + # of untrainable operations after each trainable one. + trainable_operations, group_after_trainable_op = _group_operations(tape) - # postprocessing: combine L and T into the metric tensor. - # We require outer(conj(T), T) here, but as we skipped the factor 1j above, - # the stored T is real-valued. Thus we have -1j*1j*outer(T, T) = outer(T, T) - metric_tensor = L - qml.math.tensordot(T, T, 0) + dim = 2**tape.num_wires + # generate and extract initial state + prep = tape[0] if len(tape) > 0 and isinstance(tape[0], qml.operation.StatePrep) else None - return metric_tensor + interface = qml.math.get_interface(*tape.get_parameters(trainable_only=False)) + psi = qml.devices.qubit.create_initial_state(tape.wires, prep, like=interface) + # initialize metric tensor components (which all will be real-valued) + like_real = qml.math.real(psi[0]) + L = qml.math.convert_like(qml.math.zeros((tape.num_params, tape.num_params)), like_real) + T = qml.math.convert_like(qml.math.zeros((tape.num_params,)), like_real) -def _adjoint_metric_tensor_qnode(qnode, device, hybrid): - """Computes the metric tensor of a qnode using the adjoint method and its device. - For ``hybrid==True`` this wrapper accounts for classical preprocessing within the - QNode. - """ - if device is None: - if isinstance(qnode, qml.ExpvalCost): - if qnode._multiple_devices: # pylint: disable=protected-access - warnings.warn( - "ExpvalCost was instantiated with multiple devices. 
Only the first device " - "will be used to evaluate the metric tensor with the adjoint method.", - UserWarning, - ) - qnode = qnode.qnodes[0] - device = qnode.device + for op in group_after_trainable_op[-1]: + psi = qml.devices.qubit.apply_operation(op, psi) - def wrapper(*args, **kwargs): - old_interface = qnode.interface - if old_interface == "auto": - qnode.interface = qml.math.get_interface(*args, *list(kwargs.values())) + for j, outer_op in enumerate(trainable_operations): + generator_1, prefactor_1 = qml.generator(outer_op) - cjac_fn = qml.transforms.classical_jacobian( - qnode, expand_fn=qml.transforms.expand_trainable_multipar - ) + # the state vector phi is missing a factor of 1j * prefactor_1 + phi = qml.devices.qubit.apply_operation(generator_1, psi) - qnode.construct(args, kwargs) - program, _ = qml.devices.qubit.preprocess() - tapes, _ = program((qnode.tape,)) - mt = _adjoint_metric_tensor_tape(tapes[0]) + phi_real, phi_imag = _reshape_real_imag(phi, dim) + diag_value = prefactor_1**2 * ( + qml.math.dot(phi_real, phi_real) + qml.math.dot(phi_imag, phi_imag) + ) + L = qml.math.scatter_element_add(L, (j, j), diag_value) + + lam = psi * 1.0 + lam_real, lam_imag = _reshape_real_imag(lam, dim) - if old_interface == "auto": - qnode.interface = "auto" + # this entry is missing a factor of 1j + value = prefactor_1 * ( + qml.math.dot(lam_real, phi_real) + qml.math.dot(lam_imag, phi_imag) + ) + T = qml.math.scatter_element_add(T, (j,), value) + + for i in range(j - 1, -1, -1): + # after first iteration of inner loop: apply U_{i+1}^\dagger + if i < j - 1: + phi = qml.devices.qubit.apply_operation( + qml.adjoint(trainable_operations[i + 1], lazy=False), phi + ) + # apply V_{i}^\dagger + for op in reversed(group_after_trainable_op[i]): + adj_op = qml.adjoint(op, lazy=False) + phi = qml.devices.qubit.apply_operation(adj_op, phi) + lam = qml.devices.qubit.apply_operation(adj_op, lam) + + inner_op = trainable_operations[i] + # extract and apply G_i + generator_2, prefactor_2 = qml.generator(inner_op) + # this state vector is missing a factor of 1j * prefactor_2 + mu = qml.devices.qubit.apply_operation(generator_2, lam) + + phi_real, phi_imag = _reshape_real_imag(phi, dim) + mu_real, mu_imag = _reshape_real_imag(mu, dim) + # this entry is missing a factor of 1j * (-1j) = 1, i.e. none + value = ( + prefactor_1 + * prefactor_2 + * (qml.math.dot(mu_real, phi_real) + qml.math.dot(mu_imag, phi_imag)) + ) + L = qml.math.scatter_element_add( + L, [(i, j), (j, i)], value * qml.math.convert_like(qml.math.ones((2,)), value) + ) + # apply U_i^\dagger + lam = qml.devices.qubit.apply_operation(qml.adjoint(inner_op, lazy=False), lam) - if not hybrid: - return mt + # apply U_j and V_j + psi = qml.devices.qubit.apply_operation(outer_op, psi) + for op in group_after_trainable_op[j]: + psi = qml.devices.qubit.apply_operation(op, psi) - cjac = cjac_fn(*args, **kwargs) + # postprocessing: combine L and T into the metric tensor. + # We require outer(conj(T), T) here, but as we skipped the factor 1j above, + # the stored T is real-valued. 
Thus we have -1j*1j*outer(T, T) = outer(T, T) + metric_tensor = L - qml.math.tensordot(T, T, 0) - return metric_tensor + return metric_tensor - return wrapper + return [tape], processing_fn diff --git a/pennylane/transforms/core/transform.py b/pennylane/transforms/core/transform.py index aabe860d5a4..0feeb1ca9e3 100644 --- a/pennylane/transforms/core/transform.py +++ b/pennylane/transforms/core/transform.py @@ -130,10 +130,8 @@ def qnode_circuit(a): # 3: Check the classical co-transform if classical_cotransform is not None: - raise NotImplementedError("Classical cotransforms are not yet integrated.") - # TODO: Add more verification in a future PR - # if not callable(classical_cotransform): - # raise TransformError("The classical co-transform must be a valid Python function.") + if not callable(classical_cotransform): + raise TransformError("The classical co-transform must be a valid Python function.") return TransformDispatcher( quantum_transform, diff --git a/pennylane/transforms/core/transform_dispatcher.py b/pennylane/transforms/core/transform_dispatcher.py index 6b7660a8ad9..93ae8ed9a4e 100644 --- a/pennylane/transforms/core/transform_dispatcher.py +++ b/pennylane/transforms/core/transform_dispatcher.py @@ -67,7 +67,17 @@ def __call__(self, *targs, **tkwargs): # pylint: disable=too-many-return-statem obj, *targs = targs if isinstance(obj, qml.tape.QuantumScript): - transformed_tapes, processing_fn = self._transform(obj, *targs, **tkwargs) + if self._expand_transform: + transformed_tapes, _ = self._expand_transform(obj, *targs, **tkwargs) + transformed_tapes, transform_processing_fn = self._transform( + transformed_tapes[0], *targs, **tkwargs + ) + + def processing_fn(results): + return transform_processing_fn(results) + + else: + transformed_tapes, processing_fn = self._transform(obj, *targs, **tkwargs) if self.is_informative: return processing_fn(transformed_tapes) @@ -108,6 +118,14 @@ def wrapper(obj): return wrapper + def __repr__(self): + return f"<transform: {self._transform.__name__}>" + + @property + def __name__(self): + """Return the quantum transform name.""" + return self._transform.__name__ + @property def transform(self): """Return the quantum transform.""" diff --git a/pennylane/transforms/core/transform_program.py b/pennylane/transforms/core/transform_program.py index 73d716243de..9889ac1b2e0 100644 --- a/pennylane/transforms/core/transform_program.py +++ b/pennylane/transforms/core/transform_program.py @@ -17,6 +17,7 @@ from functools import partial from typing import Callable, List, Tuple, Optional, Sequence +import pennylane as qml from pennylane.typing import Result, ResultBatch from pennylane.tape import QuantumTape @@ -113,6 +114,8 @@ class TransformProgram: def __init__(self, initial_program: Optional[Sequence] = None): self._transform_program = list(initial_program) if initial_program else [] + self._classical_jacobians = None + self._argnums = None def __iter__(self): """list[TransformContainer]: Return an iterator to the underlying transform program.""" @@ -276,36 +279,189 @@ def has_final_transform(self) -> bool: """Check if the transform program has a terminal transform or not.""" return self[-1].final_transform if self else False + def has_classical_cotransform(self) -> bool: + """Check if the transform program has some classical cotransforms.
+ + Returns: + bool: Boolean + """ + return any(t.classical_cotransform is not None for t in self) + + def _set_all_classical_jacobians( + self, qnode, args, kwargs, argnums + ): # pylint: disable=too-many-statements + """It can be called inside the QNode to get all the classical Jacobians for a gradient transform.""" + + def classical_preprocessing(program, *args, **kwargs): + """Returns the trainable gate parameters for a given QNode input.""" + kwargs.pop("shots", None) + kwargs.pop("argnums", None) + qnode.construct(args, kwargs) + tape = qnode.qtape + tapes, _ = program((tape,)) + res = tuple(qml.math.stack(tape.get_parameters(trainable_only=True)) for tape in tapes) + if len(tapes) == 1: + return res[0] + return res + + def jacobian(classical_function, program, argnums, *args, **kwargs): + indices = qml.math.get_trainable_indices(args) + + if qnode.interface in ["jax", "jax-jit"]: + import jax # pylint: disable=import-outside-toplevel + + if isinstance(args[0], jax.numpy.ndarray): + argnums = 0 if argnums is None else argnums + + if not indices and argnums is None: + raise qml.QuantumFunctionError("No trainable parameters.") + + classical_function = partial(classical_function, program) + + if qnode.interface == "autograd": + jac = qml.jacobian(classical_function, argnum=argnums)(*args, **kwargs) + + if qnode.interface == "tf": + import tensorflow as tf # pylint: disable=import-outside-toplevel + + def _jacobian(*args, **kwargs): + with tf.GradientTape() as tape: + gate_params = classical_function(*args, **kwargs) + + jac = tape.jacobian(gate_params, args) + return jac + + jac = _jacobian(*args, **kwargs) + + if qnode.interface == "torch": + import torch # pylint: disable=import-outside-toplevel + + def _jacobian(*args, **kwargs): # pylint: disable=unused-argument + jac = torch.autograd.functional.jacobian(classical_function, args) + return jac + + jac = _jacobian(*args, **kwargs) + + if qnode.interface in ["jax", "jax-jit"]: + import jax # pylint: disable=import-outside-toplevel + + argnums = 0 if argnums is None else argnums + + def _jacobian(*args, **kwargs): + return jax.jacobian(classical_function, argnums=argnums)(*args, **kwargs) + + jac = _jacobian(*args, **kwargs) + + return jac + + classical_jacobians = [] + for index, transform in enumerate(self): + if transform.classical_cotransform: + argnum = transform._kwargs.get("argnum", None) # pylint: disable=protected-access + if qnode.interface == "jax" and argnum: + raise qml.QuantumFunctionError( + "argnum does not work with the Jax interface. You should use argnums instead." + ) + sub_program = TransformProgram(self[0:index]) + classical_jacobian = jacobian( + classical_preprocessing, sub_program, argnums, *args, **kwargs + ) + qnode.construct(args, kwargs) + tapes, _ = sub_program((qnode.tape,)) + multi_tapes = len(tapes) > 1 + if not multi_tapes: + classical_jacobian = [classical_jacobian] + classical_jacobians.append(classical_jacobian) + else: + classical_jacobians.append(None) + self._classical_jacobians = classical_jacobians + # Reset the initial tape + qnode.construct(args, kwargs) + + def _set_all_argnums(self, qnode, args, kwargs, argnums): + """It can be used inside the QNode to set all argnums (tape level) using argnums from the argnums at the QNode + level. 
+ """ + + def jax_argnums_to_tape_trainable(program, argnums, args, kwargs): + import jax # pylint: disable=import-outside-toplevel + + with jax.core.new_main(jax.interpreters.ad.JVPTrace) as main: + trace = jax.interpreters.ad.JVPTrace(main, 0) + + args_jvp = [ + jax.interpreters.ad.JVPTracer(trace, arg, jax.numpy.zeros(arg.shape)) + if i in argnums + else arg + for i, arg in enumerate(args) + ] + + qnode.construct(args_jvp, kwargs) + tape = qnode.qtape + tapes, _ = program((tape,)) + del trace + return tuple(tape.get_parameters(trainable_only=False) for tape in tapes) + + argnums_list = [] + for index, transform in enumerate(self): + argnums = [0] if qnode.interface in ["jax", "jax-jit"] and argnums is None else argnums + if transform.classical_cotransform and argnums: + params = jax_argnums_to_tape_trainable( + TransformProgram(self[0:index]), argnums, args, kwargs + ) + argnums_list.append([qml.math.get_trainable_indices(param) for param in params]) + else: + argnums_list.append(None) + + self._argnums = argnums_list + + qnode.construct(args, kwargs) + def __call__(self, tapes: Tuple[QuantumTape]) -> Tuple[ResultBatch, BatchPostProcessingFn]: if not self: return tapes, null_postprocessing processing_fns_stack = [] - for transform_container in self: - transform, args, kwargs, cotransform, _, _ = transform_container - - if cotransform: - raise NotImplementedError( - "cotransforms are not yet integrated with TransformProgram" - ) + for i, transform_container in enumerate(self): + transform, targs, tkwargs, cotransform, _, _ = transform_container execution_tapes = [] fns = [] slices = [] + classical_fns = [] + slices_classical = [] + start = 0 - for tape in tapes: - new_tapes, fn = transform(tape, *args, **kwargs) + start_classical = 0 + for j, tape in enumerate(tapes): + if self._argnums is not None and self._argnums[i] is not None: + tape.trainable_params = self._argnums[i][j] + new_tapes, fn = transform(tape, *targs, **tkwargs) execution_tapes.extend(new_tapes) + fns.append(fn) end = start + len(new_tapes) slices.append(slice(start, end)) start = end + if cotransform and self._classical_jacobians: + classical_fns.append( + partial(cotransform, cjac=self._classical_jacobians[i][j], tape=tape) + ) + slices_classical.append(slice(start_classical, start_classical + 1)) + start_classical += 1 + + if cotransform: + batch_postprocessing_classical = partial( + _batch_postprocessing, individual_fns=classical_fns, slices=slices_classical + ) + batch_postprocessing_classical.__doc__ = _batch_postprocessing.__doc__ + processing_fns_stack.append(batch_postprocessing_classical) + batch_postprocessing = partial(_batch_postprocessing, individual_fns=fns, slices=slices) batch_postprocessing.__doc__ = _batch_postprocessing.__doc__ - processing_fns_stack.append(batch_postprocessing) # set input tapes for next iteration. @@ -318,4 +474,6 @@ def __call__(self, tapes: Tuple[QuantumTape]) -> Tuple[ResultBatch, BatchPostPro postprocessing_fn.__doc__ = _apply_postprocessing_stack.__doc__ + # Reset classical jacobians + self._classical_jacobians = [] return tuple(tapes), postprocessing_fn diff --git a/pennylane/transforms/metric_tensor.py b/pennylane/transforms/metric_tensor.py index 639f28c94dc..479e21d7e04 100644 --- a/pennylane/transforms/metric_tensor.py +++ b/pennylane/transforms/metric_tensor.py @@ -15,36 +15,87 @@ Contains the metric_tensor batch_transform which wraps multiple methods of computing the metric tensor. 
""" +from typing import Sequence, Callable import functools +from functools import partial import warnings import numpy as np import pennylane as qml from pennylane.circuit_graph import LayerData from pennylane.queuing import WrappedObj +from pennylane.transforms.core import transform -from .batch_transform import batch_transform + +def _contract_metric_tensor_with_cjac(mt, cjac, tape): # pylint: disable=unused-argument + """Execute the contraction of pre-computed classical Jacobian(s) + and the metric tensor of a tape in order to obtain the hybrid + metric tensor of a QNode. + + Args: + mt (array): Metric tensor of a tape (2-dimensional) + cjac (array or tuple[array]): The classical Jacobian of a QNode + + Returns: + array or tuple[array]: Hybrid metric tensor(s) of the QNode. + The number of metric tensors depends on the number of QNode arguments + for which the classical Jacobian was computed, the tensor shape(s) + depend on the shape of these QNode arguments. + """ + if isinstance(mt, tuple) and len(mt) == 1: + mt = mt[0] + if isinstance(cjac, tuple): + # Classical processing of multiple arguments is present. Return cjac.T @ mt @ cjac + # as a tuple of contractions. + metric_tensors = tuple( + qml.math.tensordot(c, qml.math.tensordot(mt, c, axes=[[-1], [0]]), axes=[[0], [0]]) + for c in cjac + if c is not None + ) + return metric_tensors[0] if len(metric_tensors) == 1 else metric_tensors + + is_square = cjac.shape == (1,) or (cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]) + + if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])): + # Classical Jacobian is the identity. No classical processing + # is present inside the QNode. + return mt + mt_cjac = qml.math.tensordot(mt, cjac, axes=[[-1], [0]]) + mt = qml.math.tensordot(cjac, mt_cjac, axes=[[0], [0]]) + + return mt -def expand_fn( - tape, argnum=None, approx=None, allow_nonunitary=True, aux_wire=None, device_wires=None -): +def _expand_metric_tensor( + tape: qml.tape.QuantumTape, + argnum=None, + approx=None, + allow_nonunitary=True, + aux_wire=None, + device_wires=None, +) -> (Sequence[qml.tape.QuantumTape], Callable): # pylint: disable=too-many-arguments """Set the metric tensor based on whether non-unitary gates are allowed.""" # pylint: disable=unused-argument,too-many-arguments - if not allow_nonunitary and approx is None: # pragma: no cover - return qml.transforms.expand_nonunitary_gen(tape) - return qml.transforms.expand_multipar(tape) + + if not allow_nonunitary and approx is None: + return [qml.transforms.expand_nonunitary_gen(tape)], lambda x: x[0] + return [qml.transforms.expand_multipar(tape)], lambda x: x[0] -@functools.partial(batch_transform, expand_fn=expand_fn) -def metric_tensor( - tape, +@partial( + transform, + expand_transform=_expand_metric_tensor, + classical_cotransform=_contract_metric_tensor_with_cjac, + final_transform=True, +) +def metric_tensor( # pylint:disable=too-many-arguments + tape: qml.tape.QuantumTape, argnum=None, approx=None, allow_nonunitary=True, aux_wire=None, device_wires=None, -): # pylint: disable=too-many-arguments +) -> (Sequence[qml.tape.QuantumTape], Callable): r"""Returns a function that computes the metric tensor of a given QNode or quantum tape. The metric tensor convention we employ here has the following form: @@ -67,7 +118,7 @@ def metric_tensor( This is the case for unitary single-parameter operations. 
Args: - tape (pennylane.QNode or .QuantumTape): quantum tape or QNode to find the metric tensor of + tape (QuantumTape): quantum tape to find the metric tensor of argnum (int or Sequence[int] or None): Trainable tape-parameter indices with respect to which the metric tensor is computed. If ``argnum=None``, the metric tensor with respect to all trainable parameters is returned. Excluding tape-parameter indices from this list reduces @@ -323,10 +374,14 @@ def circuit(weights): if approx in {"diag", "block-diag"}: # Only require covariance matrix based transform diag_approx = approx == "diag" - return _metric_tensor_cov_matrix(tape, argnum, diag_approx)[:2] + tapes, processing_fn = _metric_tensor_cov_matrix(tape, argnum, diag_approx)[:2] + return tapes, processing_fn if approx is None: - return _metric_tensor_hadamard(tape, argnum, allow_nonunitary, aux_wire, device_wires) + tapes, processing_fn = _metric_tensor_hadamard( + tape, argnum, allow_nonunitary, aux_wire, device_wires + ) + return tapes, processing_fn raise ValueError( f"Unknown value {approx} for keyword argument approx. " @@ -334,142 +389,17 @@ def circuit(weights): ) -def _contract_metric_tensor_with_cjac(mt, cjac): - """Execute the contraction of pre-computed classical Jacobian(s) - and the metric tensor of a tape in order to obtain the hybrid - metric tensor of a QNode. - - Args: - mt (array): Metric tensor of a tape (2-dimensional) - cjac (array or tuple[array]): The classical Jacobian of a QNode - - Returns: - array or tuple[array]: Hybrid metric tensor(s) of the QNode. - The number of metric tensors depends on the number of QNode arguments - for which the classical Jacobian was computed, the tensor shape(s) - depend on the shape of these QNode arguments. - """ - if isinstance(cjac, tuple): - # Classical processing of multiple arguments is present. Return cjac.T @ mt @ cjac - # as a tuple of contractions. - metric_tensors = tuple( - qml.math.tensordot(c, qml.math.tensordot(mt, c, axes=[[-1], [0]]), axes=[[0], [0]]) - for c in cjac - if c is not None - ) - return metric_tensors[0] if len(metric_tensors) == 1 else metric_tensors - - is_square = cjac.shape == (1,) or (cjac.ndim == 2 and cjac.shape[0] == cjac.shape[1]) - - if is_square and qml.math.allclose(cjac, qml.numpy.eye(cjac.shape[0])): - # Classical Jacobian is the identity. No classical processing - # is present inside the QNode. - return mt - - mt = qml.math.tensordot(cjac, qml.math.tensordot(mt, cjac, axes=[[-1], [0]]), axes=[[0], [0]]) - - return mt - - -@metric_tensor.custom_qnode_wrapper +@metric_tensor.custom_qnode_transform def qnode_execution_wrapper(self, qnode, targs, tkwargs): """Here, we overwrite the QNode execution wrapper in order to take into account that classical processing may be present inside the QNode.""" - hybrid = tkwargs.pop("hybrid", True) - - if isinstance(qnode, qml.ExpvalCost): - if qnode._multiple_devices: # pylint: disable=protected-access - warnings.warn( - "ExpvalCost was instantiated with multiple devices. 
Only the first device " - "will be used to evaluate the metric tensor.", - UserWarning, - ) - - qnode = qnode.qnodes[0] tkwargs.setdefault("device_wires", qnode.device.wires) - mt_fn = self.default_qnode_wrapper(qnode, targs, tkwargs) - - def wrapper(*args, **kwargs): # pylint: disable=too-many-branches - argnum = tkwargs.get("argnum", None) - argnums = tkwargs.get("argnums", None) - - interface = qml.math.get_interface(*args) - trainable_params = qml.math.get_trainable_indices(args) - - if interface == "jax" and argnum is not None: - raise qml.QuantumFunctionError( - "argnum does not work with the Jax interface. You should use argnums instead." - ) - if interface == "jax" and not trainable_params: - if argnums is None: - argnums_ = [0] - - else: - argnums_ = [argnums] if isinstance(argnums, int) else argnums - - params = qml.math.jax_argnums_to_tape_trainable( - qnode, argnums_, self.expand_fn, args, kwargs - ) - argnums_ = qml.math.get_trainable_indices(params) - kwargs["argnums"] = argnums_ - - elif not trainable_params: - warnings.warn( - "Attempted to compute the metric tensor of a QNode with no trainable parameters. " - "If this is unintended, please add trainable parameters in accordance with the " - "chosen auto differentiation framework." - ) - return () - - try: - mt = mt_fn(*args, **kwargs) - except qml.wires.WireError as e: - revert_text = ( - "\n\nReverting to the block-diagonal approximation. It will often be " - "much more efficient to request the block-diagonal approximation directly!" - ) - other_mt_errors = [ - "The requested auxiliary wire is already in use by the circuit.", - "The requested auxiliary wire does not exist on the used device.", - ] - - if str(e) == "The device has no free wire for the auxiliary wire.": - warnings.warn( - "The device does not have a wire that is not used by the circuit." + revert_text - ) - elif str(e) in other_mt_errors: - warnings.warn( - "An auxiliary wire is not available." - "\n\nThis can occur when computing the full metric tensor via the " - "Hadamard test, and the device does not provide an " - "additional wire or the requested auxiliary wire does not exist " - "on the device." + revert_text - ) - else: - raise e - tkwargs["approx"] = "block-diag" - return self(qnode, *targs, **tkwargs)(*args, **kwargs) - - if not hybrid: - return mt - - kwargs.pop("shots", False) - # Special case where we apply a Jax transform (jacobian e.g.) on the gradient transform and argnums are - # defined on the outer transform and therefore on the args. 
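The wrapper being removed here derived the classical-Jacobian argnums from the trainable QNode arguments; that detection is centralized in `qml.math.get_trainable_indices`, which the new transform program relies on. A minimal sketch with autograd-style `requires_grad` flags (the values are illustrative):

    import pennylane as qml
    from pennylane import numpy as np

    args = (
        np.array(0.5, requires_grad=True),   # trainable -> index reported
        np.array(0.1, requires_grad=False),  # constant -> skipped
    )
    print(qml.math.get_trainable_indices(args))  # {0}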
- if interface == "jax": - argnum_cjac = trainable_params or argnums - else: - argnum_cjac = None - - cjac = qml.transforms.classical_jacobian( - qnode, argnum=argnum_cjac, expand_fn=self.expand_fn - )(*args, **kwargs) - return _contract_metric_tensor_with_cjac(mt, cjac) + mt_fn = self.default_qnode_transform(qnode, targs, tkwargs) - return wrapper + return mt_fn def _metric_tensor_cov_matrix(tape, argnum, diag_approx): # pylint: disable=too-many-statements diff --git a/pennylane/transforms/qcut/cutcircuit.py b/pennylane/transforms/qcut/cutcircuit.py index 288eab7612a..10bc2d18972 100644 --- a/pennylane/transforms/qcut/cutcircuit.py +++ b/pennylane/transforms/qcut/cutcircuit.py @@ -50,7 +50,7 @@ def processing_fn(res): # Expand the tapes for handling Hamiltonian with two or more terms tape_meas_ops = tape.measurements - if isinstance(tape_meas_ops[0].obs, qml.Hamiltonian): + if tape_meas_ops and isinstance(tape_meas_ops[0].obs, qml.Hamiltonian): if len(tape_meas_ops) > 1: raise NotImplementedError( "Hamiltonian expansion is supported only with a single Hamiltonian" diff --git a/pennylane/transforms/specs.py b/pennylane/transforms/specs.py index 1bda5cbd805..ee6c2b6fc93 100644 --- a/pennylane/transforms/specs.py +++ b/pennylane/transforms/specs.py @@ -138,7 +138,7 @@ def specs_qnode(*args, **kwargs): else qnode.diff_method ) - if isinstance(qnode.gradient_fn, qml.gradients.gradient_transform): + if isinstance(qnode.gradient_fn, qml.transforms.core.TransformDispatcher): info["gradient_fn"] = _get_absolute_import_path(qnode.gradient_fn) try: diff --git a/tests/drawer/test_draw.py b/tests/drawer/test_draw.py index d6686d62209..a574ceac7c1 100644 --- a/tests/drawer/test_draw.py +++ b/tests/drawer/test_draw.py @@ -173,9 +173,9 @@ def matrices_circuit(): expected2 = ( "0: ─╭|Ψ⟩──U(M0)─┤ <𝓗(M0)>\n" "1: ─╰|Ψ⟩────────┤ \n" + "\n" "M0 = \n[[1. 0.]\n [0. 
1.]]" ) - assert draw(matrices_circuit)() == expected2 def test_matrix_parameters_batch_transform(self): diff --git a/tests/gradients/core/test_gradient_transform.py b/tests/gradients/core/test_gradient_transform.py index 7ab8eb1f885..596c0060e6f 100644 --- a/tests/gradients/core/test_gradient_transform.py +++ b/tests/gradients/core/test_gradient_transform.py @@ -25,9 +25,9 @@ def test_repr(): """Test the repr method of gradient transforms.""" - assert repr(qml.gradients.param_shift) == "" - assert repr(qml.gradients.spsa_grad) == "" - assert repr(qml.gradients.finite_diff) == "" + assert repr(qml.gradients.param_shift) == "" + assert repr(qml.gradients.spsa_grad) == "" + assert repr(qml.gradients.finite_diff) == "" class TestGradAnalysis: @@ -401,7 +401,7 @@ def circuit(x, y): expected = qml.jacobian(circuit)(x, y) # pylint:disable=unexpected-keyword-arg - res = qml.gradients.param_shift(circuit, hybrid=True)(x, y) + res = qml.gradients.param_shift(circuit)(x, y) assert isinstance(res, tuple) and len(res) == 2 assert all(np.allclose(_r, _e, atol=tol, rtol=0) for _r, _e in zip(res, expected)) @@ -496,11 +496,10 @@ def circuit(x, y): assert np.allclose(res, expected, atol=tol, rtol=0) - def test_classical_processing_arguments(self, mocker, tol): + def test_classical_processing_arguments(self, tol): """Test that a gradient transform acts on QNodes correctly when the QNode arguments are classically processed""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.transforms, "classical_jacobian") @qml.qnode(dev) def circuit(weights): @@ -512,19 +511,14 @@ def circuit(weights): w = np.array([0.543, -0.654], requires_grad=True) res = qml.gradients.param_shift(circuit)(w) - classical_jac = spy.spy_return(w) - assert isinstance(classical_jac, np.ndarray) - assert np.allclose(classical_jac, np.array([[2 * w[0], 0], [0, 1]])) - x, _ = w expected = [-2 * x * np.sin(x**2), 0] assert np.allclose(res, expected, atol=tol, rtol=0) - def test_classical_processing_multiple_arguments(self, mocker, tol): + def test_classical_processing_multiple_arguments(self, tol): """Test that a gradient transform acts on QNodes correctly when multiple QNode arguments are classically processed""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.transforms, "classical_jacobian") @qml.qnode(dev) def circuit(data, weights): @@ -540,8 +534,6 @@ def circuit(data, weights): x, _ = w res = qml.gradients.param_shift(circuit)(d, w) - classical_jac = spy.spy_return(d, w) - assert np.allclose(classical_jac, np.array([[2 * w[0], 0], [0, 1]]).T) expected = np.array([-2 * x * np.cos(np.cos(d)) * np.sin(x**2), 0]) assert np.allclose(res, expected, atol=tol, rtol=0) @@ -551,10 +543,6 @@ def circuit(data, weights): w = np.array([0.543, -0.654], requires_grad=True) res = qml.gradients.param_shift(circuit)(d, w) - classical_jac = spy.spy_return(d, w) - assert isinstance(classical_jac, tuple) - assert np.allclose(classical_jac[0], [-np.sin(d), 0, 0]) - assert np.allclose(classical_jac[1], np.array([[0, 2 * w[0], 0], [0, 0, 1]]).T) expected_dd = np.cos(x**2) * np.sin(d) * np.sin(np.cos(d)) expected_dw = np.array([-2 * x * np.cos(np.cos(d)) * np.sin(x**2), 0]) @@ -581,12 +569,6 @@ def circuit(weights): expected = qml.jacobian(circuit)(w) assert np.allclose(res, expected, atol=tol, rtol=0) - # when executed with hybrid=False, only the quantum jacobian is returned - # pylint:disable=unexpected-keyword-arg - res = qml.gradients.param_shift(circuit, hybrid=False)(w) - assert res[0].shape == (4,) - assert res[1].shape 
== (4,) - @qml.qnode(dev) def circuit1(weights): qml.RX(weights[0], wires=[0]) @@ -597,8 +579,8 @@ def circuit1(weights): w = np.array([0.543**2, -0.654], requires_grad=True) expected = qml.jacobian(circuit1)(w) - assert np.allclose(res[0], expected.T[0], atol=tol, rtol=0) - assert np.allclose(res[1], expected.T[1], atol=tol, rtol=0) + assert np.allclose(res[0][0], expected[0], atol=10e-2, rtol=0) + assert np.allclose(res[1][0], expected[1], atol=10e-2, rtol=0) @pytest.mark.parametrize("strategy", ["gradient", "device"]) def test_template_integration(self, strategy, tol): @@ -641,22 +623,6 @@ def circuit(x): assert circuit(x).shape == tuple() assert circuit(x, shots=1000).shape == tuple() - def test_shots_error(self): - """Raise an exception if shots is used within the QNode""" - dev = qml.device("default.qubit", wires=1, shots=1000) - - def circuit(x, shots): - """A quantum circuit that takes `shots` as an argument.""" - # pylint: disable=unused-argument - qml.RX(x, wires=0) - return qml.expval(qml.PauliZ(0)) - - with pytest.warns(UserWarning, match="Detected 'shots' as an argument to the given"): - qnode = qml.QNode(circuit, dev) - - with pytest.raises(ValueError, match="Detected 'shots' as an argument of the quantum"): - qml.gradients.param_shift(qnode)(0.2, shots=100) - class TestInterfaceIntegration: """Test that the gradient transforms are differentiable diff --git a/tests/gradients/core/test_hadamard_gradient.py b/tests/gradients/core/test_hadamard_gradient.py index 3b485b7aaab..1c0ba0e2db6 100644 --- a/tests/gradients/core/test_hadamard_gradient.py +++ b/tests/gradients/core/test_hadamard_gradient.py @@ -464,7 +464,10 @@ def test_output_shape_matches_qnode_expval(self, cost, expected_shape): circuit = qml.QNode(cost, dev) res_hadamard = qml.gradients.hadamard_grad(circuit)(x) - assert isinstance(res_hadamard, tuple) + + assert isinstance(res_hadamard, (tuple, list)) + if len(res_hadamard) == 1: + res_hadamard = res_hadamard[0] assert len(res_hadamard) == expected_shape[0] if len(expected_shape) > 1: @@ -487,7 +490,9 @@ def test_output_shape_matches_qnode_probs(self, cost, expected_shape): circuit = qml.QNode(cost, dev) res_hadamard = qml.gradients.hadamard_grad(circuit)(x) - assert isinstance(res_hadamard, tuple) + assert isinstance(res_hadamard, (tuple, list)) + if len(res_hadamard) == 1: + res_hadamard = res_hadamard[0] assert len(res_hadamard) == expected_shape[0] if len(expected_shape) > 2: @@ -723,11 +728,10 @@ def test_independent_parameter(self, mocker): assert spy.call_args[0][0:2] == (tape, [0]) @pytest.mark.autograd - def test_no_trainable_params_qnode_autograd(self, mocker): + def test_no_trainable_params_qnode_autograd(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.devices.qubit, "measure") @qml.qnode(dev, interface="autograd") def circuit(weights): @@ -736,18 +740,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.torch - def test_no_trainable_params_qnode_torch(self, mocker): + def test_no_trainable_params_qnode_torch(self): """Test that the correct 
ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.devices.qubit, "measure") @qml.qnode(dev, interface="torch") def circuit(weights): @@ -756,18 +756,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.tf - def test_no_trainable_params_qnode_tf(self, mocker): + def test_no_trainable_params_qnode_tf(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.devices.qubit, "measure") @qml.qnode(dev, interface="tf") def circuit(weights): @@ -776,18 +772,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.jax - def test_no_trainable_params_qnode_jax(self, mocker): + def test_no_trainable_params_qnode_jax(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit", wires=2) - spy = mocker.spy(qml.devices.qubit, "measure") @qml.qnode(dev, interface="jax") def circuit(weights): @@ -796,18 +788,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.autograd - def test_no_trainable_params_qnode_autograd_legacy(self, mocker): + def test_no_trainable_params_qnode_autograd_legacy(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit.autograd", wires=2) - spy = mocker.spy(dev, "expval") @qml.qnode(dev, interface="autograd") def circuit(weights): @@ -816,18 +804,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.torch - def test_no_trainable_params_qnode_torch_legacy(self, mocker): + def test_no_trainable_params_qnode_torch_legacy(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit.torch", wires=2) - spy = mocker.spy(dev, "expval") @qml.qnode(dev, interface="torch") def 
circuit(weights): @@ -836,18 +820,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.tf - def test_no_trainable_params_qnode_tf_legacy(self, mocker): + def test_no_trainable_params_qnode_tf_legacy(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit.tf", wires=2) - spy = mocker.spy(dev, "expval") @qml.qnode(dev, interface="tf") def circuit(weights): @@ -856,18 +836,14 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) @pytest.mark.jax - def test_no_trainable_params_qnode_jax_legacy(self, mocker): + def test_no_trainable_params_qnode_jax_legacy(self): """Test that the correct ouput and warning is generated in the absence of any trainable parameters""" dev = qml.device("default.qubit.jax", wires=2) - spy = mocker.spy(dev, "expval") @qml.qnode(dev, interface="jax") def circuit(weights): @@ -876,11 +852,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res_hadamard = qml.gradients.hadamard_grad(circuit)(weights) - - assert res_hadamard == () - spy.assert_not_called() + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.hadamard_grad(circuit)(weights) def test_no_trainable_params_tape(self): """Test that the correct ouput and warning is generated in the absence of any trainable @@ -1018,7 +991,7 @@ def test_all_zero_diff_methods_multiple_returns_tape(self): def test_all_zero_diff_methods(self): """Test that the transform works correctly when the diff method for every parameter is identified to be 0, and that no tapes were generated.""" - dev = qml.device("default.qubit", wires=3) + dev = qml.device("default.qubit", wires=4) @qml.qnode(dev) def circuit(params): @@ -1045,9 +1018,6 @@ def circuit(params): assert result[2].shape == (4,) assert np.allclose(result[2], 0) - tapes, _ = qml.gradients.hadamard_grad(circuit.tape) - assert tapes == [] - class TestHadamardTestGradDiff: """Test that the transform is differentiable""" @@ -1333,7 +1303,9 @@ def circuit(x, y): x = jax.numpy.array([0.543, -0.654]) y = jax.numpy.array(-0.123) - res = jax.jacobian(qml.gradients.hadamard_grad(circuit), argnums=argnums)(x, y) + res = jax.jacobian(qml.gradients.hadamard_grad(circuit, argnums=argnums), argnums=argnums)( + x, y + ) res_expected = jax.hessian(circuit, argnums=argnums)(x, y) if argnums == [0]: diff --git a/tests/gradients/core/test_pulse_gradient.py b/tests/gradients/core/test_pulse_gradient.py index 9f8ba10a9d3..8271996b4a8 100644 --- a/tests/gradients/core/test_pulse_gradient.py +++ b/tests/gradients/core/test_pulse_gradient.py @@ -1170,9 +1170,9 @@ def 
qnode(params): qnode.construct((params,), {}) num_split_times = 5 - tapes, fn = stoch_pulse_grad( - qnode.tape, argnums=[0, 1, 2], num_split_times=num_split_times, sampler_seed=7123 - ) + qnode.tape.trainable_params = [0, 1, 2] + + tapes, fn = stoch_pulse_grad(qnode.tape, num_split_times=num_split_times, sampler_seed=7123) # Two generating terms with two shifts (X_0 and Z_0), one with eight shifts # (Y_0Y_1+0.4 X_1 has eigenvalues [-1.4, -0.6, 0.6, 1.4] yielding frequencies # [0.8, 1.2, 2.0, 2.8] and hence 2 * 4 = 8 shifts) diff --git a/tests/gradients/finite_diff/test_finite_difference.py b/tests/gradients/finite_diff/test_finite_difference.py index 9d27996d802..0179a3036f6 100644 --- a/tests/gradients/finite_diff/test_finite_difference.py +++ b/tests/gradients/finite_diff/test_finite_difference.py @@ -123,7 +123,7 @@ def test_non_differentiable_error(self): with pytest.raises( ValueError, match=r"Cannot differentiate with respect to parameter\(s\) {0}" ): - finite_diff(tape, _expand=False) + finite_diff(tape) # setting trainable parameters avoids this tape.trainable_params = {1, 2} @@ -221,10 +221,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit)(weights) @pytest.mark.torch def test_no_trainable_params_qnode_torch(self): @@ -239,10 +237,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit)(weights) @pytest.mark.tf def test_no_trainable_params_qnode_tf(self): @@ -257,10 +253,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit)(weights) @pytest.mark.jax def test_no_trainable_params_qnode_jax(self): @@ -275,10 +269,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit)(weights) def test_all_zero_diff_methods(self): """Test that the transform works correctly when the diff method for every parameter is @@ -310,9 +302,6 @@ def circuit(params): assert result[2].shape == (4,) assert np.allclose(result[2], 0) - tapes, _ = qml.gradients.finite_diff(circuit.tape) - assert tapes == [] - def test_all_zero_diff_methods_multiple_returns(self): """Test that the transform works correctly when the diff method for every parameter is identified to be 0, and that no tapes were generated.""" @@ -362,9 +351,6 @@ def circuit(params): assert result[1][2].shape == (4,) assert np.allclose(result[1][2], 0) - tapes, _ = 
qml.gradients.finite_diff(circuit.tape) - assert tapes == [] - def test_y0(self): """Test that if first order finite differences is used, then the tape is executed only once using the current parameter @@ -460,8 +446,17 @@ def cost6(x): transform = [qml.math.shape(qml.gradients.finite_diff(c)(x)) for c in circuits] - expected = [(3,), (3,), (2, 3), (3, 4), (3, 4), (2, 3, 4)] - + expected = [ + (3,), + ( + 1, + 3, + ), + (2, 3), + (3, 4), + (1, 3, 4), + (2, 3, 4), + ] assert all(t == q for t, q in zip(transform, expected)) def test_special_observable_qnode_differentiation(self): @@ -1166,7 +1161,7 @@ def circuit(x, y): res = jax.jacobian( qml.gradients.finite_diff( - circuit, approx_order=approx_order, strategy=strategy, h=1e-5 + circuit, approx_order=approx_order, strategy=strategy, h=1e-5, argnums=argnums ), argnums=argnums, )(x, y) diff --git a/tests/gradients/finite_diff/test_finite_difference_shot_vec.py b/tests/gradients/finite_diff/test_finite_difference_shot_vec.py index 0e4dae14f70..39644a174cb 100644 --- a/tests/gradients/finite_diff/test_finite_difference_shot_vec.py +++ b/tests/gradients/finite_diff/test_finite_difference_shot_vec.py @@ -53,7 +53,7 @@ def test_non_differentiable_error(self): with pytest.raises( ValueError, match=r"Cannot differentiate with respect to parameter\(s\) {0}" ): - finite_diff(tape, _expand=False) + finite_diff(tape) # setting trainable parameters avoids this tape.trainable_params = {1, 2} @@ -162,10 +162,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit, h=h_val)(weights) @pytest.mark.torch def test_no_trainable_params_qnode_torch(self): @@ -180,10 +178,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit, h=h_val)(weights) @pytest.mark.tf def test_no_trainable_params_qnode_tf(self): @@ -198,10 +194,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit, h=h_val)(weights) @pytest.mark.jax def test_no_trainable_params_qnode_jax(self): @@ -216,10 +210,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.finite_diff(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.finite_diff(circuit, h=h_val)(weights) def test_all_zero_diff_methods(self): """Test that the transform works correctly when the diff method for every parameter is @@ -253,9 +245,6 @@ def circuit(params): assert result[2].shape == (4,) assert 
np.allclose(result[2], 0) - tapes, _ = qml.gradients.finite_diff(circuit.tape, h=h_val) - assert tapes == [] - def test_all_zero_diff_methods_multiple_returns(self): """Test that the transform works correctly when the diff method for every parameter is identified to be 0, and that no tapes were generated.""" @@ -307,9 +296,6 @@ def circuit(params): assert result[1][2].shape == (4,) assert np.allclose(result[1][2], 0) - tapes, _ = qml.gradients.finite_diff(circuit.tape, h=h_val) - assert tapes == [] - def test_y0(self): """Test that if first order finite differences is used, then the tape is executed only once using the current parameter @@ -410,10 +396,7 @@ def cost6(x): circuits = [qml.QNode(cost, dev) for cost in (cost1, cost2, cost3, cost4, cost5, cost6)] transform = [qml.math.shape(qml.gradients.finite_diff(c, h=h_val)(x)) for c in circuits] - - expected = [(3,), (3,), (2, 3), (3, 4), (3, 4), (2, 3, 4)] - expected = [(len(many_shots_shot_vector),) + e for e in expected] - + expected = [(3, 3), (1, 3, 3), (3, 2, 3), (3, 3, 4), (1, 3, 3, 4), (3, 2, 3, 4)] assert all(t == q for t, q in zip(transform, expected)) def test_special_observable_qnode_differentiation(self): diff --git a/tests/gradients/finite_diff/test_spsa_gradient.py b/tests/gradients/finite_diff/test_spsa_gradient.py index ae73f414782..40ef891b840 100644 --- a/tests/gradients/finite_diff/test_spsa_gradient.py +++ b/tests/gradients/finite_diff/test_spsa_gradient.py @@ -194,7 +194,7 @@ def test_non_differentiable_error(self): with pytest.raises( ValueError, match=r"Cannot differentiate with respect to parameter\(s\) {0}" ): - spsa_grad(tape, _expand=False) + spsa_grad(tape) # setting trainable parameters avoids this tape.trainable_params = {1, 2} @@ -300,10 +300,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit)(weights) @pytest.mark.torch def test_no_trainable_params_qnode_torch(self): @@ -318,10 +316,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit)(weights) @pytest.mark.tf def test_no_trainable_params_qnode_tf(self): @@ -336,10 +332,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit)(weights) @pytest.mark.jax def test_no_trainable_params_qnode_jax(self): @@ -354,10 +348,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit)(weights) def test_all_zero_diff_methods(self): """Test that the transform works correctly when the diff method for every parameter is @@ 
-389,9 +381,6 @@ def circuit(params): assert result[2].shape == (4,) assert np.allclose(result[2], 0) - tapes, _ = spsa_grad(circuit.tape) - assert tapes == [] - def test_all_zero_diff_methods_multiple_returns(self): """Test that the transform works correctly when the diff method for every parameter is identified to be 0, and that no tapes were generated, with multiple return values.""" @@ -441,9 +430,6 @@ def circuit(params): assert result[1][2].shape == (4,) assert np.allclose(result[1][2], 0) - tapes, _ = spsa_grad(circuit.tape) - assert tapes == [] - def test_y0(self): """Test that if first order finite differences is underlying the SPSA, then the tape is executed only once using the current parameter values.""" @@ -549,7 +535,17 @@ def cost6(x): transform = [qml.math.shape(spsa_grad(c)(x)) for c in circuits] - expected = [(3,), (3,), (2, 3), (3, 4), (3, 4), (2, 3, 4)] + expected = [ + (3,), + ( + 1, + 3, + ), + (2, 3), + (3, 4), + (1, 3, 4), + (2, 3, 4), + ] assert all(t == q for t, q in zip(transform, expected)) diff --git a/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py b/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py index 02488c47709..920895d859d 100644 --- a/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py +++ b/tests/gradients/finite_diff/test_spsa_gradient_shot_vec.py @@ -64,7 +64,7 @@ def test_non_differentiable_error(self): with pytest.raises( ValueError, match=r"Cannot differentiate with respect to parameter\(s\) {0}" ): - spsa_grad(tape, _expand=False) + spsa_grad(tape) # setting trainable parameters avoids this tape.trainable_params = {1, 2} @@ -183,10 +183,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit, h=h_val)(weights) @pytest.mark.torch def test_no_trainable_params_qnode_torch(self): @@ -201,10 +199,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit, h=h_val)(weights) @pytest.mark.tf def test_no_trainable_params_qnode_tf(self): @@ -219,10 +215,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit, h=h_val)(weights) @pytest.mark.jax def test_no_trainable_params_qnode_jax(self): @@ -237,10 +231,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = spsa_grad(circuit, h=h_val)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + spsa_grad(circuit, h=h_val)(weights) def test_all_zero_diff_methods(self): """Test that the transform works correctly when the diff method for every parameter is @@ -277,9 +269,6 @@ def circuit(params): assert 
result[2].shape == (4,) assert np.allclose(result[2], 0) - tapes, _ = spsa_grad(circuit.tape, h=h_val) - assert tapes == [] - def test_all_zero_diff_methods_multiple_returns(self): """Test that the transform works correctly when the diff method for every parameter is identified to be 0, and that no tapes were generated.""" @@ -334,9 +323,6 @@ def circuit(params): assert result[1][2].shape == (4,) assert np.allclose(result[1][2], 0) - tapes, _ = spsa_grad(circuit.tape, h=h_val) - assert tapes == [] - def test_y0(self): """Test that if first order finite differences is underlying the SPSA, then the tape is executed only once using the current parameter @@ -473,8 +459,7 @@ def cost6(x): transform = [qml.math.shape(spsa_grad(c, h=h_val)(x)) for c in circuits] - expected = [(3,), (3,), (2, 3), (3, 4), (3, 4), (2, 3, 4)] - expected = [(len(many_shots_shot_vector),) + e for e in expected] + expected = [(3, 3), (1, 3, 3), (3, 2, 3), (3, 3, 4), (1, 3, 3, 4), (3, 2, 3, 4)] assert all(t == q for t, q in zip(transform, expected)) diff --git a/tests/gradients/parameter_shift/test_cv_gradients.py b/tests/gradients/parameter_shift/test_cv_gradients.py index bb136886926..17363621898 100644 --- a/tests/gradients/parameter_shift/test_cv_gradients.py +++ b/tests/gradients/parameter_shift/test_cv_gradients.py @@ -22,7 +22,6 @@ import pennylane.numpy as anp # only to be used inside classical computational nodes import pennylane as qml - alpha = 0.5 # displacement in tests hbar = 2 mag_alphas = np.linspace(0, 1.5, 5) @@ -249,11 +248,14 @@ def qf(x, y): assert qml.math.allclose(grad_A, grad_F, atol=tol, rtol=0) assert qml.math.allclose(grad_A2, grad_F, atol=tol, rtol=0) - @pytest.mark.autograd + @pytest.mark.jax def test_cv_gradients_parameters_inside_array(self, gaussian_dev, tol): "Tests that free parameters inside an array passed to an Operation yield correct gradients." 
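The rewrite just below replaces autograd with JAX and compares a finite-difference QNode against a second-order parameter-shift QNode. The same comparison pattern on a qubit device, as a runnable sketch (not part of this PR):

    import jax
    import pennylane as qml

    dev = qml.device("default.qubit", wires=1)

    @qml.qnode(dev, diff_method="finite-diff")
    def qf(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    @qml.qnode(dev, diff_method="parameter-shift")
    def qf2(x):
        qml.RX(x, wires=0)
        return qml.expval(qml.PauliZ(0))

    x = jax.numpy.array(0.4)
    # The two differentiation methods should agree up to finite-diff error.
    assert jax.numpy.allclose(jax.grad(qf)(x), jax.grad(qf2)(x), atol=1e-5)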
- par = anp.array([0.4, 1.3], requires_grad=True) + import jax + par = jax.numpy.array([0.4, 1.3]) + + @qml.qnode(device=gaussian_dev, diff_method="finite-diff") def qf(x, y): qml.Displacement(0.5, 0, wires=[0]) qml.Squeezing(x, 0, wires=[0]) @@ -263,12 +265,19 @@ def qf(x, y): M[2, 1] = 1.0 return qml.expval(qml.PolyXP(M, [0, 1])) - q = qml.QNode(qf, gaussian_dev) - q(*par) - grad_F = qml.gradients.finite_diff(q, hybrid=False)(*par) - grad_A2 = qml.gradients.param_shift_cv( - q, dev=gaussian_dev, force_order2=True, hybrid=False - )(*par) + grad_F = jax.grad(qf)(*par) + + @qml.qnode(device=gaussian_dev, diff_method="parameter-shift", force_order2=True) + def qf2(x, y): + qml.Displacement(0.5, 0, wires=[0]) + qml.Squeezing(x, 0, wires=[0]) + M = np.zeros((5, 5)) + M[1, 1] = y + M[1, 2] = 1.0 + M[2, 1] = 1.0 + return qml.expval(qml.PolyXP(M, [0, 1])) + + grad_A2 = jax.grad(qf2)(*par) # the different methods agree assert grad_A2 == pytest.approx(grad_F, abs=tol) diff --git a/tests/gradients/parameter_shift/test_parameter_shift.py b/tests/gradients/parameter_shift/test_parameter_shift.py index 6a499d828b7..d4feab4d48f 100644 --- a/tests/gradients/parameter_shift/test_parameter_shift.py +++ b/tests/gradients/parameter_shift/test_parameter_shift.py @@ -379,87 +379,6 @@ def test_independent_parameter(self, mocker): # only called for parameter 0 assert spy.call_args[0][0:2] == (tape, [0]) - # TODO: uncomment when QNode decorator uses new qml.execute pipeline - # @pytest.mark.autograd - # def test_no_trainable_params_qnode_autograd(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="autograd") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # @pytest.mark.torch - # def test_no_trainable_params_qnode_torch(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="torch") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # @pytest.mark.tf - # def test_no_trainable_params_qnode_tf(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="tf") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # 
@pytest.mark.jax - # def test_no_trainable_params_qnode_jax(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="jax") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - @pytest.mark.parametrize("broadcast", [True, False]) def test_no_trainable_params_tape(self, broadcast): """Test that the correct ouput and warning is generated in the absence of any trainable @@ -2293,7 +2212,7 @@ def cost6(x): costs_and_expected_expval = [ (cost1, [3]), - (cost2, [3]), + (cost2, [1, 3]), (cost3, [2, 3]), ] @@ -2306,7 +2225,7 @@ def test_output_shape_matches_qnode_expval(self, cost, expected_shape): circuit = qml.QNode(cost, dev) res = qml.gradients.param_shift(circuit)(x) - assert isinstance(res, tuple) + assert len(res) == expected_shape[0] if len(expected_shape) > 1: @@ -2316,7 +2235,7 @@ def test_output_shape_matches_qnode_expval(self, cost, expected_shape): costs_and_expected_probs = [ (cost4, [3, 4]), - (cost5, [3, 4]), + (cost5, [1, 3, 4]), (cost6, [2, 3, 4]), ] @@ -2329,7 +2248,7 @@ def test_output_shape_matches_qnode_probs(self, cost, expected_shape): circuit = qml.QNode(cost, dev) res = qml.gradients.param_shift(circuit)(x) - assert isinstance(res, tuple) + assert len(res) == expected_shape[0] if len(expected_shape) > 2: @@ -3106,7 +3025,7 @@ def cost6(x): single_measure_circuits = [qml.QNode(cost, dev) for cost in (cost1, cost2, cost4, cost5)] multi_measure_circuits = [qml.QNode(cost, dev) for cost in (cost3, cost6)] - for c, exp_shape in zip(single_measure_circuits, [(3,), (3,), (3, 4), (3, 4)]): + for c, exp_shape in zip(single_measure_circuits, [(3,), (1, 3), (3, 4), (1, 3, 4)]): grad = qml.gradients.param_shift(c, broadcast=True)(x) assert qml.math.shape(grad) == exp_shape @@ -4072,7 +3991,6 @@ def circuit(x): res = qml.gradients.param_shift(circuit)(x) res_expected = jax.jacobian(circuit)(x) - assert res.shape == res_expected.shape assert np.allclose(res, res_expected) @@ -4656,7 +4574,9 @@ def circuit(x, y): x = jax.numpy.array([0.543, -0.654]) y = jax.numpy.array(-0.123) - res = jax.jacobian(qml.gradients.param_shift(circuit), argnums=argnums)(x, y) + res = jax.jacobian(qml.gradients.param_shift(circuit, argnums=argnums), argnums=argnums)( + x, y + ) res_expected = jax.hessian(circuit, argnums=argnums)(x, y) if argnums == [0]: diff --git a/tests/gradients/parameter_shift/test_parameter_shift_cv.py b/tests/gradients/parameter_shift/test_parameter_shift_cv.py index 1c622a61133..cc76c318fbd 100644 --- a/tests/gradients/parameter_shift/test_parameter_shift_cv.py +++ b/tests/gradients/parameter_shift/test_parameter_shift_cv.py @@ -298,10 +298,8 @@ def circuit(weights): return qml.expval(qml.QuadX(0)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.param_shift_cv(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.param_shift_cv(circuit)(weights) @pytest.mark.torch def test_no_trainable_params_qnode_torch(self): @@ -317,10 
+315,8 @@ def circuit(weights): return qml.expval(qml.QuadX(0)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.param_shift_cv(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.param_shift_cv(circuit)(weights) @pytest.mark.tf def test_no_trainable_params_qnode_tf(self): @@ -336,10 +332,8 @@ def circuit(weights): return qml.expval(qml.QuadX(0)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.param_shift_cv(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.param_shift_cv(circuit)(weights) @pytest.mark.jax def test_no_trainable_params_qnode_jax(self): @@ -355,10 +349,8 @@ def circuit(weights): return qml.expval(qml.QuadX(0)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - res = qml.gradients.param_shift_cv(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.gradients.param_shift_cv(circuit)(weights) def test_no_trainable_params_tape(self): """Test that the correct ouput and warning is generated in the absence of any trainable @@ -397,9 +389,6 @@ def circuit(params): result = qml.gradients.param_shift_cv(circuit, dev)(params) assert np.allclose(result, np.zeros((2, 3)), atol=0, rtol=0) - tapes, _ = qml.gradients.param_shift_cv(circuit.tape, dev) - assert tapes == [] - def test_state_non_differentiable_error(self): """Test error raised if attempting to differentiate with respect to a state""" diff --git a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py index ed9900ca8a2..f9ab005ca03 100644 --- a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py +++ b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py @@ -100,87 +100,6 @@ def test_independent_parameter(self, mocker): # only called for parameter 0 assert spy.call_args[0][0:2] == (tape, [0]) - # TODO: uncomment and port to shot-vectors when QNode decorator uses new qml.execute pipeline - # @pytest.mark.autograd - # def test_no_trainable_params_qnode_autograd(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2, shots=default_shot_vector) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="autograd") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # @pytest.mark.torch - # def test_no_trainable_params_qnode_torch(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2, shots=default_shot_vector) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="torch") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) 
@ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # @pytest.mark.tf - # def test_no_trainable_params_qnode_tf(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2, shots=default_shot_vector) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="tf") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - - # @pytest.mark.jax - # def test_no_trainable_params_qnode_jax(self, mocker): - # """Test that the correct ouput and warning is generated in the absence of any trainable - # parameters""" - # dev = qml.device("default.qubit", wires=2, shots=default_shot_vector) - # spy = mocker.spy(dev, "expval") - - # @qml.qnode(dev, interface="jax") - # def circuit(weights): - # qml.RX(weights[0], wires=0) - # qml.RY(weights[1], wires=0) - # return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) - - # weights = [0.1, 0.2] - # with pytest.warns(UserWarning, match="gradient of a QNode with no trainable parameters"): - # res = qml.gradients.param_shift(circuit)(weights) - - # assert res == () - # spy.assert_not_called() - @pytest.mark.parametrize("broadcast", [True, False]) def test_no_trainable_params_tape(self, broadcast): """Test that the correct ouput and warning is generated in the absence of any trainable diff --git a/tests/interfaces/default_qubit_2_integration/test_autograd_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_autograd_default_qubit_2.py index 16b9aa77c2a..09d20d1b028 100644 --- a/tests/interfaces/default_qubit_2_integration/test_autograd_default_qubit_2.py +++ b/tests/interfaces/default_qubit_2_integration/test_autograd_default_qubit_2.py @@ -442,7 +442,21 @@ def cost_fn(a, p): tape = qml.tape.QuantumScript( [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))] ) - return execute([tape], device, **execute_kwargs)[0] + gradient_fn = execute_kwargs["gradient_fn"] + + if gradient_fn is None: + _gradient_method = None + elif isinstance(gradient_fn, str): + _gradient_method = gradient_fn + else: + _gradient_method = "gradient-transform" + config = qml.devices.ExecutionConfig( + interface="autograd", + gradient_method=_gradient_method, + grad_on_execution=execute_kwargs.get("grad_on_execution", None), + ) + program, _ = device.preprocess(execution_config=config) + return execute([tape], device, **execute_kwargs, transform_program=program)[0] a = np.array(0.1, requires_grad=False) p = np.array([0.1, 0.2, 0.3], requires_grad=True) diff --git a/tests/interfaces/default_qubit_2_integration/test_autograd_qnode_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_autograd_qnode_default_qubit_2.py index 3463e581096..33e26ea1c77 100644 --- a/tests/interfaces/default_qubit_2_integration/test_autograd_qnode_default_qubit_2.py +++ b/tests/interfaces/default_qubit_2_integration/test_autograd_qnode_default_qubit_2.py @@ -106,22 +106,15 @@ def circuit(a): assert isinstance(grad, float) assert grad.shape == tuple() - def 
test_jacobian(self, interface, dev, diff_method, grad_on_execution, mocker, tol):
+    def test_jacobian(self, interface, dev, diff_method, grad_on_execution, tol):
         """Test jacobian calculation"""
         kwargs = dict(
             diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution
         )
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA)
             tol = TOL_FOR_SPSA
-        elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a = np.array(0.1, requires_grad=True)
         b = np.array(0.2, requires_grad=True)
@@ -156,27 +149,14 @@ def cost(x, y):
         assert res[1].shape == (2,)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
-    def test_jacobian_no_evaluate(
-        self, interface, dev, diff_method, grad_on_execution, mocker, tol
-    ):
+    def test_jacobian_no_evaluate(self, interface, dev, diff_method, grad_on_execution, tol):
         """Test jacobian calculation when no prior circuit evaluation has been performed"""
         kwargs = dict(
             diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution
         )
-
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA)
             tol = TOL_FOR_SPSA
-        elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a = np.array(0.1, requires_grad=True)
         b = np.array(0.2, requires_grad=True)
@@ -197,26 +177,20 @@ def cost(x, y):
         assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
         # call the Jacobian with new parameters
         a = np.array(0.6, requires_grad=True)
         b = np.array(0.832, requires_grad=True)

         res = jac_fn(a, b)
         expected = ([-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)])
-        expected = ([-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)])
         assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-    def test_jacobian_options(self, interface, dev, diff_method, grad_on_execution, mocker):
+    def test_jacobian_options(self, interface, dev, diff_method, grad_on_execution):
         """Test setting jacobian options"""
         if diff_method == "backprop":
             pytest.skip("Test does not support backprop")

-        spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-
         a = np.array([0.1, 0.2], requires_grad=True)

         @qnode(dev, interface=interface, h=1e-8, order=2, grad_on_execution=grad_on_execution)
@@ -227,13 +201,7 @@ def circuit(a):

         qml.jacobian(circuit)(a)

-        for args in spy.call_args_list:
-            assert args[1]["order"] == 2
-            assert args[1]["h"] == 1e-8
-
-    def test_changing_trainability(
-        self, interface, dev, diff_method, grad_on_execution, mocker, tol
-    ):
+    def test_changing_trainability(self, interface, dev, diff_method, grad_on_execution, tol):
         """Test changing the trainability of parameters changes the number of differentiation
         requests made"""
         if diff_method != "parameter-shift":
@@ -258,7 +226,6 @@ def loss(a, b):
             return np.sum(autograd.numpy.hstack(circuit(a, b)))

         grad_fn = qml.grad(loss)
-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
         res = grad_fn(a, b)

         # the tape has reported both arguments as trainable
@@ -267,9 +234,6 @@ def loss(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called for each argument
-        assert len(spy.spy_return[0]) == 4
-
         # make the second QNode argument a constant
         a = np.array(0.54, requires_grad=True)
         b = np.array(0.8, requires_grad=False)
@@ -282,9 +246,6 @@ def loss(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called only once
-        assert len(spy.spy_return[0]) == 2
-
         # trainability also updates on evaluation
         a = np.array(0.54, requires_grad=False)
         b = np.array(0.8, requires_grad=True)
@@ -1124,7 +1085,7 @@ def cost_fn(x):
         assert np.allclose(hess, expected_hess, atol=tol, rtol=0)

     def test_hessian_vector_valued_separate_args(
-        self, interface, dev, diff_method, grad_on_execution, mocker, tol
+        self, interface, dev, diff_method, grad_on_execution, tol
     ):
         """Test hessian calculation of a vector valued QNode that has separate input arguments"""
         if diff_method not in {"parameter-shift", "backprop"}:
@@ -1165,8 +1126,6 @@ def circuit(a, b):
         assert g[1].shape == (2,)
         assert np.allclose(g[1], expected_g[1], atol=tol, rtol=0)

-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-
         def jac_fn_a(*args):
             return jac_fn(*args)[0]
@@ -1178,11 +1137,6 @@ def jac_fn_b(*args):
         assert isinstance(hess_a, tuple) and len(hess_a) == 2
         assert isinstance(hess_b, tuple) and len(hess_b) == 2

-        if diff_method == "backprop":
-            spy.assert_not_called()
-        elif diff_method == "parameter-shift":
-            spy.assert_called()
-
         exp_hess_a = (
             [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.cos(a) * np.cos(b)],
             [0.5 * np.sin(a) * np.sin(b), -0.5 * np.sin(a) * np.sin(b)],
         )
@@ -1355,9 +1309,7 @@ class TestTapeExpansion:
     with the Autograd interface"""

     @pytest.mark.parametrize("max_diff", [1, 2])
-    def test_gradient_expansion_trainable_only(
-        self, dev, diff_method, grad_on_execution, max_diff, mocker
-    ):
+    def test_gradient_expansion_trainable_only(self, dev, diff_method, grad_on_execution, max_diff):
         """Test that a *supported* operation with no gradient recipe is only
         expanded for parameter-shift and finite-differences when it is trainable."""
         if diff_method not in ("parameter-shift", "finite-diff", "spsa", "hadamard"):
@@ -1385,16 +1337,8 @@ def circuit(x, y):
         y = np.array(0.7, requires_grad=False)

         circuit(x, y)
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         _ = qml.grad(circuit)(x, y)

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 3
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-        assert input_tape.operations[2].name == "PhaseShift"
-        assert input_tape.operations[2].grad_method is None
-
     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_hamiltonian_expansion_analytic(
         self, dev, diff_method, grad_on_execution, max_diff, tol
@@ -1878,7 +1822,6 @@ def cost(x, y):
             return anp.hstack(qml.grad(circuit)(x, y))

         hess = qml.jacobian(cost)(par_0, par_1)
-        print(hess)

         assert isinstance(hess, tuple)
         assert len(hess) == 2
diff --git a/tests/interfaces/default_qubit_2_integration/test_execute_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_execute_default_qubit_2.py
index d0cde46aaf2..72578467152 100644
--- a/tests/interfaces/default_qubit_2_integration/test_execute_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_execute_default_qubit_2.py
@@ -64,7 +64,10 @@ def decomposition(self):
         qs = qml.tape.QuantumScript([CustomOp(0)], [qml.expval(qml.PauliZ(0))])

         with pytest.warns(UserWarning, match="device batch transforms cannot be turned off"):
-            qml.execute((qs, qs), device=dev, device_batch_transform=False)
+            program, _ = dev.preprocess()
+            qml.execute(
+                (qs, qs), device=dev, device_batch_transform=False, transform_program=program
+            )

     def test_split_and_expand_performed(self):
         """Test that preprocess returns the correct tapes when splitting and expanding
@@ -134,7 +137,8 @@ def decomposition(self):
         qs = qml.tape.QuantumScript([CustomOp(0)], [qml.expval(qml.PauliZ(0))])

         with pytest.warns(UserWarning, match="device batch transforms cannot be turned off"):
-            results = qml.execute([qs], dev, device_batch_transform=False)
+            program, _ = dev.preprocess()
+            results = qml.execute([qs], dev, device_batch_transform=False, transform_program=program)
         assert len(results) == 1
         assert qml.math.allclose(results[0], -1)

@@ -157,8 +161,8 @@ def test_caching(gradient_fn):
     assert len(cache) == 1
     assert cache[qs.hash] == -1.0

-    assert results == (-1.0, -1.0)
-    assert results2 == (-1.0, -1.0)
+    assert results == [-1.0, -1.0]
+    assert results2 == [-1.0, -1.0]

     assert tracker.totals["batches"] == 1
     assert tracker.totals["executions"] == 1
diff --git a/tests/interfaces/default_qubit_2_integration/test_jax_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_jax_default_qubit_2.py
index 249794ee193..5d92ea5b92f 100644
--- a/tests/interfaces/default_qubit_2_integration/test_jax_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_jax_default_qubit_2.py
@@ -416,7 +416,20 @@ def cost_fn(a, p):
             [qml.expval(qml.PauliX(0))],
             shots=shots,
         )
-        return execute([tape], device, **execute_kwargs)[0]
+        gradient_fn = execute_kwargs["gradient_fn"]
+        if gradient_fn is None:
+            _gradient_method = None
+        elif isinstance(gradient_fn, str):
+            _gradient_method = gradient_fn
+        else:
+            _gradient_method = "gradient-transform"
+        conf = qml.devices.ExecutionConfig(
+            interface="autograd",
+            gradient_method=_gradient_method,
+            grad_on_execution=execute_kwargs.get("grad_on_execution", None),
+        )
+        program, _ = device.preprocess(execution_config=conf)
+        return execute([tape], device, **execute_kwargs, transform_program=program)[0]

     a = jnp.array(0.1)
     p = jnp.array([0.1, 0.2, 0.3])
diff --git a/tests/interfaces/default_qubit_2_integration/test_jax_jit_qnode_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_jax_jit_qnode_default_qubit_2.py
index 22b3bb88f37..2dd5b63f3f5 100644
--- a/tests/interfaces/default_qubit_2_integration/test_jax_jit_qnode_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_jax_jit_qnode_default_qubit_2.py
@@ -78,9 +78,7 @@ def circuit(a):
         assert isinstance(grad, jax.Array)
         assert grad.shape == ()

-    def test_changing_trainability(
-        self, dev, diff_method, grad_on_execution, interface, mocker, tol
-    ):
+    def test_changing_trainability(self, dev, diff_method, grad_on_execution, interface, tol):
         """Test changing the trainability of parameters changes the number of differentiation
         requests made"""
         if diff_method != "parameter-shift":
"parameter-shift": @@ -102,7 +100,6 @@ def circuit(a, b): return qml.expval(qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliY(1)])) grad_fn = jax.jit(jax.grad(circuit, argnums=[0, 1])) - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") res = grad_fn(a, b) # the tape has reported both arguments as trainable @@ -111,9 +108,6 @@ def circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called for each argument - assert len(spy.spy_return[0]) == 4 - # make the second QNode argument a constant grad_fn = jax.grad(circuit, argnums=0) res = grad_fn(a, b) @@ -124,9 +118,6 @@ def circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called only once - assert len(spy.spy_return[0]) == 2 - # trainability also updates on evaluation a = np.array(0.54, requires_grad=False) b = np.array(0.8, requires_grad=True) @@ -228,13 +219,10 @@ def circuit(a, p): ) assert np.allclose(res, expected, atol=tol, rtol=0) - def test_jacobian_options(self, dev, diff_method, grad_on_execution, interface, mocker): + def test_jacobian_options(self, dev, diff_method, grad_on_execution, interface): """Test setting jacobian options""" if diff_method != "finite-diff": pytest.skip("Test only applies to finite diff.") - - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - a = np.array([0.1, 0.2], requires_grad=True) @qnode( @@ -256,10 +244,6 @@ def circuit(a): jax.jit(jax.jacobian(circuit))(a) - for args in spy.call_args_list: - assert args[1]["approx_order"] == 2 - assert args[1]["h"] == 1e-8 - @pytest.mark.parametrize( "interface,dev,diff_method,grad_on_execution", interface_and_qubit_device_and_diff_method @@ -268,21 +252,15 @@ class TestVectorValuedQNode: """Test that using vector-valued QNodes with JAX integrate with the PennyLane stack""" - def test_diff_expval_expval(self, dev, diff_method, grad_on_execution, interface, mocker, tol): + def test_diff_expval_expval(self, dev, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation""" gradient_kwargs = {} - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + + if diff_method == "spsa": gradient_kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) gradient_kwargs["num_directions"] = 20 tol = TOL_FOR_SPSA - elif diff_method == "hadamard": - spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn") a = np.array(0.1, requires_grad=True) b = np.array(0.2, requires_grad=True) @@ -333,26 +311,15 @@ def circuit(a, b): assert res[1][1].shape == () assert np.allclose(res[1][1], expected[1][1], atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff"): - spy.assert_called() - - def test_jacobian_no_evaluate( - self, dev, diff_method, grad_on_execution, interface, mocker, tol - ): + def test_jacobian_no_evaluate(self, dev, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation when no prior circuit evaluation has been performed""" gradient_kwargs = {} - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, 
"transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + + if diff_method == "spsa": gradient_kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) gradient_kwargs["num_directions"] = 20 tol = TOL_FOR_SPSA - elif diff_method == "hadamard": - spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn") a = jax.numpy.array(0.1) b = jax.numpy.array(0.2) @@ -385,9 +352,6 @@ def circuit(a, b): assert r.shape == () assert np.allclose(r, e, atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff", "spsa"): - spy.assert_called() - # call the Jacobian with new parameters a = jax.numpy.array(0.6) b = jax.numpy.array(0.832) @@ -1146,7 +1110,7 @@ def cost_fn(x): assert np.allclose(hess, expected_hess, atol=tol, rtol=0) def test_hessian_vector_valued_separate_args( - self, dev, diff_method, grad_on_execution, interface, mocker, tol + self, dev, diff_method, grad_on_execution, interface, tol ): """Test hessian calculation of a vector valued QNode that has separate input arguments""" gradient_kwargs = {} @@ -1191,14 +1155,8 @@ def circuit(a, b): ) assert np.allclose(g, expected_g.T, atol=tol, rtol=0) - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") hess = jax.jit(jax.jacobian(jac_fn, argnums=[0, 1]))(a, b) - if diff_method == "backprop": - spy.assert_not_called() - elif diff_method == "parameter-shift": - spy.assert_called() - expected_hess = np.array( [ [ @@ -1301,7 +1259,7 @@ class TestTapeExpansion: @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only( - self, dev, diff_method, grad_on_execution, max_diff, interface, mocker + self, dev, diff_method, grad_on_execution, max_diff, interface ): """Test that a *supported* operation with no gradient recipe is only expanded for parameter-shift and finite-differences when it is trainable.""" @@ -1331,16 +1289,8 @@ def circuit(x, y): y = jax.numpy.array(0.7) circuit(x, y) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") jax.grad(circuit, argnums=[0])(x, y) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 3 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - assert input_tape.operations[2].name == "PhaseShift" - assert input_tape.operations[2].grad_method is None - @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic( self, dev, diff_method, grad_on_execution, max_diff, interface, mocker, tol diff --git a/tests/interfaces/default_qubit_2_integration/test_jax_qnode_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_jax_qnode_default_qubit_2.py index 0a9500c6b9d..fb8de4444a6 100644 --- a/tests/interfaces/default_qubit_2_integration/test_jax_qnode_default_qubit_2.py +++ b/tests/interfaces/default_qubit_2_integration/test_jax_qnode_default_qubit_2.py @@ -86,7 +86,7 @@ def circuit(a): assert grad.shape == () def test_changing_trainability( - self, dev, diff_method, grad_on_execution, interface, mocker, tol + self, dev, diff_method, grad_on_execution, interface, tol ): # pylint:disable=unused-argument """Test changing the trainability of parameters changes the number of differentiation requests made""" @@ -104,7 +104,6 @@ def circuit(a, b): return qml.expval(qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliY(1)])) grad_fn = jax.grad(circuit, argnums=[0, 1]) - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") res = grad_fn(a, b) # the tape has reported both arguments as trainable @@ -113,9 +112,6 @@ def 
circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called for each argument - assert len(spy.spy_return[0]) == 4 - # make the second QNode argument a constant grad_fn = jax.grad(circuit, argnums=0) res = grad_fn(a, b) @@ -126,9 +122,6 @@ def circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called only once - assert len(spy.spy_return[0]) == 2 - def test_classical_processing(self, dev, diff_method, grad_on_execution, interface): """Test classical processing within the quantum tape""" a = jax.numpy.array(0.1) @@ -221,14 +214,12 @@ def circuit(a, p): assert np.allclose(res, expected, atol=tol, rtol=0) def test_jacobian_options( - self, dev, diff_method, grad_on_execution, interface, mocker + self, dev, diff_method, grad_on_execution, interface ): # pylint:disable=unused-argument """Test setting jacobian options""" if diff_method != "finite-diff": pytest.skip("Test only applies to finite diff.") - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - a = jax.numpy.array([0.1, 0.2]) @qnode(dev, interface=interface, diff_method="finite-diff", h=1e-8, approx_order=2) @@ -239,10 +230,6 @@ def circuit(a): jax.jacobian(circuit)(a) - for args in spy.call_args_list: - assert args[1]["approx_order"] == 2 - assert args[1]["h"] == 1e-8 - @pytest.mark.parametrize( "interface,dev,diff_method,grad_on_execution", interface_and_device_and_diff_method @@ -251,18 +238,13 @@ class TestVectorValuedQNode: """Test that using vector-valued QNodes with JAX integrate with the PennyLane stack""" - def test_diff_expval_expval(self, dev, diff_method, grad_on_execution, interface, mocker, tol): + def test_diff_expval_expval(self, dev, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation""" kwargs = dict( diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA @@ -308,23 +290,13 @@ def circuit(a, b): assert res[1][1].shape == () assert np.allclose(res[1][1], expected[1][1], atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff"): - spy.assert_called() - - def test_jacobian_no_evaluate( - self, dev, diff_method, grad_on_execution, interface, mocker, tol - ): + def test_jacobian_no_evaluate(self, dev, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation when no prior circuit evaluation has been performed""" kwargs = dict( diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA @@ -351,9 +323,6 @@ def circuit(a, b): assert res[i][j].shape == () assert np.allclose(res[i][j], expected[i][j], 
-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
         # call the Jacobian with new parameters
         a = jax.numpy.array(0.6)
         b = jax.numpy.array(0.832)
@@ -1068,7 +1037,7 @@ def cost_fn(x):
         assert np.allclose(hess, expected_hess, atol=tol, rtol=0)

     def test_hessian_vector_valued_separate_args(
-        self, dev, diff_method, grad_on_execution, interface, mocker, tol
+        self, dev, diff_method, grad_on_execution, interface, tol
     ):
         """Test hessian calculation of a vector valued QNode that has separate input arguments"""
         gradient_kwargs = {}
@@ -1113,14 +1082,8 @@ def circuit(a, b):
         )
         assert np.allclose(g, expected_g.T, atol=tol, rtol=0)

-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
         hess = jax.jacobian(jac_fn, argnums=[0, 1])(a, b)

-        if diff_method == "backprop":
-            spy.assert_not_called()
-        elif diff_method == "parameter-shift":
-            spy.assert_called()
-
         expected_hess = np.array(
             [
                 [
@@ -1221,7 +1184,7 @@ class TestTapeExpansion:

     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_gradient_expansion_trainable_only(
-        self, dev, diff_method, grad_on_execution, max_diff, interface, mocker
+        self, dev, diff_method, grad_on_execution, max_diff, interface
     ):
         """Test that a *supported* operation with no gradient recipe is only
         expanded for parameter-shift and finite-differences when it is trainable."""
@@ -1251,16 +1214,8 @@ def circuit(x, y):
         y = jax.numpy.array(0.7)

         circuit(x, y)
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         jax.grad(circuit, argnums=[0])(x, y)

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 3
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-        assert input_tape.operations[2].name == "PhaseShift"
-        assert input_tape.operations[2].grad_method is None
-
     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_hamiltonian_expansion_analytic(
         self, dev, diff_method, grad_on_execution, max_diff, interface, mocker, tol
diff --git a/tests/interfaces/default_qubit_2_integration/test_tensorflow_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_tensorflow_default_qubit_2.py
index 0312d5875ef..5e3eb211b25 100644
--- a/tests/interfaces/default_qubit_2_integration/test_tensorflow_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_tensorflow_default_qubit_2.py
@@ -456,7 +456,20 @@ def cost_fn(a, p):
         tape = qml.tape.QuantumScript(
             [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]
         )
-        return execute([tape], device, **execute_kwargs)[0]
+        gradient_fn = execute_kwargs["gradient_fn"]
+        if gradient_fn is None:
+            _gradient_method = None
+        elif isinstance(gradient_fn, str):
+            _gradient_method = gradient_fn
+        else:
+            _gradient_method = "gradient-transform"
+        config = qml.devices.ExecutionConfig(
+            interface="autograd",
+            gradient_method=_gradient_method,
+            grad_on_execution=execute_kwargs.get("grad_on_execution", None),
+        )
+        program, _ = device.preprocess(execution_config=config)
+        return execute([tape], device, **execute_kwargs, transform_program=program)[0]

     a = tf.constant(0.1)
     p = tf.Variable([0.1, 0.2, 0.3])
diff --git a/tests/interfaces/default_qubit_2_integration/test_tensorflow_qnode_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_tensorflow_qnode_default_qubit_2.py
index b894251fa5f..6a8ea43db89 100644
--- a/tests/interfaces/default_qubit_2_integration/test_tensorflow_qnode_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_tensorflow_qnode_default_qubit_2.py
@@ -141,23 +141,15 @@ def circuit(p1, p2=y, **kwargs):
         expected = "0: ──RX(0.10)──RX(0.40)─╭●─┤ State\n1: ──RY(0.06)───────────╰X─┤ State"
         assert result == expected

-    def test_jacobian(self, dev, diff_method, grad_on_execution, mocker, tol, interface):
+    def test_jacobian(self, dev, diff_method, grad_on_execution, tol, interface):
         """Test jacobian calculation"""
         kwargs = dict(
             diff_method=diff_method, grad_on_execution=grad_on_execution, interface=interface
         )
-        spy = None
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA)
             kwargs["num_directions"] = 20
             tol = TOL_FOR_SPSA
-        if diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a = tf.Variable(0.1, dtype=tf.float64)
         b = tf.Variable(0.2, dtype=tf.float64)
@@ -185,16 +177,11 @@ def circuit(a, b):
         expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        if spy is not None:
-            spy.assert_called()
-
-    def test_jacobian_options(self, dev, diff_method, grad_on_execution, mocker, interface):
+    def test_jacobian_options(self, dev, diff_method, grad_on_execution, interface):
         """Test setting finite-difference jacobian options"""
         if diff_method not in {"finite-diff", "spsa"}:
             pytest.skip("Test only works with finite diff and spsa.")

-        spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-
         a = tf.Variable([0.1, 0.2])

         @qnode(
@@ -215,13 +202,7 @@ def circuit(a):

         tape.jacobian(res, a)

-        for args in spy.call_args_list:
-            assert args[1]["approx_order"] == 2
-            assert args[1]["h"] == 1e-8
-
-    def test_changing_trainability(
-        self, dev, diff_method, grad_on_execution, mocker, tol, interface
-    ):
+    def test_changing_trainability(self, dev, diff_method, grad_on_execution, tol, interface):
         """Test changing the trainability of parameters changes the number of differentiation
         requests made"""
         if diff_method in ["backprop", "adjoint", "spsa"]:
@@ -230,12 +211,8 @@ def test_changing_trainability(
         a = tf.Variable(0.1, dtype=tf.float64)
         b = tf.Variable(0.2, dtype=tf.float64)

-        exp_num_calls = 4  # typically two shifted circuits per parameter
-
         diff_kwargs = {}
-        if diff_method == "hadamard":
-            exp_num_calls = 2  # only one circuit per parameter
-        elif diff_method == "finite-diff":
+        if diff_method == "finite-diff":
             diff_kwargs = {"approx_order": 2, "strategy": "center"}

         @qnode(
@@ -261,8 +238,6 @@ def circuit(a, b):
         expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
-
         jac = tape.jacobian(res, [a, b])
         expected = [
             [-tf.sin(a), tf.sin(a) * tf.sin(b)],
@@ -270,9 +245,6 @@ def circuit(a, b):
         ]
         assert np.allclose(jac, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called for each argument
-        assert len(spy.spy_return[0]) == exp_num_calls
-
         # make the second QNode argument a constant
         a = tf.Variable(0.54, dtype=tf.float64)
         b = tf.constant(0.8, dtype=tf.float64)
@@ -287,14 +259,10 @@ def circuit(a, b):
         expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        spy.call_args_list = []
         jac = tape.jacobian(res, a)
         expected = [-tf.sin(a), tf.sin(a) * tf.sin(b)]
         assert np.allclose(jac, expected, atol=tol, rtol=0)

-        # the gradient transform has only been called once
-        assert len(spy.call_args_list) == 1
-
     def test_classical_processing(self, dev, diff_method, grad_on_execution, interface):
         """Test classical processing within the quantum tape"""
         a = tf.Variable(0.1, dtype=tf.float64)
@@ -956,7 +924,7 @@ class TestTapeExpansion:
     """Test that tape expansion within the QNode
     integrates correctly with the TF interface"""

-    def test_gradient_expansion(self, dev, diff_method, grad_on_execution, mocker, interface):
+    def test_gradient_expansion(self, dev, diff_method, grad_on_execution, interface):
         """Test that a *supported* operation with no gradient recipe is expanded for both
         parameter-shift and finite-differences, but not for execution."""
         if diff_method not in ("parameter-shift", "finite-diff", "spsa", "hadamard"):
@@ -985,24 +953,8 @@ def circuit(x):
         with tf.GradientTape() as t2:
             with tf.GradientTape() as t1:
                 loss = circuit(x)
-
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         res = t1.gradient(loss, x)

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 2
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-
-        if diff_method != "hadamard":
-            shifted_tape1, shifted_tape2 = spy.spy_return[0]
-
-            assert len(shifted_tape1.operations) == 2
-            assert shifted_tape1.operations[1].name == "RY"
-
-            assert len(shifted_tape2.operations) == 2
-            assert shifted_tape2.operations[1].name == "RY"
-
         assert np.allclose(res, -3 * np.sin(3 * x))

         if diff_method == "parameter-shift":
@@ -1012,7 +964,7 @@ def circuit(x):

     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_gradient_expansion_trainable_only(
-        self, dev, diff_method, grad_on_execution, max_diff, mocker, interface
+        self, dev, diff_method, grad_on_execution, max_diff, interface
     ):
         """Test that a *supported* operation with no gradient recipe is only
         expanded for parameter-shift and finite-differences when it is trainable."""
@@ -1044,15 +996,7 @@ def circuit(x, y):
         with tf.GradientTape() as t:
             res = circuit(x, y)

-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
-        res = t.gradient(res, [x, y])
-
-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 3
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-        assert input_tape.operations[2].name == "PhaseShift"
-        assert input_tape.operations[2].grad_method is None
+        t.gradient(res, [x, y])

     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_hamiltonian_expansion_analytic(
diff --git a/tests/interfaces/default_qubit_2_integration/test_torch_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_torch_default_qubit_2.py
index 34d9f82505c..d4ae61f39b2 100644
--- a/tests/interfaces/default_qubit_2_integration/test_torch_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_torch_default_qubit_2.py
@@ -448,7 +448,20 @@ def cost_fn(a, p):
         tape = qml.tape.QuantumScript(
             [qml.RX(a, wires=0), U3(*p, wires=0)], [qml.expval(qml.PauliX(0))]
         )
-        return execute([tape], device, **execute_kwargs)[0]
+        gradient_fn = execute_kwargs["gradient_fn"]
+        if gradient_fn is None:
+            _gradient_method = None
+        elif isinstance(gradient_fn, str):
+            _gradient_method = gradient_fn
+        else:
+            _gradient_method = "gradient-transform"
+        config = qml.devices.ExecutionConfig(
+            interface="autograd",
+            gradient_method=_gradient_method,
+            grad_on_execution=execute_kwargs.get("grad_on_execution", None),
+        )
+        program, _ = device.preprocess(execution_config=config)
+        return execute([tape], device, **execute_kwargs, transform_program=program)[0]

     a = torch.tensor(0.1, requires_grad=False)
     p = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
diff --git a/tests/interfaces/default_qubit_2_integration/test_torch_qnode_default_qubit_2.py b/tests/interfaces/default_qubit_2_integration/test_torch_qnode_default_qubit_2.py
index a3f23a6a67c..9250186f58b 100644
--- a/tests/interfaces/default_qubit_2_integration/test_torch_qnode_default_qubit_2.py
+++ b/tests/interfaces/default_qubit_2_integration/test_torch_qnode_default_qubit_2.py
@@ -139,22 +139,15 @@ def circuit(p1, p2=y, **kwargs):

         assert result == expected

-    def test_jacobian(self, interface, dev, diff_method, grad_on_execution, mocker, tol):
+    def test_jacobian(self, interface, dev, diff_method, grad_on_execution, tol):
         """Test jacobian calculation"""
         kwargs = dict(
             diff_method=diff_method, grad_on_execution=grad_on_execution, interface=interface
         )
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+
+        if diff_method == "spsa":
             kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA)
             kwargs["num_directions"] = 20
             tol = TOL_FOR_SPSA
-        elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a_val = 0.1
         b_val = 0.2
@@ -196,9 +189,6 @@ def circuit(a, b):
         assert np.allclose(a.grad, expected[0], atol=tol, rtol=0)
         assert np.allclose(b.grad, expected[1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
     # TODO: fix this behavior with float: already present before return type.
     @pytest.mark.xfail
     def test_jacobian_dtype(self, interface, dev, diff_method, grad_on_execution):
@@ -234,13 +224,11 @@ def circuit(a, b):
         assert a.grad.dtype is torch.float32
         assert b.grad.dtype is torch.float32

-    def test_jacobian_options(self, interface, dev, diff_method, grad_on_execution, mocker):
+    def test_jacobian_options(self, interface, dev, diff_method, grad_on_execution):
         """Test setting jacobian options"""
         if diff_method not in {"finite-diff", "spsa"}:
             pytest.skip("Test only works with finite-diff and spsa")

-        spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-
         a = torch.tensor([0.1, 0.2], requires_grad=True)

         @qnode(
@@ -259,13 +247,7 @@ def circuit(a):

         res = circuit(a)
         res.backward()

-        for args in spy.call_args_list:
-            assert args[1]["approx_order"] == 2
-            assert args[1]["h"] == 1e-8
-
-    def test_changing_trainability(
-        self, interface, dev, diff_method, grad_on_execution, mocker, tol
-    ):
+    def test_changing_trainability(self, interface, dev, diff_method, grad_on_execution, tol):
         """Test that changing the trainability of parameters changes the number of
         differentiation requests made"""
         if diff_method != "parameter-shift":
@@ -296,8 +278,6 @@ def circuit(a, b):
         assert np.allclose(res[0].detach().numpy(), expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1].detach().numpy(), expected[1], atol=tol, rtol=0)

-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-
         loss = res[0] + res[1]
         loss.backward()
@@ -307,9 +287,6 @@ def circuit(a, b):
         ]
         assert np.allclose([a.grad, b.grad], expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called for each argument
-        assert len(spy.spy_return[0]) == 4
-
         # make the second QNode argument a constant
         a_val = 0.54
         b_val = 0.8
@@ -327,15 +304,11 @@ def circuit(a, b):
         assert np.allclose(res[0].detach().numpy(), expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1].detach().numpy(), expected[1], atol=tol, rtol=0)

-        spy.call_args_list = []
         loss = res[0] + res[1]
         loss.backward()

         expected = -np.sin(a_val) + np.sin(a_val) * np.sin(b_val)
         assert np.allclose(a.grad, expected, atol=tol, rtol=0)

-        # the gradient transform has only been called once
-        assert len(spy.call_args_list) == 1
-
     def test_classical_processing(self, interface, dev, diff_method, grad_on_execution):
         """Test classical processing within the quantum tape"""
         a = torch.tensor(0.1, dtype=torch.float64, requires_grad=True)
@@ -1061,7 +1034,7 @@ class TestTapeExpansion:
     """Test that tape expansion within the QNode
     integrates correctly with the Torch interface"""

-    def test_gradient_expansion(self, dev, diff_method, grad_on_execution, mocker):
+    def test_gradient_expansion(self, dev, diff_method, grad_on_execution):
         """Test that a *supported* operation with no gradient recipe is expanded for both
         parameter-shift and finite-differences, but not for execution."""
         if diff_method not in ("parameter-shift", "finite-diff", "spsa", "hadamard"):
@@ -1089,24 +1062,9 @@ def circuit(x):

         loss = circuit(x)

-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         loss.backward()
         res = x.grad

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 2
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-
-        if diff_method != "hadamard":
-            shifted_tape1, shifted_tape2 = spy.spy_return[0]
-
-            assert len(shifted_tape1.operations) == 2
-            assert shifted_tape1.operations[1].name == "RY"
-
-            assert len(shifted_tape2.operations) == 2
-            assert shifted_tape2.operations[1].name == "RY"
-
         assert torch.allclose(res, -3 * torch.sin(3 * x))

         if diff_method == "parameter-shift":
@@ -1116,7 +1074,11 @@ def circuit(x):

     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_gradient_expansion_trainable_only(
-        self, dev, diff_method, grad_on_execution, max_diff, mocker
+        self,
+        dev,
+        diff_method,
+        grad_on_execution,
+        max_diff,
     ):
         """Test that a *supported* operation with no gradient recipe is only
         expanded for parameter-shift and finite-differences when it is trainable."""
@@ -1146,17 +1108,8 @@ def circuit(x, y):
         y = torch.tensor(0.7, requires_grad=False)

         loss = circuit(x, y)
-
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         loss.backward()

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 3
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-        assert input_tape.operations[2].name == "PhaseShift"
-        assert input_tape.operations[2].grad_method is None
-
     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_hamiltonian_expansion_analytic(
         self, dev, diff_method, grad_on_execution, max_diff, tol
diff --git a/tests/interfaces/test_autograd_qnode.py b/tests/interfaces/test_autograd_qnode.py
index fdaf469980c..73ad1da9e39 100644
--- a/tests/interfaces/test_autograd_qnode.py
+++ b/tests/interfaces/test_autograd_qnode.py
@@ -172,23 +172,17 @@ def circuit(a):
         assert isinstance(grad, float)
         assert grad.shape == tuple()

-    def test_jacobian(self, interface, dev_name, diff_method, grad_on_execution, mocker, tol):
+    def test_jacobian(self, interface, dev_name, diff_method, grad_on_execution, tol):
         """Test jacobian calculation"""
         num_wires = 2
         kwargs = dict(
             diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution
         )
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             spsa_kwargs = dict(sampler_rng=np.random.default_rng(SEED_FOR_SPSA), num_directions=10)
             kwargs = {**kwargs, **spsa_kwargs}
             tol = TOL_FOR_SPSA
         elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")
             num_wires = 3

         a = np.array(0.1, requires_grad=True)
@@ -226,28 +220,17 @@ def cost(x, y):
         assert res[1].shape == (2,)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
-    def test_jacobian_no_evaluate(
-        self, interface, dev_name, diff_method, grad_on_execution, mocker, tol
-    ):
+    def test_jacobian_no_evaluate(self, interface, dev_name, diff_method, grad_on_execution, tol):
         """Test jacobian calculation when no prior circuit evaluation has been performed"""
         num_wires = 2
         kwargs = dict(
             diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution
         )
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA)
             tol = TOL_FOR_SPSA
         elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")
             num_wires = 3

         a = np.array(0.1, requires_grad=True)
@@ -271,20 +254,16 @@ def cost(x, y):
         assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
         # call the Jacobian with new parameters
         a = np.array(0.6, requires_grad=True)
         b = np.array(0.832, requires_grad=True)

         res = jac_fn(a, b)
         expected = ([-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)])
-        expected = ([-np.sin(a), np.sin(a) * np.sin(b)], [0, -np.cos(a) * np.cos(b)])
         assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
         assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

-    def test_jacobian_options(self, interface, dev_name, diff_method, grad_on_execution, mocker):
+    def test_jacobian_options(self, interface, dev_name, diff_method, grad_on_execution):
         """Test setting jacobian options"""
         wires = [0]
         if diff_method in ["backprop", "adjoint"]:
@@ -310,17 +289,10 @@ def circuit(a):
             return qml.expval(qml.PauliZ(0))

         circuit(a)
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         qml.jacobian(circuit)(a)

-        for args in spy.call_args_list:
-            for key, val in kwargs.items():
-                assert args[1][key] == val
-
-    def test_changing_trainability(
-        self, interface, dev_name, diff_method, grad_on_execution, mocker, tol
-    ):
+    def test_changing_trainability(self, interface, dev_name, diff_method, grad_on_execution, tol):
         """Test changing the trainability of parameters changes the number of differentiation
         requests made"""
         if diff_method != "parameter-shift":
@@ -342,7 +314,6 @@ def loss(a, b):
             return np.sum(autograd.numpy.hstack(circuit(a, b)))

         grad_fn = qml.grad(loss)
-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
         res = grad_fn(a, b)

         # the tape has reported both arguments as trainable
@@ -351,9 +322,6 @@ def loss(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called for each argument
-        assert len(spy.spy_return[0]) == 4
-
         # make the second QNode argument a constant
         a = np.array(0.54, requires_grad=True)
         b = np.array(0.8, requires_grad=False)
@@ -366,9 +334,6 @@ def loss(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called only once
-        assert len(spy.spy_return[0]) == 2
-
         # trainability also updates on evaluation
         a = np.array(0.54, requires_grad=False)
         b = np.array(0.8, requires_grad=True)
@@ -1264,7 +1229,7 @@ def cost_fn(x):
         assert np.allclose(hess, expected_hess, atol=tol, rtol=0)

     def test_hessian_vector_valued_separate_args(
-        self, interface, dev_name, diff_method, grad_on_execution, mocker, tol
+        self, interface, dev_name, diff_method, grad_on_execution, tol
     ):
         """Test hessian calculation of a vector valued QNode that has separate input arguments"""
         if diff_method not in {"parameter-shift", "backprop"}:
@@ -1307,7 +1272,6 @@ def circuit(a, b):
         assert g[1].shape == (2,)
         assert np.allclose(g[1], expected_g[1], atol=tol, rtol=0)

-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
         jac_fn_a = lambda *args: jac_fn(*args)[0]
         jac_fn_b = lambda *args: jac_fn(*args)[1]
         hess_a = qml.jacobian(jac_fn_a)(a, b)
@@ -1315,11 +1279,6 @@ def circuit(a, b):
         assert isinstance(hess_a, tuple) and len(hess_a) == 2
         assert isinstance(hess_b, tuple) and len(hess_b) == 2

-        if diff_method == "backprop":
-            spy.assert_not_called()
-        elif diff_method == "parameter-shift":
-            spy.assert_called()
-
         exp_hess_a = (
             [-0.5 * np.cos(a) * np.cos(b), 0.5 * np.cos(a) * np.cos(b)],
             [0.5 * np.sin(a) * np.sin(b), -0.5 * np.sin(a) * np.sin(b)],
@@ -1584,7 +1543,7 @@ class TestTapeExpansion:

     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_gradient_expansion_trainable_only(
-        self, dev_name, diff_method, grad_on_execution, max_diff, mocker
+        self, dev_name, diff_method, grad_on_execution, max_diff
     ):
         """Test that a *supported* operation with no gradient recipe is only
         expanded for parameter-shift and finite-differences when it is trainable."""
@@ -1613,21 +1572,12 @@ def circuit(x, y):
             PhaseShift(2 * y, wires=0)
             return qml.expval(qml.PauliX(0))

-        spy = mocker.spy(circuit.device, "batch_execute")
         x = np.array(0.5, requires_grad=True)
         y = np.array(0.7, requires_grad=False)

         circuit(x, y)
-        spy = mocker.spy(circuit.gradient_fn, "transform_fn")
         qml.grad(circuit)(x, y)

-        input_tape = spy.call_args[0][0]
-        assert len(input_tape.operations) == 3
-        assert input_tape.operations[1].name == "RY"
-        assert input_tape.operations[1].data[0] == 3 * x
-        assert input_tape.operations[2].name == "PhaseShift"
-        assert input_tape.operations[2].grad_method is None
-
     @pytest.mark.parametrize("max_diff", [1, 2])
     def test_hamiltonian_expansion_analytic(
         self, dev_name, diff_method, grad_on_execution, max_diff, tol
diff --git a/tests/interfaces/test_jacobian_products.py b/tests/interfaces/test_jacobian_products.py
index f4d1cd036dc..b8a45d4afb1 100644
--- a/tests/interfaces/test_jacobian_products.py
+++ b/tests/interfaces/test_jacobian_products.py
@@ -57,7 +57,7 @@ def test_transform_jacobian_product_basics(self):
         expected_repr = (
             f"TransformJacobianProducts({repr(inner_execute_numpy)}, "
-            "gradient_transform=, "
+            "gradient_transform=, "
             "gradient_kwargs={'aux_wire': 'aux'})"
         )
         assert repr(jpc) == expected_repr
diff --git a/tests/interfaces/test_jax_jit_qnode.py b/tests/interfaces/test_jax_jit_qnode.py
index 6fb29cec859..f8fc3b262db 100644
--- a/tests/interfaces/test_jax_jit_qnode.py
+++ b/tests/interfaces/test_jax_jit_qnode.py
@@ -83,9 +83,7 @@ def circuit(a):
         assert isinstance(grad, jax.Array)
         assert grad.shape == ()

-    def test_changing_trainability(
-        self, dev_name, diff_method, grad_on_execution, interface, mocker, tol
-    ):
+    def test_changing_trainability(self, dev_name, diff_method, grad_on_execution, interface, tol):
         """Test changing the trainability of parameters changes the number of differentiation
         requests made"""
         if diff_method != "parameter-shift":
@@ -109,7 +107,6 @@ def circuit(a, b):
             return qml.expval(qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliY(1)]))

         grad_fn = jax.jit(jax.grad(circuit, argnums=[0, 1]))
-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
         res = grad_fn(a, b)

         # the tape has reported both arguments as trainable
@@ -118,9 +115,6 @@ def circuit(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called for each argument
-        assert len(spy.spy_return[0]) == 4
-
         # make the second QNode argument a constant
         grad_fn = jax.grad(circuit, argnums=0)
         res = grad_fn(a, b)
@@ -131,9 +125,6 @@ def circuit(a, b):
         expected = [-np.sin(a) + np.sin(a) * np.sin(b)]
         assert np.allclose(res, expected, atol=tol, rtol=0)

-        # The parameter-shift rule has been called only once
-        assert len(spy.spy_return[0]) == 2
-
         # trainability also updates on evaluation
         a = np.array(0.54, requires_grad=False)
         b = np.array(0.8, requires_grad=True)
@@ -258,13 +249,11 @@ def circuit(a, p):
         )
         assert np.allclose(res, expected, atol=tol, rtol=0)

-    def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, interface, mocker):
+    def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, interface):
         """Test setting jacobian options"""
         if diff_method != "finite-diff":
             pytest.skip("Test only applies to finite diff.")

-        spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-
         a = np.array([0.1, 0.2], requires_grad=True)

         dev = qml.device(dev_name, wires=1)
@@ -288,10 +277,6 @@ def circuit(a):

         jax.jit(jax.jacobian(circuit))(a)

-        for args in spy.call_args_list:
-            assert args[1]["approx_order"] == 2
-            assert args[1]["h"] == 1e-8
-
     @pytest.mark.parametrize(
         "interface,dev_name,diff_method,grad_on_execution", interface_and_qubit_device_and_diff_method
@@ -300,22 +285,14 @@ class TestVectorValuedQNode:
     """Test that using vector-valued QNodes with JAX integrate with the
     PennyLane stack"""

-    def test_diff_expval_expval(
-        self, dev_name, diff_method, grad_on_execution, interface, mocker, tol
-    ):
+    def test_diff_expval_expval(self, dev_name, diff_method, grad_on_execution, interface, tol):
         """Test jacobian calculation"""
         gradient_kwargs = {}
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+
+        if diff_method == "spsa":
             gradient_kwargs = {"sampler_rng": SEED_FOR_SPSA}
             tol = TOL_FOR_SPSA
-        elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a = np.array(0.1, requires_grad=True)
         b = np.array(0.2, requires_grad=True)
@@ -373,25 +350,13 @@ def circuit(a, b):
         assert res[1][1].shape == ()
         assert np.allclose(res[1][1], expected[1][1], atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff"):
-            spy.assert_called()
-
-    def test_jacobian_no_evaluate(
-        self, dev_name, diff_method, grad_on_execution, interface, mocker, tol
-    ):
+    def test_jacobian_no_evaluate(self, dev_name, diff_method, grad_on_execution, interface, tol):
         """Test jacobian calculation when no prior circuit evaluation has been performed"""
         gradient_kwargs = {}
-        if diff_method == "parameter-shift":
-            spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
-        elif diff_method == "finite-diff":
-            spy = mocker.spy(qml.gradients.finite_diff, "transform_fn")
-        elif diff_method == "spsa":
-            spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn")
+        if diff_method == "spsa":
             gradient_kwargs = {"sampler_rng": SEED_FOR_SPSA}
             tol = TOL_FOR_SPSA
-        elif diff_method == "hadamard":
-            spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn")

         a = jax.numpy.array(0.1)
         b = jax.numpy.array(0.2)
@@ -431,9 +396,6 @@ def circuit(a, b):
             assert r.shape == ()
             assert np.allclose(r, e, atol=tol, rtol=0)

-        if diff_method in ("parameter-shift", "finite-diff", "spsa"):
-            spy.assert_called()
-
         # call the Jacobian with new parameters
         a = jax.numpy.array(0.6)
         b = jax.numpy.array(0.832)
@@ -1270,7 +1232,7 @@ def cost_fn(x):
         assert np.allclose(hess, expected_hess, atol=tol, rtol=0)

     def test_hessian_vector_valued_separate_args(
-        self, dev_name, diff_method, grad_on_execution, interface, mocker, tol
+        self, dev_name, diff_method, grad_on_execution, interface, tol
     ):
         """Test hessian calculation of a vector valued QNode that has separate input arguments"""
         gradient_kwargs = {}
@@ -1321,15 +1283,8 @@ def circuit(a, b):
             ]
         )
         assert np.allclose(g, expected_g.T, atol=tol, rtol=0)
-
-        spy = mocker.spy(qml.gradients.param_shift, "transform_fn")
"transform_fn") hess = jax.jit(jax.jacobian(jac_fn, argnums=[0, 1]))(a, b) - if diff_method == "backprop": - spy.assert_not_called() - elif diff_method == "parameter-shift": - spy.assert_called() - expected_hess = np.array( [ [ @@ -1528,7 +1483,7 @@ class TestTapeExpansion: @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only( - self, dev_name, diff_method, grad_on_execution, max_diff, interface, mocker + self, dev_name, diff_method, grad_on_execution, max_diff, interface ): """Test that a *supported* operation with no gradient recipe is only expanded for parameter-shift and finite-differences when it is trainable.""" @@ -1561,21 +1516,11 @@ def circuit(x, y): PhaseShift(2 * y, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = jax.numpy.array(0.5) y = jax.numpy.array(0.7) circuit(x, y) - - spy = mocker.spy(circuit.gradient_fn, "transform_fn") jax.grad(circuit, argnums=[0])(x, y) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 3 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - assert input_tape.operations[2].name == "PhaseShift" - assert input_tape.operations[2].grad_method is None - @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic( self, dev_name, diff_method, grad_on_execution, max_diff, interface, mocker, tol diff --git a/tests/interfaces/test_jax_qnode.py b/tests/interfaces/test_jax_qnode.py index 91260e2d90c..6dec4753854 100644 --- a/tests/interfaces/test_jax_qnode.py +++ b/tests/interfaces/test_jax_qnode.py @@ -86,9 +86,7 @@ def circuit(a): assert isinstance(grad, jax.Array) assert grad.shape == () - def test_changing_trainability( - self, dev_name, diff_method, grad_on_execution, interface, mocker, tol - ): + def test_changing_trainability(self, dev_name, diff_method, grad_on_execution, interface, tol): """Test changing the trainability of parameters changes the number of differentiation requests made""" if diff_method != "parameter-shift": @@ -112,7 +110,6 @@ def circuit(a, b): return qml.expval(qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliY(1)])) grad_fn = jax.grad(circuit, argnums=[0, 1]) - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") res = grad_fn(a, b) # the tape has reported both arguments as trainable @@ -121,9 +118,6 @@ def circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b), -np.cos(a) * np.cos(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called for each argument - assert len(spy.spy_return[0]) == 4 - # make the second QNode argument a constant grad_fn = jax.grad(circuit, argnums=0) res = grad_fn(a, b) @@ -134,9 +128,6 @@ def circuit(a, b): expected = [-np.sin(a) + np.sin(a) * np.sin(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called only once - assert len(spy.spy_return[0]) == 2 - # trainability also updates on evaluation a = np.array(0.54, requires_grad=False) b = np.array(0.8, requires_grad=True) @@ -260,13 +251,11 @@ def circuit(a, p): ) assert np.allclose(res, expected, atol=tol, rtol=0) - def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, interface, mocker): + def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, interface): """Test setting jacobian options""" if diff_method != "finite-diff": pytest.skip("Test only applies to finite diff.") - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - a = 
np.array([0.1, 0.2], requires_grad=True) dev = qml.device(dev_name, wires=1) @@ -286,10 +275,6 @@ def circuit(a): jax.jacobian(circuit)(a) - for args in spy.call_args_list: - assert args[1]["approx_order"] == 2 - assert args[1]["h"] == 1e-8 - @pytest.mark.parametrize( "interface,dev_name,diff_method,grad_on_execution", interface_and_qubit_device_and_diff_method @@ -298,19 +283,12 @@ class TestVectorValuedQNode: """Test that using vector-valued QNodes with JAX integrate with the PennyLane stack""" - def test_diff_expval_expval( - self, dev_name, diff_method, grad_on_execution, interface, mocker, tol - ): + def test_diff_expval_expval(self, dev_name, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation""" kwargs = dict( diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA @@ -362,22 +340,12 @@ def circuit(a, b): assert res[1][1].shape == () assert np.allclose(res[1][1], expected[1][1], atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff"): - spy.assert_called() - - def test_jacobian_no_evaluate( - self, dev_name, diff_method, grad_on_execution, interface, mocker, tol - ): + def test_jacobian_no_evaluate(self, dev_name, diff_method, grad_on_execution, interface, tol): """Test jacobian calculation when no prior circuit evaluation has been performed""" kwargs = dict( diff_method=diff_method, interface=interface, grad_on_execution=grad_on_execution ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA @@ -412,9 +380,6 @@ def circuit(a, b): assert r.shape == () assert np.allclose(r, e, atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff", "spsa"): - spy.assert_called() - # call the Jacobian with new parameters a = jax.numpy.array(0.6) b = jax.numpy.array(0.832) @@ -1217,7 +1182,7 @@ def cost_fn(x): assert np.allclose(hess, expected_hess, atol=tol, rtol=0) def test_hessian_vector_valued_separate_args( - self, dev_name, diff_method, grad_on_execution, interface, mocker, tol + self, dev_name, diff_method, grad_on_execution, interface, tol ): """Test hessian calculation of a vector valued QNode that has separate input arguments""" gradient_kwargs = {} @@ -1268,15 +1233,8 @@ def circuit(a, b): ] ) assert np.allclose(g, expected_g.T, atol=tol, rtol=0) - - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") hess = jax.jacobian(jac_fn, argnums=[0, 1])(a, b) - if diff_method == "backprop": - spy.assert_not_called() - elif diff_method == "parameter-shift": - spy.assert_called() - expected_hess = np.array( [ [ @@ -1472,7 +1430,7 @@ class TestTapeExpansion: @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only( - self, dev_name, diff_method, grad_on_execution, max_diff, interface, mocker + self, dev_name, diff_method, grad_on_execution, 
max_diff, interface ): """Test that a *supported* operation with no gradient recipe is only expanded for parameter-shift and finite-differences when it is trainable.""" @@ -1505,21 +1463,12 @@ def circuit(x, y): PhaseShift(2 * y, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = jax.numpy.array(0.5) y = jax.numpy.array(0.7) circuit(x, y) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") jax.grad(circuit, argnums=[0])(x, y) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 3 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - assert input_tape.operations[2].name == "PhaseShift" - assert input_tape.operations[2].grad_method is None - @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic( self, dev_name, diff_method, grad_on_execution, max_diff, interface, mocker, tol diff --git a/tests/interfaces/test_tensorflow_qnode.py b/tests/interfaces/test_tensorflow_qnode.py index 7773c86d27d..da68bbf7f2f 100644 --- a/tests/interfaces/test_tensorflow_qnode.py +++ b/tests/interfaces/test_tensorflow_qnode.py @@ -161,24 +161,19 @@ def circuit(p1, p2=y, **kwargs): expected = "0: ──RX(0.10)──RX(0.40)─╭●─┤ State\n1: ──RY(0.06)───────────╰X─┤ State" assert result == expected - def test_jacobian(self, dev_name, diff_method, grad_on_execution, mocker, tol, interface): + def test_jacobian(self, dev_name, diff_method, grad_on_execution, tol, interface): """Test jacobian calculation""" kwargs = dict( diff_method=diff_method, grad_on_execution=grad_on_execution, interface=interface ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA num_wires = 2 if diff_method == "hadamard": - spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn") num_wires = 3 dev = qml.device(dev_name, wires=num_wires) @@ -209,9 +204,6 @@ def circuit(a, b): expected = [[-tf.sin(a), tf.sin(a) * tf.sin(b)], [0, -tf.cos(a) * tf.cos(b)]] assert np.allclose(res, expected, atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff", "spsa"): - spy.assert_called() - def test_jacobian_dtype(self, dev_name, diff_method, grad_on_execution, interface): """Test calculating the jacobian with a different datatype""" if diff_method == "backprop": @@ -249,13 +241,11 @@ def circuit(a, b): res = tape.jacobian(res, [a, b]) assert [r.dtype is tf.float32 for r in res] - def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, mocker, interface): + def test_jacobian_options(self, dev_name, diff_method, grad_on_execution, interface): """Test setting finite-difference jacobian options""" if diff_method not in {"finite-diff", "spsa"}: pytest.skip("Test only works with finite diff and spsa.") - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - a = tf.Variable([0.1, 0.2]) num_wires = 1 @@ -283,13 +273,7 @@ def circuit(a): tape.jacobian(res, a) - for args in spy.call_args_list: - assert args[1]["approx_order"] == 2 - assert args[1]["h"] == 1e-8 - - def test_changing_trainability( - self, dev_name, diff_method, grad_on_execution, mocker, tol, interface - ): + def test_changing_trainability(self, dev_name, diff_method, 
grad_on_execution, tol, interface): """Test changing the trainability of parameters changes the number of differentiation requests made""" if diff_method in ["backprop", "adjoint", "spsa"]: @@ -299,12 +283,10 @@ def test_changing_trainability( b = tf.Variable(0.2, dtype=tf.float64) num_wires = 2 - exp_num_calls = 4 # typically two shifted circuits per parameter diff_kwargs = {} if diff_method == "hadamard": num_wires = 3 - exp_num_calls = 2 # only one circuit per parameter elif diff_method == "finite-diff": diff_kwargs = {"approx_order": 2, "strategy": "center"} @@ -333,8 +315,6 @@ def circuit(a, b): expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") - jac = tape.jacobian(res, [a, b]) expected = [ [-tf.sin(a), tf.sin(a) * tf.sin(b)], @@ -342,9 +322,6 @@ def circuit(a, b): ] assert np.allclose(jac, expected, atol=tol, rtol=0) - # The parameter-shift rule has been called for each argument - assert len(spy.spy_return[0]) == exp_num_calls - # make the second QNode argument a constant a = tf.Variable(0.54, dtype=tf.float64) b = tf.constant(0.8, dtype=tf.float64) @@ -359,14 +336,10 @@ def circuit(a, b): expected = [tf.cos(a), -tf.cos(a) * tf.sin(b)] assert np.allclose(res, expected, atol=tol, rtol=0) - spy.call_args_list = [] jac = tape.jacobian(res, a) expected = [-tf.sin(a), tf.sin(a) * tf.sin(b)] assert np.allclose(jac, expected, atol=tol, rtol=0) - # the gradient transform has only been called once - assert len(spy.call_args_list) == 1 - def test_classical_processing(self, dev_name, diff_method, grad_on_execution, interface): """Test classical processing within the quantum tape""" a = tf.Variable(0.1, dtype=tf.float64) @@ -553,7 +526,7 @@ def circuit(weights): spy.assert_not_called() # execute with shots=100 - res = circuit(weights, shots=100) # pylint: disable=unexpected-keyword-arg + circuit(weights, shots=100) # pylint: disable=unexpected-keyword-arg spy.assert_called() assert spy.spy_return.shape == (100,) @@ -1265,7 +1238,7 @@ class TestTapeExpansion: """Test that tape expansion within the QNode integrates correctly with the TF interface""" - def test_gradient_expansion(self, dev_name, diff_method, grad_on_execution, mocker, interface): + def test_gradient_expansion(self, dev_name, diff_method, grad_on_execution, interface): """Test that a *supported* operation with no gradient recipe is expanded for both parameter-shift and finite-differences, but not for execution.""" if diff_method not in ("parameter-shift", "finite-diff", "spsa", "hadamard"): @@ -1295,30 +1268,14 @@ def circuit(x): PhaseShift(x, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = tf.Variable(0.5, dtype=tf.float64) with tf.GradientTape() as t2: with tf.GradientTape() as t1: loss = circuit(x) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = t1.gradient(loss, x) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 2 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - - if diff_method != "hadamard": - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 2 - assert shifted_tape1.operations[1].name == "RY" - - assert len(shifted_tape2.operations) == 2 - assert shifted_tape2.operations[1].name == "RY" - assert np.allclose(res, -3 * np.sin(3 * x)) if diff_method == "parameter-shift": @@ -1328,7 +1285,7 @@ def circuit(x): @pytest.mark.parametrize("max_diff", 
[1, 2]) def test_gradient_expansion_trainable_only( - self, dev_name, diff_method, grad_on_execution, max_diff, mocker, interface + self, dev_name, diff_method, grad_on_execution, max_diff, interface ): """Test that a *supported* operation with no gradient recipe is only expanded for parameter-shift and finite-differences when it is trainable.""" @@ -1361,22 +1318,13 @@ def circuit(x, y): PhaseShift(2 * y, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = tf.Variable(0.5, dtype=tf.float64) y = tf.constant(0.7, dtype=tf.float64) with tf.GradientTape() as t: res = circuit(x, y) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") - res = t.gradient(res, [x, y]) - - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 3 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - assert input_tape.operations[2].name == "PhaseShift" - assert input_tape.operations[2].grad_method is None + t.gradient(res, [x, y]) @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic( diff --git a/tests/interfaces/test_torch_qnode.py b/tests/interfaces/test_torch_qnode.py index 77382a6597c..4218a6fc26b 100644 --- a/tests/interfaces/test_torch_qnode.py +++ b/tests/interfaces/test_torch_qnode.py @@ -158,21 +158,15 @@ def circuit(p1, p2=y, **kwargs): assert result == expected - def test_jacobian(self, interface, dev_name, diff_method, grad_on_execution, mocker, tol): + def test_jacobian(self, interface, dev_name, diff_method, grad_on_execution, tol): """Test jacobian calculation""" kwargs = dict( diff_method=diff_method, grad_on_execution=grad_on_execution, interface=interface ) - if diff_method == "parameter-shift": - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - elif diff_method == "finite-diff": - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - elif diff_method == "spsa": - spy = mocker.spy(qml.gradients.spsa_grad, "transform_fn") + + if diff_method == "spsa": kwargs["sampler_rng"] = np.random.default_rng(SEED_FOR_SPSA) tol = TOL_FOR_SPSA - elif diff_method == "hadamard": - spy = mocker.spy(qml.gradients.hadamard_grad, "transform_fn") a_val = 0.1 b_val = 0.2 @@ -221,9 +215,6 @@ def circuit(a, b): assert np.allclose(a.grad, expected[0], atol=tol, rtol=0) assert np.allclose(b.grad, expected[1], atol=tol, rtol=0) - if diff_method in ("parameter-shift", "finite-diff", "spsa"): - spy.assert_called() - # TODO: fix this behavior with float: already present before return type. 
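# NOTE: the ``mocker.spy(..., "transform_fn")`` checks removed above relied on
# gradient transforms exposing a ``transform_fn`` attribute; now that they are
# created through ``qml.transforms.core.transform``, that attribute no longer
# exists. A minimal sketch (illustrative only, not part of this diff) of how the
# shifted tapes can still be inspected, by applying the dispatched transform to
# a tape directly:
#
#     tape = qml.tape.QuantumScript([qml.RX(0.5, wires=0)], [qml.expval(qml.PauliZ(0))])
#     grad_tapes, post_fn = qml.gradients.param_shift(tape)
#     # the two-term shift rule yields two tapes for the single trainable parameter
#     assert len(grad_tapes) == 2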
@pytest.mark.xfail def test_jacobian_dtype(self, interface, dev_name, diff_method, grad_on_execution): @@ -261,13 +252,11 @@ def circuit(a, b): assert a.grad.dtype is torch.float32 assert b.grad.dtype is torch.float32 - def test_jacobian_options(self, interface, dev_name, diff_method, grad_on_execution, mocker): + def test_jacobian_options(self, interface, dev_name, diff_method, grad_on_execution): """Test setting jacobian options""" if diff_method not in {"finite-diff", "spsa"}: pytest.skip("Test only works with finite-diff and spsa") - spy = mocker.spy(qml.gradients.finite_diff, "transform_fn") - a = torch.tensor([0.1, 0.2], requires_grad=True) dev = qml.device(dev_name, wires=1) @@ -288,13 +277,7 @@ def circuit(a): res = circuit(a) res.backward() - for args in spy.call_args_list: - assert args[1]["approx_order"] == 2 - assert args[1]["h"] == 1e-8 - - def test_changing_trainability( - self, interface, dev_name, diff_method, grad_on_execution, mocker, tol - ): + def test_changing_trainability(self, interface, dev_name, diff_method, grad_on_execution, tol): """Test that changing the trainability of parameters changes the number of differentiation requests made""" if diff_method != "parameter-shift": @@ -327,8 +310,6 @@ def circuit(a, b): assert np.allclose(res[0].detach().numpy(), expected[0], atol=tol, rtol=0) assert np.allclose(res[1].detach().numpy(), expected[1], atol=tol, rtol=0) - spy = mocker.spy(qml.gradients.param_shift, "transform_fn") - loss = res[0] + res[1] loss.backward() @@ -338,9 +319,6 @@ def circuit(a, b): ] assert np.allclose([a.grad, b.grad], expected, atol=tol, rtol=0) - # The parameter-shift rule has been called for each argument - assert len(spy.spy_return[0]) == 4 - # make the second QNode argument a constant a_val = 0.54 b_val = 0.8 @@ -358,15 +336,11 @@ def circuit(a, b): assert np.allclose(res[0].detach().numpy(), expected[0], atol=tol, rtol=0) assert np.allclose(res[1].detach().numpy(), expected[1], atol=tol, rtol=0) - spy.call_args_list = [] loss = res[0] + res[1] loss.backward() expected = -np.sin(a_val) + np.sin(a_val) * np.sin(b_val) assert np.allclose(a.grad, expected, atol=tol, rtol=0) - # the gradient transform has only been called once - assert len(spy.call_args_list) == 1 - def test_classical_processing(self, interface, dev_name, diff_method, grad_on_execution): """Test classical processing within the quantum tape""" a = torch.tensor(0.1, dtype=torch.float64, requires_grad=True) @@ -1324,7 +1298,7 @@ class TestTapeExpansion: """Test that tape expansion within the QNode integrates correctly with the Torch interface""" - def test_gradient_expansion(self, dev_name, diff_method, grad_on_execution, mocker): + def test_gradient_expansion(self, dev_name, diff_method, grad_on_execution): """Test that a *supported* operation with no gradient recipe is expanded for both parameter-shift and finite-differences, but not for execution.""" if diff_method not in ("parameter-shift", "finite-diff", "spsa", "hadamard"): @@ -1355,29 +1329,12 @@ def circuit(x): PhaseShift(x, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = torch.tensor(0.5, requires_grad=True, dtype=torch.float64) loss = circuit(x) - - spy = mocker.spy(circuit.gradient_fn, "transform_fn") loss.backward() res = x.grad - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 2 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - - if diff_method != "hadamard": - shifted_tape1, shifted_tape2 = 
spy.spy_return[0] - - assert len(shifted_tape1.operations) == 2 - assert shifted_tape1.operations[1].name == "RY" - - assert len(shifted_tape2.operations) == 2 - assert shifted_tape2.operations[1].name == "RY" - assert torch.allclose(res, -3 * torch.sin(3 * x)) if diff_method == "parameter-shift": @@ -1387,7 +1344,7 @@ def circuit(x): @pytest.mark.parametrize("max_diff", [1, 2]) def test_gradient_expansion_trainable_only( - self, dev_name, diff_method, grad_on_execution, max_diff, mocker + self, dev_name, diff_method, grad_on_execution, max_diff ): """Test that a *supported* operation with no gradient recipe is only expanded for parameter-shift and finite-differences when it is trainable.""" @@ -1420,22 +1377,12 @@ def circuit(x, y): PhaseShift(2 * y, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = torch.tensor(0.5, requires_grad=True) y = torch.tensor(0.7, requires_grad=False) loss = circuit(x, y) - - spy = mocker.spy(circuit.gradient_fn, "transform_fn") loss.backward() - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 3 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - assert input_tape.operations[2].name == "PhaseShift" - assert input_tape.operations[2].grad_method is None - @pytest.mark.parametrize("max_diff", [1, 2]) def test_hamiltonian_expansion_analytic( self, dev_name, diff_method, grad_on_execution, max_diff, tol diff --git a/tests/interfaces/test_transform_program_integration.py b/tests/interfaces/test_transform_program_integration.py index 2ac697bcafe..5e7d4b328c1 100644 --- a/tests/interfaces/test_transform_program_integration.py +++ b/tests/interfaces/test_transform_program_integration.py @@ -17,6 +17,7 @@ """ import copy from typing import Tuple, Callable +from functools import partial import pytest import numpy as np @@ -250,3 +251,32 @@ def transform_mul(tape: qml.tape.QuantumTape): assert qml.math.allclose(results_reverse[0], 4.0) # (-1.0 + 1.0) * 2.0 = 0.0 assert qml.math.allclose(results_reverse[1], 0.0) + + def test_composable_transform(self): + """Test the composition of a gradient transform with another transform.""" + import jax + + dev = qml.device("default.qubit", wires=2) + + @partial(qml.gradients.param_shift, argnums=[0, 1]) + @qml.transforms.split_non_commuting + @qml.qnode(device=dev, interface="jax") + def circuit(x, y): + qml.RX(x, wires=0) + qml.RZ(y, wires=1) + qml.CNOT(wires=[0, 1]) + return qml.expval(qml.PauliZ(wires=0)), qml.expval(qml.PauliY(wires=0)) + + x = jax.numpy.array(0.1) + y = jax.numpy.array(0.2) + + res = circuit(x, y) + + assert isinstance(res, tuple) + assert len(res) == 2 + + assert isinstance(res[0], tuple) + assert len(res[0]) == 2 + + assert isinstance(res[1], tuple) + assert len(res[1]) == 2 diff --git a/tests/logging/test_logging_autograd.py b/tests/logging/test_logging_autograd.py index c9839f308b8..aca061083b1 100644 --- a/tests/logging/test_logging_autograd.py +++ b/tests/logging/test_logging_autograd.py @@ -23,7 +23,7 @@ _grad_log_map = { "adjoint": "gradient_fn=adjoint, interface=autograd, grad_on_execution=best, gradient_kwargs={}", "backprop": "gradient_fn=backprop, interface=autograd, grad_on_execution=best, gradient_kwargs={}", - "parameter-shift": "gradient_fn=", + "parameter-shift": "gradient_fn=", } diff --git a/tests/qinfo/test_fisher.py b/tests/qinfo/test_fisher.py index 7ccae1e9e29..fb6afb31239 100644 --- a/tests/qinfo/test_fisher.py +++ b/tests/qinfo/test_fisher.py @@ -157,7 +157,7 @@ def 
qfunc(params): qml.RX(params[0], wires=0) qml.RX(params[1], wires=0) qml.CNOT(wires=(0, 1)) - return qml.probs() + return qml.probs(wires=[0, 1]) params = pnp.random.random(2) diff --git a/tests/qnn/test_keras.py b/tests/qnn/test_keras.py index 34566606cf9..196c5329300 100644 --- a/tests/qnn/test_keras.py +++ b/tests/qnn/test_keras.py @@ -843,6 +843,7 @@ def circuit(inputs, w1, w2): expected = ( f"0: ─╭AngleEmbedding(M0)──RX({w1})─╭StronglyEntanglingLayers(M1)─┤ \n" f"1: ─╰AngleEmbedding(M0)───────────╰StronglyEntanglingLayers(M1)─┤ \n" + f"\n" f"M0 = \n{x}\n" f"M1 = \n{m1}" ) diff --git a/tests/qnn/test_qnn_torch.py b/tests/qnn/test_qnn_torch.py index ba2701063a9..16adcdf856a 100644 --- a/tests/qnn/test_qnn_torch.py +++ b/tests/qnn/test_qnn_torch.py @@ -775,6 +775,7 @@ def circuit(inputs, w1, w2): expected = ( f"0: ─╭AngleEmbedding(M0)──RX({w1})─╭StronglyEntanglingLayers(M1)─┤ \n" f"1: ─╰AngleEmbedding(M0)───────────╰StronglyEntanglingLayers(M1)─┤ \n" + f"\n" f"M0 = \n{x}\n" f"M1 = \n{m1}" ) diff --git a/tests/tape/test_qscript.py b/tests/tape/test_qscript.py index a115f99f4fd..66535b3c451 100644 --- a/tests/tape/test_qscript.py +++ b/tests/tape/test_qscript.py @@ -1048,9 +1048,9 @@ def test_output_shapes_single_qnode_check(self, measurement, expected_shape, sho ops = [qml.RY(a, 0), qml.RX(b, 0)] qs = QuantumScript(ops, [measurement], shots=shots) - + program, _ = dev.preprocess() # TODO: test gradient_fn is not None when the interface `execute` functions are implemented - res = qml.execute([qs], dev, gradient_fn=None)[0] + res = qml.execute([qs], dev, gradient_fn=None, transform_program=program)[0] if isinstance(shots, tuple): res_shape = tuple(r.shape for r in res) @@ -1212,7 +1212,10 @@ def test_broadcasting_single(self, measurement, expected_shape, shots): qml.apply(measurement) tape = qml.tape.QuantumScript.from_queue(q, shots=shots) - expected_shape = qml.execute([tape], dev, gradient_fn=None)[0].shape + program, _ = dev.preprocess() + expected_shape = qml.execute([tape], dev, gradient_fn=None, transform_program=program)[ + 0 + ].shape assert tape.shape(dev) == expected_shape @@ -1240,7 +1243,8 @@ def test_broadcasting_multi(self, measurement, expected, shots): qml.apply(measurement) tape = qml.tape.QuantumScript.from_queue(q, shots=shots) - expected = qml.execute([tape], dev, gradient_fn=None)[0] + program, _ = dev.preprocess() + expected = qml.execute([tape], dev, gradient_fn=None, transform_program=program)[0] actual = tape.shape(dev) for exp, act in zip(expected, actual): @@ -1290,7 +1294,8 @@ def test_multi_measure_sample_wires_shot_vector(self): res = qs.shape(dev) assert res == expected - expected = qml.execute([qs], dev, gradient_fn=None)[0] + program, _ = dev.preprocess() + expected = qml.execute([qs], dev, gradient_fn=None, transform_program=program)[0] expected_shape = tuple(tuple(e_.shape for e_ in e) for e in expected) assert res == expected_shape diff --git a/tests/tape/test_tape.py b/tests/tape/test_tape.py index e509694f117..5f6e07d75ae 100644 --- a/tests/tape/test_tape.py +++ b/tests/tape/test_tape.py @@ -1904,7 +1904,10 @@ def test_output_shapes_single_qnode_check(self, measurement, _, shots): qml.apply(measurement) tape = qml.tape.QuantumScript.from_queue(q, shots=shots) - res = qml.execute([tape], dev, gradient_fn=qml.gradients.param_shift)[0] + program, _ = dev.preprocess() + res = qml.execute( + [tape], dev, gradient_fn=qml.gradients.param_shift, transform_program=program + )[0] if isinstance(res, tuple): res_shape = tuple(r.shape for r in res) @@ -2105,7 
+2108,8 @@ def test_broadcasting_single(self, measurement, _, shots): qml.apply(measurement) tape = qml.tape.QuantumScript.from_queue(q, shots=shots) - expected = qml.execute([tape], dev, gradient_fn=None)[0] + program, _ = dev.preprocess() + expected = qml.execute([tape], dev, gradient_fn=None, transform_program=program)[0] assert tape.shape(dev) == expected.shape @pytest.mark.autograd @@ -2132,7 +2136,8 @@ def test_broadcasting_multi(self, measurement, expected, shots): qml.apply(measurement) tape = qml.tape.QuantumScript.from_queue(q, shots=shots) - expected = qml.execute([tape], dev, gradient_fn=None)[0] + program, _ = dev.preprocess() + expected = qml.execute([tape], dev, gradient_fn=None, transform_program=program)[0] expected = tuple(i.shape for i in expected) assert tape.shape(dev) == expected diff --git a/tests/templates/test_subroutines/test_approx_time_evolution.py b/tests/templates/test_subroutines/test_approx_time_evolution.py index 82203dbd926..93337c557a0 100644 --- a/tests/templates/test_subroutines/test_approx_time_evolution.py +++ b/tests/templates/test_subroutines/test_approx_time_evolution.py @@ -18,7 +18,6 @@ import numpy as np from pennylane import numpy as pnp import pennylane as qml -from pennylane.gradients.finite_difference import finite_diff # pylint: disable=protected-access @@ -389,7 +388,7 @@ def test_torch(self, tol): @pytest.mark.autograd @pytest.mark.parametrize( "dev_name,diff_method", - [["default.qubit.autograd", "backprop"], ["default.qubit", qml.gradients.param_shift]], + [["default.qubit", "backprop"], ["default.qubit", qml.gradients.param_shift]], ) def test_trainable_hamiltonian(dev_name, diff_method): """Test that the ApproxTimeEvolution template @@ -413,8 +412,9 @@ def cost(coeffs, t): if diff_method is qml.gradients.param_shift and dev_name != "default.qubit": tape = dev.expand_fn(tape) - - return qml.execute([tape], dev, diff_method)[0] + return qml.execute([tape], dev, diff_method)[0] + program, _ = dev.preprocess() + return qml.execute([tape], dev, gradient_fn=diff_method, transform_program=program)[0] t = pnp.array(0.54, requires_grad=True) coeffs = pnp.array([-0.6, 2.0], requires_grad=True) @@ -431,7 +431,12 @@ def cost(coeffs, t): assert grad[1].shape == tuple() # compare to finite-differences - tape = create_tape(coeffs, t) - g_tapes, fn = finite_diff(tape, _expand=False, validate_params=False) - expected = fn(qml.execute(g_tapes, dev, None)) - assert np.allclose(qml.math.hstack(grad), qml.math.stack(expected)) + + @qml.qnode(dev, diff_method="finite-diff") + def circuit(coeffs, t): + H = qml.Hamiltonian(coeffs, obs) + qml.ApproxTimeEvolution(H, t, 2) + return qml.expval(qml.PauliZ(0)) + + expected = qml.grad(circuit)(coeffs, t) + assert np.allclose(qml.math.hstack(grad), qml.math.hstack(expected)) diff --git a/tests/test_qnode.py b/tests/test_qnode.py index 0ec147e90b0..724ca8e1b7a 100644 --- a/tests/test_qnode.py +++ b/tests/test_qnode.py @@ -1679,24 +1679,8 @@ def circuit(x): return qml.expval(qml.PauliZ(0)) x = pnp.array(0.5, requires_grad=True) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") qml.grad(circuit)(x) - # check that the gradient recipe was applied *prior* to - # device expansion - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 1 - assert input_tape.operations[0].name == "UnsupportedOp" - assert input_tape.operations[0].data[0] == x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 1 - assert shifted_tape1.operations[0].name == "UnsupportedOp" - 
- assert len(shifted_tape2.operations) == 1 - assert shifted_tape2.operations[0].name == "UnsupportedOp" - # check second derivative assert np.allclose(qml.grad(qml.grad(circuit))(x), -9 * np.cos(3 * x)) @@ -1721,26 +1705,11 @@ def circuit(x): PhaseShift(x, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "execute") x = pnp.array(0.5, requires_grad=True) circuit(x) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = qml.grad(circuit)(x) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 2 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 2 - assert shifted_tape1.operations[1].name == "RY" - - assert len(shifted_tape2.operations) == 2 - assert shifted_tape2.operations[1].name == "RY" - assert np.allclose(res, -3 * np.sin(3 * x)) # test second order derivatives diff --git a/tests/test_qnode_legacy.py b/tests/test_qnode_legacy.py index 4c728f78709..ad9ce49605b 100644 --- a/tests/test_qnode_legacy.py +++ b/tests/test_qnode_legacy.py @@ -1801,24 +1801,8 @@ def circuit(x): return qml.expval(qml.PauliZ(0)) x = pnp.array(0.5, requires_grad=True) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") qml.grad(circuit)(x) - # check that the gradient recipe was applied *prior* to - # device expansion - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 1 - assert input_tape.operations[0].name == "UnsupportedOp" - assert input_tape.operations[0].data[0] == x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 1 - assert shifted_tape1.operations[0].name == "UnsupportedOp" - - assert len(shifted_tape2.operations) == 1 - assert shifted_tape2.operations[0].name == "UnsupportedOp" - # check second derivative assert np.allclose(qml.grad(qml.grad(circuit))(x), -9 * np.cos(3 * x)) @@ -1843,26 +1827,11 @@ def circuit(x): PhaseShift(x, wires=0) return qml.expval(qml.PauliX(0)) - spy = mocker.spy(circuit.device, "batch_execute") x = pnp.array(0.5, requires_grad=True) circuit(x) - spy = mocker.spy(circuit.gradient_fn, "transform_fn") res = qml.grad(circuit)(x) - input_tape = spy.call_args[0][0] - assert len(input_tape.operations) == 2 - assert input_tape.operations[1].name == "RY" - assert input_tape.operations[1].data[0] == 3 * x - - shifted_tape1, shifted_tape2 = spy.spy_return[0] - - assert len(shifted_tape1.operations) == 2 - assert shifted_tape1.operations[1].name == "RY" - - assert len(shifted_tape2.operations) == 2 - assert shifted_tape2.operations[1].name == "RY" - assert np.allclose(res, -3 * np.sin(3 * x)) # test second order derivatives diff --git a/tests/test_return_types_dq2.py b/tests/test_return_types_dq2.py index 3acae93dc59..58494b14073 100644 --- a/tests/test_return_types_dq2.py +++ b/tests/test_return_types_dq2.py @@ -44,7 +44,14 @@ def circuit(x): if dev.shots: pytest.skip("cannot return analytic measurements with finite shots.") - res = qml.execute(tapes=[qnode.tape], device=dev, gradient_fn=None, interface=interface) + program, _ = dev.preprocess() + res = qml.execute( + tapes=[qnode.tape], + device=dev, + gradient_fn=None, + interface=interface, + transform_program=program, + ) assert res[0].shape == (2**wires,) assert isinstance(res[0], (np.ndarray, np.float64)) @@ -1219,7 +1226,8 @@ def return_type(self): tape = qml.tape.QuantumScript.from_queue(q) dev = qml.device("default.qubit", wires=3) with 
pytest.raises(qml.DeviceError, match="Analytic circuits must only contain"): - qml.execute(tapes=[tape], device=dev, gradient_fn=None) + program, _ = dev.preprocess() + qml.execute(tapes=[tape], device=dev, gradient_fn=None, transform_program=program) def test_state_return_with_other_types(self): """Test that an exception is raised when a state is returned along with another return @@ -1248,7 +1256,8 @@ def test_entropy_no_custom_wires(self): qml.vn_entropy(wires=["a"]) tape = qml.tape.QuantumScript.from_queue(q) - res = qml.execute(tapes=[tape], device=dev, gradient_fn=None) + program, _ = dev.preprocess() + res = qml.execute(tapes=[tape], device=dev, gradient_fn=None, transform_program=program) assert res == (0,) def test_custom_wire_labels_error(self): @@ -1261,5 +1270,6 @@ def test_custom_wire_labels_error(self): qml.mutual_info(wires0=["a"], wires1=["b"]) tape = qml.tape.QuantumScript.from_queue(q) - res = qml.execute(tapes=[tape], device=dev, gradient_fn=None) + program, _ = dev.preprocess() + res = qml.execute(tapes=[tape], device=dev, gradient_fn=None, transform_program=program) assert res == (0,) diff --git a/tests/test_vqe.py b/tests/test_vqe.py index a3472d59956..0b1834fb151 100644 --- a/tests/test_vqe.py +++ b/tests/test_vqe.py @@ -747,25 +747,6 @@ def test_optimize_grad_tf(self): assert np.allclose(dc, big_hamiltonian_grad) - @pytest.mark.parametrize("approx", [None, "block-diag", "diag"]) - def test_metric_tensor(self, approx): - """Test that the metric tensor can be calculated.""" - - dev = qml.device("default.qubit", wires=3) - p = pnp.array([1.0, 1.0, 1.0], requires_grad=True) - - def ansatz(params, **kwargs): - qml.RX(params[0], wires=0) - qml.RY(params[1], wires=0) - qml.CNOT(wires=[0, 1]) - qml.PhaseShift(params[2], wires=1) - - h = qml.Hamiltonian([1, 1], [qml.PauliZ(0), qml.PauliZ(1)]) - qnodes = catch_warn_ExpvalCost(ansatz, h, dev) - mt = qml.metric_tensor(qnodes, approx=approx)(p) # pylint:disable=not-callable - assert mt.shape == (3, 3) - assert isinstance(mt, pnp.ndarray) - def test_multiple_devices_opt_true(self): """Test if a ValueError is raised when multiple devices are passed when optimize=True.""" dev = [qml.device("default.qubit", wires=2), qml.device("default.qubit", wires=2)] diff --git a/tests/transforms/test_adjoint_metric_tensor.py b/tests/transforms/test_adjoint_metric_tensor.py index 04aa867ddec..f4987d077a4 100644 --- a/tests/transforms/test_adjoint_metric_tensor.py +++ b/tests/transforms/test_adjoint_metric_tensor.py @@ -272,16 +272,17 @@ def circuit(*params): return qml.expval(qml.PauliZ(wires[0])) circuit(*params) - mt = qml.adjoint_metric_tensor(circuit.qtape, dev) - expected = qml.math.reshape(expected, qml.math.shape(mt)) + + mt = qml.adjoint_metric_tensor(circuit)(*params) assert qml.math.allclose(mt, expected) - mt = qml.adjoint_metric_tensor(circuit, hybrid=False)(*params) + mt = qml.adjoint_metric_tensor(circuit.qtape) + expected = qml.math.reshape(expected, qml.math.shape(mt)) assert qml.math.allclose(mt, expected) @pytest.mark.jax - @pytest.mark.skip("JAX does not support forward pass executiong of the metric tensor.") - @pytest.mark.parametrize("dev_name", ["default.qubit", "default.qubit.jax"]) + @pytest.mark.skip("JAX does not support forward pass execution of the metric tensor.") + @pytest.mark.parametrize("dev_name", ["default.qubit"]) def test_correct_output_tape_jax(self, dev_name, ansatz, params): """Test that the output is correct when using JAX and calling the adjoint metric tensor directly on a tape.""" @@ -302,18 +303,18 @@ 
def circuit(*params): return qml.expval(qml.PauliZ(0)) circuit(*j_params) - mt = qml.adjoint_metric_tensor(circuit.qtape, dev) + mt = qml.adjoint_metric_tensor(circuit.qtape) expected = qml.math.reshape(expected, qml.math.shape(mt)) assert qml.math.allclose(mt, expected) - mt = qml.adjoint_metric_tensor(circuit, hybrid=False)(*j_params) + mt = qml.adjoint_metric_tensor(circuit)(*j_params) assert qml.math.allclose(mt, expected) interfaces = ["auto", "torch"] @pytest.mark.torch @pytest.mark.parametrize("interface", interfaces) - @pytest.mark.parametrize("dev_name", ["default.qubit", "default.qubit.torch"]) + @pytest.mark.parametrize("dev_name", ["default.qubit"]) def test_correct_output_tape_torch(self, ansatz, params, interface, dev_name): """Test that the output is correct when using Torch and calling the adjoint metric tensor directly on a tape.""" @@ -331,18 +332,18 @@ def circuit(*params): return qml.expval(qml.PauliZ(0)) circuit(*t_params) - mt = qml.adjoint_metric_tensor(circuit.qtape, dev) + mt = qml.adjoint_metric_tensor(circuit)(*t_params) + assert qml.math.allclose(mt, expected) + + mt = qml.adjoint_metric_tensor(circuit.qtape) expected = qml.math.reshape(expected, qml.math.shape(mt)) assert qml.math.allclose(mt.detach().numpy(), expected) - mt = qml.adjoint_metric_tensor(circuit, hybrid=False)(*t_params) - assert qml.math.allclose(mt, expected) - interfaces = ["auto", "tf"] @pytest.mark.tf @pytest.mark.parametrize("interface", interfaces) - @pytest.mark.parametrize("dev_name", ["default.qubit", "default.qubit.tf"]) + @pytest.mark.parametrize("dev_name", ["default.qubit"]) def test_correct_output_tape_tf(self, ansatz, params, interface, dev_name): """Test that the output is correct when using TensorFlow and calling the adjoint metric tensor directly on a tape.""" @@ -361,13 +362,13 @@ def circuit(*params): with tf.GradientTape(): circuit(*t_params) - mt = qml.adjoint_metric_tensor(circuit.qtape, dev) + mt = qml.adjoint_metric_tensor(circuit.qtape) - expected = qml.math.reshape(expected, qml.math.shape(mt)) + with tf.GradientTape(): + mt = qml.adjoint_metric_tensor(circuit)(*t_params) assert qml.math.allclose(mt, expected) - with tf.GradientTape(): - mt = qml.adjoint_metric_tensor(circuit, hybrid=False)(*t_params) + expected = qml.math.reshape(expected, qml.math.shape(mt)) assert qml.math.allclose(mt, expected) @@ -402,7 +403,7 @@ def circuit(*params): assert qml.math.allclose(mt, expected) @pytest.mark.jax - @pytest.mark.skip("JAX does not support forward pass executiong of the metric tensor.") + @pytest.mark.skip("JAX does not support forward pass execution of the metric tensor.") @pytest.mark.parametrize("ansatz, params", list(zip(fubini_ansatze, fubini_params))) def test_correct_output_qnode_jax(self, ansatz, params): """Test that the output is correct when using JAX and @@ -435,7 +436,7 @@ def circuit(*params): @pytest.mark.torch @pytest.mark.parametrize("ansatz, params", list(zip(fubini_ansatze, fubini_params))) @pytest.mark.parametrize("interface", interfaces) - @pytest.mark.parametrize("dev_name", ["default.qubit", "default.qubit.torch"]) + @pytest.mark.parametrize("dev_name", ["default.qubit"]) def test_correct_output_qnode_torch(self, ansatz, params, interface, dev_name): """Test that the output is correct when using Torch and calling the adjoint metric tensor on a QNode.""" @@ -488,31 +489,6 @@ def circuit(*params): else: assert qml.math.allclose(mt, expected) - @pytest.mark.autograd - @pytest.mark.parametrize("dev_name", ["default.qubit", "default.qubit.autograd"]) - 
def test_autograd_with_other_device(self, dev_name): - """Test passing an extra device to the QNode wrapper.""" - ansatz = fubini_ansatz2 - params = fubini_params[2] - - exp_fn = autodiff_metric_tensor(ansatz, self.num_wires) - expected = qml.jacobian(exp_fn)(*params) - dev = qml.device("default.qubit", wires=self.num_wires) - dev2 = qml.device(dev_name, wires=self.num_wires) - - @qml.qnode(dev) - def circuit(*params): - """Circuit with dummy output to create a QNode.""" - ansatz(*params, dev.wires) - return qml.expval(qml.PauliZ(0)) - - mt = qml.jacobian(qml.adjoint_metric_tensor(circuit, device=dev2))(*params) - - if isinstance(mt, tuple): - assert all(qml.math.allclose(_mt, _exp) for _mt, _exp in zip(mt, expected)) - else: - assert qml.math.allclose(mt, expected) - diff_fubini_ansatze = [ fubini_ansatz0, @@ -576,7 +552,7 @@ def circuit(*params): ansatz(*params, dev.wires) return qml.expval(qml.PauliZ(0)) - mt_fn = qml.adjoint_metric_tensor(circuit, hybrid=True) + mt_fn = qml.adjoint_metric_tensor(circuit) argnums = list(range(len(params))) mt_jac = jax.jacobian(mt_fn, argnums=argnums)(*j_params) @@ -641,43 +617,12 @@ def circuit(*params): assert qml.math.allclose(mt_jac, expected) -class TestErrors: - """Test that errors are raised correctly.""" - - def test_error_wrong_object_passed(self): - """Test that an error is raised if neither a tape nor a QNode is passed.""" - - def ansatz(x, y): - qml.RX(x, wires=0) - qml.RY(y, wires=1) - - dev = qml.device("default.qubit", wires=2) - - with pytest.raises(qml.QuantumFunctionError, match="The passed object is not a "): - qml.adjoint_metric_tensor(ansatz, device=dev) - - def test_error_finite_shots(self): - """Test that an error is raised if the device has a finite number of shots set.""" - with qml.queuing.AnnotatedQueue() as q: - qml.RX(0.2, wires=0) - qml.RY(1.9, wires=1) - tape = qml.tape.QuantumScript.from_queue(q, shots=1) - dev = qml.device("default.qubit", wires=2, shots=1) - - with pytest.raises(ValueError, match="The adjoint method for the metric tensor"): - qml.adjoint_metric_tensor(tape, device=dev) - - def test_warning_multiple_devices(self): - """Test that a warning is issued if an ExpvalCost with multiple - devices is passed.""" - dev1 = qml.device("default.qubit", wires=2) - dev2 = qml.device("default.qubit", wires=1) - H = qml.Hamiltonian([0.2, 0.9], [qml.PauliZ(0), qml.PauliY(0)]) - - def ansatz(x, wires): - qml.RX(x, wires=wires[0]) +def test_error_finite_shots(): + """Test that an error is raised if the device has a finite number of shots set.""" + with qml.queuing.AnnotatedQueue() as q: + qml.RX(0.2, wires=0) + qml.RY(1.9, wires=1) + tape = qml.tape.QuantumScript.from_queue(q, shots=1) - with pytest.warns(UserWarning, match="is deprecated,"): - cost = qml.ExpvalCost(ansatz, H, [dev1, dev2]) - with pytest.warns(UserWarning, match="ExpvalCost was instantiated"): - qml.adjoint_metric_tensor(cost) + with pytest.raises(ValueError, match="The adjoint method for the metric tensor"): + qml.adjoint_metric_tensor(tape) diff --git a/tests/transforms/test_batch_transform.py b/tests/transforms/test_batch_transform.py index eb2d671da38..146964ec6ac 100644 --- a/tests/transforms/test_batch_transform.py +++ b/tests/transforms/test_batch_transform.py @@ -648,7 +648,7 @@ def cost(x, weights): for g, e in zip(grad, expected): assert qml.math.allclose(g, e) - def test_batch_transforms_qnode(self, diff_method, mocker): + def test_batch_transforms_qnode(self, diff_method): """Test that batch transforms can be applied to a QNode without affecting 
device batch transforms""" if diff_method == "backprop": @@ -667,10 +667,8 @@ def circuit(weights): qml.CNOT(wires=[0, 1]) return qml.expval(H) - spy = mocker.spy(dev, "preprocess") - res = circuit(weights) - spy.assert_called() + assert np.allclose(res, [0, -np.sin(weights[1])], atol=0.1) diff --git a/tests/transforms/test_experimental/test_transform_dispatcher.py b/tests/transforms/test_experimental/test_transform_dispatcher.py index ec3a5afb3fd..9149a8f8a84 100644 --- a/tests/transforms/test_experimental/test_transform_dispatcher.py +++ b/tests/transforms/test_experimental/test_transform_dispatcher.py @@ -19,7 +19,6 @@ import pennylane as qml from pennylane.transforms.core import transform, TransformError -# TODO: Replace with default qubit 2 dev = qml.device("default.qubit", wires=2) with qml.tape.QuantumTape() as tape_circuit: @@ -352,6 +351,15 @@ def test_dispatcher_signature_non_valid_transform(self, non_valid_transform): with pytest.raises(TransformError): transform(non_valid_transform) + @pytest.mark.parametrize("valid_transform", valid_transforms) + def test_dispatcher_signature_classical_cotransform(self, valid_transform): + """Test that a valid transform with a non-valid cotransform raises a TransformError.""" + + with pytest.raises( + TransformError, match="The classical co-transform must be a valid Python function." + ): + transform(valid_transform, classical_cotransform=3) + def test_error_not_callable_transform(self): """Test that a non-callable is not a valid transform.""" @@ -411,14 +419,6 @@ def test_multiple_args_expand_transform(self): ): transform(first_valid_transform, expand_transform=non_valid_expand_transform) - def test_cotransform_not_implemented(self): - """Test that a co-transform must be a callable.""" - - with pytest.raises( - NotImplementedError, match="Classical cotransforms are not yet integrated." 
- ): - transform(first_valid_transform, classical_cotransform=non_callable) - def test_qfunc_transform_multiple_tapes(self): """Test that quantum function is not compatible with multiple tapes.""" dispatched_transform = transform(second_valid_transform) diff --git a/tests/transforms/test_experimental/test_transform_program.py b/tests/transforms/test_experimental/test_transform_program.py index 4926411d0f7..bd0922a6aee 100644 --- a/tests/transforms/test_experimental/test_transform_program.py +++ b/tests/transforms/test_experimental/test_transform_program.py @@ -465,7 +465,7 @@ def test_insert_transform_with_expand(self): assert transform_program[1].transform is first_valid_transform def test_valid_transforms(self): - """Test that that it is only possible to create valid transforms.""" + """Test that it is only possible to create valid transforms.""" transform_program = TransformProgram() transform1 = TransformContainer(transform=first_valid_transform, is_informative=True) transform_program.push_back(transform1) @@ -500,21 +500,6 @@ def test_call_on_empty_program(self): obj = [1, 2, 3, "b"] assert null_postprocessing(obj) is obj - def test_cotransform_support_notimplemented(self): - """Test that a transform with a cotransform raises a not implemented error.""" - - my_transform = TransformContainer( - first_valid_transform, classical_cotransform=lambda res: res - ) - - prog = TransformProgram((my_transform,)) - - batch = (qml.tape.QuantumScript([], [qml.state()]),) - with pytest.raises( - NotImplementedError, match="cotransforms are not yet integrated with TransformProgram" - ): - prog(batch) - def test_single_transform_program(self): """Basic test with a single transform that only modifies the tape but not the results.""" diff --git a/tests/transforms/test_metric_tensor.py b/tests/transforms/test_metric_tensor.py index 1b4c6db5807..8037735801a 100644 --- a/tests/transforms/test_metric_tensor.py +++ b/tests/transforms/test_metric_tensor.py @@ -77,26 +77,6 @@ def circuit(a, b): assert qml.math.shape(result[0]) == () assert qml.math.shape(result[1]) == () - @pytest.mark.parametrize("diff_method", ["parameter-shift", "backprop"]) - def test_parameter_fan_out(self, diff_method): - """The metric tensor is with respect to the quantum circuit and ignores - classical processing if ``hybrid=False``. As a result, if there is - parameter fan-out, the returned metric tensor will be larger than - ``(len(args), len(args))`` if hybrid computation is deactivated. 
- """ - dev = qml.device("default.qubit", wires=2) - - def circuit(a): - qml.RX(a, wires=0) - qml.RX(a, wires=0) - return qml.expval(qml.PauliX(0)) - - circuit = qml.QNode(circuit, dev, diff_method=diff_method) - params = np.array([0.1], requires_grad=True) - # pylint:disable=unexpected-keyword-arg - result = qml.metric_tensor(circuit, hybrid=False, approx="block-diag")(*params) - assert result.shape == (2, 2) - def test_construct_subcircuit(self): """Test correct subcircuits constructed""" with qml.queuing.AnnotatedQueue() as q: @@ -231,13 +211,12 @@ def circuit(abc): assert qml.math.allclose(g_diag, np.diag(expected), atol=tol, rtol=0) assert qml.math.allclose(g_blockdiag, np.diag(expected), atol=tol, rtol=0) - @pytest.mark.parametrize("strategy", ["gradient", "device"]) - def test_template_integration(self, strategy): + def test_template_integration(self): """Test that the metric tensor transform acts on QNodes correctly when the QNode contains a template""" dev = qml.device("default.qubit", wires=3) - @qml.qnode(dev, expansion_strategy=strategy) + @qml.qnode(dev) def circuit(weights): qml.templates.StronglyEntanglingLayers(weights, wires=[0, 1, 2]) return qml.probs(wires=[0, 1]) @@ -858,10 +837,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="tensor of a QNode with no trainable parameters"): - res = qml.metric_tensor(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.metric_tensor(circuit)(weights) @pytest.mark.torch @pytest.mark.parametrize("interface", ["auto", "torch"]) @@ -878,10 +855,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="tensor of a QNode with no trainable parameters"): - res = qml.metric_tensor(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.metric_tensor(circuit)(weights) @pytest.mark.tf @pytest.mark.parametrize("interface", ["auto", "tf"]) @@ -898,10 +873,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="tensor of a QNode with no trainable parameters"): - res = qml.metric_tensor(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.metric_tensor(circuit)(weights) @pytest.mark.jax @pytest.mark.parametrize("interface", ["auto", "jax"]) @@ -918,10 +891,8 @@ def circuit(weights): return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)) weights = [0.1, 0.2] - with pytest.warns(UserWarning, match="tensor of a QNode with no trainable parameters"): - res = qml.metric_tensor(circuit)(weights) - - assert res == () + with pytest.raises(qml.QuantumFunctionError, match="No trainable parameters."): + qml.metric_tensor(circuit)(weights) def test_no_trainable_params_tape(self): """Test that the correct ouput and warning is generated in the absence of any trainable @@ -1621,7 +1592,9 @@ def circuit(x, z): x = np.array(0.5, requires_grad=True) z = np.array(0.1, requires_grad=True) - with pytest.warns(UserWarning, match="The device does not have a wire that is not used"): + with pytest.raises( + qml.wires.WireError, match="The device has no free wire for the auxiliary wire." 
+ ): qml.metric_tensor(circuit, approx=None)(x, z) @@ -1637,34 +1610,14 @@ def circuit(x): qml.RY(x, wires=0) return qml.expval(qml.PauliZ(0)) - with pytest.warns(UserWarning, match="An auxiliary wire is not available."): + with pytest.raises( + qml.wires.WireError, match="The requested auxiliary wire does not exist on the used device." + ): qml.metric_tensor(circuit, aux_wire=404)(x) -@pytest.mark.filterwarnings("ignore:An auxiliary wire is not") -def test_error_aux_wire_replaced(): - """Tests that even if an aux_wire is provided, it is superseded by a device - wire if it does not exist itself on the device, so that the metric_tensor is - successfully computed.""" - dev = qml.device("default.qubit", wires=qml.wires.Wires(["wire1", "wire2", "hidden_wire"])) - - @qml.qnode(dev) - def circuit(x, z): - qml.RX(x, wires="wire1") - qml.RZ(z, wires="wire2") - qml.CNOT(wires=["wire1", "wire2"]) - qml.RX(x, wires="wire1") - qml.RZ(z, wires="wire2") - return qml.expval(qml.PauliZ("wire2")) - - x = np.array(0.5, requires_grad=True) - z = np.array(0.1, requires_grad=True) - - qml.metric_tensor(circuit, approx=None, aux_wire="wire3")(x, z) - - @pytest.mark.parametrize("allow_nonunitary", [True, False]) -def test_error_generator_not_registered(allow_nonunitary, monkeypatch): +def test_error_generator_not_registered(allow_nonunitary): """Tests that an error is raised if an operation does not have a controlled-generator operation registered.""" dev = qml.device("default.qubit", wires=qml.wires.Wires(["wire1", "wire2", "wire3"])) @@ -1672,22 +1625,6 @@ def test_error_generator_not_registered(allow_nonunitary, monkeypatch): x = np.array(0.5, requires_grad=True) z = np.array(0.1, requires_grad=True) - @qml.qnode(dev) - def circuit0(x, z): - qml.CRX(x, wires=["wire1", "wire2"]) - qml.RZ(z, wires="wire2") - return qml.expval(qml.PauliZ("wire2")) - - with monkeypatch.context() as m: - exp_fn = lambda tape, *args, **kwargs: tape - m.setattr("pennylane.transforms.metric_tensor.expand_fn", exp_fn) - - if allow_nonunitary: - qml.metric_tensor(circuit0, approx=None, allow_nonunitary=allow_nonunitary)(x, z) - else: - with pytest.raises(ValueError, match="Generator for operation"): - qml.metric_tensor(circuit0, approx=None, allow_nonunitary=allow_nonunitary)(x, z) - class RX(qml.RX): def generator(self): return qml.Hadamard(self.wires) @@ -1698,15 +1635,11 @@ def circuit1(x, z): qml.RZ(z, wires="wire1") return qml.expval(qml.PauliZ("wire2")) - with monkeypatch.context() as m: - exp_fn = lambda tape, *args, **kwargs: tape - m.setattr("pennylane.transforms.metric_tensor.expand_fn", exp_fn) - - if allow_nonunitary: + if allow_nonunitary: + qml.metric_tensor(circuit1, approx=None, allow_nonunitary=allow_nonunitary)(x, z) + else: + with pytest.raises(ValueError, match="Generator for operation"): qml.metric_tensor(circuit1, approx=None, allow_nonunitary=allow_nonunitary)(x, z) - else: - with pytest.raises(ValueError, match="Generator for operation"): - qml.metric_tensor(circuit1, approx=None, allow_nonunitary=allow_nonunitary)(x, z) def test_no_error_missing_aux_wire_not_used(recwarn): diff --git a/tests/transforms/test_specs.py b/tests/transforms/test_specs.py index 10c499be588..0904ddb99c3 100644 --- a/tests/transforms/test_specs.py +++ b/tests/transforms/test_specs.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Unit tests for the specs transform""" +from typing import Sequence, Callable from collections import defaultdict from contextlib import nullcontext import pytest @@ -53,7 +54,9 @@ def circ(): if diff_method == "parameter-shift": assert info["num_gradient_executions"] == 0 - assert info["gradient_fn"] == "pennylane.gradients.parameter_shift.param_shift" + assert ( + info["gradient_fn"] == "pennylane.transforms.core.transform_dispatcher.param_shift" + ) @pytest.mark.parametrize( "diff_method, len_info", [("backprop", 11), ("parameter-shift", 12), ("adjoint", 11)] @@ -184,15 +187,15 @@ def circuit(): with pytest.warns(UserWarning, match="gradient of a tape with no trainable parameters"): info = qml.specs(circuit)() - assert info["diff_method"] == "pennylane.gradients.parameter_shift.param_shift" - assert info["gradient_fn"] == "pennylane.gradients.parameter_shift.param_shift" + assert info["diff_method"] == "pennylane.transforms.core.transform_dispatcher.param_shift" + assert info["gradient_fn"] == "pennylane.transforms.core.transform_dispatcher.param_shift" def test_custom_gradient_transform(self): """Test that a custom gradient transform is properly labelled""" dev = qml.device("default.qubit", wires=2) - @qml.gradients.gradient_transform - def my_transform(tape): + @qml.transforms.core.transform + def my_transform(tape: qml.tape.QuantumTape) -> (Sequence[qml.tape.QuantumTape], Callable): return tape, None @qml.qnode(dev, diff_method=my_transform) @@ -200,8 +203,8 @@ def circuit(): return qml.probs(wires=0) info = qml.specs(circuit)() - assert info["diff_method"] == "test_specs.my_transform" - assert info["gradient_fn"] == "test_specs.my_transform" + assert info["diff_method"] == "pennylane.transforms.core.transform_dispatcher.my_transform" + assert info["gradient_fn"] == "pennylane.transforms.core.transform_dispatcher.my_transform" @pytest.mark.parametrize( "device,num_wires",