Speed up GroverOperator and add GlobalPhase to its decomposition #4666

Merged: 25 commits, Nov 23, 2023
10 changes: 10 additions & 0 deletions doc/releases/changelog-dev.md
@@ -9,6 +9,10 @@

<h3>Improvements 🛠</h3>

* `default.qubit` now applies `GroverOperator` faster by using a custom rule for `apply_operation`
  instead of its matrix representation. Computing the matrix representation of `GroverOperator` is also faster now.
[(#4666)](https://github.com/PennyLaneAI/pennylane/pull/4666)

* `default.qubit` no longer uses a dense matrix for `MultiControlledX` for more than 8 operation wires.
[(#4673)](https://github.com/PennyLaneAI/pennylane/pull/4673)

@@ -51,6 +55,9 @@

<h3>Breaking changes 💔</h3>

* The decomposition of `GroverOperator` now has an additional global phase operation.
[(#4666)](https://github.com/PennyLaneAI/pennylane/pull/4666)

* The `prep` keyword argument has been removed from `QuantumScript` and `QuantumTape`.
`StatePrepBase` operations should be placed at the beginning of the `ops` list instead.
[(#4756)](https://github.com/PennyLaneAI/pennylane/pull/4756)
@@ -98,6 +105,9 @@

<h3>Bug fixes 🐛</h3>

* The decomposition of `GroverOperator` now has the same global phase as its matrix.
[(#4666)](https://github.com/PennyLaneAI/pennylane/pull/4666)

* JAX can now differentiate a batch of circuits where one tape does not have trainable parameters.
[(#4837)](https://github.com/PennyLaneAI/pennylane/pull/4837)

42 changes: 42 additions & 0 deletions pennylane/devices/qubit/apply_operation.py
@@ -315,6 +315,48 @@ def apply_multicontrolledx(
return state


@apply_operation.register
def apply_grover(op: qml.GroverOperator, state, is_state_batched: bool = False, debugger=None):
"""Apply GroverOperator either via a custom matrix-free method (more than 8 operation
wires) or via standard matrix based methods (else)."""
if len(op.wires) < 9:
return _apply_operation_default(op, state, is_state_batched, debugger)
return _apply_grover_without_matrix(state, op.wires, is_state_batched)


def _apply_grover_without_matrix(state, op_wires, is_state_batched):
r"""Apply GroverOperator to state. This method uses that this operator
is :math:`2*P-\mathbb{I}`, where :math:`P` is the projector onto the
all-plus state. This allows us to compute the new state by replacing summing
over all axes on which the operation acts, and "filling in" the all-plus state
in the resulting lower-dimensional state via a Kronecker product.
"""
num_wires = len(op_wires)
# 2 * squared normalization of the all-plus state on the op wires
# (squared because the normalization was skipped when summing; 2* comes from the definition of the Grover operator)
prefactor = 2 ** (1 - num_wires)

# The axes to sum over in order to obtain <+|\psi>, where <+| only acts on the op wires.
sum_axes = [w + is_state_batched for w in op_wires]
collapsed = math.sum(state, axis=tuple(sum_axes))

if num_wires == (len(qml.math.shape(state)) - is_state_batched):
# If the operation acts on all wires, we can skip the tensor product with all-ones state
new_shape = (-1,) + (1,) * num_wires if is_state_batched else (1,) * num_wires
return prefactor * math.reshape(collapsed, new_shape) - state
# [todo]: Once Tensorflow supports expand_dims with multiple axes in the second argument,
# use the following line instead of the two lines above.
# return prefactor * math.expand_dims(collapsed, sum_axes) - state

all_plus = math.cast_like(math.full([2] * num_wires, prefactor), state)
# After the Kronecker product (realized with tensordot with axes=0), we need to move
# the new axes to the summed-away axes' positions. Finally, subtract the original state.
source = list(range(math.ndim(collapsed), math.ndim(state)))
# It would probably be better to create the outer product (computed here with math.tensordot)
# via math.full or math.tile. However, Tensorflow and Torch do not have full support for this.
return math.moveaxis(math.tensordot(collapsed, all_plus, axes=0), source, sum_axes) - state


@apply_operation.register
def apply_snapshot(op: qml.Snapshot, state, is_state_batched: bool = False, debugger=None):
"""Take a snapshot of the state"""
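As an aside, the new matrix-free rule boils down to a single sum over the operator's axes and a broadcasted "fill-in" of the all-plus state. A minimal NumPy sketch of that identity (illustration only, not part of the diff; the three-wire state and seed are arbitrary choices):

```python
import numpy as np

num_wires = 3
dim = 2**num_wires

# Random three-qubit state, shaped (2, 2, 2)
rng = np.random.default_rng(42)
state = rng.normal(size=[2] * num_wires) + 1j * rng.normal(size=[2] * num_wires)
state /= np.linalg.norm(state)

# Matrix-free application: sum over all operator axes (here: all axes), then
# fill the all-plus state back in with prefactor 2^(1 - num_wires) and subtract.
prefactor = 2 ** (1 - num_wires)
collapsed = state.sum()
new_state = prefactor * np.full([2] * num_wires, collapsed) - state

# Reference: dense Grover diffusion matrix 2|+><+| - I
grover_matrix = 2 / dim - np.eye(dim)
expected = (grover_matrix @ state.reshape(dim)).reshape([2] * num_wires)
assert np.allclose(new_state, expected)
```

When the operator acts only on a subset of the state's wires, the kernel above additionally moves the filled-in axes back to their original positions with `math.moveaxis`.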
37 changes: 26 additions & 11 deletions pennylane/templates/subroutines/grover.py
@@ -14,11 +14,9 @@
"""
Contains the Grover Operation template.
"""
import itertools
import functools
import numpy as np
from pennylane.operation import AnyWires, Operation
from pennylane.ops import Hadamard, PauliZ, MultiControlledX
from pennylane.ops import Hadamard, PauliZ, MultiControlledX, GlobalPhase
from pennylane.wires import Wires


@@ -165,17 +163,34 @@ def compute_decomposition(
for wire in wires[:-1]:
op_list.append(Hadamard(wire))

op_list.append(GlobalPhase(np.pi, wires))

return op_list
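
To see the effect of the added `GlobalPhase`, here is a small sketch (illustrative usage on two arbitrarily chosen wires) checking that the decomposition now reproduces the matrix exactly rather than only up to a global phase:

```python
import functools
import numpy as np
import pennylane as qml

op = qml.GroverOperator(wires=[0, 1])
decomp = op.decomposition()

# The decomposition now ends with a global phase of pi on the operator's wires
assert isinstance(decomp[-1], qml.GlobalPhase)

# Multiply the individual matrices in circuit order (later operations act on the left)
mats = [qml.matrix(o, wire_order=[0, 1]) for o in decomp]
mat_from_decomp = functools.reduce(lambda acc, m: m @ acc, mats, np.eye(4))

# With the phase included, this matches op.matrix() exactly
assert np.allclose(mat_from_decomp, op.matrix())
```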

@staticmethod
@functools.lru_cache()
def compute_matrix(n_wires, work_wires):  # pylint: disable=arguments-differ,unused-argument
    # s1 = H|0>, Hadamard on a single qubit in the ground state
    s1 = np.array([1, 1]) / np.sqrt(2)

    # uniform superposition state |s>
    s = functools.reduce(np.kron, list(itertools.repeat(s1, n_wires)))

    # Grover diffusion operator
    G = 2 * np.outer(s, s) - np.identity(2**n_wires)
    return G
    r"""Representation of the operator as a canonical matrix in the computational basis
    (static method).

    The canonical matrix is the textbook matrix representation that does not consider wires.
    Implicitly, this assumes that the wires of the operator correspond to the global wire order.

    .. seealso:: :meth:`.GroverOperator.matrix` and :func:`qml.matrix() <pennylane.matrix>`

    Args:
        n_wires (int): Number of wires the ``GroverOperator`` acts on
        work_wires (Any or Iterable[Any]): optional auxiliary wires to assist decompositions.
            *Unused argument.*

    Returns:
        tensor_like: matrix representation

    The Grover diffusion operator is :math:`2|+\rangle\langle +| - \mathbb{I}`.
    The first term is an all-ones matrix multiplied with two times the squared
    normalization factor of the all-plus state, i.e. all entries of the first term are
    :math:`2^{1-N}` for :math:`N` wires.
    """
    dim = 2**n_wires
    # Grover diffusion operator. Realize the all-ones entry via broadcasting when subtracting
    # the second term.
    return 2 / dim - np.eye(dim)
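
The new return statement uses NumPy broadcasting: the scalar `2 / dim` fills every entry when the identity is subtracted from it. A quick sanity sketch (arbitrary three-wire example) comparing it against the previous outer-product construction:

```python
import functools
import itertools
import numpy as np

n_wires = 3
dim = 2**n_wires

# Previous construction: outer product of the uniform superposition with itself
s1 = np.array([1, 1]) / np.sqrt(2)
s = functools.reduce(np.kron, list(itertools.repeat(s1, n_wires)))
old = 2 * np.outer(s, s) - np.identity(dim)

# New construction: broadcast the constant 2/dim against the identity
new = 2 / dim - np.eye(dim)

assert np.allclose(old, new)
```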
238 changes: 237 additions & 1 deletion tests/devices/qubit/test_apply_operation.py
@@ -206,6 +206,30 @@ def test_cnot(self, method, wire, ml_framework):
assert qml.math.allclose(initial1[1], new1[0])
assert qml.math.allclose(initial1[0], new1[1])

def test_grover(self, method, wire, ml_framework):
"""Test the application of GroverOperator on a two qubit state."""

initial_state = np.array(
[
[0.04624539 + 0.3895457j, 0.22399401 + 0.53870339j],
[-0.483054 + 0.2468498j, -0.02772249 - 0.45901669j],
]
)
initial_state = qml.math.asarray(initial_state, like=ml_framework)

wires = [wire, 1 - wire]
op = qml.GroverOperator(wires)
new_state = method(op, initial_state)

overlap = qml.math.sum(initial_state) / 2
ones_state = qml.math.ones_like(initial_state) / 2
expected_state = 2 * ones_state * overlap - initial_state
assert qml.math.allclose(new_state, expected_state)
state_via_mat = qml.math.tensordot(
op.matrix().reshape([2] * 4), initial_state, axes=[[2, 3], [0, 1]]
)
assert qml.math.allclose(new_state, state_via_mat)

def test_identity(self, method, wire, ml_framework):
"""Test the application of a GlobalPhase gate on a two qubit state."""

@@ -830,6 +854,218 @@ def test_double_excitation(self, method):

assert qml.math.allclose(state_v1, state_v2)

@pytest.mark.parametrize("apply_wires", ([0, 3], [0, 1, 3, 2], [2, 1], [1, 3]))
def test_grover(self, method, apply_wires):
"""Tests a four qubit GroverOperator."""
op = qml.GroverOperator(apply_wires)
new_state = method(op, self.state)

expected_state = np.copy(self.state)
for _op in op.decomposition():
expected_state = method(_op, expected_state)

assert qml.math.allclose(expected_state, new_state)


class TestApplyGroverOperator:
"""Test that GroverOperator is applied correctly."""

def grover_kernel_full_wires(self, state, op_wires, batched):
"""Additional kernel to apply GroverOperator to all state wires."""
prefactor = 2 ** (1 - len(op_wires))
sum_axes = tuple(range(batched, np.ndim(state)))
collapsed = np.sum(state, axis=sum_axes)
return prefactor * np.expand_dims(collapsed, sum_axes) - state

def grover_kernel_partial_wires(self, state, op_wires, batched):
"""Additional kernel to apply GroverOperator to some of all state wires."""
num_wires = len(op_wires)
sum_axes = [w + batched for w in op_wires]
collapsed = np.sum(state, tuple(sum_axes))
prefactor = 2 ** (1 - num_wires)
bcast_shape = [2] * num_wires + list(state.shape[:-num_wires])
expanded = np.broadcast_to(prefactor * collapsed, bcast_shape)
source = list(range(num_wires))
expanded = np.moveaxis(expanded, source, sum_axes)
return expanded - state

@pytest.mark.parametrize(
"op_wires, state_wires, einsum_called, tensordot_called",
[
(2, 2, True, False),
(3, 3, False, True),
(9, 9, False, False),
(2, 13, False, True),
(3, 9, False, True),
(9, 13, False, False),
],
)
def test_dispatching(self, op_wires, state_wires, einsum_called, tensordot_called, mocker):
"""Test that apply_operation dispatches to einsum, tensordot and the kernel correctly."""
# pylint: disable=too-many-arguments
np.random.seed(752)
state = np.random.random([2] * state_wires) + 1j * np.random.random([2] * state_wires)

op = qml.GroverOperator(list(range(op_wires)))
spy_einsum = mocker.spy(qml.math, "einsum")
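# ``argsort`` is spied on as a stand-in for the tensordot code path: the custom Grover
# kernel itself calls ``math.tensordot``, so counting ``tensordot`` calls would not
# distinguish the two paths.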
spy_tensordot = mocker.spy(qml.math, "argsort")
apply_operation(op, state, is_state_batched=False, debugger=None)
assert spy_einsum.call_count == int(einsum_called)
assert spy_tensordot.call_count == int(tensordot_called)

@pytest.mark.parametrize("op_wires, state_wires", [(2, 2), (3, 3), (9, 9)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_full_wires(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
when applying it to all wires of a state."""
np.random.seed(752)
batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
flat_shape = (batch_dim, 2**state_wires) if batched else (2**state_wires,)
state = np.random.random(shape) + 1j * np.random.random(shape)

op = qml.GroverOperator(list(range(op_wires)))
out = apply_operation(op, state, is_state_batched=batched, debugger=None)
# Double transpose to accommodate batching
expected_via_mat = (op.matrix() @ state.reshape(flat_shape).T).T.reshape(shape)
expected_via_kernel = self.grover_kernel_full_wires(state, op.wires, batched)
assert np.allclose(out, expected_via_mat)
assert np.allclose(out, expected_via_kernel)

@pytest.mark.parametrize("op_wires, state_wires", [(3, 5), (9, 13)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_partial_wires(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
but einsum (because Grover can't act on a single wire)
when applying it only to some of the wires of a state."""
np.random.seed(752)
batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
state = np.random.random(shape) + 1j * np.random.random(shape)

for start_wire in [0, 1, state_wires - op_wires]:
wires = list(range(start_wire, start_wire + op_wires))
op = qml.GroverOperator(wires)
out = apply_operation(op, state, is_state_batched=batched, debugger=None)
expected_via_mat = apply_operation_tensordot(op, state, batched)
expected_via_kernel = self.grover_kernel_partial_wires(state, wires, batched)
assert np.allclose(out, expected_via_mat)
assert np.allclose(out, expected_via_kernel)

@pytest.mark.autograd
@pytest.mark.parametrize("op_wires, state_wires", [(2, 2), (3, 3), (9, 9), (3, 5), (9, 13)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_autograd(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
when applying it to an Autograd state."""
batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
# Input state
np.random.seed(752)
state = np.random.random(shape) + 1j * np.random.random(shape)

wires = list(range(op_wires))
op = qml.GroverOperator(wires)
expected_via_mat = apply_operation_tensordot(op, state, batched)
if op_wires == state_wires:
expected_via_kernel = self.grover_kernel_full_wires(state, wires, batched)
else:
expected_via_kernel = self.grover_kernel_partial_wires(state, wires, batched)

# Cast to interface and apply operation
state = qml.numpy.array(state)
out = apply_operation(op, state, is_state_batched=batched, debugger=None)

assert qml.math.allclose(out, expected_via_mat)
assert qml.math.allclose(out, expected_via_kernel)

@pytest.mark.tf
@pytest.mark.parametrize("op_wires, state_wires", [(2, 2), (3, 3), (9, 9), (3, 5), (9, 13)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_tf(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
when applying it to a Tensorflow state."""
import tensorflow as tf

batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
# Input state
np.random.seed(752)
state = np.random.random(shape) + 1j * np.random.random(shape)

wires = list(range(op_wires))
op = qml.GroverOperator(wires)
expected_via_mat = apply_operation_tensordot(op, state, batched)
if op_wires == state_wires:
expected_via_kernel = self.grover_kernel_full_wires(state, wires, batched)
else:
expected_via_kernel = self.grover_kernel_partial_wires(state, wires, batched)

# Cast to interface and apply operation
state = tf.Variable(state)
out = apply_operation(op, state, is_state_batched=batched, debugger=None)

assert qml.math.allclose(out, expected_via_mat)
assert qml.math.allclose(out, expected_via_kernel)

@pytest.mark.jax
@pytest.mark.parametrize("op_wires, state_wires", [(2, 2), (3, 3), (9, 9), (3, 5), (9, 13)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_jax(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
when applying it to a Jax state."""
import jax

batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
# Input state
np.random.seed(752)
state = np.random.random(shape) + 1j * np.random.random(shape)

wires = list(range(op_wires))
op = qml.GroverOperator(wires)
expected_via_mat = apply_operation_tensordot(op, state, batched)
if op_wires == state_wires:
expected_via_kernel = self.grover_kernel_full_wires(state, wires, batched)
else:
expected_via_kernel = self.grover_kernel_partial_wires(state, wires, batched)

# Cast to interface and apply operation
state = jax.numpy.array(state)
out = apply_operation(op, state, is_state_batched=batched, debugger=None)

assert qml.math.allclose(out, expected_via_mat)
assert qml.math.allclose(out, expected_via_kernel)

@pytest.mark.torch
@pytest.mark.parametrize("op_wires, state_wires", [(2, 2), (3, 3), (9, 9), (3, 5), (9, 13)])
@pytest.mark.parametrize("batch_dim", [None, 1, 3])
def test_correctness_torch(self, op_wires, state_wires, batch_dim):
"""Test that apply_operation is correct for GroverOperator for all dispatch branches
when applying it to a Torch state."""
import torch

batched = batch_dim is not None
shape = [batch_dim] + [2] * state_wires if batched else [2] * state_wires
# Input state
np.random.seed(752)
state = np.random.random(shape) + 1j * np.random.random(shape)

wires = list(range(op_wires))
op = qml.GroverOperator(wires)
expected_via_mat = apply_operation_tensordot(op, state, batched)
if op_wires == state_wires:
expected_via_kernel = self.grover_kernel_full_wires(state, wires, batched)
else:
expected_via_kernel = self.grover_kernel_partial_wires(state, wires, batched)

# Cast to interface and apply operation
state = torch.tensor(state, requires_grad=True)
out = apply_operation(op, state, is_state_batched=batched, debugger=None)

assert qml.math.allclose(out, expected_via_mat)
assert qml.math.allclose(out, expected_via_kernel)


class TestMultiControlledXKernel:
"""Test the specialized kernel for MultiControlledX and its dispatching."""
@@ -942,7 +1178,7 @@ def test_with_torch(self, batch_dim):
@pytest.mark.tf
@pytest.mark.parametrize("op", (qml.PauliZ(8), qml.CNOT((5, 6))))
def test_tf_large_state(op):
""" "Tests that custom kernels that use slicing fall back to a different method when
"""Tests that custom kernels that use slicing fall back to a different method when
the state has a large number of wires."""
import tensorflow as tf
