Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Follow-up bug fixes for _group_measurements #5559

Merged
merged 16 commits into from
Apr 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 19 additions & 4 deletions pennylane/devices/qubit/sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,13 +99,27 @@ def _group_measurements(mps: List[Union[SampleMeasurement, ClassicalShadowMP, Sh
return all_mp_groups, all_indices


def _get_num_shots_for_expval_H(obs):
def _get_num_executions_for_expval_H(obs):
    """Return the number of device executions needed to estimate the
    expectation value of a Hamiltonian observable ``obs``.

    If the Hamiltonian carries precomputed ``grouping_indices``, one
    execution per group suffices; otherwise every non-identity term is
    measured with its own execution.
    """
    groups = obs.grouping_indices
    if groups:
        return len(groups)
    # No grouping information: count each non-identity term individually.
    _, terms = obs.terms()
    return sum(1 for term in terms if not isinstance(term, qml.Identity))


def _get_num_executions_for_sum(obs):
    """Return the number of device executions needed to estimate the
    expectation value of a ``Sum`` observable ``obs``.

    Precomputed ``grouping_indices`` take precedence. Without a Pauli
    representation, each non-identity term requires a separate execution;
    otherwise the terms are partitioned into commuting groups and one
    execution per group is counted.
    """
    if obs.grouping_indices:
        return len(obs.grouping_indices)

    _, terms = obs.terms()

    if obs.pauli_rep:
        # Commuting Pauli terms can share a single execution per group.
        with qml.QueuingManager.stop_recording():
            groups = qml.pauli.group_observables(terms)
        return len(groups)

    # Non-Pauli sum: every non-identity term is measured separately.
    return sum(1 for term in terms if not isinstance(term, qml.Identity))


# pylint: disable=no-member
def get_num_shots_and_executions(tape: qml.tape.QuantumTape) -> Tuple[int, int]:
"""Get the total number of qpu executions and shots.
Expand All @@ -125,14 +139,15 @@ def get_num_shots_and_executions(tape: qml.tape.QuantumTape) -> Tuple[int, int]:
if isinstance(group[0], ExpectationMP) and isinstance(
group[0].obs, (qml.ops.Hamiltonian, qml.ops.LinearCombination)
):
H_executions = _get_num_shots_for_expval_H(group[0].obs)
H_executions = _get_num_executions_for_expval_H(group[0].obs)
num_executions += H_executions
if tape.shots:
num_shots += tape.shots.total_shots * H_executions
elif isinstance(group[0], ExpectationMP) and isinstance(group[0].obs, qml.ops.Sum):
num_executions += len(group[0].obs)
sum_executions = _get_num_executions_for_sum(group[0].obs)
num_executions += sum_executions
if tape.shots:
num_shots += tape.shots.total_shots * len(group[0].obs)
num_shots += tape.shots.total_shots * sum_executions
elif isinstance(group[0], (ClassicalShadowMP, ShadowExpvalMP)):
num_executions += tape.shots.total_shots
if tape.shots:
Expand Down
16 changes: 15 additions & 1 deletion pennylane/pauli/grouping/group_observables.py
Original file line number Diff line number Diff line change
Expand Up @@ -246,6 +246,14 @@ def group_observables(observables, coefficients=None, grouping_type="qwc", metho
if coefficients is None:
return partitioned_paulis

partitioned_coeffs = _partition_coeffs(partitioned_paulis, observables, coefficients)

return partitioned_paulis, partitioned_coeffs


def _partition_coeffs(partitioned_paulis, observables, coefficients):
"""Partition the coefficients according to the Pauli word groupings."""

partitioned_coeffs = [
qml.math.cast_like([0] * len(g), coefficients) for g in partitioned_paulis
]
Expand All @@ -259,6 +267,12 @@ def group_observables(observables, coefficients=None, grouping_type="qwc", metho
for pauli_word in partition:
# find index of this pauli word in remaining original observables,
for ind, observable in enumerate(observables):
if isinstance(observable, qml.ops.Hamiltonian):
# Converts single-term Hamiltonian to SProd because
# are_identical_pauli_words cannot handle Hamiltonian
coeffs, ops = observable.terms()
# Assuming the Hamiltonian has only one term
observable = qml.s_prod(coeffs[0], ops[0])
if are_identical_pauli_words(pauli_word, observable):
indices.append(coeff_indices[ind])
observables.pop(ind)
Expand All @@ -273,4 +287,4 @@ def group_observables(observables, coefficients=None, grouping_type="qwc", metho
if isinstance(coefficients, list):
partitioned_coeffs = [list(p) for p in partitioned_coeffs]

return partitioned_paulis, partitioned_coeffs
return partitioned_coeffs
9 changes: 8 additions & 1 deletion tests/devices/default_qubit/test_default_qubit_tracking.py
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,14 @@ def circuit_3(y):
20,
),
# op arithmetic test cases
([qml.expval(qml.sum(qml.PauliX(0), qml.PauliX(1)))], 2, 20),
([qml.expval(qml.sum(qml.PauliX(0), qml.PauliY(0)))], 2, 20),
([qml.expval(qml.sum(qml.PauliX(0), qml.PauliX(0) @ qml.PauliX(1)))], 1, 10),
([qml.expval(qml.sum(qml.PauliX(0), qml.Hadamard(0)))], 2, 20),
(
[qml.expval(qml.sum(qml.PauliX(0), qml.PauliY(1) @ qml.PauliX(1), grouping_type="qwc"))],
1,
10,
),
(
[
qml.expval(qml.prod(qml.PauliX(0), qml.PauliX(1))),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -726,24 +726,24 @@ def cost_fn(x):


@pytest.mark.parametrize("execute_kwargs, shots, device", test_matrix)
@pytest.mark.parametrize("use_new_op_math", (True, False))
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
class TestHamiltonianWorkflows:
"""Test that tapes ending with expectations
of Hamiltonians provide correct results and gradients"""

@pytest.fixture
def cost_fn(self, execute_kwargs, shots, device, use_new_op_math):
def cost_fn(self, execute_kwargs, shots, device):
"""Cost function for gradient tests"""

def _cost_fn(weights, coeffs1, coeffs2):
obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
H1 = qml.Hamiltonian(coeffs1, obs1)
if use_new_op_math:
if qml.operation.active_new_opmath():
H1 = qml.pauli.pauli_sentence(H1).operation()

obs2 = [qml.PauliZ(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
if use_new_op_math:
if qml.operation.active_new_opmath():
H2 = qml.pauli.pauli_sentence(H2).operation()

with qml.queuing.AnnotatedQueue() as q:
Expand Down Expand Up @@ -786,12 +786,10 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
]
)

def test_multiple_hamiltonians_not_trainable(
self, execute_kwargs, cost_fn, shots, use_new_op_math
):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""

if execute_kwargs["gradient_fn"] == "adjoint" and not use_new_op_math:
if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")

coeffs1 = np.array([0.1, 0.2, 0.3], requires_grad=False)
Expand All @@ -814,11 +812,11 @@ def test_multiple_hamiltonians_not_trainable(
else:
assert np.allclose(res, expected, atol=atol_for_shots(shots), rtol=0)

def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots, use_new_op_math):
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
if execute_kwargs["gradient_fn"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if use_new_op_math:
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")

coeffs1 = np.array([0.1, 0.2, 0.3], requires_grad=True)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -701,24 +701,24 @@ def cost_fn(x):


@pytest.mark.parametrize("execute_kwargs, shots, device", test_matrix)
@pytest.mark.parametrize("use_new_op_math", (True, False))
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
class TestHamiltonianWorkflows:
"""Test that tapes ending with expectations
of Hamiltonians provide correct results and gradients"""

@pytest.fixture
def cost_fn(self, execute_kwargs, shots, device, use_new_op_math):
def cost_fn(self, execute_kwargs, shots, device):
"""Cost function for gradient tests"""

def _cost_fn(weights, coeffs1, coeffs2):
obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
H1 = qml.Hamiltonian(coeffs1, obs1)
if use_new_op_math:
if qml.operation.active_new_opmath():
H1 = qml.pauli.pauli_sentence(H1).operation()

obs2 = [qml.PauliZ(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
if use_new_op_math:
if qml.operation.active_new_opmath():
H2 = qml.pauli.pauli_sentence(H2).operation()

with qml.queuing.AnnotatedQueue() as q:
Expand Down Expand Up @@ -764,12 +764,10 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
]
)

def test_multiple_hamiltonians_not_trainable(
self, execute_kwargs, cost_fn, shots, use_new_op_math
):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""

if execute_kwargs["gradient_fn"] == "adjoint" and not use_new_op_math:
if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")

coeffs1 = jnp.array([0.1, 0.2, 0.3])
Expand All @@ -792,11 +790,11 @@ def test_multiple_hamiltonians_not_trainable(
else:
assert np.allclose(res, expected, atol=atol_for_shots(shots), rtol=0)

def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots, use_new_op_math):
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
if execute_kwargs["gradient_fn"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if use_new_op_math:
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")

coeffs1 = jnp.array([0.1, 0.2, 0.3])
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -707,24 +707,24 @@ def cost_fn(x):


@pytest.mark.parametrize("execute_kwargs, shots, device", test_matrix)
@pytest.mark.parametrize("use_new_op_math", (True, False))
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
class TestHamiltonianWorkflows:
"""Test that tapes ending with expectations
of Hamiltonians provide correct results and gradients"""

@pytest.fixture
def cost_fn(self, execute_kwargs, shots, device, use_new_op_math):
def cost_fn(self, execute_kwargs, shots, device):
"""Cost function for gradient tests"""

def _cost_fn(weights, coeffs1, coeffs2):
obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
H1 = qml.Hamiltonian(coeffs1, obs1)
if use_new_op_math:
if qml.operation.active_new_opmath():
H1 = qml.pauli.pauli_sentence(H1).operation()

obs2 = [qml.PauliZ(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
if use_new_op_math:
if qml.operation.active_new_opmath():
H2 = qml.pauli.pauli_sentence(H2).operation()

with qml.queuing.AnnotatedQueue() as q:
Expand Down Expand Up @@ -767,12 +767,10 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
]
)

def test_multiple_hamiltonians_not_trainable(
self, execute_kwargs, cost_fn, shots, use_new_op_math
):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""

if execute_kwargs["gradient_fn"] == "adjoint" and not use_new_op_math:
if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")

device_vjp = execute_kwargs.get("device_vjp", False)
Expand All @@ -791,11 +789,11 @@ def test_multiple_hamiltonians_not_trainable(
expected = self.cost_fn_jacobian(weights, coeffs1, coeffs2)[:, :2]
assert np.allclose(jac, expected, atol=atol_for_shots(shots), rtol=0)

def test_multiple_hamiltonians_trainable(self, cost_fn, execute_kwargs, shots, use_new_op_math):
def test_multiple_hamiltonians_trainable(self, cost_fn, execute_kwargs, shots):
"""Test hamiltonian with trainable parameters."""
if execute_kwargs["gradient_fn"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if use_new_op_math:
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")

coeffs1 = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -727,24 +727,24 @@ def cost_fn(x):


@pytest.mark.parametrize("execute_kwargs, shots, device", test_matrix)
@pytest.mark.parametrize("use_new_op_math", (True, False))
@pytest.mark.usefixtures("use_legacy_and_new_opmath")
class TestHamiltonianWorkflows:
"""Test that tapes ending with expectations
of Hamiltonians provide correct results and gradients"""

@pytest.fixture
def cost_fn(self, execute_kwargs, shots, device, use_new_op_math):
def cost_fn(self, execute_kwargs, shots, device):
"""Cost function for gradient tests"""

def _cost_fn(weights, coeffs1, coeffs2):
obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
H1 = qml.Hamiltonian(coeffs1, obs1)
if use_new_op_math:
if qml.operation.active_new_opmath():
H1 = qml.pauli.pauli_sentence(H1).operation()

obs2 = [qml.PauliZ(0)]
H2 = qml.Hamiltonian(coeffs2, obs2)
if use_new_op_math:
if qml.operation.active_new_opmath():
H2 = qml.pauli.pauli_sentence(H2).operation()

with qml.queuing.AnnotatedQueue() as q:
Expand Down Expand Up @@ -795,12 +795,10 @@ def cost_fn_jacobian(weights, coeffs1, coeffs2):
]
)

def test_multiple_hamiltonians_not_trainable(
self, execute_kwargs, cost_fn, shots, use_new_op_math
):
def test_multiple_hamiltonians_not_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with no trainable parameters."""

if execute_kwargs["gradient_fn"] == "adjoint" and not use_new_op_math:
if execute_kwargs["gradient_fn"] == "adjoint" and not qml.operation.active_new_opmath():
pytest.skip("adjoint differentiation does not suppport hamiltonians.")

coeffs1 = torch.tensor([0.1, 0.2, 0.3], requires_grad=False)
Expand All @@ -823,11 +821,11 @@ def test_multiple_hamiltonians_not_trainable(
else:
assert torch.allclose(res, expected, atol=atol_for_shots(shots), rtol=0)

def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots, use_new_op_math):
def test_multiple_hamiltonians_trainable(self, execute_kwargs, cost_fn, shots):
"""Test hamiltonian with trainable parameters."""
if execute_kwargs["gradient_fn"] == "adjoint":
pytest.skip("trainable hamiltonians not supported with adjoint")
if use_new_op_math:
if qml.operation.active_new_opmath():
pytest.skip("parameter shift derivatives do not yet support sums.")

coeffs1 = torch.tensor([0.1, 0.2, 0.3], requires_grad=True)
Expand Down
Loading