From b8bf98b43de7075d3cd7a819a11c40d9a3534980 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Thu, 2 May 2024 14:52:16 -0400 Subject: [PATCH 01/24] Begin expansion of probs tests --- .../measurements/tests/CMakeLists.txt | 1 + .../tests/Test_MeasurementsLQubit.cpp | 87 +++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/CMakeLists.txt b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/CMakeLists.txt index 1ea6861b8a..0bfbe65f90 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/CMakeLists.txt +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/CMakeLists.txt @@ -17,6 +17,7 @@ FetchAndIncludeCatch() add_library(lightning_qubit_measurements_tests INTERFACE) target_link_libraries(lightning_qubit_measurements_tests INTERFACE Catch2::Catch2 lightning_measurements + lightning_qubit_observables lightning_qubit_measurements ) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp index b4be45e99a..6a1e92289e 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp @@ -20,6 +20,7 @@ #include #include "MeasurementsLQubit.hpp" +#include "ObservablesLQubit.hpp" #include "StateVectorLQubitManaged.hpp" #include "StateVectorLQubitRaw.hpp" #include "Util.hpp" @@ -33,6 +34,7 @@ namespace { using namespace Pennylane::Util; using namespace Pennylane::LightningQubit; +using namespace Pennylane::LightningQubit::Observables; using namespace Pennylane::LightningQubit::Measures; }; // namespace @@ -196,6 +198,91 @@ TEMPLATE_PRODUCT_TEST_CASE("Variances", "[Measurements]", } 
} +TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", + (StateVectorLQubitManaged, StateVectorLQubitRaw), + (float, double)) { + using StateVectorT = TestType; + using PrecisionT = typename StateVectorT::PrecisionT; + using ComplexT = typename StateVectorT::ComplexT; + SECTION("1 qubit") { + // Defining the State Vector that will be measured. + auto statevector_data = std::vector{ + {1.0, 0.0}, {0.0, 0.0}}; // createNonTrivialState(); + StateVectorT statevector(statevector_data.data(), + statevector_data.size()); + + // Initializing the measurements class. + // This object attaches to the statevector allowing several + // measurements. + Measurements Measurer(statevector); + + SECTION("Testing probs()") { + auto p0 = Measurer.probs(); + statevector.applyOperation("Hadamard", {0}, false); + auto p1 = Measurer.probs(); + + REQUIRE_THAT( + p0, + Catch::Approx(std::vector{1.0, 0.0}).margin(1e-7)); + REQUIRE_THAT( + p1, + Catch::Approx(std::vector{0.5, 0.5}).margin(1e-7)); + } + SECTION("Testing probs(NamedObs)") { + const auto obs1 = Observables::NamedObs( + {"PauliX"}, std::vector{0}); + const auto obs2 = Observables::NamedObs( + {"PauliZ"}, std::vector{0}); + const auto obs3 = Observables::NamedObs( + {"Hadamard"}, std::vector{0}); + auto p0_obs1 = Measurer.probs(obs1); + auto p0_obs2 = Measurer.probs(obs2); + auto p0_obs3 = Measurer.probs(obs3); + + CHECK_THAT( + p0_obs1, + Catch::Approx(std::vector{0.5, 0.5}).margin(1e-7)); + CHECK_THAT( + p0_obs2, + Catch::Approx(std::vector{1.0, 0.0}).margin(1e-7)); + CHECK_THAT(p0_obs3, Catch::Approx(std::vector{ + 0.85355339, 0.14644661}) + .margin(1e-7)); + + statevector.applyOperation("Hadamard", {0}, false); + auto p1_obs1 = Measurer.probs(obs1); + auto p1_obs2 = Measurer.probs(obs2); + auto p1_obs3 = Measurer.probs(obs3); + + CHECK_THAT( + p1_obs1, + Catch::Approx(std::vector{1.0, 0.0}).margin(1e-7)); + CHECK_THAT( + p1_obs2, + Catch::Approx(std::vector{0.5, 0.5}).margin(1e-7)); + CHECK_THAT(p1_obs3, 
Catch::Approx(std::vector{ + 0.85355339, 0.14644661}) + .margin(1e-7)); + + statevector.applyOperation("Hadamard", {0}, false); + auto p2_obs1 = Measurer.probs(obs1); + auto p2_obs2 = Measurer.probs(obs2); + auto p2_obs3 = Measurer.probs(obs3); + + CHECK_THAT( + p0_obs1, + Catch::Approx(std::vector{0.5, 0.5}).margin(1e-7)); + CHECK_THAT( + p0_obs2, + Catch::Approx(std::vector{1.0, 0.0}).margin(1e-7)); + CHECK_THAT(p0_obs3, Catch::Approx(std::vector{ + 0.85355339, 0.14644661}) + .margin(1e-7)); + } + } + SECTION("n-qubit") {} +} + TEMPLATE_PRODUCT_TEST_CASE("Sample with Metropolis (Local Kernel)", "[Measurements][MCMC]", (StateVectorLQubitManaged, StateVectorLQubitRaw), From 5361ef0a674e2ea3d68d29fd7f738e82e7f68d25 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Thu, 2 May 2024 16:33:02 -0400 Subject: [PATCH 02/24] Update tests to exhaustively compute probs with wires --- .../tests/Test_MeasurementsLQubit.cpp | 256 +++++++++++++++++- 1 file changed, 249 insertions(+), 7 deletions(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp index 6a1e92289e..5e63e2c058 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp @@ -205,15 +205,10 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", using PrecisionT = typename StateVectorT::PrecisionT; using ComplexT = typename StateVectorT::ComplexT; SECTION("1 qubit") { - // Defining the State Vector that will be measured. 
- auto statevector_data = std::vector{ - {1.0, 0.0}, {0.0, 0.0}}; // createNonTrivialState(); + auto statevector_data = std::vector{{1.0, 0.0}, {0.0, 0.0}}; StateVectorT statevector(statevector_data.data(), statevector_data.size()); - // Initializing the measurements class. - // This object attaches to the statevector allowing several - // measurements. Measurements Measurer(statevector); SECTION("Testing probs()") { @@ -280,7 +275,254 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", .margin(1e-7)); } } - SECTION("n-qubit") {} + SECTION("n-qubit") { + SECTION("2 qubits") { + SECTION("|00> state") { + constexpr std::size_t num_qubits = 2; + auto statevector_data = + std::vector((1UL << num_qubits), {0.0, 0.0}); + const std::vector wires{0, 1}; + statevector_data[0] = {1.0, 0.0}; + + StateVectorT statevector(statevector_data.data(), + statevector_data.size()); + Measurements Measurer(statevector); + + auto p0_full = Measurer.probs(); + auto p0_0 = Measurer.probs({0}, wires); + auto p0_1 = Measurer.probs({1}, wires); + auto p0_perm0 = Measurer.probs({1, 0}, wires); + + REQUIRE_THAT(p0_full, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_0, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p0_perm0, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + statevector.applyOperation("Hadamard", {0}, false); + auto p1_full = Measurer.probs(); + auto p1_0 = Measurer.probs({0}, wires); + auto p1_1 = Measurer.probs({1}, wires); + auto p1_perm0 = Measurer.probs({1, 0}, wires); + + REQUIRE_THAT(p1_full, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + REQUIRE_THAT(p1_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_perm0, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + 
.margin(1e-7)); + + statevector.applyOperation("Hadamard", {1}, false); + auto p2_full = Measurer.probs(); + auto p2_0 = Measurer.probs({0}, wires); + auto p2_1 = Measurer.probs({1}, wires); + auto p2_perm0 = Measurer.probs({1, 0}, wires); + + REQUIRE_THAT(p2_full, Catch::Approx(std::vector{ + 0.25, 0.25, 0.25, 0.25}) + .margin(1e-7)); + REQUIRE_THAT(p2_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + REQUIRE_THAT(p2_1, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + REQUIRE_THAT(p2_perm0, + Catch::Approx(std::vector{0.25, 0.25, + 0.25, 0.25}) + .margin(1e-7)); + } + } + SECTION("3 qubits") { + SECTION("|000> state") { + constexpr std::size_t num_qubits = 3; + auto statevector_data = + std::vector((1UL << num_qubits), {0.0, 0.0}); + const std::vector wires{0, 1, 2}; + statevector_data[0] = {1.0, 0.0}; + + StateVectorT statevector(statevector_data.data(), + statevector_data.size()); + Measurements Measurer(statevector); + + auto p0_full = Measurer.probs(); + auto p0_0 = Measurer.probs({0}, wires); + auto p0_1 = Measurer.probs({1}, wires); + auto p0_2 = Measurer.probs({2}, wires); + + auto p0_01 = Measurer.probs({0, 1}, wires); + auto p0_02 = Measurer.probs({0, 2}, wires); + auto p0_12 = Measurer.probs({1, 2}, wires); + + auto p0_10 = Measurer.probs({1, 0}, wires); + auto p0_20 = Measurer.probs({2, 0}, wires); + auto p0_21 = Measurer.probs({2, 1}, wires); + + auto p0_012 = Measurer.probs({0, 1, 2}, wires); + auto p0_021 = Measurer.probs({0, 2, 1}, wires); + auto p0_102 = Measurer.probs({1, 0, 2}, wires); + auto p0_120 = Measurer.probs({1, 2, 0}, wires); + auto p0_201 = Measurer.probs({2, 0, 1}, wires); + auto p0_210 = Measurer.probs({2, 1, 0}, wires); + + REQUIRE_THAT(p0_full, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_0, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_2, + 
Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p0_01, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_02, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_12, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p0_10, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_20, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_21, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p0_012, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_021, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_102, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p0_120, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_201, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p0_210, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + + statevector.applyOperation("Hadamard", {0}, false); + + auto p1_full = Measurer.probs(); + auto p1_0 = Measurer.probs({0}, wires); + auto p1_1 = Measurer.probs({1}, wires); + auto p1_2 = Measurer.probs({2}, wires); + + auto p1_01 = Measurer.probs({0, 1}, wires); + auto p1_02 = Measurer.probs({0, 2}, wires); + auto p1_12 = Measurer.probs({1, 2}, wires); + + auto p1_10 = Measurer.probs({1, 0}, wires); + auto p1_20 = Measurer.probs({2, 0}, wires); + auto p1_21 = Measurer.probs({2, 1}, wires); + + auto p1_012 = Measurer.probs({0, 1, 2}, wires); + auto p1_021 = Measurer.probs({0, 2, 1}, wires); + auto p1_102 = Measurer.probs({1, 0, 2}, wires); + auto p1_120 = Measurer.probs({1, 2, 0}, wires); + auto p1_201 = 
Measurer.probs({2, 0, 1}, wires); + auto p1_210 = Measurer.probs({2, 1, 0}, wires); + + REQUIRE_THAT(p1_full, Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + REQUIRE_THAT(p1_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_2, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p1_01, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_02, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_12, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p1_10, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_20, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_21, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p1_012, Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_021, Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_102, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + + REQUIRE_THAT(p1_120, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_201, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + REQUIRE_THAT(p1_210, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + } + } + } } TEMPLATE_PRODUCT_TEST_CASE("Sample with Metropolis (Local Kernel)", From 1aacd3709d85f05a68f168f8fc8983244737926e Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Thu, 2 May 2024 16:42:31 -0400 Subject: [PATCH 03/24] Update all tests in LQ Meas to use CHECK instead of REQUIRES for better signal of failures --- .../tests/Test_MeasurementsLQubit.cpp | 343 +++++++++--------- 1 file changed, 171 insertions(+), 172 deletions(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp index 5e63e2c058..63de0d3a95 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/tests/Test_MeasurementsLQubit.cpp @@ -83,17 +83,17 @@ TEMPLATE_PRODUCT_TEST_CASE("Expected Values", "[Measurements]", operations_list = {PauliX, PauliX, PauliX}; exp_values = Measurer.expval(operations_list, wires_list); exp_values_ref = {0.49272486, 0.42073549, 0.28232124}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); operations_list = {PauliY, PauliY, PauliY}; exp_values = Measurer.expval(operations_list, wires_list); exp_values_ref = {-0.64421768, -0.47942553, -0.29552020}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); operations_list = {PauliZ, PauliZ, PauliZ}; exp_values = Measurer.expval(operations_list, wires_list); exp_values_ref = {0.58498357, 0.77015115, 0.91266780}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); } SECTION("Testing list of operators defined by its name:") { @@ -105,17 +105,17 @@ TEMPLATE_PRODUCT_TEST_CASE("Expected Values", "[Measurements]", operations_list = {"PauliX", "PauliX", "PauliX"}; exp_values = Measurer.expval(operations_list, wires_list); 
exp_values_ref = {0.49272486, 0.42073549, 0.28232124}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); operations_list = {"PauliY", "PauliY", "PauliY"}; exp_values = Measurer.expval(operations_list, wires_list); exp_values_ref = {-0.64421768, -0.47942553, -0.29552020}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); operations_list = {"PauliZ", "PauliZ", "PauliZ"}; exp_values = Measurer.expval(operations_list, wires_list); exp_values_ref = {0.58498357, 0.77015115, 0.91266780}; - REQUIRE_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); + CHECK_THAT(exp_values, Catch::Approx(exp_values_ref).margin(1e-6)); } } @@ -162,17 +162,17 @@ TEMPLATE_PRODUCT_TEST_CASE("Variances", "[Measurements]", operations_list = {PauliX, PauliX, PauliX}; variances = Measurer.var(operations_list, wires_list); variances_ref = {0.7572222, 0.8229816, 0.9202947}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); operations_list = {PauliY, PauliY, PauliY}; variances = Measurer.var(operations_list, wires_list); variances_ref = {0.5849835, 0.7701511, 0.9126678}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); operations_list = {PauliZ, PauliZ, PauliZ}; variances = Measurer.var(operations_list, wires_list); variances_ref = {0.6577942, 0.4068672, 0.1670374}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); } SECTION("Testing list of operators defined by its name:") { @@ -184,17 +184,17 @@ TEMPLATE_PRODUCT_TEST_CASE("Variances", "[Measurements]", operations_list = {"PauliX", "PauliX", "PauliX"}; variances = Measurer.var(operations_list, 
wires_list); variances_ref = {0.7572222, 0.8229816, 0.9202947}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); operations_list = {"PauliY", "PauliY", "PauliY"}; variances = Measurer.var(operations_list, wires_list); variances_ref = {0.5849835, 0.7701511, 0.9126678}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); operations_list = {"PauliZ", "PauliZ", "PauliZ"}; variances = Measurer.var(operations_list, wires_list); variances_ref = {0.6577942, 0.4068672, 0.1670374}; - REQUIRE_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); + CHECK_THAT(variances, Catch::Approx(variances_ref).margin(1e-6)); } } @@ -216,10 +216,10 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", statevector.applyOperation("Hadamard", {0}, false); auto p1 = Measurer.probs(); - REQUIRE_THAT( + CHECK_THAT( p0, Catch::Approx(std::vector{1.0, 0.0}).margin(1e-7)); - REQUIRE_THAT( + CHECK_THAT( p1, Catch::Approx(std::vector{0.5, 0.5}).margin(1e-7)); } @@ -293,19 +293,19 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", auto p0_1 = Measurer.probs({1}, wires); auto p0_perm0 = Measurer.probs({1, 0}, wires); - REQUIRE_THAT(p0_full, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_0, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_1, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p0_perm0, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); + CHECK_THAT(p0_full, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_0, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p0_perm0, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); 
statevector.applyOperation("Hadamard", {0}, false); auto p1_full = Measurer.probs(); @@ -313,18 +313,18 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", auto p1_1 = Measurer.probs({1}, wires); auto p1_perm0 = Measurer.probs({1, 0}, wires); - REQUIRE_THAT(p1_full, Catch::Approx(std::vector{ - 0.5, 0.0, 0.5, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_0, - Catch::Approx(std::vector{0.5, 0.5}) - .margin(1e-7)); - REQUIRE_THAT(p1_1, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_perm0, Catch::Approx(std::vector{ - 0.5, 0.5, 0.0, 0.0}) - .margin(1e-7)); + CHECK_THAT(p1_full, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + CHECK_THAT(p1_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_perm0, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + .margin(1e-7)); statevector.applyOperation("Hadamard", {1}, false); auto p2_full = Measurer.probs(); @@ -332,19 +332,18 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", auto p2_1 = Measurer.probs({1}, wires); auto p2_perm0 = Measurer.probs({1, 0}, wires); - REQUIRE_THAT(p2_full, Catch::Approx(std::vector{ - 0.25, 0.25, 0.25, 0.25}) - .margin(1e-7)); - REQUIRE_THAT(p2_0, - Catch::Approx(std::vector{0.5, 0.5}) - .margin(1e-7)); - REQUIRE_THAT(p2_1, - Catch::Approx(std::vector{0.5, 0.5}) - .margin(1e-7)); - REQUIRE_THAT(p2_perm0, - Catch::Approx(std::vector{0.25, 0.25, - 0.25, 0.25}) - .margin(1e-7)); + CHECK_THAT(p2_full, Catch::Approx(std::vector{ + 0.25, 0.25, 0.25, 0.25}) + .margin(1e-7)); + CHECK_THAT(p2_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + CHECK_THAT(p2_1, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + CHECK_THAT(p2_perm0, Catch::Approx(std::vector{ + 0.25, 0.25, 0.25, 0.25}) + .margin(1e-7)); } } SECTION("3 qubits") { @@ -379,65 +378,65 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", auto p0_201 
= Measurer.probs({2, 0, 1}, wires); auto p0_210 = Measurer.probs({2, 1, 0}, wires); - REQUIRE_THAT(p0_full, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_0, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_1, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_2, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p0_01, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_02, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_12, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p0_10, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_20, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_21, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) + CHECK_THAT(p0_full, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) .margin(1e-7)); - - REQUIRE_THAT(p0_012, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_021, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_102, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p0_120, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_201, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p0_210, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); + CHECK_THAT(p0_0, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_2, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p0_01, 
Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_02, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_12, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p0_10, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_20, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_21, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p0_012, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_021, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_102, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p0_120, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_201, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p0_210, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); statevector.applyOperation("Hadamard", {0}, false); @@ -461,65 +460,65 @@ TEMPLATE_PRODUCT_TEST_CASE("Probabilities", "[Measurements]", auto p1_201 = Measurer.probs({2, 0, 1}, wires); auto p1_210 = Measurer.probs({2, 1, 0}, wires); - REQUIRE_THAT(p1_full, Catch::Approx(std::vector{ - 0.5, 0.0, 0.0, 0.0, 0.5, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_0, - Catch::Approx(std::vector{0.5, 0.5}) - .margin(1e-7)); - REQUIRE_THAT(p1_1, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_2, - Catch::Approx(std::vector{1.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p1_01, Catch::Approx(std::vector{ - 0.5, 0.0, 0.5, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_02, Catch::Approx(std::vector{ - 0.5, 0.0, 0.5, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_12, Catch::Approx(std::vector{ - 1.0, 
0.0, 0.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p1_10, Catch::Approx(std::vector{ - 0.5, 0.5, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_20, Catch::Approx(std::vector{ - 0.5, 0.5, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_21, Catch::Approx(std::vector{ - 1.0, 0.0, 0.0, 0.0}) + CHECK_THAT(p1_full, Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) .margin(1e-7)); - - REQUIRE_THAT(p1_012, Catch::Approx(std::vector{ - 0.5, 0.0, 0.0, 0.0, 0.5, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_021, Catch::Approx(std::vector{ - 0.5, 0.0, 0.0, 0.0, 0.5, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_102, Catch::Approx(std::vector{ - 0.5, 0.0, 0.5, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - - REQUIRE_THAT(p1_120, Catch::Approx(std::vector{ - 0.5, 0.5, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_201, Catch::Approx(std::vector{ - 0.5, 0.0, 0.5, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); - REQUIRE_THAT(p1_210, Catch::Approx(std::vector{ - 0.5, 0.5, 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0}) - .margin(1e-7)); + CHECK_THAT(p1_0, + Catch::Approx(std::vector{0.5, 0.5}) + .margin(1e-7)); + CHECK_THAT(p1_1, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_2, + Catch::Approx(std::vector{1.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p1_01, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_02, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_12, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p1_10, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_20, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_21, Catch::Approx(std::vector{ + 1.0, 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p1_012, Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_021, 
Catch::Approx(std::vector{ + 0.5, 0.0, 0.0, 0.0, 0.5, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_102, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + + CHECK_THAT(p1_120, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_201, Catch::Approx(std::vector{ + 0.5, 0.0, 0.5, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); + CHECK_THAT(p1_210, Catch::Approx(std::vector{ + 0.5, 0.5, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0}) + .margin(1e-7)); } } } @@ -582,8 +581,8 @@ TEMPLATE_PRODUCT_TEST_CASE("Sample with Metropolis (Local Kernel)", // compare estimated probabilities to real probabilities SECTION("No wires provided:") { - REQUIRE_THAT(probabilities, - Catch::Approx(expected_probabilities).margin(.05)); + CHECK_THAT(probabilities, + Catch::Approx(expected_probabilities).margin(.05)); } } @@ -644,7 +643,7 @@ TEMPLATE_PRODUCT_TEST_CASE("Sample with Metropolis (NonZeroRandom Kernel)", // compare estimated probabilities to real probabilities SECTION("No wires provided:") { - REQUIRE_THAT(probabilities, - Catch::Approx(expected_probabilities).margin(.05)); + CHECK_THAT(probabilities, + Catch::Approx(expected_probabilities).margin(.05)); } } From a8a47fb05f32571a9b370029d9df70929b856405 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Thu, 2 May 2024 17:37:11 -0400 Subject: [PATCH 04/24] Add Jet Permuter --- .../core/src/utils/NDPermuter.hpp | 268 ++++++++++++++++++ .../core/src/utils/tests/CMakeLists.txt | 1 + .../core/src/utils/tests/Test_NDPermuter.cpp | 35 +++ 3 files changed, 304 insertions(+) create mode 100644 pennylane_lightning/core/src/utils/NDPermuter.hpp create mode 100644 pennylane_lightning/core/src/utils/tests/Test_NDPermuter.cpp diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp new file mode 100644 index 0000000000..356995b821 --- /dev/null +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -0,0 +1,268 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "Error.hpp" + +namespace Pennylane::Util { + +/** + * @brief Interface for tensor permutation backend. + * + * The Permuter class represents the front-end interface for calling + * permutations, which are a generalization of transposition to high-rank + * tensors. The class follows a composition-based approach, where we instantiate + * with a given backend permuter, who makes available two `Transpose` methods, + * one which returns the transform result, and another which modifies a + * reference directly. + * + * Example 1: + * const std::vector data_in {0,1,2,3,4,5}; + * std::vector data_out(data_in.size(), 0); + * Permuter> p; + * p.Transpose(data_in, {2,3}, data_out, {"a","b"}, {"b","a"}); + * + * Example 2: + * const std::vector data_in {0,1,2,3,4,5}; + * Permuter> p; + * auto data_out = p.Transpose(data_in, {2,3}, {"a","b"}, {"b","a"}); + * + * @tparam PermuteBackend + */ +template class Permuter { + public: + /** + * @brief Reshape the given lexicographic data vector from old to new index + * ordering. + * + * @tparam T Data participating in the permutation. + * @param data_in Input data to be transposed. + * @param shape Current shape of the tensor data in each dimension. 
+ * @param data_out Output data following the transpose. + * @param current_order Current index ordering of the tensor. + * @param new_order New index ordering of the tensor. + */ + template + void Transpose(const std::vector &data_in, + const std::vector &shape, std::vector &data_out, + const std::vector ¤t_order, + const std::vector &new_order) { + const std::set idx_old(current_order.begin(), + current_order.end()); + const std::set idx_new(new_order.begin(), new_order.end()); + const auto data_size = std::accumulate(shape.begin(), shape.end(), 1, + std::multiplies()); + PL_ABORT_IF_NOT(idx_old.size() == current_order.size(), + "Duplicate existing indices found. Please ensure " + "indices are unique."); + PL_ABORT_IF_NOT(idx_new.size() == new_order.size(), + "Duplicate transpose indices found. Please ensure " + "indices are unique."); + PL_ABORT_IF_NOT(shape.size() == new_order.size(), + "Tensor shape does not match number of indices."); + PL_ABORT_IF_NOT(data_size == data_in.size(), + "Tensor shape does not match given input tensor data."); + PL_ABORT_IF_NOT( + data_size == data_out.size(), + "Tensor shape does not match given output tensor data."); + PL_ABORT_IF_NOT( + idx_old == idx_new, + "New indices are an invalid permutation of the existing indices"); + + permuter_b_.Transpose(data_in, shape, data_out, current_order, + new_order); + } + + /** + * @brief Reshape the given lexicographic data vector from old to new index + * ordering. + * + * @tparam T Data participating in the permutation. + * @param data_in Input data to be transposed. + * @param shape Current shape of the tensor data in each dimension. + * @param current_order Current index ordering of the tensor. + * @param new_order New index ordering of the tensor. + * @return std::vector Output data following the transpose. 
+ */ + template + std::vector Transpose(const std::vector &data_in, + const std::vector &shape, + const std::vector ¤t_order, + const std::vector &new_order) { + const std::set idx_old(current_order.begin(), + current_order.end()); + const std::set idx_new(new_order.begin(), new_order.end()); + const auto data_size = std::accumulate(shape.begin(), shape.end(), 1, + std::multiplies()); + PL_ABORT_IF_NOT(idx_old.size() == current_order.size(), + "Duplicate existing indices found. Please ensure " + "indices are unique."); + PL_ABORT_IF_NOT(idx_new.size() == new_order.size(), + "Duplicate transpose indices found. Please ensure " + "indices are unique."); + PL_ABORT_IF_NOT(shape.size() == new_order.size(), + "Tensor shape does not match number of indices."); + PL_ABORT_IF_NOT(data_size == data_in.size(), + "Tensor shape does not match given tensor data."); + PL_ABORT_IF_NOT( + idx_old == idx_new, + "New indices are an invalid permutation of the existing indices"); + + PL_ABORT_IF(shape.empty(), "Tensor shape cannot be empty."); + PL_ABORT_IF(new_order.empty(), "Tensor indices cannot be empty."); + return permuter_b_.Transpose(data_in, shape, current_order, new_order); + } + + protected: + friend PermuterBackend; + + private: + PermuterBackend permuter_b_; +}; + +/** + * @brief Default Permuter backend class for generalised transforms. Adapted + * from QFlex. + * + * @tparam blocksize Controls the internal data chunk size for cache blocking. + */ +template class DefaultPermuter { + + public: + /** + * @brief Reference-based transpose operation. See `Permuter` class for more + * details. 
+ */ + template + void Transpose(const std::vector &data_, + const std::vector &shape, std::vector &data_out, + const std::vector &old_indices, + const std::vector &new_indices) { + data_out = data_; + + if (new_indices == old_indices) + return; + + const std::size_t num_indices = old_indices.size(); + const std::size_t total_dim = data_.size(); + std::size_t remaining_data = total_dim; + + if (num_indices == 0) { + PL_ABORT("Number of indices cannot be zero."); + } + + // Create map_old_to_new_idxpos from old to new indices, and + // new_dimensions. + std::vector map_old_to_new_idxpos(num_indices); + std::vector new_dimensions(num_indices); + for (size_t i = 0; i < num_indices; ++i) { + for (size_t j = 0; j < num_indices; ++j) { + if (old_indices[i] == new_indices[j]) { + map_old_to_new_idxpos[i] = j; + new_dimensions[j] = shape[i]; + break; + } + } + } + + std::vector old_super_dimensions(num_indices, 1); + std::vector new_super_dimensions(num_indices, 1); + + const std::size_t old_dimensions_size = shape.size(); + for (size_t i = old_dimensions_size; --i;) { + old_super_dimensions[i - 1] = old_super_dimensions[i] * shape[i]; + new_super_dimensions[i - 1] = + new_super_dimensions[i] * new_dimensions[i]; + } + + std::vector small_map_old_to_new_position(blocksize_); + + // Position old and new. + std::size_t po = 0, pn; + // Counter of the values of each index in the iteration (old + // ordering). + std::vector old_counter(num_indices, 0); + // offset is important when doing this in blocks, as it's indeed + // implemented. + std::size_t offset = 0; + // internal_po keeps track of iterations within a block. + // Blocks have size `blocksize`. + std::size_t internal_po = 0; + + T *data = data_out.data(); + const T *scratch = + data_.data(); // internal pointer offers better performance than + // pointer from argument + + std::size_t effective_max; + + while (true) { + // If end of entire operation, break. 
+ if (po == total_dim - 1) + break; + + internal_po = 0; + // Each iteration of the while block goes through a new position. + // Inside the while, j takes care of increasing indices properly. + while (true) { + po = 0; + pn = 0; + for (size_t i = 0; i < num_indices; i++) { + po += old_super_dimensions[i] * old_counter[i]; + pn += new_super_dimensions[map_old_to_new_idxpos[i]] * + old_counter[i]; + } + small_map_old_to_new_position[po - offset] = pn; + + bool complete{true}; + for (size_t j = num_indices; j--;) { + if (++old_counter[j] < shape[j]) { + complete = false; + break; + } else { + old_counter[j] = 0; + } + } + // If end of block or end of entire operation, break. + if ((++internal_po == blocksize_) || (po == total_dim - 1)) + break; + // If last index (0) was increased, then go back to fastest + // index. + if (complete) + break; + } + // Copy data for this block, taking into account offset of + // small_map... + effective_max = std::min(blocksize_, remaining_data); + for (size_t p = 0; p < effective_max; p++) { + data[small_map_old_to_new_position[p]] = scratch[offset + p]; + } + + offset += blocksize_; + remaining_data -= blocksize_; + } + } + + /** + * @brief Return-based transpose operation. See `Permuter` class for more + * details. 
+ */ + template + std::vector Transpose(std::vector data_, + const std::vector &shape, + const std::vector &old_indices, + const std::vector &new_indices) { + std::vector data_out(std::move(data_)); + Transpose(data_, shape, data_out, old_indices, new_indices); + return data_out; + } + + private: + static constexpr std::size_t blocksize_ = BLOCKSIZE; +}; + +} // namespace Pennylane::Util diff --git a/pennylane_lightning/core/src/utils/tests/CMakeLists.txt b/pennylane_lightning/core/src/utils/tests/CMakeLists.txt index ccb0c8d062..679dd01fc8 100644 --- a/pennylane_lightning/core/src/utils/tests/CMakeLists.txt +++ b/pennylane_lightning/core/src/utils/tests/CMakeLists.txt @@ -29,6 +29,7 @@ target_sources(utils_tests INTERFACE runner_utils.cpp) set(TEST_SOURCES Test_BitUtil.cpp Test_ConstantUtil.cpp Test_Error.cpp + Test_NDPermuter.cpp Test_RuntimeInfo.cpp Test_TypeTraits.cpp Test_Util.cpp diff --git a/pennylane_lightning/core/src/utils/tests/Test_NDPermuter.cpp b/pennylane_lightning/core/src/utils/tests/Test_NDPermuter.cpp new file mode 100644 index 0000000000..b906834fe5 --- /dev/null +++ b/pennylane_lightning/core/src/utils/tests/Test_NDPermuter.cpp @@ -0,0 +1,35 @@ + +// Copyright 2018-2023 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the License); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an AS IS BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include + +#include "NDPermuter.hpp" +#include "TestHelpers.hpp" +#include + +/// @cond DEV +namespace { +using namespace Pennylane; +using namespace Pennylane::Util; +} // namespace +/// @endcond + +TEMPLATE_TEST_CASE("Util::DefaultPermuter::Constructibility", + "[Default Constructibility]", DefaultPermuter<>, + DefaultPermuter<8>) { + SECTION("DefaultPermuter") { REQUIRE(std::is_constructible_v); } +} From b6b3308b77e1e372a61de7ddf8caffa23eee1559 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Thu, 2 May 2024 17:37:42 -0400 Subject: [PATCH 05/24] Add noisy functionality --- .../measurements/MeasurementsLQubit.hpp | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp index 0e74849964..b8626d24c0 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp @@ -31,6 +31,7 @@ #include "LinearAlgebra.hpp" #include "MeasurementsBase.hpp" +#include "NDPermuter.hpp" //transpose_state_tensor, sorting_indices #include "Observables.hpp" #include "SparseLinAlg.hpp" #include "StateVectorLQubitManaged.hpp" @@ -100,22 +101,30 @@ class Measurements final // Determining index that would sort the vector. // This information is needed later. const auto sorted_ind_wires = Pennylane::Util::sorting_indices(wires); + // Sorting wires. std::vector sorted_wires(wires.size()); for (size_t pos = 0; pos < wires.size(); pos++) { sorted_wires[pos] = wires[sorted_ind_wires[pos]]; } + + std::cout << "wires=" << wires << "\n"; + std::cout << "sorted_ind_wires=" << sorted_ind_wires << "\n"; + std::cout << "sorted_wires=" << sorted_wires << "\n"; + // Determining probabilities for the sorted wires. 
const ComplexT *arr_data = this->_statevector.getData(); size_t num_qubits = this->_statevector.getNumQubits(); - const std::vector all_indices = Gates::generateBitPatterns(sorted_wires, num_qubits); const std::vector all_offsets = Gates::generateBitPatterns( Gates::getIndicesAfterExclusion(sorted_wires, num_qubits), num_qubits); + std::cout << "all_indices=" << all_indices << "\n"; + std::cout << "all_offsets=" << all_offsets << "\n"; + std::vector probabilities(all_indices.size(), 0); size_t ind_probs = 0; @@ -125,13 +134,36 @@ class Measurements final } ind_probs++; } + std::cout << "probabilities=" << probabilities << "\n"; + // Transposing the probabilities tensor with the indices determined // at the beginning. if (wires != sorted_wires) { probabilities = Pennylane::Util::transpose_state_tensor( probabilities, sorted_ind_wires); } - return probabilities; + std::cout << "probabilities+1=" << probabilities << "\n"; + + // return probabilities; + + Pennylane::Util::Permuter> p; + std::vector shape(num_qubits, 2); + std::vector wire_labels_old(sorted_ind_wires.size(), ""); + std::vector wire_labels_new(sorted_ind_wires.size(), ""); + + std::transform(sorted_ind_wires.begin(), sorted_ind_wires.end(), + wire_labels_old.begin(), + [](std::size_t index) { return std::to_string(index); }); + std::transform(wires.begin(), wires.end(), wire_labels_new.begin(), + [](std::size_t index) { return std::to_string(index); }); + // for () + + auto probs_sorted = probabilities; + p.Transpose(probabilities, shape, probs_sorted, wire_labels_old, + wire_labels_new); + std::cout << "probabilities+2=" << probs_sorted << "\n"; + return probs_sorted; + // return probabilities; } /** From 5a3a2862f1480f8b450163210c2e6b5917b0ed8c Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 08:44:17 -0400 Subject: [PATCH 06/24] Fix permutation ordering --- .../measurements/MeasurementsLQubit.hpp | 58 ++++++++----------- .../core/src/utils/NDPermuter.hpp | 4 +- 2 files changed, 25 insertions(+), 37 deletions(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp index b8626d24c0..6c2276df4e 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp @@ -31,7 +31,7 @@ #include "LinearAlgebra.hpp" #include "MeasurementsBase.hpp" -#include "NDPermuter.hpp" //transpose_state_tensor, sorting_indices +#include "NDPermuter.hpp" #include "Observables.hpp" #include "SparseLinAlg.hpp" #include "StateVectorLQubitManaged.hpp" @@ -45,6 +45,7 @@ using namespace Pennylane::Measures; using namespace Pennylane::Observables; using Pennylane::LightningQubit::StateVectorLQubitManaged; using Pennylane::LightningQubit::Util::innerProdC; +namespace PUtil = Pennylane::Util; } // namespace /// @endcond @@ -108,10 +109,6 @@ class Measurements final sorted_wires[pos] = wires[sorted_ind_wires[pos]]; } - std::cout << "wires=" << wires << "\n"; - std::cout << "sorted_ind_wires=" << sorted_ind_wires << "\n"; - std::cout << "sorted_wires=" << sorted_wires << "\n"; - // Determining probabilities for the sorted wires. 
const ComplexT *arr_data = this->_statevector.getData(); @@ -122,9 +119,6 @@ class Measurements final Gates::getIndicesAfterExclusion(sorted_wires, num_qubits), num_qubits); - std::cout << "all_indices=" << all_indices << "\n"; - std::cout << "all_offsets=" << all_offsets << "\n"; - std::vector probabilities(all_indices.size(), 0); size_t ind_probs = 0; @@ -134,36 +128,30 @@ class Measurements final } ind_probs++; } - std::cout << "probabilities=" << probabilities << "\n"; - // Transposing the probabilities tensor with the indices determined - // at the beginning. + // Permute the data according to the required wire ordering if (wires != sorted_wires) { - probabilities = Pennylane::Util::transpose_state_tensor( - probabilities, sorted_ind_wires); + static constexpr std::size_t CACHE_SIZE = 8; + PUtil::Permuter> p; + std::vector shape(wires.size(), 2); + std::vector wire_labels_old(sorted_wires.size(), ""); + std::vector wire_labels_new(wires.size(), ""); + + std::transform(sorted_wires.begin(), sorted_wires.end(), + wire_labels_old.begin(), [](std::size_t index) { + return std::to_string(index); + }); + std::transform( + wires.begin(), wires.end(), wire_labels_new.begin(), + [](std::size_t index) { return std::to_string(index); }); + + auto probs_sorted = probabilities; + p.Transpose(probabilities, shape, probs_sorted, wire_labels_old, + wire_labels_new); + return probs_sorted; } - std::cout << "probabilities+1=" << probabilities << "\n"; - - // return probabilities; - - Pennylane::Util::Permuter> p; - std::vector shape(num_qubits, 2); - std::vector wire_labels_old(sorted_ind_wires.size(), ""); - std::vector wire_labels_new(sorted_ind_wires.size(), ""); - - std::transform(sorted_ind_wires.begin(), sorted_ind_wires.end(), - wire_labels_old.begin(), - [](std::size_t index) { return std::to_string(index); }); - std::transform(wires.begin(), wires.end(), wire_labels_new.begin(), - [](std::size_t index) { return std::to_string(index); }); - // for () - - auto 
probs_sorted = probabilities; - p.Transpose(probabilities, shape, probs_sorted, wire_labels_old, - wire_labels_new); - std::cout << "probabilities+2=" << probs_sorted << "\n"; - return probs_sorted; - // return probabilities; + + return probabilities; } /** diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp index 356995b821..6aac4a7eb6 100644 --- a/pennylane_lightning/core/src/utils/NDPermuter.hpp +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -54,8 +54,8 @@ template class Permuter { const std::set idx_old(current_order.begin(), current_order.end()); const std::set idx_new(new_order.begin(), new_order.end()); - const auto data_size = std::accumulate(shape.begin(), shape.end(), 1, - std::multiplies()); + const std::size_t data_size = std::accumulate( + shape.begin(), shape.end(), 1, std::multiplies()); PL_ABORT_IF_NOT(idx_old.size() == current_order.size(), "Duplicate existing indices found. Please ensure " "indices are unique."); From 611f7ac628a1ed6e3b68b1a2a2cce9bee35a111b Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 08:46:02 -0400 Subject: [PATCH 07/24] Update changelog --- .github/CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index f80ca8802d..2a535c02ec 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -105,6 +105,9 @@ ### Bug fixes +* Fix wire order permutations when using `qml.probs` with out-of-order wires. + [(#707)](https://github.com/PennyLaneAI/pennylane-lightning/pull/707) + * `dynamic_one_shot` was refactored to use `SampleMP` measurements as a way to return the mid-circuit measurement samples. `LightningQubit`'s `simulate` is modified accordingly. [(#694)](https://github.com/PennyLaneAI/pennylane/pull/694) From 2d2f45165a5af0529dc757b87d0ea353369a0b6a Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 09:16:05 -0400 Subject: [PATCH 08/24] Fix wrong ordering in probs tests --- .../core/src/measurements/tests/Test_MeasurementsBase.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp b/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp index ff28ba9c62..9182aa760c 100644 --- a/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp +++ b/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp @@ -102,11 +102,11 @@ template void testProbabilities() { {0.67078706, 0.03062806, 0.17564072, 0.00801973, 0.0870997, 0.00397696, 0.02280642, 0.00104134}}, {{1, 2, 0}, - {0.67078706, 0.0870997, 0.17564072, 0.02280642, 0.03062806, - 0.00397696, 0.00801973, 0.00104134}}, - {{2, 0, 1}, {0.67078706, 0.17564072, 0.03062806, 0.00801973, 0.0870997, 0.02280642, 0.00397696, 0.00104134}}, + {{2, 0, 1}, + {0.67078706, 0.0870997, 0.17564072, 0.02280642, 0.03062806, + 0.00397696, 0.00801973, 0.00104134}}, {{2, 1, 0}, {0.67078706, 0.17564072, 0.0870997, 0.02280642, 0.03062806, 0.00801973, 0.00397696, 0.00104134}}, From 4eeb474b7bba09c7bc98c157469c7297603db8ee Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 10:29:18 -0400 Subject: [PATCH 09/24] Add recommendation from apache flink to purge azure dirs --- .github/workflows/tests_linux_cpp.yml | 96 ++++++++++++++++++++++++ .github/workflows/tests_lkcpu_python.yml | 48 ++++++++++++ .github/workflows/tests_lqcpu_python.yml | 48 ++++++++++++ .github/workflows/wheel_linux_x86_64.yml | 48 ++++++++++++ 4 files changed, 240 insertions(+) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 2a4d2c7058..36697d7364 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -44,6 +44,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: @@ -132,6 +156,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo 
"==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: @@ -198,6 +246,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: @@ -292,6 +364,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # 
https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index e44c08db67..085f73d059 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -53,6 +53,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get 
clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - name: Checkout PennyLane-Lightning uses: actions/checkout@v4 with: @@ -148,6 +172,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index a9d9cdb125..d2b53fa085 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -44,6 +44,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' 
| sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: @@ -122,6 +146,30 @@ jobs: runs-on: ${{ matrix.os }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index d11649c4f0..ae5b0fd029 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -46,6 +46,30 @@ jobs: container: ${{ matrix.container_img }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # 
https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - name: Cache installation directories id: kokkos-cache uses: actions/cache@v3 @@ -107,6 +131,30 @@ jobs: container: ${{ matrix.container_img }} steps: + - name: Remove GH Action tooling + run: | + # Following instructions from Apache Flink + # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh + echo "==============================================================================" + echo "Freeing up disk space on CI system" + echo "==============================================================================" + + echo "Listing 100 largest packages" + dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 + df -h + echo "Removing large packages" + sudo apt-get remove -y '^ghc-8.*' + sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y 'php.*' + sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get autoremove -y + sudo apt-get clean + df -h + echo "Removing large directories" + # deleting 15GB + rm -rf /usr/share/dotnet/ + df -h + - name: Restoring cached dependencies id: kokkos-cache uses: actions/cache@v3 From 
61435ebfef2ff9b74c510118fac65c68ee2d1e19 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 10:30:02 -0400 Subject: [PATCH 10/24] Forbid out of order indices for probs with LK and LG in CPP layer --- .../measurements/tests/Test_MeasurementsBase.cpp | 15 ++++++++++----- .../measurements/MeasurementsGPU.hpp | 8 ++++++++ .../measurements/MeasurementsKokkos.hpp | 9 +++++++++ 3 files changed, 27 insertions(+), 5 deletions(-) diff --git a/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp b/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp index 9182aa760c..a4ca85d556 100644 --- a/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp +++ b/pennylane_lightning/core/src/measurements/tests/Test_MeasurementsBase.cpp @@ -84,7 +84,7 @@ template void testProbabilities() { // Expected results calculated with Pennylane default.qubit: std::vector, std::vector>> input = { -#ifdef _ENABLE_PLGPU +#if defined(_ENABLE_PLGPU) // Bit index reodering conducted in the python layer // for L-GPU. Also L-GPU backend doesn't support // out of order wires for probability calculation @@ -92,9 +92,9 @@ template void testProbabilities() { {0.67078706, 0.03062806, 0.0870997, 0.00397696, 0.17564072, 0.00801973, 0.02280642, 0.00104134}} #else - {{0, 1, 2}, - {0.67078706, 0.03062806, 0.0870997, 0.00397696, 0.17564072, - 0.00801973, 0.02280642, 0.00104134}}, +#if defined(_ENABLE_PLQUBIT) + // LightningQubit currently supports arbitrary wire index + // ordering. 
{{0, 2, 1}, {0.67078706, 0.0870997, 0.03062806, 0.00397696, 0.17564072, 0.02280642, 0.00801973, 0.00104134}}, @@ -110,10 +110,15 @@ template void testProbabilities() { {{2, 1, 0}, {0.67078706, 0.17564072, 0.0870997, 0.02280642, 0.03062806, 0.00801973, 0.00397696, 0.00104134}}, + {{2, 1}, {0.84642778, 0.10990612, 0.0386478, 0.0050183}}, + +#endif + {{0, 1, 2}, + {0.67078706, 0.03062806, 0.0870997, 0.00397696, 0.17564072, + 0.00801973, 0.02280642, 0.00104134}}, {{0, 1}, {0.70141512, 0.09107666, 0.18366045, 0.02384776}}, {{0, 2}, {0.75788676, 0.03460502, 0.19844714, 0.00906107}}, {{1, 2}, {0.84642778, 0.0386478, 0.10990612, 0.0050183}}, - {{2, 1}, {0.84642778, 0.10990612, 0.0386478, 0.0050183}}, {{0}, {0.79249179, 0.20750821}}, {{1}, {0.88507558, 0.11492442}}, {{2}, {0.9563339, 0.0436661}} diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp b/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp index dc213c126f..9b4ceaabc3 100644 --- a/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp @@ -92,6 +92,10 @@ class Measurements final * @return std::vector */ auto probs(const std::vector &wires) -> std::vector { + PL_ABORT_IF_NOT(std::is_sorted(wires.cbegin(), wires.cend()), + "LightningGPU does not currently support out-of-order " + "wire indices with probability calculations"); + // Data return type fixed as double in custatevec function call std::vector probabilities(Pennylane::Util::exp2(wires.size())); // this should be built upon by the wires not participating @@ -193,6 +197,10 @@ class Measurements final std::vector probs(const std::vector &wires, size_t num_shots) { + PL_ABORT_IF_NOT(std::is_sorted(wires.cbegin(), wires.cend()), + "LightningGPU does not currently support out-of-order " + "wire indices with probability calculations"); + return 
BaseType::probs(wires, num_shots); } diff --git a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/MeasurementsKokkos.hpp b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/MeasurementsKokkos.hpp index d4f85aeead..0b5cd01012 100644 --- a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/MeasurementsKokkos.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/MeasurementsKokkos.hpp @@ -506,6 +506,10 @@ class Measurements final * The basis columns are rearranged according to wires. */ std::vector probs(const std::vector &wires) { + PL_ABORT_IF_NOT( + std::is_sorted(wires.cbegin(), wires.cend()), + "LightningKokkos does not currently support out-of-order wire " + "indices with probability calculations"); using MDPolicyType_2D = Kokkos::MDRangePolicy>; @@ -645,6 +649,11 @@ class Measurements final std::vector probs(const std::vector &wires, size_t num_shots) { + PL_ABORT_IF_NOT( + std::is_sorted(wires.cbegin(), wires.cend()), + "LightningKokkos does not currently support out-of-order wire " + "indices with probability calculations"); + return BaseType::probs(wires, num_shots); } From 0e4e6fd056b215d58cac531fadcf3857acb765c4 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 10:39:34 -0400 Subject: [PATCH 11/24] Remove MSFT tools from GH Actions --- .github/workflows/tests_linux_cpp.yml | 23 ++++------------------- .github/workflows/tests_lkcpu_python.yml | 11 ++--------- .github/workflows/tests_lqcpu_python.yml | 10 ++-------- .github/workflows/wheel_linux_x86_64.yml | 11 ++--------- 4 files changed, 10 insertions(+), 45 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 36697d7364..93178e086b 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -51,15 +51,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -67,7 +64,6 @@ jobs: # deleting 15GB rm -rf /usr/share/dotnet/ df -h - - uses: actions/setup-python@v5 name: Install Python with: @@ -163,15 +159,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm 
google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -253,15 +246,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -269,7 +259,6 @@ jobs: # deleting 15GB rm -rf /usr/share/dotnet/ df -h - - uses: actions/setup-python@v5 name: Install Python with: @@ -371,15 +360,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -387,7 +373,6 @@ jobs: # deleting 15GB rm -rf /usr/share/dotnet/ df -h - - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index 085f73d059..e2ea69f22d 100644 --- 
a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -60,15 +60,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -76,7 +73,6 @@ jobs: # deleting 15GB rm -rf /usr/share/dotnet/ df -h - - name: Checkout PennyLane-Lightning uses: actions/checkout@v4 with: @@ -179,15 +175,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index d2b53fa085..5b2e03822d 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -51,15 +51,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk 
space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -153,15 +150,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index ae5b0fd029..43951838b8 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -53,15 +53,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo 
apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -138,15 +135,12 @@ jobs: echo "==============================================================================" echo "Freeing up disk space on CI system" echo "==============================================================================" - echo "Listing 100 largest packages" dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^ghc-8.*' sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y 'php.*' - sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel + sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h @@ -154,7 +148,6 @@ jobs: # deleting 15GB rm -rf /usr/share/dotnet/ df -h - - name: Restoring cached dependencies id: kokkos-cache uses: actions/cache@v3 From 44ca70ebbd8ef5012fa3154253cd908b5bd91595 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 10:43:10 -0400 Subject: [PATCH 12/24] Just try to remove dotnet --- .github/workflows/tests_linux_cpp.yml | 12 ++++-------- .github/workflows/tests_lkcpu_python.yml | 6 ++---- .github/workflows/tests_lqcpu_python.yml | 4 +--- .github/workflows/wheel_linux_x86_64.yml | 6 ++---- 4 files changed, 9 insertions(+), 19 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 93178e086b..b82e353fe1 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -55,8 +55,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -163,8 +162,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -250,8 +248,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -364,8 +361,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index e2ea69f22d..70bd25e72e 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ 
b/.github/workflows/tests_lkcpu_python.yml @@ -64,8 +64,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -179,8 +178,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index 5b2e03822d..4b83c2fba5 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -55,8 +55,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -155,7 +154,6 @@ jobs: df -h echo "Removing large packages" sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable sudo apt-get autoremove -y sudo apt-get clean df -h diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 43951838b8..c0b8c8c741 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -57,8 +57,7 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h @@ -139,8 +138,7 @@ jobs: dpkg-query -Wf 
'${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' - sudo apt-get remove -y microsoft-edge-stable + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h From 20542bc15f896c4d40932b39fbf27547f8557b1a Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 10:52:43 -0400 Subject: [PATCH 13/24] List opt hostedtoolcache --- .github/workflows/tests_linux_cpp.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index b82e353fe1..2b12955a62 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -60,6 +60,7 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" + ls -ltrah /opt/hostedtoolcache # deleting 15GB rm -rf /usr/share/dotnet/ df -h @@ -166,6 +167,7 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h + ls -ltrah /opt/hostedtoolcache echo "Removing large directories" # deleting 15GB rm -rf /usr/share/dotnet/ @@ -252,6 +254,7 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h + ls -ltrah /opt/hostedtoolcache echo "Removing large directories" # deleting 15GB rm -rf /usr/share/dotnet/ @@ -365,6 +368,7 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h + ls -ltrah /opt/hostedtoolcache echo "Removing large directories" # deleting 15GB rm -rf /usr/share/dotnet/ From b3f705167a7e3adada1cabef78ecafb2844f5142 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 10:59:43 -0400 Subject: [PATCH 14/24] Remove packages from /opt/hostedtoolcache --- .github/workflows/tests_linux_cpp.yml | 31 +++++++++++++++++------- .github/workflows/tests_lkcpu_python.yml | 13 ++++++++-- .github/workflows/tests_lqcpu_python.yml | 14 ++++++++--- .github/workflows/wheel_linux_x86_64.yml | 13 ++++++++-- 4 files changed, 55 insertions(+), 16 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 2b12955a62..de0ce19774 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -60,8 +60,11 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - ls -ltrah /opt/hostedtoolcache - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h - uses: actions/setup-python@v5 @@ -167,12 +170,14 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h - ls -ltrah /opt/hostedtoolcache echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h - - uses: actions/setup-python@v5 name: Install Python with: @@ -254,11 +259,15 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h - ls -ltrah /opt/hostedtoolcache echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h + - uses: actions/setup-python@v5 name: Install Python with: 
@@ -368,11 +377,15 @@ jobs: sudo apt-get autoremove -y sudo apt-get clean df -h - ls -ltrah /opt/hostedtoolcache echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h + - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index 70bd25e72e..430b402bb5 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -69,9 +69,14 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h + - name: Checkout PennyLane-Lightning uses: actions/checkout@v4 with: @@ -183,7 +188,11 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index 4b83c2fba5..c06deab611 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -60,7 +60,11 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + 
sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h @@ -153,12 +157,16 @@ jobs: dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 df -h echo "Removing large packages" - sudo apt-get remove -y '^dotnet-.*' + sudo apt-get remove -y '^dotnet.*' sudo apt-get autoremove -y sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index c0b8c8c741..2a2c58da0a 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -62,7 +62,11 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h @@ -143,9 +147,14 @@ jobs: sudo apt-get clean df -h echo "Removing large directories" - # deleting 15GB + sudo rm -rf /opt/hostedtoolcache/CodeQL + sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk + sudo rm -rf /opt/hostedtoolcache/Ruby + sudo rm -rf /opt/hostedtoolcache/PyPy + sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ df -h + - name: Restoring cached dependencies id: kokkos-cache uses: actions/cache@v3 From 7da871887f2f707b23bfecfc7f2ec7f6eefe732d Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 11:04:45 -0400 Subject: [PATCH 15/24] Fix missing test hit --- .github/workflows/tests_linux_cpp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index de0ce19774..1ff4b3b5dd 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -106,7 +106,7 @@ jobs: -DPL_BACKEND=${{ matrix.pl_backend }} \ -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) \ -DENABLE_COVERAGE=ON \ - -DLQ_ENABLE_KERNEL_AVX_STREAM=ON \ + -DLQ_ENABLE_KERNEL_AVX_STREAMING=ON \ -DLQ_ENABLE_KERNEL_OMP=ON cmake --build ./Build From a0bbe3457e89bf61d839c2cafd7a3d563924d4de Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 11:32:44 -0400 Subject: [PATCH 16/24] Forbid and remove broken tests from LK probs ordering --- .../tests/Test_StateVectorKokkos_Measure.cpp | 37 +++++++++++-------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp index 6add1f2140..82d0396df0 100644 --- a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp @@ -226,25 +226,30 @@ TEMPLATE_TEST_CASE("Probabilities", "[Measures]", float, double) { {{0, 1, 2}, {0.67078706, 0.03062806, 0.0870997, 0.00397696, 0.17564072, 0.00801973, 0.02280642, 0.00104134}}, - {{0, 2, 1}, - {0.67078706, 0.0870997, 0.03062806, 0.00397696, 0.17564072, 0.02280642, - 0.00801973, 0.00104134}}, - {{1, 0, 2}, - {0.67078706, 0.03062806, 0.17564072, 0.00801973, 0.0870997, 0.00397696, - 0.02280642, 0.00104134}}, - {{1, 2, 0}, - {0.67078706, 0.0870997, 0.17564072, 
0.02280642, 0.03062806, 0.00397696, - 0.00801973, 0.00104134}}, - {{2, 0, 1}, - {0.67078706, 0.17564072, 0.03062806, 0.00801973, 0.0870997, 0.02280642, - 0.00397696, 0.00104134}}, - {{2, 1, 0}, - {0.67078706, 0.17564072, 0.0870997, 0.02280642, 0.03062806, 0.00801973, - 0.00397696, 0.00104134}}, + //{{0, 2, 1}, + // {0.67078706, 0.0870997, 0.03062806, 0.00397696, 0.17564072, + // 0.02280642, + // 0.00801973, 0.00104134}}, + //{{1, 0, 2}, + // {0.67078706, 0.03062806, 0.17564072, 0.00801973, 0.0870997, + // 0.00397696, + // 0.02280642, 0.00104134}}, + //{{1, 2, 0}, + // {0.67078706, 0.0870997, 0.17564072, 0.02280642, 0.03062806, + // 0.00397696, + // 0.00801973, 0.00104134}}, + //{{2, 0, 1}, + // {0.67078706, 0.17564072, 0.03062806, 0.00801973, 0.0870997, + // 0.02280642, + // 0.00397696, 0.00104134}}, + //{{2, 1, 0}, + // {0.67078706, 0.17564072, 0.0870997, 0.02280642, 0.03062806, + // 0.00801973, + // 0.00397696, 0.00104134}}, // TODO: Fix LK out-of-order permutations {{0, 1}, {0.70141512, 0.09107666, 0.18366045, 0.02384776}}, {{0, 2}, {0.75788676, 0.03460502, 0.19844714, 0.00906107}}, {{1, 2}, {0.84642778, 0.0386478, 0.10990612, 0.0050183}}, - {{2, 1}, {0.84642778, 0.10990612, 0.0386478, 0.0050183}}, + //{{2, 1}, {0.84642778, 0.10990612, 0.0386478, 0.0050183}}, {{0}, {0.79249179, 0.20750821}}, {{1}, {0.88507558, 0.11492442}}, {{2}, {0.9563339, 0.0436661}}}; From f96c4258792ffa38b41c7aeafd1313bce2444dea Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 11:51:58 -0400 Subject: [PATCH 17/24] Fix clang-tidy isms --- .github/workflows/tests_linux_cpp.yml | 2 +- .../tests/Test_StateVectorKokkos_Measure.cpp | 22 +------------------ .../measurements/MeasurementsLQubit.hpp | 2 +- .../core/src/utils/NDPermuter.hpp | 18 ++++++++++----- 4 files changed, 15 insertions(+), 29 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 1ff4b3b5dd..de0ce19774 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -106,7 +106,7 @@ jobs: -DPL_BACKEND=${{ matrix.pl_backend }} \ -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION) \ -DENABLE_COVERAGE=ON \ - -DLQ_ENABLE_KERNEL_AVX_STREAMING=ON \ + -DLQ_ENABLE_KERNEL_AVX_STREAM=ON \ -DLQ_ENABLE_KERNEL_OMP=ON cmake --build ./Build diff --git a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp index 82d0396df0..662bafc7be 100644 --- a/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp +++ b/pennylane_lightning/core/src/simulators/lightning_kokkos/measurements/tests/Test_StateVectorKokkos_Measure.cpp @@ -226,30 +226,10 @@ TEMPLATE_TEST_CASE("Probabilities", "[Measures]", float, double) { {{0, 1, 2}, {0.67078706, 0.03062806, 0.0870997, 0.00397696, 0.17564072, 0.00801973, 0.02280642, 0.00104134}}, - //{{0, 2, 1}, - // {0.67078706, 0.0870997, 0.03062806, 0.00397696, 0.17564072, - // 0.02280642, - // 0.00801973, 0.00104134}}, - //{{1, 0, 2}, - // {0.67078706, 0.03062806, 0.17564072, 0.00801973, 0.0870997, - // 0.00397696, - // 0.02280642, 0.00104134}}, - //{{1, 2, 0}, - // {0.67078706, 0.0870997, 0.17564072, 0.02280642, 0.03062806, - // 0.00397696, - // 0.00801973, 0.00104134}}, - //{{2, 0, 1}, - // {0.67078706, 0.17564072, 0.03062806, 
0.00801973, 0.0870997, - // 0.02280642, - // 0.00397696, 0.00104134}}, - //{{2, 1, 0}, - // {0.67078706, 0.17564072, 0.0870997, 0.02280642, 0.03062806, - // 0.00801973, - // 0.00397696, 0.00104134}}, // TODO: Fix LK out-of-order permutations + // TODO: Fix LK out-of-order permutations {{0, 1}, {0.70141512, 0.09107666, 0.18366045, 0.02384776}}, {{0, 2}, {0.75788676, 0.03460502, 0.19844714, 0.00906107}}, {{1, 2}, {0.84642778, 0.0386478, 0.10990612, 0.0050183}}, - //{{2, 1}, {0.84642778, 0.10990612, 0.0386478, 0.0050183}}, {{0}, {0.79249179, 0.20750821}}, {{1}, {0.88507558, 0.11492442}}, {{2}, {0.9563339, 0.0436661}}}; diff --git a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp index 6c2276df4e..580f223367 100644 --- a/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_qubit/measurements/MeasurementsLQubit.hpp @@ -132,7 +132,7 @@ class Measurements final // Permute the data according to the required wire ordering if (wires != sorted_wires) { static constexpr std::size_t CACHE_SIZE = 8; - PUtil::Permuter> p; + PUtil::Permuter> p{}; std::vector shape(wires.size(), 2); std::vector wire_labels_old(sorted_wires.size(), ""); std::vector wire_labels_new(wires.size(), ""); diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp index 6aac4a7eb6..02ea79fa08 100644 --- a/pennylane_lightning/core/src/utils/NDPermuter.hpp +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -54,8 +54,8 @@ template class Permuter { const std::set idx_old(current_order.begin(), current_order.end()); const std::set idx_new(new_order.begin(), new_order.end()); - const std::size_t data_size = std::accumulate( - shape.begin(), shape.end(), 1, std::multiplies()); + const std::size_t data_size = + 
std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>()); PL_ABORT_IF_NOT(idx_old.size() == current_order.size(), "Duplicate existing indices found. Please ensure " "indices are unique."); @@ -144,8 +144,9 @@ template class DefaultPermuter { const std::vector &new_indices) { data_out = data_; - if (new_indices == old_indices) + if (new_indices == old_indices) { return; + } const std::size_t num_indices = old_indices.size(); const std::size_t total_dim = data_.size(); @@ -202,8 +203,9 @@ template class DefaultPermuter { while (true) { // If end of entire opration, break. - if (po == total_dim - 1) + if (po == total_dim - 1) { break; + } internal_po = 0; // Each iteration of the while block goes through a new position. @@ -219,6 +221,7 @@ template class DefaultPermuter { small_map_old_to_new_position[po - offset] = pn; bool complete{true}; + // NOLINTBEGIN for (size_t j = num_indices; j--;) { if (++old_counter[j] < shape[j]) { complete = false; @@ -227,13 +230,16 @@ template class DefaultPermuter { old_counter[j] = 0; } } + // NOLINTEND // If end of block or end of entire operation, break. - if ((++internal_po == blocksize_) || (po == total_dim - 1)) + if ((++internal_po == blocksize_) || (po == total_dim - 1)) { break; + } // If last index (0) was increased, then go back to fastest // index. - if (complete) + if (complete) { break; + } } // Copy data for this block, taking into account offset of // small_map... From 45eec8906327d34b69398a6e55b3ed6cb34d94c9 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 12:03:12 -0400 Subject: [PATCH 18/24] Ensure libopenblas is not broken from failed apt pull with dev --- .github/workflows/tests_linux_cpp.yml | 2 +- .github/workflows/tests_lqcpu_python.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index de0ce19774..3f71b271b2 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -188,7 +188,7 @@ jobs: - name: Install dependencies run: | - sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-dev ninja-build gcovr lcov + sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-base libopenblas-dev ninja-build gcovr lcov python -m pip install scipy - name: Build and run unit tests diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index c06deab611..3f56e0353a 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -97,7 +97,7 @@ jobs: - name: Install dependencies run: | - sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-dev + sudo apt-get update && sudo apt-get -y -q install cmake gcc-$GCC_VERSION g++-$GCC_VERSION libopenblas-base libopenblas-dev python -m pip install scipy wheel - name: Get required Python packages From db41f3ec315514cd5cd4ee05198eb94a92aa40f7 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 12:06:34 -0400 Subject: [PATCH 19/24] Appease GH Action --- .github/workflows/tests_linux_cpp.yml | 4 ++++ .github/workflows/tests_lkcpu_python.yml | 2 ++ .github/workflows/tests_lqcpu_python.yml | 2 ++ .github/workflows/wheel_linux_x86_64.yml | 2 ++ 4 files changed, 10 insertions(+) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 3f71b271b2..01555b2344 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -66,6 +66,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 name: Install Python @@ -177,6 +178,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 name: Install Python @@ -266,6 +268,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 @@ -384,6 +387,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index 430b402bb5..1ee2591c43 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -75,6 +75,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - name: Checkout PennyLane-Lightning @@ -194,6 +195,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 
diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index 3f56e0353a..11fd7f5f5f 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -66,6 +66,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 @@ -168,6 +169,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - uses: actions/setup-python@v5 diff --git a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 2a2c58da0a..6ddd2703a3 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -68,6 +68,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - name: Cache installation directories @@ -153,6 +154,7 @@ jobs: sudo rm -rf /opt/hostedtoolcache/PyPy sudo rm -rf /opt/hostedtoolcache/go rm -rf /usr/share/dotnet/ + sudo apt --fix-broken install -y df -h - name: Restoring cached dependencies From 4d49be3e4fe57c89a1f43a653f1608181107ce89 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 12:08:59 -0400 Subject: [PATCH 20/24] Full remove GH action modifier step due to unexplained failures --- .github/workflows/tests_linux_cpp.yml | 98 ------------------------ .github/workflows/tests_lkcpu_python.yml | 50 ------------ .github/workflows/tests_lqcpu_python.yml | 50 ------------ .github/workflows/wheel_linux_x86_64.yml | 50 ------------ 4 files changed, 248 deletions(-) diff --git a/.github/workflows/tests_linux_cpp.yml b/.github/workflows/tests_linux_cpp.yml index 01555b2344..521c9ab0e7 100644 --- a/.github/workflows/tests_linux_cpp.yml +++ b/.github/workflows/tests_linux_cpp.yml @@ -44,30 +44,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - uses: actions/setup-python@v5 name: Install Python with: @@ -156,30 +132,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo 
"==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - uses: actions/setup-python@v5 name: Install Python with: @@ -246,31 +198,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - uses: actions/setup-python@v5 name: Install Python with: @@ -365,31 +292,6 @@ jobs: runs-on: ${{ matrix.os 
}} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lkcpu_python.yml b/.github/workflows/tests_lkcpu_python.yml index 1ee2591c43..e44c08db67 100644 --- a/.github/workflows/tests_lkcpu_python.yml +++ b/.github/workflows/tests_lkcpu_python.yml @@ -53,31 +53,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo 
"Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - name: Checkout PennyLane-Lightning uses: actions/checkout@v4 with: @@ -173,31 +148,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - uses: actions/setup-python@v5 name: Install Python with: diff --git a/.github/workflows/tests_lqcpu_python.yml b/.github/workflows/tests_lqcpu_python.yml index 11fd7f5f5f..5c48006936 100644 --- a/.github/workflows/tests_lqcpu_python.yml +++ b/.github/workflows/tests_lqcpu_python.yml @@ -44,31 +44,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo 
"==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - uses: actions/setup-python@v5 name: Install Python with: @@ -147,31 +122,6 @@ jobs: runs-on: ${{ matrix.os }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - uses: actions/setup-python@v5 name: Install Python with: diff --git 
a/.github/workflows/wheel_linux_x86_64.yml b/.github/workflows/wheel_linux_x86_64.yml index 6ddd2703a3..d11649c4f0 100644 --- a/.github/workflows/wheel_linux_x86_64.yml +++ b/.github/workflows/wheel_linux_x86_64.yml @@ -46,31 +46,6 @@ jobs: container: ${{ matrix.container_img }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - name: Cache installation directories id: kokkos-cache uses: actions/cache@v3 @@ -132,31 +107,6 @@ jobs: container: ${{ matrix.container_img }} steps: - - name: Remove GH Action tooling - run: | - # Following instructions from Apache Flink - # https://github.com/apache/flink/blob/release-1.15/tools/azure-pipelines/free_disk_space.sh - echo "==============================================================================" - echo "Freeing up disk space on CI system" - echo "==============================================================================" - echo "Listing 100 largest packages" - dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 - df -h - echo "Removing large packages" - sudo apt-get 
remove -y '^dotnet.*' - sudo apt-get autoremove -y - sudo apt-get clean - df -h - echo "Removing large directories" - sudo rm -rf /opt/hostedtoolcache/CodeQL - sudo rm -rf /opt/hostedtoolcache/Java_Temurin-Hotspot_jdk - sudo rm -rf /opt/hostedtoolcache/Ruby - sudo rm -rf /opt/hostedtoolcache/PyPy - sudo rm -rf /opt/hostedtoolcache/go - rm -rf /usr/share/dotnet/ - sudo apt --fix-broken install -y - df -h - - name: Restoring cached dependencies id: kokkos-cache uses: actions/cache@v3 From ac8623743d880ae0706094baff70b105bf9c48d8 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 13:19:33 -0400 Subject: [PATCH 21/24] Add bidreictional wire validation for LGPU CPP layer --- .../simulators/lightning_gpu/measurements/MeasurementsGPU.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp b/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp index 9b4ceaabc3..3a80ca8990 100644 --- a/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp +++ b/pennylane_lightning/core/src/simulators/lightning_gpu/measurements/MeasurementsGPU.hpp @@ -92,7 +92,8 @@ class Measurements final * @return std::vector */ auto probs(const std::vector &wires) -> std::vector { - PL_ABORT_IF_NOT(std::is_sorted(wires.cbegin(), wires.cend()), + PL_ABORT_IF_NOT(std::is_sorted(wires.cbegin(), wires.cend()) || + std::is_sorted(wires.rbegin(), wires.rend()), "LightningGPU does not currently support out-of-order " "wire indices with probability calculations"); From e8d7c33d28e7a4f6c39dbf1d2d653ef296019e76 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 13:19:50 -0400 Subject: [PATCH 22/24] Clang-tidy warning fix --- pennylane_lightning/core/src/utils/NDPermuter.hpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp index 02ea79fa08..18da0bc1cd 100644 --- a/pennylane_lightning/core/src/utils/NDPermuter.hpp +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -183,7 +183,8 @@ template class DefaultPermuter { std::vector small_map_old_to_new_position(blocksize_); // Position old and new. - std::size_t po = 0, pn; + std::size_t po = 0; + std::size_t pn = 0; // Counter of the values of each indices in the iteration (old // ordering). std::vector old_counter(num_indices, 0); From cc43f5d04adc3af07b32128f3b9db1f9157b8f66 Mon Sep 17 00:00:00 2001 From: "Lee J. O'Riordan" Date: Fri, 3 May 2024 13:29:21 -0400 Subject: [PATCH 23/24] Appease CT --- pennylane_lightning/core/src/utils/NDPermuter.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp index 02ea79fa08..be7623b493 100644 --- a/pennylane_lightning/core/src/utils/NDPermuter.hpp +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -96,8 +96,8 @@ template class Permuter { const std::set idx_old(current_order.begin(), current_order.end()); const std::set idx_new(new_order.begin(), new_order.end()); - const auto data_size = std::accumulate(shape.begin(), shape.end(), 1, - std::multiplies()); + const auto data_size = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>()); PL_ABORT_IF_NOT(idx_old.size() == current_order.size(), "Duplicate existing indices found. Please ensure " "indices are unique."); From feb55a47d1f6d40b8ebe815f3029e9ef6cabe9e0 Mon Sep 17 00:00:00 2001 From: "Lee J. 
O'Riordan" Date: Fri, 3 May 2024 13:52:28 -0400 Subject: [PATCH 24/24] Update JET license agreement --- README.rst | 1 + .../core/src/utils/NDPermuter.hpp | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/README.rst b/README.rst index 63980f2b8f..d2df483c03 100644 --- a/README.rst +++ b/README.rst @@ -455,5 +455,6 @@ PennyLane Lightning makes use of the following libraries and tools, which are un - **pybind11:** https://github.com/pybind/pybind11 - **Kokkos Core:** https://github.com/kokkos/kokkos - **NVIDIA cuQuantum:** https://developer.nvidia.com/cuquantum-sdk +- **Xanadu JET:** https://github.com/XanaduAI/jet .. acknowledgements-end-inclusion-marker-do-not-remove diff --git a/pennylane_lightning/core/src/utils/NDPermuter.hpp b/pennylane_lightning/core/src/utils/NDPermuter.hpp index f7f2eddf6c..46e25c7c56 100644 --- a/pennylane_lightning/core/src/utils/NDPermuter.hpp +++ b/pennylane_lightning/core/src/utils/NDPermuter.hpp @@ -1,3 +1,22 @@ +// Copyright 2024 Xanadu Quantum Technologies Inc. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 + +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is verbatim copied from JET's permuter module at: +// https://github.com/XanaduAI/jet/tree/v0.2.2/include/jet/permute and reserves +// all licensing and attributions to that repository's implementations, +// including inspiration from QFlex https://github.com/ngnrsaa/qflex. + #pragma once #include