Skip to content

Commit

Permalink
obs wire col-major order -> row-major order
Browse files Browse the repository at this point in the history
  • Loading branch information
multiphaseCFD committed May 15, 2024
1 parent e3927fe commit 26451ca
Show file tree
Hide file tree
Showing 4 changed files with 101 additions and 121 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,6 @@ class TNCudaBase : public TensornetBase<Precision, Derived> {

std::shared_ptr<TNCudaGateCache<Precision>> gate_cache_;

cutensornetNetworkOperator_t obsOperator_;

public:
using PrecisionT = Precision;

Expand Down Expand Up @@ -94,15 +92,6 @@ class TNCudaBase : public TensornetBase<Precision, Derived> {
reinterpret_cast<int64_t *>(BaseType::getQubitDims().data()),
/* cudaDataType_t */ typeData_,
/* cutensornetState_t * */ &quantumState_));

PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateNetworkOperator(
/* const cutensornetHandle_t */ handle_.get(),
/* int32_t */ static_cast<int32_t>(BaseType::getNumQubits()),
/* const int64_t stateModeExtents */
reinterpret_cast<int64_t *>(
const_cast<size_t *>(BaseType::getQubitDims().data())),
/* cudaDataType_t */ typeData_,
/* cutensornetNetworkOperator_t */ &obsOperator_));
}

explicit TNCudaBase(const std::size_t numQubits, DevTag<int> dev_tag)
Expand All @@ -128,23 +117,12 @@ class TNCudaBase : public TensornetBase<Precision, Derived> {
reinterpret_cast<int64_t *>(BaseType::getQubitDims().data()),
/* cudaDataType_t */ typeData_,
/* cutensornetState_t * */ &quantumState_));

PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateNetworkOperator(
/* const cutensornetHandle_t */ handle_.get(),
/* int32_t */ static_cast<int32_t>(BaseType::getNumQubits()),
/* const int64_t stateModeExtents */
reinterpret_cast<int64_t *>(
const_cast<size_t *>(BaseType::getQubitDims().data())),
/* cudaDataType_t */ typeData_,
/* cutensornetNetworkOperator_t */ &obsOperator_));
}

~TNCudaBase() {
PL_CUTENSORNET_IS_SUCCESS(cutensornetDestroyState(quantumState_));
}

cutensornetNetworkOperator_t getTNOperator() { return obsOperator_; }

/**
* @brief Get the CUDA data type.
*
Expand Down Expand Up @@ -313,15 +291,15 @@ class TNCudaBase : public TensornetBase<Precision, Derived> {

    void get_final_state() { static_cast<Derived *>(this)->get_final_state(); }

ComplexT expval() {
ComplexT expval(cutensornetNetworkOperator_t obsOperator) {
ComplexT expectVal{0.0, 0.0}, stateNorm2{0.0, 0.0};

cutensornetStateExpectation_t expectation;

PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateExpectation(
/* const cutensornetHandle_t */ getTNCudaHandle(),
/* cutensornetState_t */ getQuantumState(),
/* cutensornetNetworkOperator_t */ obsOperator_,
/* cutensornetNetworkOperator_t */ obsOperator,
/* cutensornetStateExpectation_t * */ &expectation));

// Configure the computation of the specified quantum circuit
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,9 @@ template <class StateTensorT> class Measurements {
* @return Expectation value with respect to the given observable.
*/
auto expval(ObservableTNCuda<StateTensorT> &ob, StateTensorT &state_tensor)
-> ComplexT {
-> PrecisionT {
ob.appendTNOperator(state_tensor);
return state_tensor.expval();
return state_tensor.expval(ob.getTNOperator()).real();
}
};
} // namespace Pennylane::LightningTensor::TNCuda::Measures
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,7 @@ using namespace Pennylane::LightningTensor::TNCuda::Observables;
} // namespace
/// @endcond

TEMPLATE_TEST_CASE("[Identity]", "[StateVectorCudaManaged_Expval]", float,
double) {
TEMPLATE_TEST_CASE("[Identity]", "[MPSTNCuda_Expval]", float, double) {
using StateTensorT = MPSTNCuda<TestType>;
auto ONE = TestType(1);

Expand All @@ -53,47 +52,47 @@ TEMPLATE_TEST_CASE("[Identity]", "[StateVectorCudaManaged_Expval]", float,
mps_state.get_final_state();
auto ob = NamedObs<StateTensorT>("Identity", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res.real() == Approx(ONE));
CHECK(res == Approx(ONE));
}
}
/*
TEMPLATE_TEST_CASE("[PauliX]", "[StateVectorCudaManaged_Expval]", float,
double) {

TEMPLATE_TEST_CASE("[PauliX]", "[MPSTNCuda_Expval]", float, double) {
{
using StateVectorT = StateVectorCudaManaged<TestType>;
const std::size_t num_qubits = 3;
using StateTensorT = MPSTNCuda<TestType>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};

auto measure = Measurements<StateTensorT>();

auto ZERO = TestType(0);
auto ONE = TestType(1);

SECTION("Using expval") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}},
{{0}, {0, 1}, {1, 2}},
{{false}, {false}, {false}});
auto ob = NamedObs<StateVectorT>("PauliX", {0});
auto res = m.expval(ob);
mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}},
{{0}, {0, 1}, {1, 2}},
{{false}, {false}, {false}});
mps_state.get_final_state();
auto ob = NamedObs<StateTensorT>("PauliX", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == ZERO);
}

SECTION("Using expval: Plus states") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations({{"Hadamard"}, {"Hadamard"}, {"Hadamard"}},
{{0}, {1}, {2}}, {{false}, {false}, {false}});
auto ob = NamedObs<StateVectorT>("PauliX", {0});
auto res = m.expval(ob);
mps_state.applyOperations(
{{"Hadamard"}, {"Hadamard"}, {"Hadamard"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}});
mps_state.get_final_state();
auto ob = NamedObs<StateTensorT>("PauliX", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == Approx(ONE));
}

SECTION("Using expval: Minus states") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations(
mps_state.applyOperations(
{{"PauliX"},
{"Hadamard"},
{"PauliX"},
Expand All @@ -102,65 +101,63 @@ TEMPLATE_TEST_CASE("[PauliX]", "[StateVectorCudaManaged_Expval]", float,
{"Hadamard"}},
{{0}, {0}, {1}, {1}, {2}, {2}},
{{false}, {false}, {false}, {false}, {false}, {false}});
auto ob = NamedObs<StateVectorT>("PauliX", {0});
auto res = m.expval(ob);
mps_state.get_final_state();
auto ob = NamedObs<StateTensorT>("PauliX", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == -Approx(ONE));
}
}
}

TEMPLATE_TEST_CASE("[PauliY]", "[StateVectorCudaManaged_Expval]", float,
double) {
TEMPLATE_TEST_CASE("[PauliY]", "[MPSTNCuda_Expval]", float, double) {
{
using StateVectorT = StateVectorCudaManaged<TestType>;
const std::size_t num_qubits = 3;
using StateTensorT = MPSTNCuda<TestType>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};

auto measure = Measurements<StateTensorT>();

auto ZERO = TestType(0);
auto ONE = TestType(1);
auto PI = TestType(M_PI);

SECTION("Using expval") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}},
{{0}, {0, 1}, {1, 2}},
{{false}, {false}, {false}});
auto ob = NamedObs<StateVectorT>("PauliY", {0});
auto res = m.expval(ob);
mps_state.applyOperations({{"Hadamard"}, {"CNOT"}, {"CNOT"}},
{{0}, {0, 1}, {1, 2}},
{{false}, {false}, {false}});
auto ob = NamedObs<StateTensorT>("PauliY", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == ZERO);
}

SECTION("Using expval: Plus i states") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations({{"RX"}, {"RX"}, {"RX"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}},
{{-PI / 2}, {-PI / 2}, {-PI / 2}});
auto ob = NamedObs<StateVectorT>("PauliY", {0});
auto res = m.expval(ob);
mps_state.applyOperations({{"RX"}, {"RX"}, {"RX"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}},
{{-PI / 2}, {-PI / 2}, {-PI / 2}});
auto ob = NamedObs<StateTensorT>("PauliY", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == Approx(ONE));
}

SECTION("Using expval: Minus i states") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperations({{"RX"}, {"RX"}, {"RX"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}},
{{PI / 2}, {PI / 2}, {PI / 2}});
auto ob = NamedObs<StateVectorT>("PauliY", {0});
auto res = m.expval(ob);
mps_state.applyOperations({{"RX"}, {"RX"}, {"RX"}}, {{0}, {1}, {2}},
{{false}, {false}, {false}},
{{PI / 2}, {PI / 2}, {PI / 2}});
auto ob = NamedObs<StateTensorT>("PauliY", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == -Approx(ONE));
}
}
}
TEMPLATE_TEST_CASE("[PauliZ]", "[StateVectorCudaManaged_Expval]", float,
/*
TEMPLATE_TEST_CASE("[PauliZ]", "[MPSTNCuda_Expval]", float,
double) {
{
using StateVectorT = StateVectorCudaManaged<TestType>;
using StateTensorT = MPSTNCuda<TestType>;
using PrecisionT = StateVectorT::PrecisionT;
// Defining the statevector that will be measured.
Expand All @@ -176,23 +173,29 @@ TEMPLATE_TEST_CASE("[PauliZ]", "[StateVectorCudaManaged_Expval]", float,
}
}
}
*/

TEMPLATE_TEST_CASE("[Hadamard]", "[StateVectorCudaManaged_Expval]", float,
double) {
TEMPLATE_TEST_CASE("[Hadamard]", "[MPSTNCuda_Expval]", float, double) {
{
using StateVectorT = StateVectorCudaManaged<TestType>;
const std::size_t num_qubits = 3;
using StateTensorT = MPSTNCuda<TestType>;

std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

StateTensorT mps_state{num_qubits, maxBondDim};

auto measure = Measurements<StateTensorT>();

auto INVSQRT2 = TestType(0.707106781186547524401);

SECTION("Using expval") {
StateVectorT sv{num_qubits};
sv.initSV();
auto m = Measurements(sv);
sv.applyOperation("PauliX", {0});
auto ob = NamedObs<StateVectorT>("Hadamard", {0});
auto res = m.expval(ob);
mps_state.applyOperation("PauliX", {0});
mps_state.get_final_state();

auto ob = NamedObs<StateTensorT>("Hadamard", {0});
auto res = measure.expval(ob, mps_state);
CHECK(res == Approx(-INVSQRT2).epsilon(1e-7));
}
}
}
*/
}
Original file line number Diff line number Diff line change
Expand Up @@ -60,27 +60,22 @@ template <class StateTensorT> class ObservableTNCuda {
ObservableTNCuda &operator=(ObservableTNCuda &&) noexcept = default;

protected:
// cutensornetNetworkOperator_t obsOperator_{nullptr};
cutensornetNetworkOperator_t obsOperator_{nullptr};

/**
* @brief Apply the observable to the given statevector in place.
*/
/*
void createTNOperator(StateTensorT &state_tensor) {
// PL_ABORT_IF_NOT(
// obsOperator_ == nullptr,
// "The createTNOperator() method can be called only once.");

PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateNetworkOperator(
/-* const cutensornetHandle_t *-/ state_tensor.getTNCudaHandle(),
/-* int32_t *-/ static_cast<int32_t>(state_tensor.getNumQubits()),
/-* const int64_t stateModeExtents *-/
/* const cutensornetHandle_t */ state_tensor.getTNCudaHandle(),
/* int32_t */ static_cast<int32_t>(state_tensor.getNumQubits()),
/* const int64_t stateModeExtents */
reinterpret_cast<int64_t *>(
const_cast<size_t *>(state_tensor.getQubitDims().data())),
/-* cudaDataType_t *-/ state_tensor.getCudaDataType(),
/-* cutensornetNetworkOperator_t *-/ &obsOperator_));
/* cudaDataType_t */ state_tensor.getCudaDataType(),
/* cutensornetNetworkOperator_t */ &obsOperator_));
}
*/

private:
/**
Expand All @@ -95,13 +90,13 @@ template <class StateTensorT> class ObservableTNCuda {

public:
virtual ~ObservableTNCuda() {
// if (obsOperator_ != nullptr) {
// PL_CUTENSORNET_IS_SUCCESS(
// cutensornetDestroyNetworkOperator(obsOperator_));
// }
if (obsOperator_ != nullptr) {
PL_CUTENSORNET_IS_SUCCESS(
cutensornetDestroyNetworkOperator(obsOperator_));
}
}

// cutensornetNetworkOperator_t getTNOperator() { return obsOperator_; }
cutensornetNetworkOperator_t getTNOperator() { return obsOperator_; }

virtual void appendTNOperator(StateTensorT &state_tensor) = 0;

Expand Down Expand Up @@ -207,12 +202,7 @@ class NamedObs final : public ObservableTNCuda<StateTensorT> {

wires_int_ = std::vector<int32_t>(wires_.size());

std::transform(wires_.begin(), wires_.end(), wires_int_.begin(),
[](size_t x) { return static_cast<int32_t>(x); });

numStateModes_.push_back(static_cast<int32_t>(wires_.size()));

stateModes_.push_back(wires_int_.data());
}

~NamedObs() {}
Expand All @@ -229,7 +219,16 @@ class NamedObs final : public ObservableTNCuda<StateTensorT> {
}

void appendTNOperator(StateTensorT &state_tensor) {
// this->createTNOperator(state_tensor);
if (this->obsOperator_ == nullptr) {
this->createTNOperator(state_tensor);
std::transform(wires_.begin(), wires_.end(), wires_int_.begin(),
[&](size_t x) {
return static_cast<int32_t>(
state_tensor.getNumQubits() - x - 1);
});

stateModes_.push_back(wires_int_.data());
}

auto &&par = (params_.empty()) ? std::vector<PrecisionT>{0.0} : params_;

Expand All @@ -250,7 +249,7 @@ class NamedObs final : public ObservableTNCuda<StateTensorT> {

PL_CUTENSORNET_IS_SUCCESS(cutensornetNetworkOperatorAppendProduct(
/* const cutensornetHandle_t */ state_tensor.getTNCudaHandle(),
/* cutensornetNetworkOperator_t */ state_tensor.getTNOperator(),
/* cutensornetNetworkOperator_t */ this->getTNOperator(),
/* cuDoubleComplex coefficient*/ cuDoubleComplex{1, 0.0},
/* int32_t numTensors */ 1,
/* const int32_t numStateModes[] */ numStateModes_.data(),
Expand Down

0 comments on commit 26451ca

Please sign in to comment.