Skip to content

Commit

Permalink
Merge branch 'add_MPSCutn_basis' into add_LTensor_nonParam_gate
Browse files Browse the repository at this point in the history
  • Loading branch information
multiphaseCFD committed May 9, 2024
2 parents 10b041f + 03b0434 commit 881004d
Show file tree
Hide file tree
Showing 17 changed files with 1,382 additions and 85 deletions.
10 changes: 8 additions & 2 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@
* Update Linux wheels to use manylinux_2_28 images.
[(#667)](https://github.com/PennyLaneAI/pennylane-lightning/pull/667)

* Add support for `qml.expval` and `qml.var` in the `lightning.tensor` device for the `quimb` interface and the MPS method.
[(#686)](https://github.com/PennyLaneAI/pennylane-lightning/pull/686)

### Documentation

### Bug fixes
Expand All @@ -23,14 +26,17 @@

This release contains contributions from (in alphabetical order):

Amintor Dusko, Vincent Michaud-Rioux, Shuli Shu
Amintor Dusko, Pietropaolo Frisoni, Vincent Michaud-Rioux, Shuli Shu

---

# Release 0.36.0

### New features since last release

* Add `cutensornet` backed `MPS` C++ layer to `lightning.tensor`.
[(#704)](https://github.com/PennyLaneAI/pennylane-lightning/pull/704)

* Add Python class for the `lightning.tensor` device which uses the new device API and the interface for `quimb` based on the MPS method.
[(#671)](https://github.com/PennyLaneAI/pennylane-lightning/pull/671)

Expand Down Expand Up @@ -185,7 +191,7 @@ Amintor Dusko, Vincent Michaud-Rioux, Shuli Shu

This release contains contributions from (in alphabetical order):

Ali Asadi, Amintor Dusko, Thomas Germain, Christina Lee, Erick Ochoa Lopez, Vincent Michaud-Rioux, Rashid N H M, Lee James O'Riordan, Mudit Pandey, Shuli Shu
Ali Asadi, Amintor Dusko, Pietropaolo Frisoni, Thomas Germain, Christina Lee, Erick Ochoa Lopez, Vincent Michaud-Rioux, Rashid N H M, Lee James O'Riordan, Mudit Pandey, Shuli Shu

---

Expand Down
15 changes: 9 additions & 6 deletions .github/workflows/tests_lmps_tncuda_cpp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ jobs:
pl_tensor_backend: ["cutensornet"]
cuda_version: ["12"]

name: C++ tests (Lightning-Tensor-MPS-TNCuda)
name: C++ Tests (${{ matrix.pl_backend }}, method-${{ matrix.pl_tensor_method }}, backend-${{ matrix.pl_tensor_backend }}, cuda-${{ matrix.cuda_version }})
runs-on:
- ${{ matrix.os }}
- self-hosted
Expand All @@ -95,21 +95,27 @@ jobs:
name: Install Python
with:
python-version: '3.9'


# Since the self-hosted runner can be re-used, it is best to set up all package
# installations in a virtual environment that gets cleaned at the end of each workflow run
- name: Setup Python virtual environment
id: setup_venv
env:
VENV_NAME: ${{ github.workspace }}/venv_${{ steps.setup_python.outputs.python-version }}_${{ github.sha }}
run: |
# Clear any pre-existing venvs
rm -rf venv_*
# Create new venv for this workflow_run
python --version
python -m venv ${{ env.VENV_NAME }}
# Add the venv to PATH for subsequent steps
echo ${{ env.VENV_NAME }}/bin >> $GITHUB_PATH
# Adding venv name as an output for subsequent steps to reference if needed
echo "venv_name=${{ env.VENV_NAME }}" >> $GITHUB_OUTPUT
- name: Display Python-Path
id: python_path
run: |
Expand Down Expand Up @@ -185,8 +191,5 @@ jobs:
if: always()
run: |
rm -rf ${{ steps.setup_venv.outputs.venv_name }}
rm -rf *
rm -rf .git
rm -rf .gitignore
rm -rf .github
rm -rf * .git .gitignore .github
pip cache purge
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.37.0-dev3"
__version__ = "0.37.0-dev4"
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@ endif()
target_include_directories(${PL_BACKEND}_tensor INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/base)
target_link_libraries(${PL_BACKEND}_tensor INTERFACE tensorBase lightning_utils lightning_compile_options lightning_external_libs)

set_property(TARGET ${PL_BACKEND}_tensor PROPERTY POSITION_INDEPENDENT_CODE ON)
set_property(TARGET ${PL_BACKEND}_tensor PROPERTY POSITION_INDEPENDENT_CODE ON)
Original file line number Diff line number Diff line change
Expand Up @@ -33,21 +33,20 @@ namespace Pennylane::LightningTensor {
*/
template <class PrecisionT, class Derived> class TensorBase {
private:
std::size_t rank_; // A rank N tensor has N modes
std::size_t length_; // Number of elements
std::vector<std::size_t> modes_; // modes for contraction identify
std::vector<std::size_t> extents_; // Number of elements in each mode
const std::size_t rank_; // A rank N tensor has N modes
std::size_t length_; // Number of elements
const std::vector<std::size_t> modes_; // modes used to identify contractions
const std::vector<std::size_t> extents_; // Number of elements in each mode

public:
TensorBase(std::size_t rank, const std::vector<std::size_t> &modes,
const std::vector<std::size_t> &extents)
explicit TensorBase(std::size_t rank, const std::vector<std::size_t> &modes,
const std::vector<std::size_t> &extents)
: rank_(rank), modes_(modes), extents_(extents) {
PL_ABORT_IF_NOT(rank_ == extents_.size(),
"Please check if rank or extents are set correctly.");
length_ = 1;
for (auto extent : extents) {
length_ *= extent;
}
length_ = std::accumulate(extents.begin(), extents.end(),
std::size_t{1}, std::multiplies<>());
}

~TensorBase() {}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,10 @@ class TensorCuda final : public TensorBase<PrecisionT, TensorCuda<PrecisionT>> {
using BaseType = TensorBase<PrecisionT, TensorCuda>;
using CFP_t = decltype(cuUtil::getCudaType(PrecisionT{}));

TensorCuda(const std::size_t rank, const std::vector<std::size_t> &modes,
const std::vector<std::size_t> &extents,
const DevTag<int> &dev_tag, bool device_alloc = true)
explicit TensorCuda(const std::size_t rank,
const std::vector<std::size_t> &modes,
const std::vector<std::size_t> &extents,
const DevTag<int> &dev_tag, bool device_alloc = true)
: TensorBase<PrecisionT, TensorCuda<PrecisionT>>(rank, modes, extents),
data_buffer_{std::make_shared<DataBuffer<CFP_t>>(
BaseType::getLength(), dev_tag, device_alloc)} {}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,4 +67,4 @@ TEMPLATE_TEST_CASE("TensorCuda::baseMethods", "[TensorCuda]", float, double) {
SECTION("getExtents()") { CHECK(tensor.getExtents() == extents); }

SECTION("getLength()") { CHECK(tensor.getLength() == length); }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,6 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
using BaseType = TNCudaBase<Precision, MPSTNCuda>;

MPSStatus MPSInitialized_ = MPSStatus::MPSInitNotSet;
MPSStatus MPSFinalized_ = MPSStatus::MPSFinalizedNotSet;

const std::size_t maxBondDim_;

Expand Down Expand Up @@ -161,11 +160,6 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
"Please ensure all elements of a basis state should be "
"either 0 or 1.");

PL_ABORT_IF(MPSInitialized_ == MPSStatus::MPSInitSet,
"setBasisState() can be called only once.");

MPSInitialized_ = MPSStatus::MPSInitSet;

CFP_t value_cu =
Pennylane::LightningGPU::Util::complexToCu<ComplexT>({1.0, 0.0});

Expand All @@ -186,8 +180,10 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
&value_cu, sizeof(CFP_t), cudaMemcpyHostToDevice));
}

updateQuantumStateMPS_(getSitesExtentsPtr().data(),
getTensorsDataPtr().data());
if (MPSInitialized_ == MPSStatus::MPSInitNotSet) {
MPSInitialized_ = MPSStatus::MPSInitSet;
updateQuantumStateMPS_();
}
};

/**
Expand Down Expand Up @@ -314,19 +310,17 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
* @brief Update quantumState (cutensornetState_t) with data provided by a
* user
*
* @param extentsIn Extents of each sites
* @param tensorsIn Pointer to tensors provided by a user
*/
void updateQuantumStateMPS_(const int64_t *const *extentsIn,
uint64_t **tensorsIn) {
void updateQuantumStateMPS_() {
PL_CUTENSORNET_IS_SUCCESS(cutensornetStateInitializeMPS(
/*const cutensornetHandle_t */ BaseType::getTNCudaHandle(),
/*cutensornetState_t*/ BaseType::getQuantumState(),
/*cutensornetBoundaryCondition_t */
CUTENSORNET_BOUNDARY_CONDITION_OPEN,
/*const int64_t *const* */ extentsIn,
/*const int64_t *const* */ getSitesExtentsPtr().data(),
/*const int64_t *const* */ nullptr,
/*void ** */ reinterpret_cast<void **>(tensorsIn)));
/*void ** */
reinterpret_cast<void **>(getTensorsDataPtr().data())));
}
};
} // namespace Pennylane::LightningTensor::TNCuda
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,29 @@ TEMPLATE_TEST_CASE("MPSTNCuda::SetBasisStates() & reset()", "[MPSTNCuda]",
CHECK(expected_state ==
Pennylane::Util::approx(mps_state.getDataVector()));
}

SECTION("Test different bondDim and different basisstate & reset()") {
std::size_t bondDim = GENERATE(2, 3, 4, 5);
std::size_t stateIdx = GENERATE(0, 1, 2, 3, 4, 5, 6, 7);
std::size_t num_qubits = 3;
std::size_t maxBondDim = bondDim;

MPSTNCuda<TestType> mps_state{num_qubits, maxBondDim};

mps_state.setBasisState(basisStates[stateIdx]);

mps_state.reset();

std::vector<std::complex<TestType>> expected_state(
size_t{1} << num_qubits, std::complex<TestType>({0.0, 0.0}));

std::size_t index = 0;

expected_state[index] = {1.0, 0.0};

CHECK(expected_state ==
Pennylane::Util::approx(mps_state.getDataVector()));
}
}

TEMPLATE_TEST_CASE("MPSTNCuda::getDataVector()", "[MPSTNCuda]", float, double) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,6 @@ enum class MPSStatus : uint32_t {
BEGIN = 0,
MPSInitNotSet = 0,
MPSInitSet,
MPSFinalizedNotSet,
MPSFinalizedSet,
END
};

Expand Down
Loading

0 comments on commit 881004d

Please sign in to comment.