Skip to content

Commit

Permalink
set computeState() as protected & make format
Browse files — browse the repository at this point in the history
  • Loading branch information
multiphaseCFD committed May 2, 2024
1 parent 5445bbe commit e606691
Show file tree
Hide file tree
Showing 5 changed files with 30 additions and 19 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,8 @@ class CutnBase : public TensornetBase<Precision, Derived> {
*
* @return std::size_t
*/
std::size_t getWorkSpaceMemorySize(cutensornetWorkspaceDescriptor_t &workDesc) {
std::size_t
getWorkSpaceMemorySize(cutensornetWorkspaceDescriptor_t &workDesc) {
int64_t worksize{0};

PL_CUTENSORNET_IS_SUCCESS(cutensornetWorkspaceGetMemorySize(
Expand Down Expand Up @@ -147,6 +148,7 @@ class CutnBase : public TensornetBase<Precision, Derived> {
/* int64_t */ worksize));
}

protected:
/**
* @brief Save quantumState information to data provided by a user
*
Expand Down Expand Up @@ -179,7 +181,8 @@ class CutnBase : public TensornetBase<Precision, Derived> {
"Insufficient workspace size on Device!");

const std::size_t d_scratch_length = worksize / sizeof(std::size_t);
DataBuffer<std::size_t, int> d_scratch(d_scratch_length, getDevTag(), true);
DataBuffer<std::size_t, int> d_scratch(d_scratch_length, getDevTag(),
true);

setWorkSpaceMemory(
workDesc, reinterpret_cast<void *>(d_scratch.getData()), worksize);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,9 @@ class MPSCutn final : public CutnBase<Precision, MPSCutn<Precision>> {
*
* @return std::size_t
*/
[[nodiscard]] auto getMaxBondDim() const -> std::size_t { return maxBondDim_; };
/**
 * @brief Get the maximum bond dimension of the MPS.
 *
 * @return std::size_t Maximum bond dimension (`maxBondDim_`).
 */
[[nodiscard]] auto getMaxBondDim() const -> std::size_t {
    return maxBondDim_;
} // dropped redundant ';' after member-function body (-Wextra-semi)

/**
* @brief Get a vector of pointers to extents of each site
Expand Down Expand Up @@ -207,7 +209,7 @@ class MPSCutn final : public CutnBase<Precision, MPSCutn<Precision>> {
static_cast<int64_t>(std::size_t{1} << BaseType::getNumQubits()));
output_extentsPtr.emplace_back(extent_int64.data());

BaseType::computeState(output_extentsPtr, output_tensorPtr);
this->computeState(output_extentsPtr, output_tensorPtr);

std::vector<ComplexT> results(output_extent.front());
output_tensor.CopyGpuDataToHost(results.data(), results.size());
Expand All @@ -232,15 +234,15 @@ class MPSCutn final : public CutnBase<Precision, MPSCutn<Precision>> {
{BaseType::getQubitDims()[i], maxBondDim_});
} else if (i == BaseType::getNumQubits() - 1) {
// Rightmost site (shared mode, state mode)
localSiteModes =
std::vector<std::size_t>({i + BaseType::getNumQubits() - 1, i});
localSiteModes = std::vector<std::size_t>(
{i + BaseType::getNumQubits() - 1, i});
localSiteExtents = std::vector<std::size_t>(
{maxBondDim_, BaseType::getQubitDims()[i]});
} else {
// Interior sites (state mode, state mode, shared mode)
localSiteModes =
std::vector<std::size_t>({i + BaseType::getNumQubits() - 1, i,
i + BaseType::getNumQubits()});
std::vector<std::size_t>({i + BaseType::getNumQubits() - 1,
i, i + BaseType::getNumQubits()});
localSiteExtents = std::vector<std::size_t>(
{maxBondDim_, BaseType::getQubitDims()[i], maxBondDim_});
}
Expand All @@ -253,8 +255,9 @@ class MPSCutn final : public CutnBase<Precision, MPSCutn<Precision>> {
// cutensornet backend
std::vector<int64_t> siteExtents_int64(sitesExtents_[i].size());
std::transform(sitesExtents_[i].begin(), sitesExtents_[i].end(),
siteExtents_int64.begin(),
[](std::size_t x) { return static_cast<int64_t>(x); });
siteExtents_int64.begin(), [](std::size_t x) {
return static_cast<int64_t>(x);
});

sitesExtents_int64_.push_back(siteExtents_int64);
sitesExtentsPtr_int64_.push_back(sitesExtents_int64_.back().data());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,8 @@ template <class Precision, class Derived> class TensornetBase {
public:
TensornetBase() = delete;

explicit TensornetBase(const std::size_t numQubits) : numQubits_(numQubits) {
/**
 * @brief Construct the base with `numQubits` sites, each with local
 * dimension 2 (qubit).
 *
 * @param numQubits Number of qubits in the tensor network.
 */
explicit TensornetBase(const std::size_t numQubits)
    : numQubits_(numQubits) {
    // assign() sizes and fills in one call, replacing the previous
    // resize() + std::fill() two-step with identical behavior.
    qubitDims_.assign(numQubits, std::size_t{2});
}
Expand All @@ -44,7 +45,8 @@ template <class Precision, class Derived> class TensornetBase {
*
* @return const std::vector<std::size_t> &
*/
[[nodiscard]] auto getQubitDims() const -> const std::vector<std::size_t> & {
/**
 * @brief Access the local dimension of each qubit site.
 *
 * @return const std::vector<std::size_t> & Per-site dimensions (all 2).
 */
[[nodiscard]] auto getQubitDims() const
    -> const std::vector<std::size_t> & {
    return qubitDims_;
} // dropped redundant ';' after member-function body (-Wextra-semi)

Expand All @@ -62,6 +64,8 @@ template <class Precision, class Derived> class TensornetBase {
*
* @return std::size_t
*/
[[nodiscard]] auto getNumQubits() const -> std::size_t { return numQubits_; };
/**
 * @brief Get the number of qubits in the tensor network.
 *
 * @return std::size_t Number of qubits (`numQubits_`).
 */
[[nodiscard]] auto getNumQubits() const -> std::size_t {
    return numQubits_;
} // dropped redundant ';' after member-function body (-Wextra-semi)
};
} // namespace Pennylane::LightningTensor::Cutn
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,8 @@ class CudaTensor final : public TensorBase<PrecisionT, CudaTensor<PrecisionT>> {
using CFP_t = decltype(cuUtil::getCudaType(PrecisionT{}));

CudaTensor(const std::size_t rank, const std::vector<std::size_t> &modes,
const std::vector<std::size_t> &extents, const DevTag<int> &dev_tag,
bool device_alloc = true)
const std::vector<std::size_t> &extents,
const DevTag<int> &dev_tag, bool device_alloc = true)
: TensorBase<PrecisionT, CudaTensor<PrecisionT>>(rank, modes, extents),
data_buffer_{std::make_shared<DataBuffer<CFP_t>>(
BaseType::getLength(), dev_tag, device_alloc)} {}
Expand All @@ -63,7 +63,8 @@ class CudaTensor final : public TensorBase<PrecisionT, CudaTensor<PrecisionT>> {
* @param sv Complex data pointer to receive data from device.
*/
inline void CopyGpuDataToHost(std::complex<PrecisionT> *host_sv,
std::size_t length, bool async = false) const {
std::size_t length,
bool async = false) const {
PL_ABORT_IF_NOT(BaseType::getLength() == length,
"Sizes do not match for Host and GPU data");
data_buffer_->CopyGpuDataToHost(host_sv, length, async);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,9 @@ TEMPLATE_PRODUCT_TEST_CASE("CudaTensor::Constructibility",
using TensorT = TestType;

SECTION("TensorT<TestType>") { REQUIRE(!std::is_constructible_v<TensorT>); }
SECTION(
"TensorT<TestType> {const std::size_t, const std::vector<std::size_t> &, const "
"std::vector<std::size_t>&, DevTag<int> &}") {
SECTION("TensorT<TestType> {const std::size_t, const "
"std::vector<std::size_t> &, const "
"std::vector<std::size_t>&, DevTag<int> &}") {
REQUIRE(std::is_constructible_v<
TensorT, const std::size_t, const std::vector<std::size_t> &,
const std::vector<std::size_t> &, DevTag<int> &>);
Expand Down

0 comments on commit e606691

Please sign in to comment.