Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add GlobalPhase to all devices #579

Merged
merged 73 commits into from
Feb 15, 2024
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
Show all changes
73 commits
Select commit Hold shift + click to select a range
efdd9a1
Add globalphase in LQ C++.
vincentmr Dec 13, 2023
7bbd3e0
Add GlobalPhase in supported ops and Grover test.
vincentmr Dec 13, 2023
1ebcd9c
Add global phase in L-Kokkos.
vincentmr Dec 13, 2023
1b864fa
Add global phase in L-GPU.
vincentmr Dec 13, 2023
4ac28c6
Auto update version
github-actions[bot] Dec 13, 2023
c4be3f4
Add workaround applyControlledGlobalPhase in L-Kokkos.
vincentmr Dec 13, 2023
ce18379
Add global_phase_diagonal to circumvent matrix method which takes too…
vincentmr Dec 14, 2023
e7ace0b
Add C(GlobalPhase) in L-GPU.
vincentmr Dec 14, 2023
43aa0d8
Merge branch 'master' into feature/lq_globalphase
vincentmr Dec 14, 2023
117c163
Auto update version
github-actions[bot] Dec 14, 2023
3ba8cb8
WIP (debug) [skip ci].
vincentmr Dec 14, 2023
fd01b13
Fix L-GPU gate cache bindings. Add gate tests.
vincentmr Dec 14, 2023
20621a4
Auto update version
github-actions[bot] Dec 14, 2023
6778176
Merge branch 'master' into feature/lq_globalphase
vincentmr Dec 14, 2023
e4b1379
Fix op1 cond for diagqubitU
vincentmr Dec 14, 2023
0a5175c
Merge branch 'master' into feature/lq_globalphase
vincentmr Jan 16, 2024
f32bbf4
Auto update version
github-actions[bot] Jan 16, 2024
5c233ef
trigger ci
vincentmr Jan 16, 2024
b57c8a5
Merge remote-tracking branch 'origin/master' into feature/lq_globalphase
vincentmr Feb 8, 2024
d81b6e9
Auto update version
github-actions[bot] Feb 8, 2024
6b1376e
Simplify param gate tests.
vincentmr Feb 8, 2024
e65caba
Fix test_gate_unitary_correct test.
vincentmr Feb 9, 2024
8de173b
Add catch when operation not found in LK and tests for GlobalPhase.
vincentmr Feb 9, 2024
f5152a7
Add C(GlobalPhase) C++ tests for LK.
vincentmr Feb 9, 2024
96d9e68
Add/fix N-controlled GlobalPhase in LQubit/LKokkos and add C++/Python…
vincentmr Feb 9, 2024
6a19e62
Update changelog.
vincentmr Feb 9, 2024
b6efb3e
Fix controlled global phase in L-GPU and add C++ tests.
vincentmr Feb 9, 2024
d5a3dcc
Fix edge case with C(GlobalPhase).
vincentmr Feb 9, 2024
f582063
Decorate conj/cmul with __device__ in CUDA helpers and import in initSV.
vincentmr Feb 9, 2024
08b06df
Update pennylane_lightning/core/src/simulators/lightning_gpu/utils/cu…
vincentmr Feb 12, 2024
b4a39d1
Update pennylane_lightning/core/src/simulators/lightning_gpu/utils/cu…
vincentmr Feb 12, 2024
737338a
Update datetime.
vincentmr Feb 12, 2024
9bba7bf
Format and fix wires in C(GP).
vincentmr Feb 12, 2024
ecd9c25
Fix format.
vincentmr Feb 12, 2024
8850dea
Revert PL_ASSERT removal
vincentmr Feb 12, 2024
3fdcbb6
Add adjoint-diff support for GlobalPhase.
vincentmr Feb 12, 2024
37dca4c
Add tests for C(Hadamard, S, T).
vincentmr Feb 12, 2024
8a42998
Merge branch 'master' into feature/lq_globalphase
vincentmr Feb 12, 2024
d811cee
Auto update version
github-actions[bot] Feb 12, 2024
c87aa8a
trigger ci
vincentmr Feb 12, 2024
d9411c4
Update gate_cache list
vincentmr Feb 12, 2024
1850bd6
Add controlled-op dynamic dispatcher overloads and tests for controll…
vincentmr Feb 12, 2024
05ce5f4
Merge branch 'master' into feature/lq_globalphase
vincentmr Feb 12, 2024
afc18d9
Instantiate RNGs in each tests at C++ layer.
vincentmr Feb 12, 2024
dbb2b7c
Instantiate RNGs in each tests at C++ layer.
vincentmr Feb 12, 2024
4140037
Auto update version
github-actions[bot] Feb 12, 2024
9c6ca4a
Instantiate RNGs in each tests at C++ layer.
vincentmr Feb 12, 2024
696afad
Small refactor of DynDispatcher.
vincentmr Feb 13, 2024
3ead6f6
Add some more C++ test of controlled-ops.
vincentmr Feb 13, 2024
fedc7c2
Try building doc with cuda-12
vincentmr Feb 13, 2024
333b87e
Try this.
vincentmr Feb 13, 2024
b81fcef
No sudo
vincentmr Feb 13, 2024
20fb91a
Try runfile.
vincentmr Feb 13, 2024
32e6006
--silent install
vincentmr Feb 13, 2024
b70cd8a
cat /tmp/cuda-installer.log
vincentmr Feb 13, 2024
3483d77
apt install network deb
vincentmr Feb 13, 2024
dbd2b0c
revert
vincentmr Feb 13, 2024
757490b
ubuntu-20.04
vincentmr Feb 13, 2024
6015352
Install gcc-10
vincentmr Feb 13, 2024
a74d6f3
Revert
vincentmr Feb 13, 2024
d2a56a1
Revert all to 3ead6f6e90a733
vincentmr Feb 13, 2024
7b6241c
Merge branch 'master' into feature/lq_globalphase
vincentmr Feb 14, 2024
cd5e3fa
Auto update version
github-actions[bot] Feb 14, 2024
07f9aaf
Comment triggers.
vincentmr Feb 14, 2024
57fb73e
provide --toolkitpath=cuda-12.0
vincentmr Feb 14, 2024
5e85e64
abs path
vincentmr Feb 14, 2024
5e10db0
export CUDAToolkit_ROOT
vincentmr Feb 14, 2024
3444718
CMAKE_ARGS=
vincentmr Feb 14, 2024
e309e33
export PATH
vincentmr Feb 14, 2024
e7d39ee
Revert triggers.
vincentmr Feb 14, 2024
15610f1
Fix test_controlled_globalphase device
vincentmr Feb 14, 2024
60e4def
Update requirements-dev.txt
vincentmr Feb 15, 2024
95f4f2a
Improve comment about global_phase_diagonal in C++ tests.
vincentmr Feb 15, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions pennylane_lightning/core/_serialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -394,3 +394,18 @@ def get_wires(operation, single_op):
controlled_wires,
controlled_values,
), uses_stateprep


def global_phase_diagonal(par, wires, controls, control_values):
    """Build the diagonal of a C(GlobalPhase) operator.

    Args:
        par (float): global-phase parameter; every selected amplitude is
            multiplied by ``exp(-1j * par)``.
        wires (Sequence[int]): wires the operator acts on; only the count
            matters, the diagonal has ``2 ** len(wires)`` entries.
        controls (Sequence[int]): control-wire indices.
        control_values (Sequence[int]): required bit (0 or 1) for each
            control wire, aligned with ``controls``.

    Returns:
        numpy.ndarray: complex128 diagonal of length ``2 ** len(wires)``.
    """
    dim = 2 ** len(wires)
    diag = np.ones(dim, dtype=np.complex128)
    # Walk the controls from the highest wire index down: each np.take below
    # drops one axis, so only the smaller (still valid) axis indices remain.
    ordered = sorted(zip(controls, control_values), key=lambda cv: cv[0], reverse=True)
    # Tensor of flat state indices with one binary axis per wire.
    idx = np.arange(dim, dtype=np.int64).reshape((2,) * len(wires))
    for wire, value in ordered:
        idx = np.take(idx, np.array(int(value)), wire)
    # Remaining indices are exactly the states where all controls are met.
    diag[idx.ravel()] = np.exp(-1j * par)
    return diag
AmintorDusko marked this conversation as resolved.
Show resolved Hide resolved
2 changes: 1 addition & 1 deletion pennylane_lightning/core/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,4 +16,4 @@
Version number (major.minor.patch[-label])
"""

__version__ = "0.34.0-dev20"
__version__ = "0.34.0-dev21"
7 changes: 5 additions & 2 deletions pennylane_lightning/core/src/gates/Constant.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ namespace Pennylane::Gates::Constant {
/**
* @brief List of multi-qubit gates
*/
[[maybe_unused]] constexpr std::array multi_qubit_gates{GateOperation::MultiRZ};
[[maybe_unused]] constexpr std::array multi_qubit_gates{
GateOperation::MultiRZ, GateOperation::GlobalPhase};
/**
* @brief List of multi-qubit generators
*/
Expand Down Expand Up @@ -78,7 +79,8 @@ using GateView = typename std::pair<GateOperation, std::string_view>;
GateView{GateOperation::DoubleExcitation, "DoubleExcitation"},
GateView{GateOperation::DoubleExcitationMinus, "DoubleExcitationMinus"},
GateView{GateOperation::DoubleExcitationPlus, "DoubleExcitationPlus"},
GateView{GateOperation::MultiRZ, "MultiRZ"}};
GateView{GateOperation::MultiRZ, "MultiRZ"},
GateView{GateOperation::GlobalPhase, "GlobalPhase"}};

using CGateView = typename std::pair<ControlledGateOperation, std::string_view>;
[[maybe_unused]] constexpr std::array controlled_gate_names = {
Expand Down Expand Up @@ -336,6 +338,7 @@ using GateNParams = typename std::pair<GateOperation, size_t>;
GateNParams{GateOperation::DoubleExcitationPlus, 1},
GateNParams{GateOperation::CSWAP, 0},
GateNParams{GateOperation::MultiRZ, 1},
GateNParams{GateOperation::GlobalPhase, 1},
};

/**
Expand Down
1 change: 1 addition & 0 deletions pennylane_lightning/core/src/gates/GateOperation.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ enum class GateOperation : uint32_t {
DoubleExcitationPlus,
/* Multi-qubit gates */
MultiRZ,
GlobalPhase,
/* END (placeholder) */
END
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,24 @@
const size_t index, bool async,
cudaStream_t stream_id);

// Kernel-launch wrappers implemented in initSV.cu. The
// globalPhaseStateVector_CUDA overloads multiply every state-vector element
// by a single complex factor; the cGlobalPhaseStateVector_CUDA overloads
// multiply element-wise by a device-resident diagonal (used for controlled
// global phases).
extern void globalPhaseStateVector_CUDA(cuComplex *sv, size_t num_sv,
                                        cuComplex phase,
                                        size_t thread_per_block,
                                        cudaStream_t stream_id);
extern void globalPhaseStateVector_CUDA(cuDoubleComplex *sv, size_t num_sv,
                                        cuDoubleComplex phase,
                                        size_t thread_per_block,
                                        cudaStream_t stream_id);

extern void cGlobalPhaseStateVector_CUDA(cuComplex *sv, size_t num_sv,
                                         cuComplex *phase,
                                         size_t thread_per_block,
                                         cudaStream_t stream_id);
extern void cGlobalPhaseStateVector_CUDA(cuDoubleComplex *sv, size_t num_sv,
                                         cuDoubleComplex *phase,
                                         size_t thread_per_block,
                                         cudaStream_t stream_id);
mlxd marked this conversation as resolved.
Show resolved Hide resolved

/**
* @brief Managed memory CUDA state-vector class using custateVec backed
* gate-calls.
Expand Down Expand Up @@ -195,6 +213,44 @@
thread_per_block, stream_id);
}

/**
* @brief Multiplies the state-vector by a global phase.
*
* @param adjoint Indicates whether to use adjoint of gate.
* @param param Complex phase generator.
*/
template <size_t thread_per_block = 256>
mlxd marked this conversation as resolved.
Show resolved Hide resolved
void globalPhaseStateVector(const bool adjoint, const Precision param) {
auto stream_id = BaseType::getDataBuffer().getDevTag().getStreamID();
std::complex<Precision> phase =
(adjoint) ? std::exp(std::complex<Precision>{0, param})
: std::exp(std::complex<Precision>{0, -param});
auto cuPhase = complexToCu(phase);
globalPhaseStateVector_CUDA(BaseType::getData(), BaseType::getLength(),

Check warning on line 229 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L223-L229

Added lines #L223 - L229 were not covered by tests
cuPhase, thread_per_block, stream_id);
}

Check warning on line 231 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L231

Added line #L231 was not covered by tests

/**
* @brief Multiplies the state-vector by a controlled global phase.
*
* @param phase Controlled complex phase vector.
*/
template <size_t thread_per_block = 256>
void cGlobalPhaseStateVector(const std::vector<CFP_t> &phase,

Check warning on line 239 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L239

Added line #L239 was not covered by tests
const bool async = false) {
PL_ABORT_IF_NOT(BaseType::getLength() == phase.size(),

Check warning on line 241 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L241

Added line #L241 was not covered by tests
"The state-vector data must have the same size as the "
"controlled-phase data.")
auto device_id = BaseType::getDataBuffer().getDevTag().getDeviceID();
auto stream_id = BaseType::getDataBuffer().getDevTag().getStreamID();
DataBuffer<CFP_t, int> d_phase{phase.size(), device_id, stream_id,

Check warning on line 246 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L244-L246

Added lines #L244 - L246 were not covered by tests
true};
d_phase.CopyHostDataToGpu(phase.data(), d_phase.getLength(), async);
cGlobalPhaseStateVector_CUDA(BaseType::getData(), BaseType::getLength(),

Check warning on line 249 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L248-L249

Added lines #L248 - L249 were not covered by tests
d_phase.getData(), thread_per_block,
stream_id);
}

Check warning on line 252 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L252

Added line #L252 was not covered by tests

/**
* @brief Apply a single gate to the state-vector. Offloads to custatevec
* specific API calls if available. If unable, attempts to use prior cached
Expand Down Expand Up @@ -232,10 +288,10 @@
* @param params Optional parameter list for parametric gates.
* @param gate_matrix Gate data (in row-major format).
*/
void applyOperation(
const std::string &opName, const std::vector<size_t> &wires,
bool adjoint = false, const std::vector<Precision> &params = {0.0},
[[maybe_unused]] const std::vector<CFP_t> &gate_matrix = {}) {
void applyOperation(const std::string &opName,
const std::vector<size_t> &wires, bool adjoint = false,
const std::vector<Precision> &params = {0.0},
const std::vector<CFP_t> &gate_matrix = {}) {
mlxd marked this conversation as resolved.
Show resolved Hide resolved
const auto ctrl_offset = (BaseType::getCtrlMap().find(opName) !=
BaseType::getCtrlMap().end())
? BaseType::getCtrlMap().at(opName)
Expand All @@ -246,6 +302,10 @@
wires.end()};
if (opName == "Identity") {
return;
} else if (opName == "C(GlobalPhase)") {
cGlobalPhaseStateVector(gate_matrix);

Check warning on line 306 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L306

Added line #L306 was not covered by tests
} else if (opName == "GlobalPhase") {
globalPhaseStateVector(adjoint, params[0]);

Check warning on line 308 in pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp

View check run for this annotation

Codecov / codecov/patch

pennylane_lightning/core/src/simulators/lightning_gpu/StateVectorCudaManaged.hpp#L308

Added line #L308 was not covered by tests
} else if (native_gates_.find(opName) != native_gates_.end()) {
applyParametricPauliGate({opName}, ctrls, tgts, params.front(),
adjoint);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -155,18 +155,23 @@ void registerBackendClassSpecificBindings(PyClass &pyclass) {
"apply",
[](StateVectorT &sv, const std::string &str,
const std::vector<size_t> &wires, bool inv,
[[maybe_unused]] const std::vector<std::vector<ParamT>> &params,
[[maybe_unused]] const np_arr_c &gate_matrix) {
const std::vector<std::vector<ParamT>> &params,
const np_arr_c &gate_matrix) {
mlxd marked this conversation as resolved.
Show resolved Hide resolved
const auto m_buffer = gate_matrix.request();
std::vector<CFP_t> matrix_cu;
if (m_buffer.size) {
const auto m_ptr = static_cast<const CFP_t *>(m_buffer.ptr);
matrix_cu =
std::vector<CFP_t>{m_ptr, m_ptr + m_buffer.size};
}

sv.applyOperation(str, wires, inv, std::vector<ParamT>{},
matrix_cu);
if (params.empty()) {
sv.applyOperation(str, wires, inv, std::vector<ParamT>{},
matrix_cu);
} else {
PL_ABORT_IF(params.size() != 1,
"params should be a List[List[float]].")
sv.applyOperation(str, wires, inv, params[0], matrix_cu);
}
},
"Apply operation via the gate matrix");
}
Expand Down
123 changes: 123 additions & 0 deletions pennylane_lightning/core/src/simulators/lightning_gpu/initSV.cu
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,16 @@
#include "cuError.hpp"
#include <cuComplex.h>

namespace {
/**
 * @brief Device-side complex multiplication for cuComplex-like types
 * (any struct exposing real part `.x` and imaginary part `.y`).
 */
template <typename ComplexT>
__device__ inline ComplexT ComplexMul(ComplexT a, ComplexT b) {
    // (a.x + i a.y)(b.x + i b.y)
    const auto re = a.x * b.x - a.y * b.y;
    const auto im = a.x * b.y + a.y * b.x;
    ComplexT out;
    out.x = re;
    out.y = im;
    return out;
}
} // namespace

namespace Pennylane::LightningGPU {

/**
Expand Down Expand Up @@ -69,6 +79,7 @@ __global__ void setStateVectorkernel(GPUDataT *sv, index_type num_indices,
sv[indices[i]] = value[i];
}
}

/**
* @brief The CUDA kernel call wrapper.
*
Expand All @@ -95,6 +106,91 @@ void setStateVector_CUDA_call(GPUDataT *sv, index_type &num_indices,
indices);
PL_CUDA_IS_SUCCESS(cudaGetLastError());
}

/**
 * @brief The CUDA kernel that multiplies the state vector data on GPU device
 * by a global phase.
 *
 * @param sv Complex data pointer of state vector on device.
 * @param num_sv Number of state vector elements.
 * @param phase Constant complex phase factor (passed by value).
 */
template <class GPUDataT, class index_type>
__global__ void globalPhaseStateVectorkernel(GPUDataT *sv, index_type num_sv,
                                             GPUDataT phase) {
    // 64-bit index: blockIdx.x * blockDim.x would overflow a 32-bit unsigned
    // for state vectors with >= 2^32 elements.
    const size_t i =
        static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i < static_cast<size_t>(num_sv)) {
        sv[i] = ComplexMul(sv[i], phase);
    }
}

/**
 * @brief The CUDA kernel call wrapper.
 *
 * @param sv Complex data pointer of state vector on device.
 * @param num_sv Number of state vector elements.
 * @param phase Constant complex phase.
 * @param thread_per_block Number of threads set per block.
 * @param stream_id Stream id of CUDA calls
 */
template <class GPUDataT, class index_type>
void globalPhaseStateVector_CUDA_call(GPUDataT *sv, index_type num_sv,
                                      GPUDataT phase, size_t thread_per_block,
                                      cudaStream_t stream_id) {
    // Ceiling division in size_t; avoids std::div's narrowing cast of
    // num_sv to long (32-bit on LLP64 platforms such as Windows).
    const size_t num_blocks =
        (static_cast<size_t>(num_sv) + thread_per_block - 1) /
        thread_per_block;
    // Launch at least one block so the call is well-formed for num_sv == 0.
    const size_t block_per_grid = (num_blocks == 0 ? 1 : num_blocks);
    dim3 blockSize(thread_per_block, 1, 1);
    dim3 gridSize(block_per_grid, 1);

    globalPhaseStateVectorkernel<GPUDataT, index_type>
        <<<gridSize, blockSize, 0, stream_id>>>(sv, num_sv, phase);
    PL_CUDA_IS_SUCCESS(cudaGetLastError());
}

/**
 * @brief The CUDA kernel that multiplies the state vector data on GPU device
 * by a controlled global phase.
 *
 * @param sv Complex data pointer of state vector on device.
 * @param num_sv Number of state vector elements.
 * @param phase Complex data pointer of controlled global phase values (on
 * device).
 */
template <class GPUDataT, class index_type>
__global__ void cGlobalPhaseStateVectorkernel(GPUDataT *sv, index_type num_sv,
                                              GPUDataT *phase) {
    // 64-bit index: blockIdx.x * blockDim.x would overflow a 32-bit unsigned
    // for state vectors with >= 2^32 elements.
    const size_t i =
        static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i < static_cast<size_t>(num_sv)) {
        sv[i] = ComplexMul(sv[i], phase[i]);
    }
}

/**
 * @brief The CUDA kernel call wrapper.
 *
 * @param sv Complex data pointer of state vector on device.
 * @param num_sv Number of state vector elements.
 * @param phase Complex data pointer of controlled global phase values (on
 * device).
 * @param thread_per_block Number of threads set per block.
 * @param stream_id Stream id of CUDA calls
 */
template <class GPUDataT, class index_type>
void cGlobalPhaseStateVector_CUDA_call(GPUDataT *sv, index_type num_sv,
                                       GPUDataT *phase, size_t thread_per_block,
                                       cudaStream_t stream_id) {
    // Ceiling division in size_t; avoids std::div's narrowing cast of
    // num_sv to long (32-bit on LLP64 platforms such as Windows).
    const size_t num_blocks =
        (static_cast<size_t>(num_sv) + thread_per_block - 1) /
        thread_per_block;
    // Launch at least one block so the call is well-formed for num_sv == 0.
    const size_t block_per_grid = (num_blocks == 0 ? 1 : num_blocks);
    dim3 blockSize(thread_per_block, 1, 1);
    dim3 gridSize(block_per_grid, 1);

    cGlobalPhaseStateVectorkernel<GPUDataT, index_type>
        <<<gridSize, blockSize, 0, stream_id>>>(sv, num_sv, phase);
    PL_CUDA_IS_SUCCESS(cudaGetLastError());
}

/**
* @brief CUDA runtime API call wrapper.
*
Expand Down Expand Up @@ -140,4 +236,31 @@ void setBasisState_CUDA(cuDoubleComplex *sv, cuDoubleComplex &value,
setBasisState_CUDA_call(sv, value, index, async, stream_id);
}

// Single-precision overload: forwards to the templated wrapper that
// multiplies the full state vector by one complex phase factor.
void globalPhaseStateVector_CUDA(cuComplex *sv, size_t num_sv, cuComplex phase,
                                 size_t thread_per_block,
                                 cudaStream_t stream_id) {
    globalPhaseStateVector_CUDA_call(sv, num_sv, phase, thread_per_block,
                                     stream_id);
}
// Double-precision overload of the same dispatch.
void globalPhaseStateVector_CUDA(cuDoubleComplex *sv, size_t num_sv,
                                 cuDoubleComplex phase, size_t thread_per_block,
                                 cudaStream_t stream_id) {
    globalPhaseStateVector_CUDA_call(sv, num_sv, phase, thread_per_block,
                                     stream_id);
}

// Single-precision overload: forwards to the templated wrapper that
// multiplies the state vector element-wise by a device-resident diagonal
// (controlled global phase).
void cGlobalPhaseStateVector_CUDA(cuComplex *sv, size_t num_sv,
                                  cuComplex *phase, size_t thread_per_block,
                                  cudaStream_t stream_id) {
    cGlobalPhaseStateVector_CUDA_call(sv, num_sv, phase, thread_per_block,
                                      stream_id);
}
// Double-precision overload of the same dispatch.
void cGlobalPhaseStateVector_CUDA(cuDoubleComplex *sv, size_t num_sv,
                                  cuDoubleComplex *phase,
                                  size_t thread_per_block,
                                  cudaStream_t stream_id) {
    cGlobalPhaseStateVector_CUDA_call(sv, num_sv, phase, thread_per_block,
                                      stream_id);
}

mlxd marked this conversation as resolved.
Show resolved Hide resolved
} // namespace Pennylane::LightningGPU
Loading
Loading