Skip to content

Commit

Permalink
Use GPU for pre-processing
Browse files Browse the repository at this point in the history
  • Loading branch information
mblum94 committed Feb 20, 2024
1 parent 848955c commit 0d069f5
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 3 deletions.
28 changes: 25 additions & 3 deletions src/networks/misc.c
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ struct network_data_s network_data_empty = {

.create_out = false,
.load_mem = false,
.gpu = false,
.batch_flags = SLICE_FLAG | AVG_FLAG | BATCH_FLAG,

.nufft_conf = &nufft_conf_defaults,
Expand Down Expand Up @@ -222,7 +223,10 @@ static void compute_adjoint_noncart(struct network_data_s* nd)
md_select_dims(DIMS, ~nd->batch_flags, pat_dims_s, nd->pat_dims);
md_select_dims(DIMS, ~nd->batch_flags, col_dims_s, nd->col_dims);

auto model = sense_noncart_create(nd->N, trj_dims_s, pat_dims_s, ksp_dims_s, cim_dims_s, img_dims_s, col_dims_s, nd->bas_dims, nd->basis, *(nd->nufft_conf));
struct nufft_conf_s nufft_conf = *(nd->nufft_conf);
nufft_conf.cache_psf_grdding = true;

auto model = sense_noncart_create(nd->N, trj_dims_s, pat_dims_s, ksp_dims_s, cim_dims_s, img_dims_s, col_dims_s, nd->bas_dims, nd->basis, nufft_conf);
auto sense_adjoint = nlop_sense_adjoint_create(1, &model, true);

nd->ND = DIMS + 1;
Expand All @@ -239,7 +243,16 @@ static void compute_adjoint_noncart(struct network_data_s* nd)
complex float* dst[2] = { nd->adjoint, nd->psf };
const complex float* src[4] = { nd->kspace, nd->coil, nd->pattern, nd->trajectory };

nlop_generic_apply_loop_sameplace(sense_adjoint, nd->batch_flags, 2, DO, odims, dst, 4, DI, idims, src, nd->adjoint);
complex float* ref = NULL;

#ifdef USE_CUDA
if (nd->gpu)
ref = md_alloc_gpu(1, MD_DIMS(1), CFL_SIZE);
#endif

nlop_generic_apply_loop_sameplace(sense_adjoint, nd->batch_flags, 2, DO, odims, dst, 4, DI, idims, src, ref);

md_free(ref);

nlop_free(sense_adjoint);
sense_model_free(model);
Expand Down Expand Up @@ -394,7 +407,16 @@ void network_data_compute_init(struct network_data_s* nd, complex float lambda,
complex float* dst[1] = { nd->initialization };
const complex float* src[3] = { nd->adjoint, nd->coil, nd->psf };

nlop_generic_apply_loop_sameplace(nlop_normal_inv, loop_flags, 1, DO, odims, dst, 3, DI, idims, src, nd->adjoint);
complex float* ref = NULL;

#ifdef USE_CUDA
if (nd->gpu)
ref = md_alloc_gpu(1, MD_DIMS(1), CFL_SIZE);
#endif

nlop_generic_apply_loop_sameplace(nlop_normal_inv, loop_flags, 1, DO, odims, dst, 3, DI, idims, src, ref);

md_free(ref);

nlop_free(nlop_normal_inv);
}
Expand Down
1 change: 1 addition & 0 deletions src/networks/misc.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ struct network_data_s {

_Bool create_out;
_Bool load_mem;
_Bool gpu;

unsigned long batch_flags;
};
Expand Down
2 changes: 2 additions & 0 deletions src/reconet.c
Original file line number Diff line number Diff line change
Expand Up @@ -276,6 +276,7 @@ int main_reconet(int argc, char* argv[argc])
data.create_out = true;

data.load_mem = load_mem;
data.gpu = config.gpu;
load_network_data(&data);

Nb = MIN(Nb, network_data_get_tot(&data));
Expand Down Expand Up @@ -305,6 +306,7 @@ int main_reconet(int argc, char* argv[argc])
valid_data.filename_basis = data.filename_basis;

load_network_data(&valid_data);
valid_data.gpu = config.gpu;
network_data_slice_dim_to_batch_dim(&valid_data);

if (config.sense_init && (-1. != config.init_lambda_fixed))
Expand Down

0 comments on commit 0d069f5

Please sign in to comment.