-
Notifications
You must be signed in to change notification settings - Fork 16
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge branch 'Add_Brain_Age' into photon_dev
# Conflicts:
#	photonai/modelwrapper/SamplePairing.py
- Loading branch information
Showing 24 changed files with 934 additions and 65 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,99 @@ | ||
from .PhotonBase import PipelineElement | ||
from ..photonlogger import Logger | ||
import numpy as np | ||
|
||
|
||
class PhotonBatchElement(PipelineElement):
    """Pipeline element that forwards transform/predict calls to its base element in batches.

    Large inputs are split along the first axis into chunks of ``batch_size``
    samples, the wrapped element is called once per chunk, and the per-chunk
    results are stacked back together into a single output.
    """

    def __init__(self, name, hyperparameters: dict = None, test_disabled: bool = False, disabled: bool = False,
                 base_element=None, batch_size: int = 10, **kwargs):
        """Create a batching wrapper around a pipeline element.

        :param name: registered name of the wrapped element
        :param hyperparameters: hyperparameter space passed through to PipelineElement
        :param test_disabled: whether to also evaluate the pipeline with this element disabled
        :param disabled: whether the element starts disabled
        :param base_element: pre-constructed element to wrap (otherwise resolved by name)
        :param batch_size: number of samples processed per delegate call
        :param kwargs: additional keyword arguments forwarded to PipelineElement
        """
        super(PhotonBatchElement, self).__init__(name, hyperparameters, test_disabled, disabled, base_element, **kwargs)
        self.batch_size = batch_size

    @staticmethod
    def chunker(nr_items, size):
        """Return (start, stop) index pairs covering ``nr_items`` items in chunks of ``size``.

        The last pair's stop may exceed nr_items; slicing clamps it safely.
        """
        # guard: a non-positive size would either raise a cryptic range() error (0)
        # or silently yield no batches at all (negative)
        if size < 1:
            raise ValueError("batch size must be a positive integer, got %s" % size)
        return [(pos, pos + size) for pos in range(0, nr_items, size)]

    def batch_call(self, delegate, X, y=None, call_with_y=True, **kwargs):
        """Invoke ``delegate`` batch-wise over X (and optionally y and kwargs) and stack the results.

        :param delegate: bound method of the base element (e.g. transform or predict)
        :param X: samples, batched along the first axis (ndarray or sequence)
        :param y: optional targets, batched alongside X when call_with_y is True
        :param call_with_y: if False, y and kwargs are passed through unbatched in the return value
        :param kwargs: additional per-sample data arrays, batched along their first axis
        :return: tuple (processed_X, processed_y, processed_kwargs)
        """
        # initialize return values
        processed_X = None
        processed_y = None
        processed_kwargs = dict()

        # number of samples: ndarrays expose .shape, plain sequences use len()
        if isinstance(X, np.ndarray):
            nr = X.shape[0]
        else:
            nr = len(X)

        # convert kwargs payloads to arrays once, outside the batch loop,
        # instead of re-converting them on every iteration
        kwargs_arrays = {key: (value if isinstance(value, np.ndarray) else np.array(value))
                         for key, value in kwargs.items()}

        batch_idx = 0
        for start, stop in PhotonBatchElement.chunker(nr, self.batch_size):
            batch_idx += 1
            Logger().debug(self.name + " is processing batch nr " + str(batch_idx))

            # slicing along the first axis works for sequences and ndarrays of any rank
            X_batched = X[start:stop]

            # y is probably None anyway; only batch it when requested and present
            y_batched = y
            if call_with_y and y is not None:
                y_batched = y[start:stop]

            kwargs_dict_batched = {key: arr[start:stop] for key, arr in kwargs_arrays.items()}

            # call the delegate on this batch
            X_new, y_new, kwargs_new = self.adjusted_delegate_call(delegate, X_batched, y_batched, **kwargs_dict_batched)

            # stack results
            processed_X = PhotonBatchElement.stack_results(X_new, processed_X)

            if call_with_y:
                processed_y = PhotonBatchElement.stack_results(y_new, processed_y)
                for proc_key, new_kwargs_data in kwargs_new.items():
                    if proc_key not in processed_kwargs:
                        processed_kwargs[proc_key] = new_kwargs_data
                    else:
                        processed_kwargs[proc_key] = PhotonBatchElement.stack_results(new_kwargs_data,
                                                                                      processed_kwargs[proc_key])
            else:
                # pass the originals through untouched when y is not part of the call
                processed_kwargs = kwargs
                processed_y = y
        return processed_X, processed_y, processed_kwargs

    @staticmethod
    def stack_results(new_a, existing_a):
        """Append ``new_a`` to the accumulated ``existing_a``.

        Lists are concatenated, 1-d arrays are hstacked, higher-rank arrays are
        vstacked along the first axis. On the first call (existing_a is None)
        the new chunk is returned as-is.
        """
        if existing_a is not None:
            if isinstance(new_a, list) or (isinstance(new_a, np.ndarray) and len(new_a.shape) < 2):
                if isinstance(existing_a, list):
                    existing_a = existing_a + new_a
                else:
                    existing_a = np.hstack((existing_a, new_a))
            else:
                existing_a = np.vstack((existing_a, new_a))
        else:
            existing_a = new_a
        return existing_a

    def transform(self, X, y=None, **kwargs):
        """Batch-wise transform via the base element."""
        return self.batch_call(self.base_element.transform, X, y, **kwargs)

    def predict(self, X, y=None, **kwargs):
        """Batch-wise predict via the base element (y is never batched for predict)."""
        return self.batch_call(self.base_element.predict, X, y, call_with_y=False, **kwargs)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,14 +1,18 @@ | ||
{ | ||
"ResampleImgs":[ | ||
"photonai.neuro.ImageBasics.ResamplingImgs", | ||
"ResampleImages":[ | ||
"photonai.neuro.ImageBasics.ResampleImages", | ||
"Transformer" | ||
], | ||
"SmoothImgs":[ | ||
"photonai.neuro.ImageBasics.SmoothImgs", | ||
"SmoothImages":[ | ||
"photonai.neuro.ImageBasics.SmoothImages", | ||
"Transformer" | ||
], | ||
"BrainAtlas":[ | ||
"photonai.neuro.BrainAtlas.BrainAtlas", | ||
"Transformer" | ||
], | ||
"PatchImages":[ | ||
"photonai.neuro.ImageBasics.PatchImages", | ||
"Transformer" | ||
] | ||
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,101 @@ | ||
"""Brain-age regression example: patch-based batching + random forest on PAC2018 data.

Reads subject ids and ages from a CSV, builds a PHOTON hyperpipe with a batched
image-patching transformer and a random-forest estimator, fits it, then plots
the per-config inner-fold performance of the first outer fold.
"""
import os
import time

import numpy as np
import pandas as pd
import tensorflow as tf
import keras
import scipy.io as sio
from scipy.stats import itemfreq
import matplotlib.pyplot as plt
from nilearn import image
from sklearn.model_selection import KFold

from photonai.base.PhotonBase import Hyperpipe, PipelineElement, PhotonRegister
from photonai.base.PhotonBatchElement import PhotonBatchElement
from photonai.validation import ResultsTreeHandler
from photonai.neuro.BrainAtlas import AtlasLibrary
from photonai.investigator.Investigator import Investigator

# pin a single GPU before any CUDA-aware library initialises
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"

# data locations (cluster-specific paths)
root_folder = '/spm-data/Scratch/spielwiese_ramona/PAC2018/'
filename = 'PAC2018_age.csv'
df = pd.read_csv(os.path.join(root_folder, filename))

# build absolute nifti paths from subject ids; target is chronological age
X = df["PAC_ID"]
X = [os.path.join(root_folder, 'data_all/' + x + ".nii") for x in X]
y = df["Age"].values

# restrict to the first 1500 subjects
X = X[0:1500]
y = y[0:1500]

# register the custom model wrappers so the pipeline can resolve them by name
PhotonRegister.save(photon_name='Brain_Age_Splitting_Wrapper',
                    class_str='photonai.modelwrapper.Brain_Age_Splitting_Wrapper.Brain_Age_Splitting_Wrapper',
                    element_type="Transformer")

PhotonRegister.save(photon_name='Brain_Age_Random_Forest',
                    class_str='photonai.modelwrapper.Brain_Age_Random_Forest.Brain_Age_Random_Forest',
                    element_type="Estimator")

my_pipe = Hyperpipe('BrainAgePipe',
                    optimizer='grid_search',
                    metrics=['mean_absolute_error'],
                    best_config_metric='mean_absolute_error',
                    inner_cv=KFold(n_splits=5, shuffle=True, random_state=42),
                    outer_cv=KFold(n_splits=5, shuffle=True, random_state=42),
                    eval_final_performance=False,
                    verbosity=2)

# patch extraction runs batch-wise (100 images at a time) across 10 processes,
# with results cached on disk between configurations
batched_transformer = PhotonBatchElement("PatchImages",
                                         hyperparameters={'patch_size': [10, 25, 50, 75, 100]},
                                         batch_size=100,
                                         nr_of_processes=10,
                                         cache_folder='/spm-data/vault-data1/tmp/photon_cache_vincent/')
my_pipe += batched_transformer

my_pipe += PipelineElement('Brain_Age_Random_Forest')

my_pipe.fit(X, y)

# the on-disk cache is only valid for this run's configuration space
batched_transformer.base_element.clear_cache()

# collect (config, test-metric) pairs from the first outer fold
inner_performances = list()
for fold in my_pipe.result_tree.outer_folds[0].tested_config_list:
    inner_performances.append((fold.config_dict, fold.metrics_test[0].value))
print(inner_performances)

plt.ylim(0.2, 0.8)
plt.xticks(rotation=90)
plt.margins(0.3)

# one red dot per tested configuration, labelled "key=value,..." on the x axis
for config_dict, metric_value in inner_performances:
    print(config_dict, metric_value)
    config_label = ",".join("{}={}".format(*item) for item in config_dict.items())
    plt.plot(config_label, metric_value, 'ro')

plt.show()
Oops, something went wrong.