# loader.py (forked from rezazad68/TMUnet)
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
import random
from einops.layers.torch import Rearrange
from scipy.ndimage import binary_dilation  # scipy.ndimage.morphology is deprecated


# ===== normalize over the dataset
def dataset_normalized(imgs):
    # standardize with the dataset-wide mean/std, then rescale each image to [0, 255]
    imgs_mean = np.mean(imgs)
    imgs_std = np.std(imgs)
    imgs_normalized = (imgs - imgs_mean) / imgs_std
    for i in range(imgs.shape[0]):
        imgs_normalized[i] = ((imgs_normalized[i] - np.min(imgs_normalized[i])) /
                              (np.max(imgs_normalized[i]) - np.min(imgs_normalized[i]))) * 255
    return imgs_normalized


class weak_annotation(torch.nn.Module):
    """Turns a full segmentation mask into a patch-level (weak) annotation."""
    def __init__(self, patch_size=16, img_size=256):
        super().__init__()
        # split the (1, H, W) mask into non-overlapping patch_size x patch_size patches
        self.arranger = Rearrange('c (ph h) (pw w) -> c (ph pw) h w', c=1, h=patch_size,
                                  ph=img_size // patch_size, w=patch_size, pw=img_size // patch_size)

    def forward(self, x):
        x = self.arranger(x)
        # per-patch foreground mass, rescaled by the largest patch sum
        x = torch.sum(x, dim=[-2, -1])
        x = x / x.max()
        return x
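
# Illustrative example (not part of the original file): with patch_size=16 and
# img_size=256, a (1, 256, 256) binary mask is split into 256 non-overlapping
# 16x16 patches; foreground pixels in each patch are summed and rescaled by the
# largest patch sum, giving a (1, 256) vector of per-patch scores in [0, 1].
# For an all-foreground mask every score is 1:
#   ann = weak_annotation(patch_size=16, img_size=256)
#   ann(torch.ones(1, 256, 256))   # -> tensor of shape (1, 256), all ones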


def Bextraction(img):
    # boundary extraction: dilate the mask with a 7x7 structuring element and
    # subtract the original mask, leaving a thin band around the foreground
    img = img[0].numpy()
    img2 = binary_dilation(img, structure=np.ones((7, 7))).astype(img.dtype)
    img3 = img2 - img
    img3 = np.expand_dims(img3, axis=0)
    return torch.tensor(img3.copy())
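
# Illustrative example (not part of the original file): Bextraction marks the
# 3-pixel-wide ring of background around the foreground (7x7 dilation minus
# the original mask), e.g.
#   m = torch.zeros(1, 256, 256); m[0, 100:150, 100:150] = 1
#   Bextraction(m)   # (1, 256, 256) tensor, nonzero only on the band around the square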


## Temporary
class isic_loader(Dataset):
    """Dataset class for the ISIC skin-lesion segmentation dataset."""
    def __init__(self, path_Data, train=True, Test=False):
        super().__init__()
        self.train = train
        # choose the split: train, test, or validation
        if train:
            self.data = np.load(path_Data + 'data_train.npy')
            self.mask = np.load(path_Data + 'mask_train.npy')
        elif Test:
            self.data = np.load(path_Data + 'data_test.npy')
            self.mask = np.load(path_Data + 'mask_test.npy')
        else:
            self.data = np.load(path_Data + 'data_val.npy')
            self.mask = np.load(path_Data + 'mask_val.npy')

        self.data = dataset_normalized(self.data)
        # masks become (N, H, W, 1) with values in [0, 1]
        self.mask = np.expand_dims(self.mask, axis=3)
        self.mask = self.mask / 255.
        self.weak_annotation = weak_annotation(patch_size=16, img_size=256)

    def __getitem__(self, indx):
        img = self.data[indx]
        seg = self.mask[indx]
        if self.train:
            img, seg = self.apply_augmentation(img, seg)

        # HWC -> CHW tensors
        seg = torch.tensor(seg.copy())
        img = torch.tensor(img.copy())
        img = img.permute(2, 0, 1)
        seg = seg.permute(2, 0, 1)

        # auxiliary targets derived from the full mask
        weak_ann = self.weak_annotation(seg)
        boundary = Bextraction(seg)
        return {'image': img,
                'weak_ann': weak_ann,
                'boundary': boundary,
                'mask': seg}

    def apply_augmentation(self, img, seg):
        # random horizontal flip applied jointly to image and mask
        if random.random() < 0.5:
            img = np.flip(img, axis=1)
            seg = np.flip(seg, axis=1)
        return img, seg

    def __len__(self):
        return len(self.data)
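

# Minimal usage sketch (illustrative, not part of the original file).
# './data/' is a placeholder for the directory holding data_train.npy,
# mask_train.npy, etc.; shapes assume the 256x256 masks that the hard-coded
# weak_annotation(patch_size=16, img_size=256) expects.
if __name__ == '__main__':
    train_set = isic_loader(path_Data='./data/', train=True)
    train_loader = DataLoader(train_set, batch_size=4, shuffle=True)
    batch = next(iter(train_loader))
    print(batch['image'].shape)     # (4, C, 256, 256), C set by the stored .npy images
    print(batch['mask'].shape)      # (4, 1, 256, 256)
    print(batch['weak_ann'].shape)  # (4, 1, 256)   per-patch foreground scores
    print(batch['boundary'].shape)  # (4, 1, 256, 256) boundary band around the foreground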