simple_rgbd_classifier.py (forked from ianhuang0630/modifiedRedNet)
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import convolutional, pooling, core, Input, concatenate
from keras.callbacks import ModelCheckpoint, EarlyStopping, ProgbarLogger
img_width, img_height = 256, 256
train_data_dir = "data/classifier_data/train/rgb"
validation_data_dir = "data/classifier_data/validate/rgb"
train_data_dir_nir = "data/classifier_data/train/depth"
validation_data_dir_nir = "data/classifier_data/validate/depth"
nb_train_samples = int(1298 * 0.8)        # 80/20 train/validation split of the 1298 image pairs
nb_validation_samples = int(1298 * 0.2)
batch_size = 20
epochs = 30
num_classes = 3
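# RGB branch: a small Conv/MaxPool stack that reduces the 256x256x3 input to a 64-d embedding.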
inp = Input(shape = (img_width , img_height, 3))
conv_layer1 = convolutional.Conv2D(8, (3,3), strides=(1, 1), padding='same', activation='relu')(inp)
conv_layer2 = convolutional.Conv2D(8, (3,3), strides=(1, 1), padding='same', activation='relu')(conv_layer1)
pool_layer1 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer2)
conv_layer3 = convolutional.Conv2D(16, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer1)
pool_layer2 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer3)
conv_layer4 = convolutional.Conv2D(32, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer2)
pool_layer3 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer4)
conv_layer5 = convolutional.Conv2D(32, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer3)
pool_layer4 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer5)
flatten_layer = core.Flatten()(pool_layer4)
hidden1 = core.Dense(64, activation = 'relu')(flatten_layer)
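# Depth branch: the same architecture applied independently to the 3-channel depth input.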
inp_2 = Input(shape = (img_width , img_height, 3))
conv_layer1_2 = convolutional.Conv2D(8, (3,3), strides=(1, 1), padding='same', activation='relu')(inp_2)
conv_layer2_2 = convolutional.Conv2D(8, (3,3), strides=(1, 1), padding='same', activation='relu')(conv_layer1_2)
pool_layer1_2 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer2_2)
conv_layer3_2 = convolutional.Conv2D(16, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer1_2)
pool_layer2_2 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer3_2)
conv_layer4_2 = convolutional.Conv2D(32, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer2_2)
pool_layer3_2 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer4_2)
conv_layer5_2 = convolutional.Conv2D(32, (3,3), strides=(1, 1), padding='same', activation='relu')(pool_layer3_2)
pool_layer4_2 = pooling.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(conv_layer5_2)
flatten_layer_2 = core.Flatten()(pool_layer4_2)
hidden1_2 = core.Dense(64, activation = 'relu')(flatten_layer_2)
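# Fusion head: concatenate the two 64-d embeddings and classify with a small MLP.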
hidden_merge = concatenate([hidden1 , hidden1_2], axis=-1)
dropout1 = core.Dropout(0.2)(hidden_merge)
hidden2 = core.Dense(64,activation = 'relu')(dropout1)
out = core.Dense(num_classes,activation='softmax')(hidden2)
model1 = Model([inp, inp_2],out)
model1.compile(loss = "categorical_crossentropy", optimizer = 'rmsprop', metrics=["accuracy"])
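# Optional sanity check: print the fused two-branch architecture to confirm shapes and parameter counts.
model1.summary()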
seed = 7
def custom_iterator(Xp, Xs):
    # Yield synchronized (image pair, label) batches from two directory iterators.
    # The shared seed keeps the two flows aligned so each RGB image is paired with its depth map.
    ig1 = ImageDataGenerator(rescale=1./255)
    ig2 = ImageDataGenerator(rescale=1./255)
    temp1 = ig1.flow_from_directory(Xp, target_size=(img_height, img_width), batch_size=batch_size,
                                    class_mode="categorical", seed=seed)
    temp2 = ig2.flow_from_directory(Xs, target_size=(img_height, img_width), batch_size=batch_size,
                                    class_mode="categorical", seed=seed)
    print(temp1.class_indices)
    print(temp2.class_indices)
    # zip (rather than Python 2's itertools.izip) pairs the two infinite iterators batch by batch.
    for batch in zip(temp1, temp2):
        # batch[i] is (images, labels); feed both image tensors and keep one copy of the labels.
        yield [batch[0][0], batch[1][0]], [batch[0][1]]
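# Example usage (a sketch; assumes the directory layout above exists):
#   inputs, labels = next(custom_iterator(train_data_dir_nir, train_data_dir))
#   # inputs[0] is the depth batch, inputs[1] the RGB batch, each of shape (batch_size, 256, 256, 3)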
# Callbacks: log progress, checkpoint the best model by validation accuracy, and stop early once it plateaus.
# Note: ModelCheckpoint does not create the ./models/ directory, so it must exist before training.
progbar = ProgbarLogger(count_mode='steps')
checkpoint = ModelCheckpoint("./models/rgbd.{epoch:02d}-{val_acc:.2f}.hdf5", monitor='val_acc', verbose=1,
                             save_best_only=True, save_weights_only=False, mode='auto', period=1)
early = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=1, mode='auto')
# Train the model on paired (depth, RGB) batches using the Keras 2 generator API.
model1.fit_generator(
    custom_iterator(train_data_dir_nir, train_data_dir),
    steps_per_epoch=nb_train_samples // batch_size,        # steps are batches per epoch, not samples
    epochs=epochs,
    validation_data=custom_iterator(validation_data_dir_nir, validation_data_dir),
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[progbar, checkpoint, early]
)
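# A possible follow-up (assumes the validation directories above exist): report the final
# validation loss and accuracy of the trained model with a fresh paired iterator.
score = model1.evaluate_generator(
    custom_iterator(validation_data_dir_nir, validation_data_dir),
    steps=nb_validation_samples // batch_size)
print("validation loss and accuracy:", score)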