-
Notifications
You must be signed in to change notification settings - Fork 1
/
main.py
105 lines (82 loc) · 3.34 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
"Main user of nnet module"
from time import time
import numpy as np
from mnist import mnist
from nets.ganet import NeuralGA
from nets.nnet import NeuralNetwork
from nets.cynet import CyNet
AUTOENCODER = False # train autoencoder or classifier
SAVE_FILE = "params.npy"  # destination used by save_params()/load_params()
# Layer sizes: 10-way classifier head, or a 784->64->784 autoencoder.
DLAYERS = [784, 16, 16, 10] if not AUTOENCODER else [784, 64, 784]
EPOCHS = 16
BATCH_SIZE = 1000 # TESTMARK
# TODO: Network does not work for batches that are not exactly BATCH_SIZE size
# 60000 / 10000 are the MNIST train / test split sizes.
assert 60000 % BATCH_SIZE == 0 and 10000 % BATCH_SIZE == 0
LOG_SAMPLE_FREQ = 1000 # How many samples between logs
assert LOG_SAMPLE_FREQ % BATCH_SIZE == 0, "should be multiples"
# Floor division: divisibility is asserted just above, and a batch *count*
# should be an int — `/` yielded a float that leaked into modulo tests and
# log output in the training loop.
LOG_FREQ = LOG_SAMPLE_FREQ // BATCH_SIZE # How many batches between logs
np.random.seed(1)  # fixed seed for reproducible runs
# TODO: Save runs to a separate folder and don't overwrite them
def save_params(params):
    """Persist network parameters to SAVE_FILE in NumPy binary format."""
    # np.save accepts a path directly; SAVE_FILE already ends in ".npy",
    # so no extension is appended and the file written is unchanged.
    np.save(SAVE_FILE, params)
def load_params():
    """Load previously saved network parameters from SAVE_FILE."""
    # np.load opens and closes the file itself when given a ".npy" path.
    return np.load(SAVE_FILE)
def genetic_main():
    """Search for parameters with the genetic algorithm and persist the best."""
    ga = NeuralGA(DLAYERS, 100000)
    best = ga.best
    save_params(best)
    return best
def test(trainbatches, testbatches, net: NeuralNetwork, epoch):
    """Evaluate average loss and hit rate on both splits and print one log line."""
    start = time()

    def _evaluate(batches):
        # Accumulate loss and hits over all batches without gradients,
        # then average per batch.
        total_loss = total_hits = 0
        for batch in batches:
            loss, hits = net.batch_eval(batch, grads=False, hitrate=True)
            total_loss += loss
            total_hits += hits
        return total_loss / len(batches), total_hits / len(batches)

    train_avgloss, train_hitrate = _evaluate(trainbatches)
    test_avgloss, test_hitrate = _evaluate(testbatches)
    test_time = time() - start
    print(
        f"[E] [{epoch + 1}] {train_avgloss=:.6f} {train_hitrate=:.2f}% {test_avgloss=:.6f} {test_hitrate=:.2f}% {test_time=:.2f}s"
    )
def train_epoch(trainbatches, net: NeuralNetwork, epoch):
    """Run one gradient-descent pass over trainbatches, logging every LOG_FREQ batches."""
    running_loss = 0
    tick = time()
    for i, batch in enumerate(trainbatches):
        loss, gradients = net.batch_eval(batch)
        net.update_weights(gradients)
        # NOTE(debug): to assert the step improved the loss, re-evaluate the
        # batch with grads=False after update_weights and compare.
        running_loss += loss
        # Equivalent to `i % LOG_FREQ == LOG_FREQ - 1`: fires on every
        # LOG_FREQ-th batch.
        if (i + 1) % LOG_FREQ == 0:
            print(
                f"[TR] [{epoch + 1}, {(i + 1) * BATCH_SIZE}] [{time() - tick:.2f}s] "
                f"avgloss: {running_loss / LOG_FREQ}"
            )
            tick = time()
            running_loss = 0
def train(net: NeuralNetwork, trainbatches_gen, testbatches_gen):
    """Train for up to EPOCHS epochs, evaluating on both splits after each."""
    start = time()
    # zip with range(EPOCHS) caps the epoch count while pulling one batch
    # list per epoch from each generator.
    for epoch, trainbatches, testbatches in zip(
        range(EPOCHS), trainbatches_gen, testbatches_gen
    ):
        train_epoch(trainbatches, net, epoch)
        test(trainbatches, testbatches, net, epoch)
    print(f"[FINISH] Training finished in {time() - start:.2f}s.")
def main():
    """Build the Cython network and run the full MNIST train/test cycle."""
    network = CyNet(DLAYERS, BATCH_SIZE)
    train(
        network,
        mnist.load("training", BATCH_SIZE, autoencoder=AUTOENCODER),
        mnist.load("testing", BATCH_SIZE, autoencoder=AUTOENCODER),
    )
if __name__ == "__main__":
    # Gradient-descent entry point. Swap the comments to run the
    # genetic-algorithm parameter search instead.
    # best_params = genetic_main()
    main()