mnist.config.ini
[default]
# Batch size during training
batch_size = 35
# Size of the hidden dimension
hidden_dim = 128
# Number of layers in the LSTM
num_layers = 2
# Sigma of the prior over the weights
sigma_prior = 1.0
# Bounds used to initialise sigma
sigma_init_low = 50
sigma_init_high = 10
# Gradient-norm clipping threshold. Set larger than zero to enable clipping; a non-positive value disables it (see the sketch below)
clip_norm = -30.0
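#
# A minimal sketch of that logic, assuming the training code skips clipping
# whenever clip_norm is not larger than zero and that gradients are numpy-like
# arrays (the helper name maybe_clip_gradients is hypothetical, not the repo's):
#
#     def maybe_clip_gradients(gradients, clip_norm):
#         if clip_norm <= 0:  # e.g. clip_norm = -30.0 leaves gradients untouched
#             return gradients
#         global_norm = sum(float((g ** 2).sum()) for g in gradients) ** 0.5
#         if global_norm > clip_norm:
#             gradients = [g * (clip_norm / global_norm) for g in gradients]
#         return gradients
#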
# Which optimizer to use. Use 'sgd' or 'adam'
optimizer_name = adam
# Learning rate
learning_rate = 0.0025
# Maximum number of training steps
max_steps = 500000
# Size of the convolutional filter
filter_size = 6
# Number of filters for each convolutional layer (comma-separated, one value per layer)
num_filters = 120,120,120
# Stride for each filter
stride = 2
# Which experiments to perform, given as entries of the form
# <METHOD NAME>,<METHOD VARIABLE NAME>,<START VALUE>,<STOP VALUE> separated by '|'
# To define a new experiment, add a function with the method name to util/mutilation.py
# (see the parsing sketch below)
experiments = rotation,angle,0,70|noise,sigma,0,1.5|warp,warpvalue,0,0.3
# |noise_clip,sigma,0,1
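#
# A minimal parsing sketch for the experiments string above, assuming it is split
# on '|' and ',' exactly as documented (the helper name parse_experiments is
# hypothetical, not necessarily what the repo uses):
#
#     def parse_experiments(spec):
#         experiments = []
#         for entry in spec.split('|'):
#             name, variable, start, stop = entry.split(',')
#             experiments.append((name, variable, float(start), float(stop)))
#         return experiments
#
#     parse_experiments('rotation,angle,0,70|noise,sigma,0,1.5|warp,warpvalue,0,0.3')
#     # -> [('rotation', 'angle', 0.0, 70.0), ('noise', 'sigma', 0.0, 1.5),
#     #     ('warp', 'warpvalue', 0.0, 0.3)]
#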
[sampling]
# Number of interpolations between START_VALUE and STOP_VALUE in an experiment
num_experiments = 50
# For dropout, this is the number of runs with different masks.
# For Bootstrap and Langevin, this is the number of parameter samples
# Please smile if you know why this number is twelve :)
num_runs = 12
# Batch size during testing
batch_size_test = 64
[direc]
# Directory containing the MNIST data
data_direc = data/mnist
# Directory where logs are written
log_direc = log
# Path of the saved model to restore
restore_direc = log/mnist_final_augment/save/my-model
# Directory for input files
input_direc = input
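#
# A minimal loading sketch, assuming the settings above are read with Python's
# standard-library configparser (the variable names below are illustrative only):
#
#     from configparser import ConfigParser
#
#     config = ConfigParser()
#     config.read('mnist.config.ini')
#
#     batch_size = config.getint('default', 'batch_size')          # 35
#     learning_rate = config.getfloat('default', 'learning_rate')  # 0.0025
#     clip_norm = config.getfloat('default', 'clip_norm')          # -30.0 (clipping off)
#     num_filters = [int(n) for n in
#                    config.get('default', 'num_filters').split(',')]  # [120, 120, 120]
#     num_runs = config.getint('sampling', 'num_runs')              # 12
#     data_direc = config.get('direc', 'data_direc')                # 'data/mnist'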