-
Notifications
You must be signed in to change notification settings - Fork 0
/
checker.py
executable file
·141 lines (109 loc) · 4.57 KB
/
checker.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
################################################################################
# CSE 253: Programming Assignment 2
# Code snippet by Manjot Bilkhu
# Winter 2020
################################################################################
# We've provided you with the dataset in PA2.zip
################################################################################
# To install PyYaml, refer to the instructions for your system:
# https://pyyaml.org/wiki/PyYAMLDocumentation
################################################################################
# If you don't have NumPy installed, please use the instructions here:
# https://scipy.org/install.html
################################################################################
import neuralnet
import numpy as np
import yaml
import pickle
def get_data(path):
    """
    Load the sanity-check data used to verify the implementation.

    Args:
        path: Directory prefix (must end with a path separator, e.g. "./")
            that is concatenated with 'sanity.pkl'.

    Returns:
        The unpickled sanity-check object (reference values for the checks).
    """
    # 'with' guarantees the file handle is closed (the original leaked it).
    # NOTE(review): pickle.load is unsafe on untrusted files; acceptable here
    # only because the sanity file ships with the assignment.
    with open(path + 'sanity.pkl', 'rb') as f:
        return pickle.load(f, encoding='latin1')
def load_config(path):
    """
    Load the experiment configuration from config.yaml.

    Args:
        path: Directory prefix (must end with a path separator, e.g. "./")
            that is concatenated with 'config.yaml'. The original ignored
            this argument and always read './config.yaml'; honoring it is
            backward-compatible because the only caller passes "./".

    Returns:
        The parsed configuration (a dict for a mapping-style YAML file).
    """
    # SafeLoader avoids arbitrary object construction from the YAML file;
    # 'with' closes the handle (the original leaked it).
    with open(path + 'config.yaml', 'r') as f:
        return yaml.load(f, Loader=yaml.SafeLoader)
def check_error(error, msg, threshold=1e-6):
    """
    Print whether an accumulated absolute error is below the threshold.

    Args:
        error: Non-negative scalar error (e.g. a sum of absolute differences).
        msg: Human-readable label for the quantity being checked.
        threshold: Largest error still reported as correct. Defaults to
            1e-6, the value previously hard-coded in the body.
    """
    if error < threshold:
        print(f"{msg} is CORRECT")
    else:
        print(f"{msg} is WRONG")
def sanity_layers(data):
    """
    Verify the forward and backward passes of every activation function.

    Args:
        data: Sanity dict holding reference outputs under 'out_<name>' and
            reference gradients under 'grad_<name>'.
    """
    # Fixed seed so the pseudo-input matches the stored reference values.
    np.random.seed(42)
    random_input = np.random.randn(1, 100)

    # (data-key suffix, display label) for each activation under test.
    specs = [('sigmoid', 'Sigmoid'), ('tanh', 'Tanh'), ('ReLU', 'ReLU')]

    # Build every activation, then run all forward passes before checking,
    # preserving the original evaluation and output order.
    acts = {key: neuralnet.Activation(key) for key, _ in specs}
    outputs = {key: acts[key](random_input) for key, _ in specs}
    for key, label in specs:
        forward_err = np.sum(np.abs(data['out_' + key] - outputs[key]))
        check_error(forward_err, f"{label} Forward Pass")
    print(20 * "-", "\n")

    # Backward passes with a unit upstream gradient, then the checks.
    grads = {key: acts[key].backward(1.0) for key, _ in specs}
    for key, label in specs:
        grad_err = np.sum(np.abs(data['grad_' + key] - grads[key]))
        check_error(grad_err, f"{label} Gradient")
    print(20 * "-", "\n")
def sanity_network(data, default_config):
    """
    Verify the network's forward and backward passes layer by layer.

    Args:
        data: Sanity dict whose 'nnet' entry is a reference network with
            stored x / w / b / d_w / d_b values per layer.
        default_config: Configuration dict used to build the network.
    """
    # Fixed seed so the pseudo-image matches the stored reference network.
    np.random.seed(42)
    random_image = np.random.randn(1, 784)

    nnet = neuralnet.Neuralnetwork(default_config)

    # One forward pass (one-hot target for class 0) and one backward pass.
    nnet(random_image, targets=np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    nnet.backward()

    # (attribute name, display label) pairs compared for each linear layer.
    fields = [
        ('x', 'Input'),
        ('w', 'Weights'),
        ('b', 'Biases'),
        ('d_w', 'Weight Gradient'),
        ('d_b', 'Bias Gradient'),
    ]

    layer_no = 0
    for idx, layer in enumerate(nnet.layers):
        # Activations are interleaved with Layer objects; skip them.
        if not isinstance(layer, neuralnet.Layer):
            continue
        layer_no += 1
        reference = data['nnet'].layers[idx]
        for attr, label in fields:
            diff = np.sum(np.abs(getattr(reference, attr) - getattr(layer, attr)))
            check_error(diff, f"Layer{layer_no}: {label}")
        print(20 * "-", "\n")
def main():
    """Load the sanity data and configuration, then run every check."""
    sanity_data = get_data("./")
    default_config = load_config("./")
    sanity_layers(sanity_data)
    sanity_network(sanity_data, default_config)


if __name__ == '__main__':
    main()