From 19d0bca37b4e4425747901d8f2958a8290292047 Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Tue, 21 Mar 2017 15:19:34 +0300 Subject: [PATCH 1/8] Added files impulse_data.py and pulse.py. --- examples/impulse_data.py | 105 +++++++++++++++++++++++++++++++++++++++ examples/pulse.py | 67 +++++++++++++++++++++++++ 2 files changed, 172 insertions(+) create mode 100755 examples/impulse_data.py create mode 100755 examples/pulse.py diff --git a/examples/impulse_data.py b/examples/impulse_data.py new file mode 100755 index 00000000..e704b209 --- /dev/null +++ b/examples/impulse_data.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python + +import numpy as np +import matplotlib.pyplot as plt +from neon import NervanaObject +from neon.data.datasets import Dataset +from neon.data.dataiterator import ArrayIterator, NervanaDataIterator + +def gen_deltas(len_data): + deltas = np.zeros((1, len_data), np.float32) + i = 0 + while i < len_data: + a = np.random.randint(2) + deltas[0, i] = a + if a == 1: + i += 11 + i += 1 + return deltas + +def gen_steps(deltas): + steps = np.zeros_like(deltas, np.float32) + for i in range(0, deltas.shape[1]): + if deltas[0, i] == 1: + steps[0, i] = np.random.rand() + return steps + +def gen_pulses(deltas, steps): + pulses = np.zeros_like(deltas, np.float32) + for i in range(0, deltas.shape[1]): + if deltas[0, i] == 1: + pulse_len = int(steps[0, i] * 10) + if i + pulse_len < deltas.shape[1]: + pulses[0, i:i + pulse_len] = 1 + else: + pulses[0, i:-1] = 1 + return pulses + +def gen_data(data_len): + x = np.zeros((data_len, 2), np.float32) + deltas = gen_deltas(data_len) + x[:, 0] = deltas[0, :].T + steps = gen_steps(deltas) + x[:, 1] = steps[0, :].T + pulses = gen_pulses(deltas, steps) + y = np.zeros((data_len, 1), np.float32) + y[:, 0] = pulses[0, :].T + return x, y + +class rnn_iterator(NervanaObject): + def __init__(self, X, y, time_steps): + self.y = y + self.seq_length = time_steps + self.batch_index = 0 + self.nfeatures = X.shape[1] # ? 
+ self.nsamples = X.shape[0] + self.ndata = X.shape[0] + self.shape = (self.nfeatures, time_steps) + + extra_examples = self.nsamples % (self.be.bsz * time_steps) + if extra_examples: + X = X[:-extra_examples] + y = y[:-extra_examples] + else: + X = X + y = y + + self.nbatches = self.nsamples // (self.be.bsz * time_steps) + self.X_dev = self.be.iobuf((self.nfeatures, time_steps)) + self.y_dev = self.be.iobuf((y.shape[1], time_steps)) + + self.X = X.reshape(self.be.bsz, self.nbatches, + time_steps, self.nfeatures) + self.y = y.reshape(self.be.bsz, self.nbatches, + time_steps, y.shape[1]) + + def reset(self): + self.batch_index = 0 + + def __iter__(self): + self.batch_index = 0 + while self.batch_index < self.nbatches: + X_batch = self.X[:, self.batch_index].T.reshape( + self.X_dev.shape).copy() + y_batch = self.y[:, self.batch_index].T.reshape( + self.y_dev.shape).copy() + + # make the data for this batch as backend tensor + self.X_dev.set(X_batch) + self.y_dev.set(y_batch) + + self.batch_index += 1 + + yield self.X_dev, self.y_dev + +if __name__ == '__main__': + data_len = 200 + x, y = gen_data(data_len) + plt.subplot(311) + plt.plot(x[:, 0]) + plt.subplot(312) + plt.plot(x[:, 1]) + plt.subplot(313) + plt.plot(y[:, 0]) + plt.show() + diff --git a/examples/pulse.py b/examples/pulse.py new file mode 100755 index 00000000..a33511e9 --- /dev/null +++ b/examples/pulse.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +from neon import logger as neon_logger +from neon.backends import gen_backend +from neon.data import IMDB +from neon.initializers import Uniform, GlorotUniform +from neon.layers import (GeneralizedCost, LSTM, Affine, Dropout, LookupTable, + RecurrentLast, Recurrent, DeepBiLSTM, DeepBiRNN) +from neon.models import Model +from neon.optimizers import Adagrad, GradientDescentMomentum +from neon.transforms import Logistic, Tanh, CrossEntropyMulti, Accuracy +from neon.callbacks.callbacks import Callbacks +from neon.transforms.cost import MeanSquared +from neon.data import ArrayIterator +import numpy as np +import pickle as pkl +from impulse_data import * + +batch_size = 1 +epochs = 1 +hidden_size = 16 + +# setup backend +be = gen_backend(backend='cpu', batch_size = batch_size) + +train_data_len = 100000 +test_data_len = 200 + +xt, yt = gen_data(train_data_len) +xtv, ytv = gen_data(test_data_len) + +train_set = rnn_iterator(xt, yt, 8) +valid_set = rnn_iterator(xtv, ytv, 8) + +# weight initialization +uni = Uniform(low=-0.9, high=0.9) + +rlayer = Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False) + +layers = [ + Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False), + Affine(1, uni, bias=uni, activation=Logistic()) +] +model = Model(layers=layers) + +cost = GeneralizedCost(costfunc=MeanSquared()) +optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9) + +# configure callbacks +callbacks = Callbacks(model, eval_set=valid_set, serialize=1) + +# train model +model.fit(train_set, optimizer=optimizer, + num_epochs=epochs, cost=cost, callbacks=callbacks) +valid_output = model.get_outputs(valid_set).reshape(-1, 1) + +net_out = model.get_outputs(valid_set) +plt.subplot(411) +plt.plot(xtv[:, 0]) +plt.subplot(412) +plt.plot(xtv[:, 1]) +plt.subplot(413) +plt.plot(ytv[:, 0]) +plt.subplot(414) +plt.plot(valid_output) +plt.show() + From ad9a628ce073541eab194653c6a229da2d41e2bb Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Tue, 21 Mar 2017 15:45:19 +0300 Subject: [PATCH 2/8] Added license text in examples. 
--- examples/impulse_data.py | 14 ++++++++++++++ examples/pulse.py | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/examples/impulse_data.py b/examples/impulse_data.py index e704b209..74e2ec15 100755 --- a/examples/impulse_data.py +++ b/examples/impulse_data.py @@ -1,4 +1,18 @@ #!/usr/bin/env python +# ---------------------------------------------------------------------------- +# Copyright 2015-2016 Alexey Reshetnyak. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ---------------------------------------------------------------------------- import numpy as np import matplotlib.pyplot as plt diff --git a/examples/pulse.py b/examples/pulse.py index a33511e9..d421f821 100755 --- a/examples/pulse.py +++ b/examples/pulse.py @@ -1,4 +1,18 @@ #!/usr/bin/env python +# ---------------------------------------------------------------------------- +# Copyright 2015-2016 Alexey Reshetnyak. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ---------------------------------------------------------------------------- from neon import logger as neon_logger from neon.backends import gen_backend From c9ca5272d06a859ac28e8f42be45063132b2087a Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Tue, 21 Mar 2017 17:38:14 +0300 Subject: [PATCH 3/8] Fixed year in the license. --- examples/impulse_data.py | 2 +- examples/pulse.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/impulse_data.py b/examples/impulse_data.py index 74e2ec15..5185d14e 100755 --- a/examples/impulse_data.py +++ b/examples/impulse_data.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # ---------------------------------------------------------------------------- -# Copyright 2015-2016 Alexey Reshetnyak. +# Copyright 2015-2017 Alexey Reshetnyak. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/examples/pulse.py b/examples/pulse.py index d421f821..132ba3dd 100755 --- a/examples/pulse.py +++ b/examples/pulse.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # ---------------------------------------------------------------------------- -# Copyright 2015-2016 Alexey Reshetnyak. +# Copyright 2015-2017 Alexey Reshetnyak. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at From 9b9e0863d9b06285b8741b56863c0c59d49fccde Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Tue, 21 Mar 2017 18:05:19 +0300 Subject: [PATCH 4/8] Removed unnecessary recurrent layer. --- examples/pulse.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/pulse.py b/examples/pulse.py index 132ba3dd..288cd937 100755 --- a/examples/pulse.py +++ b/examples/pulse.py @@ -49,8 +49,6 @@ # weight initialization uni = Uniform(low=-0.9, high=0.9) -rlayer = Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False) - layers = [ Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False), Affine(1, uni, bias=uni, activation=Logistic()) From c100a34a87c5d076f9faeb5c783f8b0421dbf0e4 Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Wed, 22 Mar 2017 17:30:00 +0300 Subject: [PATCH 5/8] Added impulse_rnn.py. --- examples/impulse_data.py | 2 - examples/impulse_rnn.py | 147 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+), 2 deletions(-) create mode 100755 examples/impulse_rnn.py diff --git a/examples/impulse_data.py b/examples/impulse_data.py index 5185d14e..c927ad65 100755 --- a/examples/impulse_data.py +++ b/examples/impulse_data.py @@ -17,8 +17,6 @@ import numpy as np import matplotlib.pyplot as plt from neon import NervanaObject -from neon.data.datasets import Dataset -from neon.data.dataiterator import ArrayIterator, NervanaDataIterator def gen_deltas(len_data): deltas = np.zeros((1, len_data), np.float32) diff --git a/examples/impulse_rnn.py b/examples/impulse_rnn.py new file mode 100755 index 00000000..f9f3d0ce --- /dev/null +++ b/examples/impulse_rnn.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# ---------------------------------------------------------------------------- +# Copyright 2017, Alexey Reshetnyak. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ---------------------------------------------------------------------------- + +from neon.backends import gen_backend +from neon.initializers import Uniform +from neon.layers import GeneralizedCost, Affine, Recurrent +from neon.models import Model +from neon.optimizers import GradientDescentMomentum +from neon.transforms import Logistic +from neon.callbacks.callbacks import Callbacks +from neon.transforms.cost import MeanSquared +from neon import NervanaObject +import numpy as np +do_plots = True +try: + import matplotlib.pyplot as plt +except ImportError: + neon_logger.display('matplotlib needs to be installed manually to generate plots needed ' + 'for this example. 
Skipping plot generation') + do_plots = False + +class ImpulseData(object): + def __init__(self, data_len): + self.x = np.zeros((data_len, 2), np.float32) + self.y = np.zeros((data_len, 1), np.float32) + period = 11 + i = 0 + while i < data_len: + a = np.random.randint(2) + self.x[i, 0] = a + if a == 1: + b = np.random.rand() + self.x[i, 1] = b + pulse_len = int(b * 10) + if i + pulse_len < data_len: + self.y[i:i + pulse_len, 0] = 1 + else: + self.y[i:-1, 0] = 1 + i += period + i += 1 + +class ImpulseDataIterator(NervanaObject): + def __init__(self, X, y, time_steps): + self.y = y + self.seq_length = time_steps + self.batch_index = 0 + self.nfeatures = X.shape[1] + self.nsamples = X.shape[0] + self.ndata = X.shape[0] + self.shape = (self.nfeatures, time_steps) + + extra_examples = self.nsamples % (self.be.bsz * time_steps) + if extra_examples: + X = X[:-extra_examples] + y = y[:-extra_examples] + else: + X = X + y = y + + self.nbatches = self.nsamples // (self.be.bsz * time_steps) + self.X_dev = self.be.iobuf((self.nfeatures, time_steps)) + self.y_dev = self.be.iobuf((y.shape[1], time_steps)) + + self.X = X.reshape(self.be.bsz, self.nbatches, + time_steps, self.nfeatures) + self.y = y.reshape(self.be.bsz, self.nbatches, + time_steps, y.shape[1]) + + def reset(self): + self.batch_index = 0 + + def __iter__(self): + self.batch_index = 0 + while self.batch_index < self.nbatches: + X_batch = self.X[:, self.batch_index].T.reshape( + self.X_dev.shape).copy() + y_batch = self.y[:, self.batch_index].T.reshape( + self.y_dev.shape).copy() + + # make the data for this batch as backend tensor + self.X_dev.set(X_batch) + self.y_dev.set(y_batch) + + self.batch_index += 1 + + yield self.X_dev, self.y_dev + +train_data = ImpulseData(100000) +test_data = ImpulseData(200) + +batch_size = 1 +epochs = 1 +hidden_size = 16 + +# setup backend +be = gen_backend(backend='cpu', batch_size = batch_size) + +train_set = ImpulseDataIterator(train_data.x, train_data.y, 8) +valid_set = ImpulseDataIterator(test_data.x, test_data.y, 8) + +# weight initialization +uni = Uniform(low=-0.9, high=0.9) + +layers = [ + Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False), + Affine(1, uni, bias=uni, activation=Logistic()) +] +model = Model(layers=layers) + +cost = GeneralizedCost(costfunc=MeanSquared()) +optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9) + +# configure callbacks +callbacks = Callbacks(model, eval_set=valid_set, serialize=1) + +# train model +model.fit(train_set, optimizer=optimizer, + num_epochs=epochs, cost=cost, callbacks=callbacks) +valid_output = model.get_outputs(valid_set).reshape(-1, 1) + +net_out = model.get_outputs(valid_set) + +if do_plots: + plt.subplot(411) + plt.plot(test_data.x[:, 0]) + plt.subplot(412) + plt.plot(test_data.x[:, 1]) + plt.subplot(413) + plt.plot(test_data.y[:, 0]) + plt.subplot(414) + plt.plot(valid_output) + plt.show() + + From 0de9955622138633d05dd0d6d80a4178eddd08c3 Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Wed, 22 Mar 2017 17:31:41 +0300 Subject: [PATCH 6/8] Removed impulse_data.py and pulse.py. 
--- examples/impulse_data.py | 117 --------------------------------------- examples/pulse.py | 79 -------------------------- 2 files changed, 196 deletions(-) delete mode 100755 examples/impulse_data.py delete mode 100755 examples/pulse.py diff --git a/examples/impulse_data.py b/examples/impulse_data.py deleted file mode 100755 index c927ad65..00000000 --- a/examples/impulse_data.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python -# ---------------------------------------------------------------------------- -# Copyright 2015-2017 Alexey Reshetnyak. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ---------------------------------------------------------------------------- - -import numpy as np -import matplotlib.pyplot as plt -from neon import NervanaObject - -def gen_deltas(len_data): - deltas = np.zeros((1, len_data), np.float32) - i = 0 - while i < len_data: - a = np.random.randint(2) - deltas[0, i] = a - if a == 1: - i += 11 - i += 1 - return deltas - -def gen_steps(deltas): - steps = np.zeros_like(deltas, np.float32) - for i in range(0, deltas.shape[1]): - if deltas[0, i] == 1: - steps[0, i] = np.random.rand() - return steps - -def gen_pulses(deltas, steps): - pulses = np.zeros_like(deltas, np.float32) - for i in range(0, deltas.shape[1]): - if deltas[0, i] == 1: - pulse_len = int(steps[0, i] * 10) - if i + pulse_len < deltas.shape[1]: - pulses[0, i:i + pulse_len] = 1 - else: - pulses[0, i:-1] = 1 - return pulses - -def gen_data(data_len): - x = np.zeros((data_len, 2), np.float32) - deltas = gen_deltas(data_len) - x[:, 0] = deltas[0, :].T - steps = gen_steps(deltas) - x[:, 1] = steps[0, :].T - pulses = gen_pulses(deltas, steps) - y = np.zeros((data_len, 1), np.float32) - y[:, 0] = pulses[0, :].T - return x, y - -class rnn_iterator(NervanaObject): - def __init__(self, X, y, time_steps): - self.y = y - self.seq_length = time_steps - self.batch_index = 0 - self.nfeatures = X.shape[1] # ? 
- self.nsamples = X.shape[0] - self.ndata = X.shape[0] - self.shape = (self.nfeatures, time_steps) - - extra_examples = self.nsamples % (self.be.bsz * time_steps) - if extra_examples: - X = X[:-extra_examples] - y = y[:-extra_examples] - else: - X = X - y = y - - self.nbatches = self.nsamples // (self.be.bsz * time_steps) - self.X_dev = self.be.iobuf((self.nfeatures, time_steps)) - self.y_dev = self.be.iobuf((y.shape[1], time_steps)) - - self.X = X.reshape(self.be.bsz, self.nbatches, - time_steps, self.nfeatures) - self.y = y.reshape(self.be.bsz, self.nbatches, - time_steps, y.shape[1]) - - def reset(self): - self.batch_index = 0 - - def __iter__(self): - self.batch_index = 0 - while self.batch_index < self.nbatches: - X_batch = self.X[:, self.batch_index].T.reshape( - self.X_dev.shape).copy() - y_batch = self.y[:, self.batch_index].T.reshape( - self.y_dev.shape).copy() - - # make the data for this batch as backend tensor - self.X_dev.set(X_batch) - self.y_dev.set(y_batch) - - self.batch_index += 1 - - yield self.X_dev, self.y_dev - -if __name__ == '__main__': - data_len = 200 - x, y = gen_data(data_len) - plt.subplot(311) - plt.plot(x[:, 0]) - plt.subplot(312) - plt.plot(x[:, 1]) - plt.subplot(313) - plt.plot(y[:, 0]) - plt.show() - diff --git a/examples/pulse.py b/examples/pulse.py deleted file mode 100755 index 288cd937..00000000 --- a/examples/pulse.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python -# ---------------------------------------------------------------------------- -# Copyright 2015-2017 Alexey Reshetnyak. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ---------------------------------------------------------------------------- - -from neon import logger as neon_logger -from neon.backends import gen_backend -from neon.data import IMDB -from neon.initializers import Uniform, GlorotUniform -from neon.layers import (GeneralizedCost, LSTM, Affine, Dropout, LookupTable, - RecurrentLast, Recurrent, DeepBiLSTM, DeepBiRNN) -from neon.models import Model -from neon.optimizers import Adagrad, GradientDescentMomentum -from neon.transforms import Logistic, Tanh, CrossEntropyMulti, Accuracy -from neon.callbacks.callbacks import Callbacks -from neon.transforms.cost import MeanSquared -from neon.data import ArrayIterator -import numpy as np -import pickle as pkl -from impulse_data import * - -batch_size = 1 -epochs = 1 -hidden_size = 16 - -# setup backend -be = gen_backend(backend='cpu', batch_size = batch_size) - -train_data_len = 100000 -test_data_len = 200 - -xt, yt = gen_data(train_data_len) -xtv, ytv = gen_data(test_data_len) - -train_set = rnn_iterator(xt, yt, 8) -valid_set = rnn_iterator(xtv, ytv, 8) - -# weight initialization -uni = Uniform(low=-0.9, high=0.9) - -layers = [ - Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False), - Affine(1, uni, bias=uni, activation=Logistic()) -] -model = Model(layers=layers) - -cost = GeneralizedCost(costfunc=MeanSquared()) -optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9) - -# configure callbacks -callbacks = Callbacks(model, eval_set=valid_set, serialize=1) - -# train model -model.fit(train_set, optimizer=optimizer, - num_epochs=epochs, cost=cost, callbacks=callbacks) -valid_output = model.get_outputs(valid_set).reshape(-1, 1) - -net_out = model.get_outputs(valid_set) -plt.subplot(411) -plt.plot(xtv[:, 0]) -plt.subplot(412) -plt.plot(xtv[:, 1]) -plt.subplot(413) -plt.plot(ytv[:, 0]) -plt.subplot(414) -plt.plot(valid_output) -plt.show() - From a22c2705f1ca2534f5ebdaa9a2e12b6b1ab3dadd Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Wed, 22 Mar 2017 18:49:55 +0300 Subject: [PATCH 7/8] Training parameters were changed. 
--- examples/impulse_rnn.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/impulse_rnn.py b/examples/impulse_rnn.py index f9f3d0ce..2f328e57 100755 --- a/examples/impulse_rnn.py +++ b/examples/impulse_rnn.py @@ -98,21 +98,22 @@ def __iter__(self): yield self.X_dev, self.y_dev -train_data = ImpulseData(100000) +train_data = ImpulseData(40000) test_data = ImpulseData(200) batch_size = 1 -epochs = 1 +epochs = 2 hidden_size = 16 +backprop_depth = 10 # setup backend be = gen_backend(backend='cpu', batch_size = batch_size) -train_set = ImpulseDataIterator(train_data.x, train_data.y, 8) -valid_set = ImpulseDataIterator(test_data.x, test_data.y, 8) +train_set = ImpulseDataIterator(train_data.x, train_data.y, backprop_depth) +valid_set = ImpulseDataIterator(test_data.x, test_data.y, backprop_depth) # weight initialization -uni = Uniform(low=-0.9, high=0.9) +uni = Uniform(low=-0.5, high=0.5) layers = [ Recurrent(hidden_size, uni, activation=Logistic(), reset_cells=False), @@ -121,7 +122,7 @@ def __iter__(self): model = Model(layers=layers) cost = GeneralizedCost(costfunc=MeanSquared()) -optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9) +optimizer = GradientDescentMomentum(0.08, momentum_coef=0.9) # configure callbacks callbacks = Callbacks(model, eval_set=valid_set, serialize=1) @@ -144,4 +145,3 @@ def __iter__(self): plt.plot(valid_output) plt.show() - From ccb208faa3647d90bccc8e27160256a9934f686b Mon Sep 17 00:00:00 2001 From: Alexey Reshetnyak Date: Thu, 23 Mar 2017 15:26:22 +0300 Subject: [PATCH 8/8] Added description and plots labels. --- examples/impulse_rnn.py | 51 +++++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/examples/impulse_rnn.py b/examples/impulse_rnn.py index 2f328e57..87ce8c4d 100755 --- a/examples/impulse_rnn.py +++ b/examples/impulse_rnn.py @@ -13,6 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- +""" + A simple example that demonstrates recurrent neural network training. + The network has two inputs and one output. The inputs of the network +synchronously receive impulses at random times. The amplitude of the impulse at +the first input is equal to one. The amplitude of the impulses on the second +input is a random variable that is uniformly distributed from zero to one. The +desired network output is a impulse which starts at the same time as the input +impulses. The desired impulse has an amplitude equal to one, but its duration +is proportional to the amplitude of second input. + +Usage: + + python examples/impulse_rnn.py + +""" from neon.backends import gen_backend from neon.initializers import Uniform @@ -32,7 +47,13 @@ 'for this example. 
Skipping plot generation') do_plots = False +np.random.seed() + class ImpulseData(object): + """ + x - network inputs + y - desired output + """ def __init__(self, data_len): self.x = np.zeros((data_len, 2), np.float32) self.y = np.zeros((data_len, 1), np.float32) @@ -132,16 +153,26 @@ def __iter__(self): num_epochs=epochs, cost=cost, callbacks=callbacks) valid_output = model.get_outputs(valid_set).reshape(-1, 1) -net_out = model.get_outputs(valid_set) - if do_plots: - plt.subplot(411) - plt.plot(test_data.x[:, 0]) - plt.subplot(412) - plt.plot(test_data.x[:, 1]) - plt.subplot(413) - plt.plot(test_data.y[:, 0]) - plt.subplot(414) - plt.plot(valid_output) + fig = plt.figure() + + ax1 = fig.add_subplot(411) + ax1.title.set_text('First input') + ax1.plot(test_data.x[:, 0]) + + ax2 = fig.add_subplot(412) + ax2.plot(test_data.x[:, 1]) + ax2.title.set_text('Second input') + + ax3 = fig.add_subplot(413) + ax3.title.set_text('Desired output') + ax3.plot(test_data.y[:, 0]) + + ax4 = fig.add_subplot(414) + ax4.title.set_text('Actual output') + ax4.plot(valid_output) + + plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0) + plt.show()
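
The batch layout produced by ImpulseDataIterator (and by the earlier rnn_iterator) can be checked without a neon backend: the iterator trims the data to a multiple of batch_size * time_steps, reshapes it to (bsz, nbatches, time_steps, nfeatures), and then flattens each batch slice into the buffer returned by be.iobuf((nfeatures, time_steps)). The snippet below is a minimal, neon-free sketch of that reshaping using only numpy; the sizes (bsz=1, time_steps=10, nfeatures=2) and variable names are illustrative, and it assumes the usual neon convention that be.iobuf((F, T)) allocates an (F * T, batch_size) buffer.

import numpy as np

bsz, time_steps, nfeatures = 1, 10, 2          # illustrative sizes only
nsamples = 43                                  # pretend raw dataset length

# Two-feature toy data, one row per time step, standing in for ImpulseData.x.
X = np.arange(nsamples * nfeatures, dtype=np.float32).reshape(nsamples, nfeatures)

# Trim the remainder so the data divides evenly, as the iterator does.
extra = nsamples % (bsz * time_steps)
if extra:
    X = X[:-extra]
nbatches = nsamples // (bsz * time_steps)

# Reshape to (bsz, nbatches, time_steps, nfeatures), then emit one
# (nfeatures * time_steps, bsz) slab per batch -- the assumed iobuf layout.
Xr = X.reshape(bsz, nbatches, time_steps, nfeatures)
for i in range(nbatches):
    batch = Xr[:, i].T.reshape(nfeatures * time_steps, bsz)
    print(i, batch.shape)                      # -> (20, 1) for every batch

With the settings used in the final patch (batch_size = 1, backprop_depth = 10, two input features) the same arithmetic should yield (20, 1) device buffers, one per unrolled batch, which is what the Recurrent layer consumes on each fit step.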