From 6b79eacd7d30abe4e9e59c731f178331aceb6bb3 Mon Sep 17 00:00:00 2001
From: ulises-jeremias
Date: Sat, 7 Oct 2023 14:22:08 -0300
Subject: [PATCH] Updated examples

---
 examples/nn_mnist/main.v | 66 +++++++++++++++---------------
 examples/nn_xor/main.v   | 88 ++++++++++++++++++++--------------------
 2 files changed, 79 insertions(+), 75 deletions(-)

diff --git a/examples/nn_mnist/main.v b/examples/nn_mnist/main.v
index b72ec7c5..44dea651 100644
--- a/examples/nn_mnist/main.v
+++ b/examples/nn_mnist/main.v
@@ -12,53 +12,55 @@ const (
 	batches = 100
 )
 
-// Autograd context / neuralnet graph
-ctx := autograd.ctx[f64]()
+fn main() {
+	// Autograd context / neuralnet graph
+	ctx := autograd.ctx[f64]()
 
-// We create a neural network
-mut model := models.sequential_from_ctx[f64](ctx)
-model.input([1, 28, 28])
-model.mse_loss()
+	// We create a neural network
+	mut model := models.sequential_from_ctx[f64](ctx)
+	model.input([1, 28, 28])
+	model.mse_loss()
 
-// Load the MNIST dataset
-mnist := datasets.load_mnist()!
+	// Load the MNIST dataset
+	mnist := datasets.load_mnist()!
 
-// We reshape the data to fit the network
-features := mnist.train_features.as_f64().divide_scalar(255.0)!.unsqueeze(axis: 1)!
-labels := mnist.train_labels.as_int()
+	// We reshape the data to fit the network
+	features := mnist.train_features.as_f64().divide_scalar(255.0)!.unsqueeze(axis: 1)!
+	labels := mnist.train_labels.as_int()
 
-mut losses := []&vtl.Tensor[f64]{cap: epochs}
+	mut losses := []&vtl.Tensor[f64]{cap: epochs}
 
-// Stochastic Gradient Descent
-mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
+	// Stochastic Gradient Descent
+	mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
 
-println('Training...')
+	println('Training...')
 
-for epoch in 0 .. epochs {
-	println('Epoch: ${epoch}')
+	for epoch in 0 .. epochs {
+		println('Epoch: ${epoch}')
 
-	for batch_id in 0 .. batches {
-		println('Batch id: ${batch_id}')
+		for batch_id in 0 .. batches {
+			println('Batch id: ${batch_id}')
 
-		offset := batch_id * batch_size
+			offset := batch_id * batch_size
 
-		mut x := ctx.variable(features.slice([offset, offset + batch_size])!)
-		target := labels.slice([offset, offset + batch_size])!
+			mut x := ctx.variable(features.slice([offset, offset + batch_size])!)
+			target := labels.slice([offset, offset + batch_size])!
 
-		// Running input through the network
-		y_pred := model.forward(mut x)!
+			// Running input through the network
+			y_pred := model.forward(mut x)!
 
-		// Compute the loss
-		mut loss := model.loss(y_pred, target)!
+			// Compute the loss
+			mut loss := model.loss(y_pred, target)!
 
-		println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
+			println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
 
-		losses << loss.value
+			losses << loss.value
 
-		// Compute the gradient (i.e. contribution of each parameter to the loss)
-		loss.backprop()!
+			// Compute the gradient (i.e. contribution of each parameter to the loss)
+			loss.backprop()!
 
-		// Correct the weights now that we have the gradient information
-		optimizer.update()!
+			// Correct the weights now that we have the gradient information
+			optimizer.update()!
+		}
 	}
 }
diff --git a/examples/nn_xor/main.v b/examples/nn_xor/main.v
index 5ca51e16..c178116b 100644
--- a/examples/nn_xor/main.v
+++ b/examples/nn_xor/main.v
@@ -13,62 +13,64 @@ const (
 // Learning XOR function with a neural network.
 
-// Autograd context / neuralnet graph
-ctx := autograd.ctx[f64]()
+fn main() {
+	// Autograd context / neuralnet graph
+	ctx := autograd.ctx[f64]()
 
-// We will create a tensor of size 3200 (100 batches of size 32)
-// We create it as int between [0, 2] and convert to bool
-x_train_bool := vtl.random(0, 2, [batch_size * 100, 2]).as_bool()
+	// We will create a tensor of size 3200 (100 batches of size 32)
+	// We create it as int between [0, 2] and convert to bool
+	x_train_bool := vtl.random(0, 2, [batch_size * 100, 2]).as_bool()
 
-// Let's build our truth labels. We need to apply xor between the 2 columns of the tensors
-x_train_bool_1 := x_train_bool.slice_hilo([]int{}, [0])!
-x_train_bool_2 := x_train_bool.slice_hilo([]int{}, [1])!
+	// Let's build our truth labels. We need to apply xor between the 2 columns of the tensors
+	x_train_bool_1 := x_train_bool.slice_hilo([]int{}, [0])!
+	x_train_bool_2 := x_train_bool.slice_hilo([]int{}, [1])!
 
-y_bool := x_train_bool_1.equal(x_train_bool_2)!
+	y_bool := x_train_bool_1.equal(x_train_bool_2)!
 
-// We need to convert the bool tensor to a float tensor
-mut x_train := ctx.variable(x_train_bool.as_f64(),
-	requires_grad: true
-)
-y := y_bool.as_f64()
+	// We need to convert the bool tensor to a float tensor
+	mut x_train := ctx.variable(x_train_bool.as_f64(),
+		requires_grad: true
+	)
+	y := y_bool.as_f64()
 
-// We create a neural network with 2 inputs, 2 hidden layers of 4 neurons each and 1 output
-// We use the sigmoid activation function
-mut model := models.sequential_from_ctx[f64](ctx)
-model.input([2])
-model.linear(3)
-model.relu()
-model.linear(1)
-model.sigmoid_cross_entropy_loss()
+	// We create a neural network with 2 inputs, one hidden layer of 3 neurons and 1 output
+	// We use ReLU activation and a sigmoid cross-entropy loss
+	mut model := models.sequential_from_ctx[f64](ctx)
+	model.input([2])
+	model.linear(3)
+	model.relu()
+	model.linear(1)
+	model.sigmoid_cross_entropy_loss()
 
-// Stochastic Gradient Descent
-mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
+	// Stochastic Gradient Descent
+	mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
 
-mut losses := []&vtl.Tensor[f64]{cap: epochs * batches}
+	mut losses := []&vtl.Tensor[f64]{cap: epochs * batches}
 
-// Learning loop
-for epoch in 0 .. epochs {
-	println('Epoch: ${epoch}')
-	for batch_id in 0 .. batches {
-		// minibatch offset in the Tensor
-		offset := batch_id * batch_size
-		mut x := x_train.slice([offset, offset + batch_size])!
-		target := y.slice([offset, offset + batch_size])!
+	// Learning loop
+	for epoch in 0 .. epochs {
+		println('Epoch: ${epoch}')
+		for batch_id in 0 .. batches {
+			// minibatch offset in the Tensor
+			offset := batch_id * batch_size
+			mut x := x_train.slice([offset, offset + batch_size])!
+			target := y.slice([offset, offset + batch_size])!
 
-		// Running input through the network
-		y_pred := model.forward(mut x)!
+			// Running input through the network
+			y_pred := model.forward(mut x)!
 
-		// Compute the loss
-		mut loss := model.loss(y_pred, target)!
+			// Compute the loss
+			mut loss := model.loss(y_pred, target)!
 
-		println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
+			println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
 
-		losses << loss.value
+			losses << loss.value
 
-		// Compute the gradient (i.e. contribution of each parameter to the loss)
-		loss.backprop()!
+			// Compute the gradient (i.e. contribution of each parameter to the loss)
+			loss.backprop()!
 
-		// Correct the weights now that we have the gradient information
-		optimizer.update()!
+			// Correct the weights now that we have the gradient information
+			optimizer.update()!
+		}
 	}
 }