Updated examples
ulises-jeremias committed Oct 7, 2023
1 parent 5ab6f0a commit 6b79eac
Showing 2 changed files with 79 additions and 75 deletions.
66 changes: 34 additions & 32 deletions examples/nn_mnist/main.v
@@ -12,53 +12,55 @@ const (
 	batches = 100
 )
 
-// Autograd context / neuralnet graph
-ctx := autograd.ctx[f64]()
+fn main() {
+	// Autograd context / neuralnet graph
+	ctx := autograd.ctx[f64]()
 
-// We create a neural network
-mut model := models.sequential_from_ctx[f64](ctx)
-model.input([1, 28, 28])
-model.mse_loss()
+	// We create a neural network
+	mut model := models.sequential_from_ctx[f64](ctx)
+	model.input([1, 28, 28])
+	model.mse_loss()
 
-// Load the MNIST dataset
-mnist := datasets.load_mnist()!
+	// Load the MNIST dataset
+	mnist := datasets.load_mnist()!
 
-// We reshape the data to fit the network
-features := mnist.train_features.as_f64().divide_scalar(255.0)!.unsqueeze(axis: 1)!
-labels := mnist.train_labels.as_int()
+	// We reshape the data to fit the network
+	features := mnist.train_features.as_f64().divide_scalar(255.0)!.unsqueeze(axis: 1)!
+	labels := mnist.train_labels.as_int()
 
-mut losses := []&vtl.Tensor[f64]{cap: epochs}
+	mut losses := []&vtl.Tensor[f64]{cap: epochs}
 
-// Stochastic Gradient Descent
-mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
+	// Stochastic Gradient Descent
+	mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
 
-println('Training...')
+	println('Training...')
 
-for epoch in 0 .. epochs {
-	println('Epoch: ${epoch}')
+	for epoch in 0 .. epochs {
+		println('Epoch: ${epoch}')
 
-	for batch_id in 0 .. batches {
-		println('Batch id: ${batch_id}')
+		for batch_id in 0 .. batches {
+			println('Batch id: ${batch_id}')
 
-		offset := batch_id * batch_size
+			offset := batch_id * batch_size
 
-		mut x := ctx.variable(features.slice([offset, offset + batch_size])!)
-		target := labels.slice([offset, offset + batch_size])!
+			mut x := ctx.variable(features.slice([offset, offset + batch_size])!)
+			target := labels.slice([offset, offset + batch_size])!
 
-		// Running input through the network
-		y_pred := model.forward(mut x)!
+			// Running input through the network
+			y_pred := model.forward(mut x)!
 
-		// Compute the loss
-		mut loss := model.loss(y_pred, target)!
+			// Compute the loss
+			mut loss := model.loss(y_pred, target)!
 
-		println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
+			println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
 
-		losses << loss.value
+			losses << loss.value
 
-		// Compute the gradient (i.e. contribution of each parameter to the loss)
-		loss.backprop()!
+			// Compute the gradient (i.e. contribution of each parameter to the loss)
+			loss.backprop()!
 
-		// Correct the weights now that we have the gradient information
-		optimizer.update()!
+			// Correct the weights now that we have the gradient information
+			optimizer.update()!
+		}
 	}
 }
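
The change to this file is purely structural: the statements that previously ran at the top level of the module are now wrapped in an explicit fn main() entry point, with the body indented one level and one extra closing brace at the end. A minimal sketch of the pattern (the body below is illustrative, not taken from the example):

module main

// Illustrative only: the commit moves previously top-level example code into
// an explicit entry point like this, indenting the body one level.
fn main() {
	message := 'the example body now lives inside fn main()'
	println(message)
}
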
88 changes: 45 additions & 43 deletions examples/nn_xor/main.v
@@ -13,62 +13,64 @@ const (
 
 // Learning XOR function with a neural network.
 
-// Autograd context / neuralnet graph
-ctx := autograd.ctx[f64]()
+fn main() {
+	// Autograd context / neuralnet graph
+	ctx := autograd.ctx[f64]()
 
-// We will create a tensor of size 3200 (100 batches of size 32)
-// We create it as int between [0, 2] and convert to bool
-x_train_bool := vtl.random(0, 2, [batch_size * 100, 2]).as_bool()
+	// We will create a tensor of size 3200 (100 batches of size 32)
+	// We create it as int between [0, 2] and convert to bool
+	x_train_bool := vtl.random(0, 2, [batch_size * 100, 2]).as_bool()
 
-// Let's build our truth labels. We need to apply xor between the 2 columns of the tensors
-x_train_bool_1 := x_train_bool.slice_hilo([]int{}, [0])!
-x_train_bool_2 := x_train_bool.slice_hilo([]int{}, [1])!
+	// Let's build our truth labels. We need to apply xor between the 2 columns of the tensors
+	x_train_bool_1 := x_train_bool.slice_hilo([]int{}, [0])!
+	x_train_bool_2 := x_train_bool.slice_hilo([]int{}, [1])!
 
-y_bool := x_train_bool_1.equal(x_train_bool_2)!
+	y_bool := x_train_bool_1.equal(x_train_bool_2)!
 
-// We need to convert the bool tensor to a float tensor
-mut x_train := ctx.variable(x_train_bool.as_f64(),
-	requires_grad: true
-)
-y := y_bool.as_f64()
+	// We need to convert the bool tensor to a float tensor
+	mut x_train := ctx.variable(x_train_bool.as_f64(),
+		requires_grad: true
+	)
+	y := y_bool.as_f64()
 
-// We create a neural network with 2 inputs, 2 hidden layers of 4 neurons each and 1 output
-// We use the sigmoid activation function
-mut model := models.sequential_from_ctx[f64](ctx)
-model.input([2])
-model.linear(3)
-model.relu()
-model.linear(1)
-model.sigmoid_cross_entropy_loss()
+	// We create a neural network with 2 inputs, 2 hidden layers of 4 neurons each and 1 output
+	// We use the sigmoid activation function
+	mut model := models.sequential_from_ctx[f64](ctx)
+	model.input([2])
+	model.linear(3)
+	model.relu()
+	model.linear(1)
+	model.sigmoid_cross_entropy_loss()
 
-// Stochastic Gradient Descent
-mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
+	// Stochastic Gradient Descent
+	mut optimizer := optimizers.sgd[f64](learning_rate: 0.01)
 
-mut losses := []&vtl.Tensor[f64]{cap: epochs * batches}
+	mut losses := []&vtl.Tensor[f64]{cap: epochs * batches}
 
-// Learning loop
-for epoch in 0 .. epochs {
-	println('Epoch: ${epoch}')
-	for batch_id in 0 .. batches {
-		// minibatch offset in the Tensor
-		offset := batch_id * batch_size
-		mut x := x_train.slice([offset, offset + batch_size])!
-		target := y.slice([offset, offset + batch_size])!
+	// Learning loop
+	for epoch in 0 .. epochs {
+		println('Epoch: ${epoch}')
+		for batch_id in 0 .. batches {
+			// minibatch offset in the Tensor
+			offset := batch_id * batch_size
+			mut x := x_train.slice([offset, offset + batch_size])!
+			target := y.slice([offset, offset + batch_size])!
 
-		// Running input through the network
-		y_pred := model.forward(mut x)!
+			// Running input through the network
+			y_pred := model.forward(mut x)!
 
-		// Compute the loss
-		mut loss := model.loss(y_pred, target)!
+			// Compute the loss
+			mut loss := model.loss(y_pred, target)!
 
-		println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
+			println('Epoch: ${epoch}, Batch id: ${batch_id}, Loss: ${loss.value}')
 
-		losses << loss.value
+			losses << loss.value
 
-		// Compute the gradient (i.e. contribution of each parameter to the loss)
-		loss.backprop()!
+			// Compute the gradient (i.e. contribution of each parameter to the loss)
+			loss.backprop()!
 
-		// Correct the weights now that we have the gradient information
-		optimizer.update()!
+			// Correct the weights now that we have the gradient information
+			optimizer.update()!
+		}
 	}
 }
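
Both training loops rely on the same mini-batch arithmetic: offset := batch_id * batch_size, then a slice over [offset, offset + batch_size]. As a standalone illustration (plain V, no vtl needed; the batch size of 32 is taken from the "100 batches of size 32" comment in nn_xor/main.v, while the mnist constants are not shown in this diff), here is how those offsets walk the 3200-row training tensor once per epoch:

module main

// Standalone sketch of the mini-batch indexing used in both examples:
// 100 batches of size 32 give offsets 0, 32, 64, ..., 3168, covering
// 3200 rows exactly once per epoch.
fn main() {
	batch_size := 32
	batches := 100
	for batch_id in 0 .. batches {
		offset := batch_id * batch_size
		if batch_id < 3 || batch_id == batches - 1 {
			println('batch ${batch_id}: rows [${offset}, ${offset + batch_size})')
		}
	}
	println('rows covered per epoch: ${batches * batch_size}') // 3200
}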
