chore: run v fmt -w . with latest V
spytheman committed Nov 16, 2024
1 parent ad01618 commit b758d5f
Showing 27 changed files with 123 additions and 123 deletions.
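
Every hunk below is mechanical output of `v fmt -w .` with the latest V, and the changes fall into two patterns. The first: the formatter no longer column-aligns the values in struct and named-argument initializers, so every field gets exactly one space after its colon. A minimal before/after sketch, using a hypothetical Point struct rather than code from this repository:

	// Older v fmt output: values padded to line up on the longest field name.
	p := Point{
		x:     1
		y_pos: 2
	}

	// Latest v fmt output: a single space after every colon.
	p := Point{
		x: 1
		y_pos: 2
	}

The second pattern, dropping the module-name prefix from references to same-module constants, first appears in datasets/imdb.v; see the sketch after that file.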
4 changes: 2 additions & 2 deletions autograd/node.v
@@ -21,9 +21,9 @@ pub mut:
 // node
 pub fn node[T](gate Gate, parents []&Variable[T], payload &Payload[T], name string) &Node[T] {
 	return &Node[T]{
-		gate:    gate
+		gate: gate
 		parents: parents
 		payload: payload
-		name:    name
+		name: name
 	}
 }
6 changes: 3 additions & 3 deletions autograd/variable.v
@@ -39,9 +39,9 @@ pub struct VariableData {
 pub fn variable[T](context &Context[T], value &vtl.Tensor[T], data VariableData) &Variable[T] {
 	grad := if data.requires_grad { vtl.zeros_like[T](value) } else { value }
 	return &Variable[T]{
-		context:       context
-		value:         value
-		grad:          grad
+		context: context
+		value: value
+		grad: grad
 		requires_grad: data.requires_grad
 	}
 }
14 changes: 7 additions & 7 deletions datasets/imdb.v
@@ -18,11 +18,11 @@ pub:
 // load_imdb_helper loads the IMDB dataset for a given split.
 fn load_imdb_helper(split string) !(&vtl.Tensor[string], &vtl.Tensor[int]) {
 	dataset_path := download_dataset(
-		dataset:          'imdb'
-		baseurl:          datasets.imdb_base_url
-		compressed:       true
+		dataset: 'imdb'
+		baseurl: imdb_base_url
+		compressed: true
 		uncompressed_dir: 'aclImdb'
-		file:             datasets.imdb_file_name
+		file: imdb_file_name
 	)!

 	mut split_paths := []string{}

@@ -63,8 +63,8 @@ pub fn load_imdb() !ImdbDataset {

 	return ImdbDataset{
 		train_features: train_features
-		train_labels:   train_labels
-		test_features:  test_features
-		test_labels:    test_labels
+		train_labels: train_labels
+		test_features: test_features
+		test_labels: test_labels
 	}
 }
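
The baseurl: and file: lines above also show the second pattern: inside a module, the latest v fmt drops the redundant module-name qualifier from references to the module's own constants, so datasets.imdb_base_url becomes imdb_base_url. A minimal sketch of the rewrite, assuming a hypothetical constant value (the real URL is defined elsewhere in datasets/imdb.v):

	module datasets

	const imdb_base_url = 'https://example.com/imdb/' // hypothetical value for illustration

	fn base() string {
		// Before: the constant was referenced with the module prefix.
		// return datasets.imdb_base_url
		// After running v fmt with the latest V:
		return imdb_base_url
	}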
4 changes: 2 additions & 2 deletions datasets/loader.v
@@ -85,8 +85,8 @@ fn download_dataset(data DatasetDownload) !string {
 		file_content := os.read_file(target)!
 		content := gzip.decompress(file_content.bytes(),
 			verify_header_checksum: true
-			verify_length:          false
-			verify_checksum:        false
+			verify_length: false
+			verify_checksum: false
 		)!
 		os.write_file(uncompressed_path, content.bytestr())!
 	}
22 changes: 11 additions & 11 deletions datasets/mnist.v
@@ -21,11 +21,11 @@ pub:
 // load_mnist_helper loads the MNIST dataset from the given filename.
 fn load_mnist_helper(file string) !string {
 	dataset_path := download_dataset(
-		dataset:          'mnist'
-		baseurl:          datasets.mnist_base_url
-		compressed:       true
+		dataset: 'mnist'
+		baseurl: mnist_base_url
+		compressed: true
 		uncompressed_dir: file.all_before_last('.')
-		file:             file
+		file: file
 	)!

 	return os.read_file(dataset_path)!

@@ -47,15 +47,15 @@ fn load_mnist_labels(filename string) !&vtl.Tensor[u8] {

 // load_mnist loads the MNIST dataset.
 pub fn load_mnist() !MnistDataset {
-	train_features := load_mnist_features(datasets.mnist_train_images_file)!
-	train_labels := load_mnist_labels(datasets.mnist_train_labels_file)!
-	test_features := load_mnist_features(datasets.mnist_test_images_file)!
-	test_labels := load_mnist_labels(datasets.mnist_test_labels_file)!
+	train_features := load_mnist_features(mnist_train_images_file)!
+	train_labels := load_mnist_labels(mnist_train_labels_file)!
+	test_features := load_mnist_features(mnist_test_images_file)!
+	test_labels := load_mnist_labels(mnist_test_labels_file)!

 	return MnistDataset{
 		train_features: train_features
-		train_labels:   train_labels
-		test_features:  test_features
-		test_labels:    test_labels
+		train_labels: train_labels
+		test_features: test_features
+		test_labels: test_labels
 	}
 }
8 changes: 4 additions & 4 deletions examples/vtl_plot_scatter_colorscale/main.v
@@ -23,11 +23,11 @@ fn main() {

 	mut plt := plot.Plot.new()
 	plt.scatter(
-		x:          x.to_array()
-		y:          y
-		mode:       'lines+markers'
+		x: x.to_array()
+		y: y
+		mode: 'lines+markers'
 		colorscale: 'smoker'
-		marker:     plot.Marker{
+		marker: plot.Marker{
 			size: []f64{len: x.size, init: 10.0}
 		}
 	)
2 changes: 1 addition & 1 deletion nn/gates/layers/flatten.v
@@ -11,7 +11,7 @@ pub:

 pub fn flatten_gate[T](input &autograd.Variable[T], cached_shape []int) &FlattenGate[T] {
 	return &FlattenGate[T]{
-		input:        input
+		input: input
 		cached_shape: cached_shape
 	}
 }
4 changes: 2 additions & 2 deletions nn/gates/layers/linear.v
@@ -14,9 +14,9 @@ pub:

 pub fn linear_gate[T](input &autograd.Variable[T], weight &autograd.Variable[T], bias &autograd.Variable[T]) &LinearGate[T] {
 	return &LinearGate[T]{
-		input:  input
+		input: input
 		weight: weight
-		bias:   bias
+		bias: bias
 	}
 }
8 changes: 4 additions & 4 deletions nn/gates/layers/maxpool.v
@@ -16,10 +16,10 @@ pub:
 pub fn maxpool2d_gate[T](max_indices &vtl.Tensor[int], kernel []int, shape []int, stride []int, padding []int) &MaxPool2DGate[T] {
 	return &MaxPool2DGate[T]{
 		max_indices: max_indices
-		kernel:      kernel
-		shape:       shape
-		stride:      stride
-		padding:     padding
+		kernel: kernel
+		shape: shape
+		stride: stride
+		padding: padding
 	}
 }
2 changes: 1 addition & 1 deletion nn/gates/loss/mse.v
@@ -12,7 +12,7 @@ pub:

 pub fn mse_gate[T](cache &autograd.Variable[T], target &vtl.Tensor[T]) &MseGate[T] {
 	return &MseGate[T]{
-		cache:  cache
+		cache: cache
 		target: target
 	}
 }
2 changes: 1 addition & 1 deletion nn/gates/loss/sigmoid_cross_entropy.v
@@ -12,7 +12,7 @@ pub:

 pub fn sigmoid_cross_entropy_gate[T](cache &autograd.Variable[T], target &vtl.Tensor[T]) &SigmoidCrossEntropyGate[T] {
 	return &SigmoidCrossEntropyGate[T]{
-		cache:  cache
+		cache: cache
 		target: target
 	}
 }
2 changes: 1 addition & 1 deletion nn/gates/loss/softmax_cross_entropy.v
@@ -12,7 +12,7 @@ pub:

 pub fn softmax_cross_entropy_gate[T](cache &autograd.Variable[T], target &vtl.Tensor[T]) &SoftmaxCrossEntropyGate[T] {
 	return &SoftmaxCrossEntropyGate[T]{
-		cache:  cache
+		cache: cache
 		target: target
 	}
 }
2 changes: 1 addition & 1 deletion nn/layers/dropout.v
@@ -20,7 +20,7 @@ pub struct DropoutLayer[T] {
 pub fn dropout_layer[T](ctx &autograd.Context[T], output_shape []int, data DropoutLayerConfig) types.Layer[T] {
 	return types.Layer[T](&DropoutLayer[T]{
 		output_shape: output_shape.clone()
-		prob:         1.0 - data.prob
+		prob: 1.0 - data.prob
 	})
 }
2 changes: 1 addition & 1 deletion nn/layers/elu.v
@@ -20,7 +20,7 @@ pub struct EluLayer[T] {
 pub fn elu_layer[T](ctx &autograd.Context[T], output_shape []int, data EluLayerConfig) types.Layer[T] {
 	return types.Layer[T](&EluLayer[T]{
 		output_shape: output_shape.clone()
-		alpha:        data.alpha
+		alpha: data.alpha
 	})
 }
2 changes: 1 addition & 1 deletion nn/layers/linear.v
@@ -18,7 +18,7 @@ pub fn linear_layer[T](ctx &autograd.Context[T], input_dim int, output_dim int)
 	bias := vtl.zeros[T]([1, output_dim])
 	return types.Layer[T](&LinearLayer[T]{
 		weights: ctx.variable(weights)
-		bias:    ctx.variable(bias)
+		bias: ctx.variable(bias)
 	})
 }
6 changes: 3 additions & 3 deletions nn/layers/maxpool.v
@@ -16,9 +16,9 @@ pub struct MaxPool2DLayer[T] {
 pub fn maxpool2d_layer[T](ctx &autograd.Context[T], input_shape []int, kernel []int, padding []int, stride []int) types.Layer[T] {
 	return types.Layer[T](&MaxPool2DLayer[T]{
 		input_shape: input_shape.clone()
-		kernel:      kernel.clone()
-		padding:     padding.clone()
-		stride:      stride.clone()
+		kernel: kernel.clone()
+		padding: padding.clone()
+		stride: stride.clone()
 	})
 }
4 changes: 2 additions & 2 deletions nn/models/sequential_info.v
@@ -16,9 +16,9 @@ pub mut:
 // with an empty list of layers.
 pub fn sequential_info[T](ctx &autograd.Context[T], layers_ []types.Layer[T]) &SequentialInfo[T] {
 	return &SequentialInfo[T]{
-		ctx:    ctx
+		ctx: ctx
 		layers: layers_
-		loss:   unsafe { nil }
+		loss: unsafe { nil }
 	}
 }
6 changes: 3 additions & 3 deletions nn/optimizers/adam.v
@@ -29,9 +29,9 @@ pub struct AdamOptimizerConfig {
 pub fn adam_optimizer[T](config AdamOptimizerConfig) &AdamOptimizer[T] {
 	return &AdamOptimizer[T]{
 		learning_rate: config.learning_rate
-		beta1:         config.beta1
-		beta2:         config.beta2
-		epsilon:       config.epsilon
+		beta1: config.beta1
+		beta2: config.beta2
+		epsilon: config.epsilon
 	}
 }
6 changes: 3 additions & 3 deletions src/broadcast.v
@@ -80,9 +80,9 @@ pub fn (t &Tensor[T]) broadcast_to[T](shape []int) !&Tensor[T] {
 	strides := strides_from_shape(shape, .row_major)
 	result_strides := broadcast_strides(shape, t.shape, strides, t.strides)!
 	return &Tensor[T]{
-		data:    t.data
-		shape:   shape
-		size:    size
+		data: t.data
+		shape: shape
+		size: size
 		strides: result_strides
 	}
 }
6 changes: 3 additions & 3 deletions src/broadcast_d_vcl.v
@@ -10,9 +10,9 @@ pub fn (t &VclTensor[T]) broadcast_to[T](shape []int) !&VclTensor[T] {
 	strides := strides_from_shape(shape, .row_major)
 	result_strides := broadcast_strides(shape, t.shape, strides, t.strides)!
 	return &VclTensor[T]{
-		data:    t.data
-		shape:   shape
-		size:    size
+		data: t.data
+		shape: shape
+		size: size
 		strides: result_strides
 	}
 }
56 changes: 28 additions & 28 deletions src/build.v
@@ -19,20 +19,20 @@ pub fn from_array[T](arr []T, shape []int, params TensorData) !&Tensor[T] {
 	data_storage := storage.from_array[T](arr)
 	if shape.len == 0 {
 		return &Tensor[T]{
-			memory:  params.memory
+			memory: params.memory
 			strides: [1]
-			shape:   []
-			size:    size
-			data:    data_storage
+			shape: []
+			size: size
+			data: data_storage
 		}
 	}
 	strides := strides_from_shape(shape, params.memory)
 	return &Tensor[T]{
-		shape:   shape
+		shape: shape
 		strides: strides
-		memory:  params.memory
-		size:    size
-		data:    data_storage
+		memory: params.memory
+		size: size
+		data: data_storage
 	}
 }

@@ -41,22 +41,22 @@ pub fn tensor[T](init T, shape []int, params TensorData) &Tensor[T] {
 	if shape.len == 0 {
 		data_storage := storage.storage[T](1, 0, init)
 		return &Tensor[T]{
-			memory:  params.memory
+			memory: params.memory
 			strides: [1]
-			shape:   []
-			size:    1
-			data:    data_storage
+			shape: []
+			size: 1
+			data: data_storage
 		}
 	}
 	strides := strides_from_shape(shape, params.memory)
 	size := size_from_shape(shape)
 	data_storage := storage.storage[T](size, 0, init)
 	return &Tensor[T]{
-		shape:   shape.clone()
-		memory:  params.memory
+		shape: shape.clone()
+		memory: params.memory
 		strides: strides
-		size:    size
-		data:    data_storage
+		size: size
+		data: data_storage
 	}
 }

@@ -65,11 +65,11 @@ pub fn tensor[T](init T, shape []int, params TensorData) &Tensor[T] {
 pub fn tensor_like[T](t &Tensor[T]) &Tensor[T] {
 	data_storage := t.data.like[T]()
 	return &Tensor[T]{
-		shape:   t.shape.clone()
+		shape: t.shape.clone()
 		strides: t.strides.clone()
-		memory:  t.memory
-		size:    t.size
-		data:    data_storage
+		memory: t.memory
+		size: t.size
+		data: data_storage
 	}
 }

@@ -80,11 +80,11 @@ pub fn tensor_like_with_shape[T](t &Tensor[T], shape []int) &Tensor[T] {
 	size := size_from_shape(shape)
 	data_storage := t.data.like_with_len[T](size)
 	return &Tensor[T]{
-		shape:   shape.clone()
+		shape: shape.clone()
 		strides: strides
-		memory:  t.memory
-		size:    size
-		data:    data_storage
+		memory: t.memory
+		size: size
+		data: data_storage
 	}
 }

@@ -94,10 +94,10 @@ fn tensor_like_with_shape_and_strides[T](t &Tensor[T], shape []int, strides []in
 	size := size_from_shape(shape)
 	data_storage := t.data.like_with_len[T](size)
 	return &Tensor[T]{
-		shape:   shape.clone()
+		shape: shape.clone()
 		strides: strides.clone()
-		memory:  t.memory
-		size:    size
-		data:    data_storage
+		memory: t.memory
+		size: size
+		data: data_storage
 	}
 }
(6 more changed files not shown.)