diff --git a/datasets/mnist_test.v b/datasets/mnist_test.v
index 30a05274..45562759 100644
--- a/datasets/mnist_test.v
+++ b/datasets/mnist_test.v
@@ -2,10 +2,14 @@ import vtl.datasets
 
 fn test_mnist() {
+	unbuffer_stdout()
+	println('start')
 	mnist := datasets.load_mnist()!
+	println('mnist dataset loaded')
 
 	assert mnist.train_features.shape == [60000, 28, 28]
 	assert mnist.test_features.shape == [10000, 28, 28]
 
 	assert mnist.train_labels.shape == [60000]
 	assert mnist.test_labels.shape == [10000]
+	println('done')
 }
diff --git a/src/creation.v b/src/creation.v
index c0033051..413f844f 100644
--- a/src/creation.v
+++ b/src/creation.v
@@ -93,6 +93,7 @@ pub fn from_1d[T](arr []T, params TensorData) !&Tensor[T] {
 
 // from_2d takes a two dimensional array of floating point values
 // and returns a two-dimensional Tensor if possible
+@[direct_array_access]
 pub fn from_2d[T](a [][]T, params TensorData) !&Tensor[T] {
 	mut arr := []T{cap: a.len * a[0].len}
 	for i in 0 .. a.len {
diff --git a/src/fun.v b/src/fun.v
index 9dde3bfc..303decb9 100644
--- a/src/fun.v
+++ b/src/fun.v
@@ -135,6 +135,7 @@ pub fn (t &Tensor[T]) as_strided[T](shape []int, strides []int) !&Tensor[T] {
 
 // transpose permutes the axes of an tensor in a specified
 // order and returns a view of the data
+@[direct_array_access]
 pub fn (t &Tensor[T]) transpose[T](order []int) !&Tensor[T] {
 	mut ret := t.view()
 	n := order.len
@@ -187,6 +188,7 @@ fn fabs(x f64) f64 {
 }
 
 // slice returns a tensor from a variadic list of indexing operations
+@[direct_array_access]
 pub fn (t &Tensor[T]) slice[T](idx ...[]int) !&Tensor[T] {
 	mut newshape := t.shape.clone()
 	mut newstrides := t.strides.clone()
@@ -266,6 +268,7 @@ pub fn (t &Tensor[T]) slice[T](idx ...[]int) !&Tensor[T] {
 
 // slice_hilo returns a view of an array from a list of starting
 // indices and a list of closing indices.
+@[direct_array_access]
 pub fn (t &Tensor[T]) slice_hilo[T](idx1 []int, idx2 []int) !&Tensor[T] {
 	mut newshape := t.shape.clone()
 	mut newstrides := t.strides.clone()
diff --git a/src/fun_logical.v b/src/fun_logical.v
index 5810b360..a6d008d6 100644
--- a/src/fun_logical.v
+++ b/src/fun_logical.v
@@ -105,7 +105,7 @@ pub fn (t &Tensor[T]) array_equiv[T](other &Tensor[T]) bool {
 	return true
 }
 
-@[inline]
+@[direct_array_access; inline]
 fn handle_equal[T](vals []T, _ []int) bool {
 	mut equal := true
 	for v in vals {
diff --git a/src/iter.v b/src/iter.v
index 4bec4e78..6b128682 100644
--- a/src/iter.v
+++ b/src/iter.v
@@ -129,6 +129,7 @@ fn handle_flatten_iteration[T](mut s TensorIterator[T]) T {
 	return val
 }
 
+@[direct_array_access]
 fn tensor_backstrides[T](t &Tensor[T]) []int {
 	rank := t.rank()
 	shape := t.shape
diff --git a/src/split.v b/src/split.v
index 01914ca5..34c46a44 100644
--- a/src/split.v
+++ b/src/split.v
@@ -6,6 +6,7 @@ module vtl
 // integer that does not equally divide the axis. For an array of length
 // l that should be split into n sections, it returns l % n sub-arrays of
 // size l//n + 1 and the rest of size l//n.
+@[direct_array_access]
 pub fn (t &Tensor[T]) array_split[T](ind int, axis int) ![]&Tensor[T] {
 	ntotal := t.shape[axis]
 	neach := ntotal / ind
@@ -125,7 +126,11 @@ pub fn (t &Tensor[T]) dsplit_expl[T](ind []int) ![]&Tensor[T] {
 
 // splitter implements a generic splitting function that contains the underlying functionality
 // for all split operations
+@[direct_array_access]
 fn (t &Tensor[T]) splitter[T](axis int, n int, div_points []int) ![]&Tensor[T] {
+	if n > 0 && div_points.len <= n {
+		return error('splitter error, div_points.len <= n')
+	}
 	mut subary := []&Tensor[T]{}
 	sary := t.swapaxes(axis, 0)!
 	for i in 0 .. n {
diff --git a/src/util.v b/src/util.v
index 120dea90..30717145 100644
--- a/src/util.v
+++ b/src/util.v
@@ -60,6 +60,7 @@ pub fn (mut t Tensor[T]) ensure_memory[T]() {
 
 // assert_shape_off_axis ensures that the shapes of Tensors match
 // for concatenation, except along the axis being joined
+@[direct_array_access]
 fn assert_shape_off_axis[T](ts []&Tensor[T], axis int, shape []int) ![]int {
 	mut retshape := shape.clone()
 	for t in ts {
@@ -91,6 +92,7 @@ fn assert_shape[T](shape []int, ts []&Tensor[T]) ! {
 
 // is_col_major_contiguous checks if an array is contiguous with a col-major
 // memory layout
+@[direct_array_access]
 fn is_col_major_contiguous(shape []int, strides []int, ndims int) bool {
 	if ndims == 0 {
 		return true
@@ -114,6 +116,7 @@ fn is_col_major_contiguous(shape []int, strides []int, ndims int) bool {
 
 // is_row_major_contiguous checks if an array is contiguous with a row-major
 // memory layout
+@[direct_array_access]
 fn is_row_major_contiguous(shape []int, strides []int, ndims int) bool {
 	if ndims == 0 {
 		return true
@@ -150,6 +153,7 @@ fn clip_axis(axis int, size int) !int {
 }
 
 // strides_from_shape returns the strides from a shape and memory format
+@[direct_array_access]
fn strides_from_shape(shape []int, memory MemoryFormat) []int {
 	mut accum := 1
 	mut result := []int{len: shape.len}
@@ -207,6 +211,7 @@ fn shape_with_autosize(shape []int, size int) !([]int, int) {
 
 // filter_shape_not_strides removes 0 size dimensions from the shape
 // and strides of an array
+@[direct_array_access]
 fn filter_shape_not_strides(shape []int, strides []int) !([]int, []int) {
 	mut newshape := []int{}
 	mut newstrides := []int{}
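
Note on the attribute added throughout this patch: @[direct_array_access] asks the V compiler to skip array bounds checking inside the annotated function, trading runtime safety checks for speed on hot paths. Any index must then be validated by hand, which is why the splitter change pairs the attribute with an explicit div_points.len guard. (Similarly, unbuffer_stdout() in the mnist test disables stdout buffering so the progress prints appear immediately.) Below is a minimal standalone sketch of the guard-plus-attribute pattern; it is not part of this patch, and sum_first_n is a hypothetical function used only for illustration:

@[direct_array_access]
fn sum_first_n(vals []int, n int) !int {
	// Bounds checking is disabled in this function, so this guard is
	// the only thing preventing an out-of-range read below.
	if n > vals.len {
		return error('sum_first_n: n > vals.len')
	}
	mut total := 0
	for i in 0 .. n {
		total += vals[i] // unchecked access; safe because of the guard above
	}
	return total
}

fn main() {
	total := sum_first_n([1, 2, 3, 4], 3) or { panic(err) }
	println(total) // prints 6
}

As a worked example of the array_split doc comment above: splitting an axis of length l = 10 into n = 3 sections yields 10 % 3 = 1 sub-array of size 10 / 3 + 1 = 4 and the remaining two of size 10 / 3 = 3 (4 + 3 + 3 = 10).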