From 66df10ac48322102ab3dae6310e364a8426aea5e Mon Sep 17 00:00:00 2001
From: Mark Dokter
Date: Mon, 15 Jul 2024 17:58:03 +0200
Subject: [PATCH] Changes to quick-test lenet-train using more builtins (that need CUDA)

These builtins still need proper script wrapping.
---
 scripts/nn/networks/lenet-train.daph | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/scripts/nn/networks/lenet-train.daph b/scripts/nn/networks/lenet-train.daph
index 3dc2e1080..5d56705d6 100644
--- a/scripts/nn/networks/lenet-train.daph
+++ b/scripts/nn/networks/lenet-train.daph
@@ -100,24 +100,28 @@ def predict(X:matrix, C, Hin, Win, W1:matrix, b1:matrix, W2:matrix, b2:matrix, W
     # Compute forward pass
     ## layer 1: conv1 -> relu1 -> pool1
     outc1, Houtc1, Woutc1 = conv2d.forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride, pad, pad);
-    outr1 = relu.forward(outc1);
+    outr1 = relu(outc1);
+    #outr1 = relu.forward(outc1);
     outp1, Houtp1, Woutp1 = max_pool2d.forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0);
     print("predict fwd layer 1 done");
     ## layer 2: conv2 -> relu2 -> pool2
     outc2, Houtc2, Woutc2 = conv2d.forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad);
-    outr2 = relu.forward(outc2);
+    #outr2 = relu.forward(outc2);
+    outr2 = relu(outc2);
     outp2, Houtp2, Woutp2 = max_pool2d.forward(outr2, F2, Houtp1, Woutp1, 2, 2, 2, 2, 0, 0);
     print("predict fwd layer 2 done");
     ## layer 3: affine3 -> relu3 -> dropout
     # outa3 = affine.forward(outp2, W3, b3);
     outa3 = affine(outp2, W3, b3);
-    outr3 = relu.forward(outa3);
+    #outr3 = relu.forward(outa3);
+    outr3 = relu(outa3);
     outd3, maskd3 = dropout.forward(outr3, 0.5, -1);
     print("predict fwd layer 3 done");
     ## layer 4: affine4 -> softmax
     #outa4 = affine.forward(outd3, W4, b4);
     outa4 = affine(outd3, W4, b4);
-    probs_batch = softmax.forward(outa4);
+    #probs_batch = softmax.forward(outa4);
+    probs_batch = softmax(outa4);
     print("predict fwd layer 4 done");
     # Store predictions
     probs[beg:end,] = probs_batch;
@@ -218,25 +222,29 @@ print("start training layer 1");
     ## layer 1: conv1 -> relu1 -> pool1
     outc1, Houtc1, Woutc1 = conv2d.forward(X_batch, W1, b1, C, Hin, Win, Hf, Wf, stride, stride, pad, pad);
     print("conv1 done");
-    outr1 = relu.forward(outc1);
+    #outr1 = relu.forward(outc1);
+    outr1 = relu(outc1);
     print("layer 1 relu done");
     outp1, Houtp1, Woutp1 = max_pool2d.forward(outr1, F1, Houtc1, Woutc1, 2, 2, 2, 2, 0, 0);
     print("train fwd layer 1 done");
     ## layer 2: conv2 -> relu2 -> pool2
     outc2, Houtc2, Woutc2 = conv2d.forward(outp1, W2, b2, F1, Houtp1, Woutp1, Hf, Wf, stride, stride, pad, pad);
-    outr2 = relu.forward(outc2);
+    #outr2 = relu.forward(outc2);
+    outr2 = relu(outc2);
    outp2, Houtp2, Woutp2 = max_pool2d.forward(outr2, F2, Houtp1, Woutp1, 2, 2, 2, 2, 0, 0);
     print("train fwd layer 2 done");
     ## layer 3: affine3 -> relu3 -> dropout
     #outa3 = affine.forward(outp2, W3, b3);
     outa3 = affine(outp2, W3, b3);
-    outr3 = relu.forward(outa3);
+    #outr3 = relu.forward(outa3);
+    outr3 = relu(outa3);
     outd3, maskd3 = dropout.forward(outr3, 0.5, -1);
     print("train fwd layer 3 done");
     ## layer 4: affine4 -> softmax
     #outa4 = affine.forward(outd3, W4, b4);
     outa4 = affine(outd3, W4, b4);
-    probs = softmax.forward(outa4);
+    #probs = softmax.forward(outa4);
+    probs = softmax(outa4);
     print("train fwd layer 4 done");
     # Compute loss & accuracy for training & validation data every 100 iterations.
     if (i % 100 == 0)
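
The commit note says these builtins still need proper script wrapping. A minimal sketch of what such wrapping could look like for the relu layer follows; the file path (scripts/nn/layers/relu.daph), the typed signatures, and the backward definition are assumptions for illustration, not the repository's actual layer API. The idea is that the layer script keeps its forward/backward interface but delegates the forward pass to the relu builtin, so call sites such as relu.forward(outc1) in lenet-train.daph could stay unchanged while the builtin kernel does the work.

# scripts/nn/layers/relu.daph (hypothetical wrapper sketch)

# Forward pass: delegate to the relu builtin.
def forward(X:matrix<f64>) -> matrix<f64> {
    return relu(X);
}

# Backward pass: the gradient flows only where the input was positive
# (elementwise comparison yields a 0/1 mask that is multiplied into dout).
def backward(dout:matrix<f64>, X:matrix<f64>) -> matrix<f64> {
    return dout * (X > 0.0);
}

# Usage from a network script, after importing the layer script:
#   outr1 = relu.forward(outc1);

Wrapping the builtin this way would keep the network scripts' interface unchanged, while the choice of the actual kernel (CPU or CUDA) is left to the DAPHNE compiler and runtime.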