Commit
Merge pull request #11 from birm/style-1
Fix Style Build
birm authored May 27, 2020
2 parents 3317c08 + 2c82389 commit e70d009
Showing 9 changed files with 82 additions and 217 deletions.
143 changes: 0 additions & 143 deletions .travis/config.hpp

This file was deleted.

2 changes: 1 addition & 1 deletion Kaggle/DigitRecognizer/src/DigitRecognizer.cpp
@@ -79,7 +79,7 @@ int main()
train.n_cols - 1) / 255.0;
const mat validX = valid.submat(1, 0, valid.n_rows - 1,
valid.n_cols - 1) / 255.0;

const int ITERATIONS_PER_CYCLE = trainX.n_cols;
// According to NegativeLogLikelihood output layer of NN, labels should
// specify class of a data point and be in the interval from 1 to
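
The truncated comment above refers to the usual label shift for NegativeLogLikelihood; a minimal sketch of it, assuming (as the submat calls above imply) that row 0 of train and valid holds the 0-9 digit labels:

// Shift 0-9 digit labels into the 1-10 range expected by the
// NegativeLogLikelihood output layer. Row 0 holding the labels is an
// assumption based on the feature extraction above.
const arma::mat trainY = train.row(0) + 1;
const arma::mat validY = valid.row(0) + 1;
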
10 changes: 5 additions & 5 deletions Kaggle/DigitRecognizerBatchNorm/src/DigitRecognizerBatchNorm.cpp
@@ -3,7 +3,8 @@
* solving Digit Recognizer problem from Kaggle website.
*
* The full description of a problem as well as datasets for training
* and testing are available here https://www.kaggle.com/c/digit-recognizer using BatchNorm
* and testing are available here https://www.kaggle.com/c/digit-recognizer
* using BatchNorm
*
* mlpack is free software; you may redistribute it and/or modify it under the
* terms of the 3-clause BSD license. You should have received a copy of the
@@ -97,15 +98,15 @@ int main() {
// data and PRelU layer. Parameters specify the number of input features
// and number of neurons in the next layer.
model.Add<Linear<> >(trainX.n_rows, H1);
// The first PReLU activation layer. parameter can be set as constructor argument.
// The first PReLU activation layer. parameter can be set as constructor arg.
model.Add<PReLU<> >();
// BatchNorm layer applied after PReLU activation as it gives better results practically.
// BatchNorm layer applied after PReLU activation as it gives better results.
model.Add<BatchNorm<> >(H1);
// Intermediate layer between PReLU activation layers.
model.Add<Linear<> >(H1, H2);
// The second PReLU layer.
model.Add<PReLU<> >();
//Second BatchNorm layer
// Second BatchNorm layer
model.Add<BatchNorm<> >(H2);
// Intermediate layer.
model.Add<Linear<> >(H2, 10);
@@ -136,7 +137,6 @@ int main() {
// Cycles for monitoring the process of a solution.
for (int i = 0; i <= CYCLES; i++)
{

// Train neural network. If this is the first iteration, weights are
// random, using current values as starting point otherwise.
model.Train(trainX, trainY, optimizer);
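
The per-cycle validation step is elided from this hunk; a sketch of the usual pattern, assuming a getLabels() helper in Kaggle/kaggle_utils.hpp that converts network output back to 1-based class labels (accuracy() is the helper shown later in this diff):

arma::mat predOut;
// Predict on the validation set with the weights trained so far.
model.Predict(validX, predOut);
// getLabels() is an assumed helper that picks the most probable class.
arma::Row<size_t> predLabels = getLabels(predOut);
// accuracy() compares the 1-based predictions against the real labels.
double validAccuracy = accuracy(predLabels, validY);
cout << i << " - accuracy: " << validAccuracy << "%" << endl;
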
13 changes: 5 additions & 8 deletions Kaggle/DigitRecognizerCNN/src/DigitRecognizerCNN.cpp
@@ -116,8 +116,7 @@ int main()
0, // Padding width.
0, // Padding height.
28, // Input width.
28 // Input height.
);
28); // Input Height

// Add first ReLU.
model.Add<LeakyReLU<> >();
@@ -128,8 +127,7 @@
2, // Height of field.
2, // Stride along width.
2, // Stride along height.
true
);
true);

// Add the second convolution layer.
model.Add<Convolution<> >(
@@ -142,8 +140,7 @@
0, // Padding width.
0, // Padding height.
12, // Input width.
12 // Input height.
);
12); // Input Height

// Add the second ReLU.
model.Add<LeakyReLU<> >();
@@ -223,6 +220,6 @@ int main()

// Saving results into Kaggle compatible CSV file.
save("Kaggle/results.csv", "ImageId,Label", testPred);
cout << "Results were saved to Kaggle/results.csv. This file can be uploaded to "
<< "https://www.kaggle.com/c/digit-recognizer/submissions." << endl;
cout << "Results were saved to Kaggle/results.csv. This file can be uploaded "
<< "to https://www.kaggle.com/c/digit-recognizer/submissions." << endl;
}
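
As a cross-check on the 12x12 input to the second convolution, the standard output-size formula applies; a small runnable sketch, assuming the first layer uses a 5x5 kernel with stride 1 (the kernel arguments are elided from this hunk):

#include <iostream>

// Per dimension: out = (in - kernel + 2 * padding) / stride + 1.
int ConvOut(int in, int kernel, int pad, int stride)
{
  return (in - kernel + 2 * pad) / stride + 1;
}

int main()
{
  const int conv1 = ConvOut(28, 5, 0, 1); // 24x24 after the first convolution.
  const int pool1 = conv1 / 2;            // 12x12 after 2x2 max pooling, stride 2.
  std::cout << conv1 << "x" << conv1 << " -> " << pool1 << "x" << pool1 << std::endl;
}
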
26 changes: 13 additions & 13 deletions Kaggle/kaggle_utils.hpp
@@ -71,20 +71,20 @@ double accuracy(arma::Row<size_t> predLabels, const arma::mat& realY)
void save(const std::string filename, std::string header,
const arma::Row<size_t>& predLabels)
{
std::ofstream out(filename);
out << header << std::endl;
for (size_t j = 0; j < predLabels.n_cols; ++j)
{
// j + 1 because Kaggle indexes start from 1
// pred - 1 because 1st class is 0, 2nd class is 1 and etc.
out << j + 1 << "," << std::round(predLabels(j)) - 1;
std::ofstream out(filename);
out << header << std::endl;
for (size_t j = 0; j < predLabels.n_cols; ++j)
{
// j + 1 because Kaggle indexes start from 1
// pred - 1 because 1st class is 0, 2nd class is 1 and etc.
out << j + 1 << "," << std::round(predLabels(j)) - 1;
// to avoid an empty line in the end of the file
if (j < predLabels.n_cols - 1)
{
out << std::endl;
}
}
out.close();
if (j < predLabels.n_cols - 1)
{
out << std::endl;
}
}
out.close();
}

#endif
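
A quick usage sketch for the save() helper above; the label values are invented for illustration:

#include <armadillo>
#include "kaggle_utils.hpp"

int main()
{
  // 1-based class predictions, as produced by the models in this repo.
  arma::Row<size_t> predLabels = {1, 3, 10};
  // Writes "ImageId,Label", then "1,0", "2,2" and "3,9" (j + 1, pred - 1).
  save("results.csv", "ImageId,Label", predLabels);
}
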
21 changes: 11 additions & 10 deletions LSTM/TimeSeries-Multivariate/src/LSTMTimeSeriesMultivariate.cpp
@@ -14,7 +14,7 @@
*/

/*
NOTE: the data need to be sorted by date in ascending order! The RNN learns from
NOTE: the data need to be sorted by date in ascending order! The RNN learns from
oldest to newest!
date close volume open high low
@@ -101,7 +101,8 @@ void SaveResults(const string filename,
// The prediction results are the (high, low) for the next day and come from
// the last slice from the prediction.
flatDataAndPreds.rows(flatDataAndPreds.n_rows - 2,
flatDataAndPreds.n_rows - 1) = predictions.slice(predictions.n_slices - 1);
flatDataAndPreds.n_rows - 1) = predictions.slice(
predictions.n_slices - 1);

scale.InverseTransform(flatDataAndPreds, flatDataAndPreds);

@@ -193,10 +194,10 @@ int main()
size_t inputSize = 5, outputSize = 2;

// Split the dataset into training and validation sets.
arma::mat trainData = dataset.submat(arma::span(),arma::span(0, (1 - RATIO) *
arma::mat trainData = dataset.submat(arma::span(), arma::span(0, (1 - RATIO) *
dataset.n_cols));
arma::mat testData = dataset.submat(arma::span(), arma::span((1 - RATIO) * dataset.n_cols,
dataset.n_cols - 1));
arma::mat testData = dataset.submat(arma::span(),
arma::span((1 - RATIO) * dataset.n_cols, dataset.n_cols - 1));

// Number of epochs for training.
const int EPOCHS = 150;
@@ -255,13 +256,13 @@ int main()
BATCH_SIZE, // Batch size. Number of data points that are used in each
// iteration.
trainData.n_cols * EPOCHS, // Max number of iterations.
1e-8,// Tolerance.
1e-8, // Tolerance.
true, // Shuffle.
AdamUpdate(1e-8, 0.9, 0.999)); // Adam update policy.

// Instead of terminating based on the tolerance of the objective function,
// we'll depend on the maximum number of iterations, and terminate early using
// the EarlyStopAtMinLoss callback.
// we'll depend on the maximum number of iterations, and terminate early
// using the EarlyStopAtMinLoss callback.
optimizer.Tolerance() = -1;

cout << "Training ..." << endl;
@@ -305,6 +306,6 @@ int main()
SaveResults(predFile, predOutP, scale, testX);

// Use this on Windows in order to keep the console window open.
//cout << "Ready!" << endl;
//getchar();
// cout << "Ready!" << endl;
// getchar();
}
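
The Train() call that consumes the EarlyStopAtMinLoss callback is elided from the hunks above; a sketch of the pattern, assuming ensmallen's callback objects and the model, optimizer and data defined in this file:

// Tolerance() = -1 disables objective-based termination, so training runs
// to the max iteration count unless the loss stops improving, in which
// case EarlyStopAtMinLoss() terminates early.
model.Train(trainX, trainY, optimizer, ens::EarlyStopAtMinLoss());
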
25 changes: 13 additions & 12 deletions LSTM/TimeSeries-Univariate/src/LSTMTimeSeriesUnivariate.cpp
@@ -1,5 +1,5 @@
/**
* An example of using Recurrent Neural Network (RNN)
* An example of using Recurrent Neural Network (RNN)
* to make forecasts on a time series of number of kilowatt-hours used in a
* residential home over a 3.5 month period, 25 November 2011 to 17 March 2012,
* which we aim to solve using a simple LSTM neural network. Electricity usage
@@ -16,7 +16,7 @@
*/

/*
NOTE: the data need to be sorted by date in ascending order! The RNN learns from
NOTE: the data need to be sorted by date in ascending order! The RNN learns from
oldest to newest!
DateTime,Consumption kWh,Off-peak,Mid-peak,On-peak
@@ -72,7 +72,7 @@ void CreateTimeSeriesData(InputDataType dataset,
LabelType& y,
const size_t rho)
{
for(size_t i = 0; i < dataset.n_cols - rho; i++)
for (size_t i = 0; i < dataset.n_cols - rho; i++)
{
X.subcube(arma::span(), arma::span(i), arma::span()) =
dataset.submat(arma::span(), arma::span(i, i + rho - 1));
@@ -98,7 +98,8 @@ void SaveResults(const string& filename,
// The prediction result is the energy consumption for the next hour and comes
// from the last slice of the prediction.
flatDataAndPreds.rows(flatDataAndPreds.n_rows - 1,
flatDataAndPreds.n_rows - 1) = predictions.slice(predictions.n_slices - 1);
flatDataAndPreds.n_rows - 1) = predictions.slice(
predictions.n_slices - 1);

scale.InverseTransform(flatDataAndPreds, flatDataAndPreds);
// We need to remove the last column because it was not used for training
@@ -188,10 +189,10 @@ int main()
dataset = dataset.submat(1, 1, 1, dataset.n_cols - 1);

// Split the dataset into training and validation sets.
arma::mat trainData = dataset.submat(arma::span(),arma::span(0, (1 - RATIO) *
arma::mat trainData = dataset.submat(arma::span(), arma::span(0, (1 - RATIO) *
dataset.n_cols));
arma::mat testData = dataset.submat(arma::span(), arma::span((1 - RATIO) * dataset.n_cols,
dataset.n_cols - 1));
arma::mat testData = dataset.submat(arma::span(),
arma::span((1 - RATIO) * dataset.n_cols, dataset.n_cols - 1));

// Number of iterations per cycle.
const int EPOCHS = 150;
@@ -243,15 +244,15 @@
// Set parameters for the Stochastic Gradient Descent (SGD) optimizer.
SGD<AdamUpdate> optimizer(
STEP_SIZE, // Step size of the optimizer.
BATCH_SIZE, // Batch size. Number of data points that are used in each iteration.
BATCH_SIZE, // Batch size. Number of data points used per iteration.
trainData.n_cols * EPOCHS, // Max number of iterations.
1e-8, // Tolerance.
true, // Shuffle.
AdamUpdate(1e-8, 0.9, 0.999)); // Adam update policy.

// Instead of terminating based on the tolerance of the objective function,
// we'll depend on the maximum number of iterations, and terminate early using
// the EarlyStopAtMinLoss callback.
// we'll depend on the maximum number of iterations, and terminate early
// using the EarlyStopAtMinLoss callback.
optimizer.Tolerance() = -1;

cout << "Training ..." << endl;
@@ -296,6 +297,6 @@ int main()
SaveResults(predFile, predOutP, scale, testX);

// Use this on Windows in order to keep the console window open.
//cout << "Ready!" << endl;
//getchar();
// cout << "Ready!" << endl;
// getchar();
}
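
A toy illustration of the windowing CreateTimeSeriesData performs; the label construction (each window's targets shifted one step ahead) is an assumption consistent with the next-hour prediction described earlier:

// A 1 x 5 univariate toy series.
arma::mat dataset = {10.0, 11.0, 12.0, 13.0, 14.0};
const size_t rho = 3;
// One column per window of length rho.
arma::cube X(1, dataset.n_cols - rho, rho);
arma::cube y(1, dataset.n_cols - rho, rho);
CreateTimeSeriesData(dataset, X, y, rho);
// Window 0: X holds {10, 11, 12}; y is assumed to hold {11, 12, 13}.
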