generated from learning-process/parallel_programming_course
[FIX REVERT] Morozov Egor. Variant 5. "Sparse matrix multiplication. Elements of type double. Matrix storage format: column-compressed (CCS)." (#857)
Co-authored-by: Egor1dzeN-learn <[email protected]>
1 parent dc5ea31, commit 850ea16
Showing 8 changed files with 1,285 additions and 0 deletions.
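
The task stores both operands in Compressed Column Storage (CCS). As a quick reference, here is the encoding of the 3x3 matrix used repeatedly in the tests below; the exact layout is an assumption based on the conventional CCS scheme and on how the tests pass the three arrays around.

#include <vector>

// CCS worked example (conventional layout, assumed): the test matrix
//   {{0, 2, 0},
//    {1, 0, 3},
//    {0, 4, 0}}
// keeps only its nonzeros, column by column.
std::vector<double> values = {1, 2, 4, 3};     // nonzeros of columns 0, 1, 1, 2
std::vector<int> row_indices = {1, 0, 2, 1};   // row index of each stored value
std::vector<int> col_pointers = {0, 1, 3, 4};  // column j spans [col_pointers[j], col_pointers[j + 1])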
tasks/mpi/morozov_e_mult_sparse_matrix/func_tests/main.cpp (293 additions, 0 deletions)
@@ -0,0 +1,293 @@
#include <gtest/gtest.h>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <cstdlib>
#include <vector>

#include "mpi/morozov_e_mult_sparse_matrix/include/ops_mpi.hpp"

namespace morozov_e_mult_sparse_matrix {
std::vector<std::vector<double>> generateRandomMatrix(int rows, int columns) {
  std::vector<std::vector<double>> result(rows, std::vector<double>(columns, 0));
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < columns; ++j) {
      result[i][j] = (static_cast<double>(rand()) / RAND_MAX) * 2000 - 1000;
    }
  }
  return result;
}
}  // namespace morozov_e_mult_sparse_matrix

TEST(morozov_e_mult_sparse_matrix, Test_Validation_colsA_notEqual_rowsB) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{0, 2}, {1, 0}, {0, 4}};
  std::vector<std::vector<double>> matrixB = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskData = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> out(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    morozov_e_mult_sparse_matrix::fillData(taskData, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, out);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskData);
  if (world.rank() == 0) {
    ASSERT_FALSE(testMpiTaskParallel.validation());
  }
}

TEST(morozov_e_mult_sparse_matrix, Test_Validation_colsAns_notEqual_colsB) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<std::vector<double>> matrixB = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskData = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> out(matrixA.size(), std::vector<double>(matrixB[0].size() + 1, 0));
  if (world.rank() == 0) {
    morozov_e_mult_sparse_matrix::fillData(taskData, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, out);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskData);
  if (world.rank() == 0) {
    ASSERT_FALSE(testMpiTaskParallel.validation());
  }
}

TEST(morozov_e_mult_sparse_matrix, Test_Validation_rowsAns_notEqual_rowsA) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<std::vector<double>> matrixB = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskData = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> out(matrixA.size() + 2, std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    morozov_e_mult_sparse_matrix::fillData(taskData, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, out);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskData);
  if (world.rank() == 0) {
    ASSERT_FALSE(testMpiTaskParallel.validation());
  }
}
TEST(morozov_e_mult_sparse_matrix, Test_Main1) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<std::vector<double>> matrixB = {{0, 2, 0}, {1, 0, 3}, {0, 4, 0}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> outPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  std::vector<std::vector<double>> outSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    // parallel version
    morozov_e_mult_sparse_matrix::fillData(taskDataPar, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outPar);
    // seq version
    morozov_e_mult_sparse_matrix::fillData(taskDataSeq, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outSeq);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel.validation(), true);
  testMpiTaskParallel.pre_processing();
  testMpiTaskParallel.run();
  testMpiTaskParallel.post_processing();
  if (world.rank() == 0) {
    std::vector<std::vector<double>> ansPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    std::vector<std::vector<double>> ansSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    for (size_t i = 0; i < outPar.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataPar->outputs[i]);
      // The row length is the column count of B, matrixB[0].size(); the original
      // used matrixB.size() (row count), which only works here because B is square.
      ansPar[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    morozov_e_mult_sparse_matrix::TestTaskSequential testMpiTaskSeq(taskDataSeq);
    ASSERT_EQ(testMpiTaskSeq.validation(), true);
    testMpiTaskSeq.pre_processing();
    testMpiTaskSeq.run();
    testMpiTaskSeq.post_processing();
    for (size_t i = 0; i < outSeq.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataSeq->outputs[i]);
      ansSeq[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    ASSERT_EQ(ansSeq, ansPar);
  }
}
TEST(morozov_e_mult_sparse_matrix, Test_Main2) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
  std::vector<std::vector<double>> matrixB = {{2, 0}, {0, 3}, {10, 4}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> outPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  std::vector<std::vector<double>> outSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    // par version
    morozov_e_mult_sparse_matrix::fillData(taskDataPar, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outPar);
    // seq version
    morozov_e_mult_sparse_matrix::fillData(taskDataSeq, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outSeq);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel.validation(), true);
  testMpiTaskParallel.pre_processing();
  testMpiTaskParallel.run();
  testMpiTaskParallel.post_processing();
  if (world.rank() == 0) {
    std::vector<std::vector<double>> ansPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    std::vector<std::vector<double>> ansSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    for (size_t i = 0; i < outPar.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataPar->outputs[i]);
      ansPar[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    morozov_e_mult_sparse_matrix::TestTaskSequential testMpiTaskSeq(taskDataSeq);
    ASSERT_EQ(testMpiTaskSeq.validation(), true);
    testMpiTaskSeq.pre_processing();
    testMpiTaskSeq.run();
    testMpiTaskSeq.post_processing();
    for (size_t i = 0; i < outSeq.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataSeq->outputs[i]);
      ansSeq[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    ASSERT_EQ(ansSeq, ansPar);
  }
}

TEST(morozov_e_mult_sparse_matrix, Test_Main3) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = {{0.2, 0, 0.5}, {0, 0.7, 6}, {0.1, 0, 0.8}};
  std::vector<std::vector<double>> matrixB = {{0.15, 0}, {0, 0.3}, {0.4, 0}};
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> outPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  std::vector<std::vector<double>> outSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    morozov_e_mult_sparse_matrix::fillData(taskDataPar, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outPar);
    // seq version
    morozov_e_mult_sparse_matrix::fillData(taskDataSeq, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outSeq);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel.validation(), true);
  testMpiTaskParallel.pre_processing();
  testMpiTaskParallel.run();
  testMpiTaskParallel.post_processing();
  if (world.rank() == 0) {
    std::vector<std::vector<double>> ansPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    std::vector<std::vector<double>> ansSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    for (size_t i = 0; i < outPar.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataPar->outputs[i]);
      ansPar[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    morozov_e_mult_sparse_matrix::TestTaskSequential testMpiTaskSeq(taskDataSeq);
    ASSERT_EQ(testMpiTaskSeq.validation(), true);
    testMpiTaskSeq.pre_processing();
    testMpiTaskSeq.run();
    testMpiTaskSeq.post_processing();
    for (size_t i = 0; i < outSeq.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataSeq->outputs[i]);
      ansSeq[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    ASSERT_EQ(ansSeq, ansPar);
  }
}

TEST(morozov_e_mult_sparse_matrix, Test_Main4) {
  boost::mpi::communicator world;
  std::vector<std::vector<double>> matrixA = morozov_e_mult_sparse_matrix::generateRandomMatrix(2, 3);
  std::vector<std::vector<double>> matrixB = morozov_e_mult_sparse_matrix::generateRandomMatrix(3, 2);
  std::vector<double> dA;
  std::vector<int> row_indA;
  std::vector<int> col_indA;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixA, dA, row_indA, col_indA);
  std::vector<double> dB;
  std::vector<int> row_indB;
  std::vector<int> col_indB;
  morozov_e_mult_sparse_matrix::convertToCCS(matrixB, dB, row_indB, col_indB);
  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  std::vector<std::vector<double>> outPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  std::vector<std::vector<double>> outSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
  if (world.rank() == 0) {
    morozov_e_mult_sparse_matrix::fillData(taskDataPar, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outPar);
    // seq version
    morozov_e_mult_sparse_matrix::fillData(taskDataSeq, matrixA.size(), matrixA[0].size(), matrixB.size(),
                                           matrixB[0].size(), dA, row_indA, col_indA, dB, row_indB, col_indB, outSeq);
  }

  morozov_e_mult_sparse_matrix::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel.validation(), true);
  testMpiTaskParallel.pre_processing();
  testMpiTaskParallel.run();
  testMpiTaskParallel.post_processing();
  if (world.rank() == 0) {
    std::vector<std::vector<double>> ansPar(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    std::vector<std::vector<double>> ansSeq(matrixA.size(), std::vector<double>(matrixB[0].size(), 0));
    for (size_t i = 0; i < outPar.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataPar->outputs[i]);
      ansPar[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    morozov_e_mult_sparse_matrix::TestTaskSequential testMpiTaskSeq(taskDataSeq);
    ASSERT_EQ(testMpiTaskSeq.validation(), true);
    testMpiTaskSeq.pre_processing();
    testMpiTaskSeq.run();
    testMpiTaskSeq.post_processing();
    for (size_t i = 0; i < outSeq.size(); ++i) {
      auto *ptr = reinterpret_cast<double *>(taskDataSeq->outputs[i]);
      ansSeq[i] = std::vector(ptr, ptr + matrixB[0].size());
    }
    ASSERT_EQ(ansSeq, ansPar);
  }
}
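
The tests above drive everything through convertToCCS, whose definition sits in one of the six changed files not reproduced in this excerpt. Below is a minimal sketch of a dense-to-CCS conversion consistent with the call sites and with the worked example above; only the signature comes from ops_mpi.hpp (shown next), the body is an assumption.

#include <vector>

namespace morozov_e_mult_sparse_matrix {
// Dense -> CCS sketch: walk each column, keep nonzeros, record their row
// indices, and close the column with a pointer into the values array.
void convertToCCS(const std::vector<std::vector<double>> &matrix, std::vector<double> &values,
                  std::vector<int> &row_indices, std::vector<int> &col_pointers) {
  const int rows = static_cast<int>(matrix.size());
  const int columns = rows > 0 ? static_cast<int>(matrix[0].size()) : 0;
  values.clear();
  row_indices.clear();
  col_pointers.assign(columns + 1, 0);
  for (int j = 0; j < columns; ++j) {
    for (int i = 0; i < rows; ++i) {
      if (matrix[i][j] != 0.0) {
        values.push_back(matrix[i][j]);
        row_indices.push_back(i);
      }
    }
    col_pointers[j + 1] = static_cast<int>(values.size());
  }
}
}  // namespace morozov_e_mult_sparse_matrix

Applied to the 3x3 matrix from the worked example, this produces exactly the values/row_indices/col_pointers triple shown there.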
tasks/mpi/morozov_e_mult_sparse_matrix/include/ops_mpi.hpp (61 additions, 0 deletions)
@@ -0,0 +1,61 @@
#pragma once

#include <gtest/gtest.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace morozov_e_mult_sparse_matrix {
template <typename T>
T scalMultOfVectors(const std::vector<T> &vA, const std::vector<T> &vB);

std::pair<std::vector<std::vector<double>>, std::vector<std::vector<double>>> convertToBasicMatrixs(
    const std::vector<double> &dA, const std::vector<int> &row_indA, const std::vector<int> &col_indA,
    const std::vector<double> &dB, const std::vector<int> &row_indB, const std::vector<int> &col_indB, int rowsA,
    int columnsA, int rowsB, int columnsB);

void convertToCCS(const std::vector<std::vector<double>> &matrix, std::vector<double> &values,
                  std::vector<int> &row_indices, std::vector<int> &col_pointers);

void fillData(std::shared_ptr<ppc::core::TaskData> &taskData, int rowsA, int columnsA, int rowsB, int columnsB,
              std::vector<double> &dA, std::vector<int> &row_indA, std::vector<int> &col_indA, std::vector<double> &dB,
              std::vector<int> &row_indB, std::vector<int> &col_indB, std::vector<std::vector<double>> &out);

class TestTaskSequential : public ppc::core::Task {
 public:
  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  std::vector<std::vector<double>> ans;
  std::vector<double> dA, dB;
  std::vector<int> row_indA, row_indB, col_indA, col_indB;
  int rowsA, rowsB, columnsA, columnsB, dA_size, dB_size, row_indA_size, row_indB_size, col_indA_size, col_indB_size;
};

template <typename T>
void printMatrix(std::vector<std::vector<T>> m);
template <typename T>
void printVector(std::vector<T> v);

class TestMPITaskParallel : public ppc::core::Task {
 public:
  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  std::vector<std::vector<double>> ans;
  std::vector<double> dA, dB, local_input_A, local_input_B;
  std::vector<int> row_indA, row_indB, col_indA, col_indB, local_input_;
  int rowsA, rowsB, columnsA, columnsB, dA_size, dB_size, row_indA_size, row_indB_size, col_indA_size, col_indB_size;
  boost::mpi::communicator world;
};
}  // namespace morozov_e_mult_sparse_matrix
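
The header only declares scalMultOfVectors and convertToBasicMatrixs; their definitions live in the parts of the commit this excerpt omits. The pair of declarations suggests a dense-expansion strategy: rebuild both operands from their CCS triples, then compute each output element as a dot product. Here is a sketch under that assumption; ccsToDense is a hypothetical helper name, not part of the header.

#include <cstddef>
#include <vector>

// Plausible body for the declared dot-product helper: sum of pairwise products
// over the common prefix of the two vectors.
template <typename T>
T scalMultOfVectors(const std::vector<T> &vA, const std::vector<T> &vB) {
  T sum = T{};
  for (std::size_t i = 0; i < vA.size() && i < vB.size(); ++i) {
    sum += vA[i] * vB[i];
  }
  return sum;
}

// Hypothetical helper: expand one CCS triple back to a dense matrix.
// convertToBasicMatrixs presumably does this once per operand and returns the pair.
std::vector<std::vector<double>> ccsToDense(const std::vector<double> &values, const std::vector<int> &row_indices,
                                            const std::vector<int> &col_pointers, int rows, int columns) {
  std::vector<std::vector<double>> dense(rows, std::vector<double>(columns, 0.0));
  for (int j = 0; j < columns; ++j) {
    for (int k = col_pointers[j]; k < col_pointers[j + 1]; ++k) {
      dense[row_indices[k]][j] = values[k];
    }
  }
  return dense;
}

With both operands dense again, run() can pair row i of A with column j of B and hand them to scalMultOfVectors; those per-element dot products are also the natural unit of work to distribute across MPI ranks in TestMPITaskParallel.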