generated from learning-process/parallel_programming_course
[FIX REVERT] Vershinina Alexandra. Task 3. Variant 1. Multiplication of dense matrices. Elements of type double. Block scheme, Cannon's algorithm (#858)
Showing 8 changed files with 1,049 additions and 0 deletions.
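For context on the algorithm named in the commit message: Cannon's scheme lays a q x q grid of b x b blocks over the n x n operands, pre-skews them (row i of A is rotated left by i blocks, column j of B is rotated up by j blocks), and then repeats a local block multiply followed by a one-block rotation of A to the left and B upwards, q times in total. Below is a minimal sequential sketch of that schedule in plain C++ (no MPI). It is an illustration only, not code from this commit, and every name in it is hypothetical.

#include <cstddef>
#include <vector>

using Matrix = std::vector<double>;  // row-major n x n storage

// Multiplies two n x n matrices following Cannon's block schedule,
// simulated sequentially on a q x q "process grid" (assumes q divides n).
Matrix cannon_block_schedule(const Matrix& A, const Matrix& B, std::size_t n, std::size_t q) {
  const std::size_t b = n / q;  // block size held by each grid cell
  Matrix C(n * n, 0.0);
  for (std::size_t s = 0; s < q; ++s) {      // q multiply-and-shift steps
    for (std::size_t i = 0; i < q; ++i) {    // grid row
      for (std::size_t j = 0; j < q; ++j) {  // grid column
        // After the initial skew plus s rotations, cell (i, j) holds block
        // A(i, k) and block B(k, j) with the same k; that index match is
        // the whole point of the schedule.
        const std::size_t k = (i + j + s) % q;
        for (std::size_t r = 0; r < b; ++r) {
          for (std::size_t c = 0; c < b; ++c) {
            for (std::size_t t = 0; t < b; ++t) {
              C[(i * b + r) * n + (j * b + c)] +=
                  A[(i * b + r) * n + (k * b + t)] * B[(k * b + t) * n + (j * b + c)];
            }
          }
        }
      }
    }
  }
  return C;  // equal to the ordinary row-by-column product
}

In the MPI version each grid cell is a rank, the index arithmetic above becomes actual block exchanges between neighbouring ranks, and each rank performs only q block multiplications; the parallel task in this commit is expected to follow that pattern (its ops_mpi.cpp is among the 8 changed files but is not shown on this page).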
tasks/mpi/vershinina_a_cannons_algorithm/func_tests/main.cpp (254 additions, 0 deletions)

#include <gtest/gtest.h>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <random>
#include <vector>

#include "mpi/vershinina_a_cannons_algorithm/include/ops_mpi.hpp"

std::vector<double> getRandomMatrix(double r) {
  std::random_device dev;
  std::mt19937 gen(dev());
  std::uniform_int_distribution<> distr(0, 100);
  std::vector<double> matrix(r * r, 0.0);
  for (int i = 0; i < r * r; i++) {
    matrix[i] = distr(gen);
  }
  return matrix;
}

TEST(vershinina_a_cannons_algorithm, Test_1) {
  boost::mpi::communicator world;
  if (world.size() < 4) {
    GTEST_SKIP();
  }

  int n = 3;
  auto lhs = getRandomMatrix(3);
  auto rhs = getRandomMatrix(3);

  std::vector<double> res(n * n, 0.0);
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
  }
  vershinina_a_cannons_algorithm::TestMPITaskParallel testTaskPar(taskDataPar);
  if (!testTaskPar.validation()) {
    GTEST_SKIP();
  }
  testTaskPar.pre_processing();
  testTaskPar.run();
  testTaskPar.post_processing();
  if (world.rank() == 0) {
    std::vector<double> ref_res(n * n, 0.0);
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs_count.emplace_back(n);
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_res.data()));

    vershinina_a_cannons_algorithm::TestMPITaskSequential testTaskSeq(taskDataSeq);
    ASSERT_TRUE(testTaskSeq.validation());
    testTaskSeq.pre_processing();
    testTaskSeq.run();
    testTaskSeq.post_processing();
    for (int i = 0; i < (int)res.size(); i++) {
      ASSERT_NEAR(res[i], ref_res[i], 0.1);
    }
  }
}

TEST(vershinina_a_cannons_algorithm, Test_2) {
  boost::mpi::communicator world;
  if (world.size() < 4) {
    GTEST_SKIP();
  }

  int n = 5;
  auto lhs = getRandomMatrix(5);
  auto rhs = getRandomMatrix(5);

  std::vector<double> res(n * n, 0.0);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
  }

  vershinina_a_cannons_algorithm::TestMPITaskParallel testTaskPar(taskDataPar);
  if (!testTaskPar.validation()) {
    GTEST_SKIP();
  }
  testTaskPar.pre_processing();
  testTaskPar.run();
  testTaskPar.post_processing();

  if (world.rank() == 0) {
    std::vector<double> ref_res(n * n, 0.0);
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs_count.emplace_back(n);
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_res.data()));

    vershinina_a_cannons_algorithm::TestMPITaskSequential testTaskSeq(taskDataSeq);
    ASSERT_TRUE(testTaskSeq.validation());
    testTaskSeq.pre_processing();
    testTaskSeq.run();
    testTaskSeq.post_processing();

    for (int i = 0; i < (int)res.size(); i++) {
      ASSERT_NEAR(res[i], ref_res[i], 0.1);
    }
  }
}

TEST(vershinina_a_cannons_algorithm, Test_3) {
  boost::mpi::communicator world;
  if (world.size() < 4) {
    GTEST_SKIP();
  }

  int n = 10;
  auto lhs = getRandomMatrix(10);
  auto rhs = getRandomMatrix(10);

  std::vector<double> res(n * n, 0.0);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
  }

  vershinina_a_cannons_algorithm::TestMPITaskParallel testTaskPar(taskDataPar);
  if (!testTaskPar.validation()) {
    GTEST_SKIP();
  }
  testTaskPar.pre_processing();
  testTaskPar.run();
  testTaskPar.post_processing();

  if (world.rank() == 0) {
    std::vector<double> ref_res(n * n, 0.0);
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs_count.emplace_back(n);
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_res.data()));

    vershinina_a_cannons_algorithm::TestMPITaskSequential testTaskSeq(taskDataSeq);
    ASSERT_TRUE(testTaskSeq.validation());
    testTaskSeq.pre_processing();
    testTaskSeq.run();
    testTaskSeq.post_processing();

    for (int i = 0; i < (int)res.size(); i++) {
      ASSERT_NEAR(res[i], ref_res[i], 0.1);
    }
  }
}

TEST(vershinina_a_cannons_algorithm, Test_4) {
  boost::mpi::communicator world;
  if (world.size() < 4) {
    GTEST_SKIP();
  }

  int n = 15;
  auto lhs = getRandomMatrix(15);
  auto rhs = getRandomMatrix(15);

  std::vector<double> res(n * n, 0.0);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
  }

  vershinina_a_cannons_algorithm::TestMPITaskParallel testTaskPar(taskDataPar);
  if (!testTaskPar.validation()) {
    GTEST_SKIP();
  }
  testTaskPar.pre_processing();
  testTaskPar.run();
  testTaskPar.post_processing();

  if (world.rank() == 0) {
    std::vector<double> ref_res(n * n, 0.0);
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs_count.emplace_back(n);
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_res.data()));

    vershinina_a_cannons_algorithm::TestMPITaskSequential testTaskSeq(taskDataSeq);
    ASSERT_TRUE(testTaskSeq.validation());
    testTaskSeq.pre_processing();
    testTaskSeq.run();
    testTaskSeq.post_processing();

    for (int i = 0; i < (int)res.size(); i++) {
      ASSERT_NEAR(res[i], ref_res[i], 0.1);
    }
  }
}

TEST(vershinina_a_cannons_algorithm, Test_5) {
  boost::mpi::communicator world;
  if (world.size() < 4) {
    GTEST_SKIP();
  }

  int n = 30;
  auto lhs = getRandomMatrix(30);
  auto rhs = getRandomMatrix(30);

  std::vector<double> res(n * n, 0.0);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
  }

  vershinina_a_cannons_algorithm::TestMPITaskParallel testTaskPar(taskDataPar);
  if (!testTaskPar.validation()) {
    GTEST_SKIP();
  }
  testTaskPar.pre_processing();
  testTaskPar.run();
  testTaskPar.post_processing();

  if (world.rank() == 0) {
    std::vector<double> ref_res(n * n, 0.0);
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs_count.emplace_back(n);
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(lhs.data()));
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(rhs.data()));
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(ref_res.data()));

    vershinina_a_cannons_algorithm::TestMPITaskSequential testTaskSeq(taskDataSeq);
    ASSERT_TRUE(testTaskSeq.validation());
    testTaskSeq.pre_processing();
    testTaskSeq.run();
    testTaskSeq.post_processing();

    for (int i = 0; i < (int)res.size(); i++) {
      ASSERT_NEAR(res[i], ref_res[i], 0.1);
    }
  }
}
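A note for reading the five cases above: they are identical except for the matrix order n (3, 5, 10, 15, 30). Each test skips unless at least 4 MPI processes are available, runs the parallel task on randomly generated inputs, and on rank 0 re-runs the sequential task on the same inputs and compares the two results element by element with ASSERT_NEAR and a tolerance of 0.1. Under a typical launcher that means invoking the functional-test binary with something like mpirun -np 4 (the exact binary name comes from the project's build targets, which are not shown on this page).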
tasks/mpi/vershinina_a_cannons_algorithm/include/ops_mpi.hpp (111 additions, 0 deletions)

#pragma once

#include <boost/mpi/communicator.hpp>
#include <initializer_list>
#include <memory>
#include <ostream>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace vershinina_a_cannons_algorithm {

// Square n x n row-major matrix with cyclically shifted element access,
// as required by Cannon's scheme.
template <class T>
struct TMatrix {
  size_t n;

  std::vector<T> data{};
  size_t hshift{};
  size_t vshift{};

  void set_horizontal_shift(size_t shift) { hshift = shift; }
  void set_vertical_shift(size_t shift) { vshift = shift; }

  const T& at(size_t row, size_t col) const noexcept { return data[row * n + col]; }
  T& at(size_t row, size_t col) noexcept { return const_cast<T&>(std::as_const(*this).at(row, col)); }

  // Access with the row-dependent horizontal shift applied:
  // returns data[row][(col + hshift + row) % n].
  const T& at_h(size_t row, size_t col) const noexcept {
    size_t actual_hshift = (hshift + row) % n;
    if (col < n - actual_hshift) {
      col += actual_hshift;
    } else {
      col = col - (n - actual_hshift);
    }
    return data[row * n + col];
  }
  T& at_h(size_t row, size_t col) noexcept { return const_cast<T&>(std::as_const(*this).at_h(row, col)); }

  // Access with the column-dependent vertical shift applied:
  // returns data[(row + vshift + col) % n][col].
  const T& at_v(size_t row, size_t col) const noexcept {
    size_t actual_vshift = (vshift + col) % n;
    if (row < n - actual_vshift) {
      row += actual_vshift;
    } else {
      row = row - (n - actual_vshift);
    }
    return data[row * n + col];
  }
  T& at_v(size_t row, size_t col) noexcept { return const_cast<T&>(std::as_const(*this).at_v(row, col)); }

  bool operator==(const TMatrix& other) const noexcept { return n == other.n && data == other.data; }

  void read(const T* src) { data.assign(src, src + n * n); }

  friend std::ostream& operator<<(std::ostream& os, const TMatrix& m) {
    os << "M(" << m.n << "," << m.n << "): [";
    for (const auto& e : m.data) {
      os << e << ' ';
    }
    os << ']';
    return os;
  }

  static TMatrix create(size_t n, std::initializer_list<T> intl = {}) {
    TMatrix mat = {n, std::vector<T>(intl)};
    mat.data.resize(n * n);
    return mat;
  }
  // Plain row-by-column product, used as the sequential reference.
  TMatrix operator*(const TMatrix& rhs) const {
    auto res = create(n);
    for (size_t i = 0; i < n; i++) {
      for (size_t j = 0; j < rhs.n; j++) {
        res.at(i, j) = 0;
        for (size_t k = 0; k < rhs.n; k++) {
          res.at(i, j) += at(i, k) * rhs.at(k, j);
        }
      }
    }
    return res;
  }
};

class TestMPITaskSequential : public ppc::core::Task {
 public:
  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;
  int n{};

 private:
  TMatrix<double> lhs_{};
  TMatrix<double> rhs_{};
  TMatrix<double> res_{};
  TMatrix<double> res_c{};
};

class TestMPITaskParallel : public ppc::core::Task {
 public:
  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  int n;
  std::pair<std::vector<double>, std::vector<double>> in_;
  std::vector<double> res_;
  boost::mpi::communicator world;
};

}  // namespace vershinina_a_cannons_algorithm
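The TMatrix helpers above can be exercised without MPI at all. Below is a minimal sketch, assuming only this header, of how the shifted accessors at_h/at_v realize Cannon's schedule at element granularity: accumulating their products over n shift steps reproduces the plain operator* result. The main() is illustrative and not part of the commit.

#include <cassert>

#include "mpi/vershinina_a_cannons_algorithm/include/ops_mpi.hpp"

int main() {
  using vershinina_a_cannons_algorithm::TMatrix;
  const size_t n = 3;
  auto lhs = TMatrix<double>::create(n, {1, 2, 3, 4, 5, 6, 7, 8, 9});
  auto rhs = TMatrix<double>::create(n, {9, 8, 7, 6, 5, 4, 3, 2, 1});
  auto ref = lhs * rhs;                   // plain triple-loop product
  auto res = TMatrix<double>::create(n);  // zero-filled accumulator
  for (size_t s = 0; s < n; s++) {        // n shift steps
    lhs.set_horizontal_shift(s);
    rhs.set_vertical_shift(s);
    for (size_t i = 0; i < n; i++) {
      for (size_t j = 0; j < n; j++) {
        // At step s, cell (i, j) reads lhs[i][(i + j + s) % n] and
        // rhs[(i + j + s) % n][j]; the matching inner index makes the sum
        // over s equal to the ordinary dot product of row i and column j.
        res.at(i, j) += lhs.at_h(i, j) * rhs.at_v(i, j);
      }
    }
  }
  assert(res == ref);  // exact here: every value involved is a small integer
  return 0;
}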