generated from learning-process/parallel_programming_course
Korobeinikov Arseny. Task 2. Variant 18. Horizontal strip scheme for A, vertical strip scheme for B. (#534)

Description of the sequential algorithm: naive matrix multiplication.

Description of the parallel algorithm: each matrix is represented as a structure with the following fields: a vector of all matrix elements (stored row by row), the number of rows, and the number of columns. Each process receives some of the rows of matrix A and some of the columns of matrix B: specifically, every process gets the number of rows of A divided by the number of processes (the last process additionally receives the rows left over as the remainder of that division), and likewise for the columns of B. After the data are scattered from process 0, the computation begins. The result matrix is filled column by column, from left to right: the process that owns the column currently being computed broadcasts it to the other processes, each process computes the elements of that column that fall in its own rows and sends the results to process 0. In the end, process 0 holds the complete result matrix. To check the correctness of the algorithms, three tests with constant data (matrices of different sizes) were created, followed by a test with matrices of random elements.
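A minimal sketch of the block distribution described above, using a hypothetical helper block_sizes that is not part of the committed code: every process gets count / world_size rows of A (and columns of B), and the last process additionally takes the remainder.

#include <cassert>
#include <vector>

// Hypothetical helper (illustration only): sizes[p] is the number of rows of A
// (or columns of B) assigned to process p. Each process gets count / world_size
// items; the last process also takes the remainder of that division.
std::vector<int> block_sizes(int count, int world_size) {
  std::vector<int> sizes(world_size, count / world_size);
  sizes.back() += count % world_size;
  return sizes;
}

int main() {
  // Example: 10 rows over 4 processes -> {2, 2, 2, 4}.
  std::vector<int> sizes = block_sizes(10, 4);
  assert(sizes[0] == 2 && sizes[1] == 2 && sizes[2] == 2 && sizes[3] == 4);
  return 0;
}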
1 parent 920ae38, commit 1f52a55
Showing 8 changed files with 1,647 additions and 0 deletions.
598 changes: 598 additions & 0 deletions
...rix_multiplication_horizontal_scheme_A_vertical_scheme_B/func_tests/main_korobeinikov.cpp
Large diffs are not rendered by default.
82 changes: 82 additions & 0 deletions
...rix_multiplication_horizontal_scheme_A_vertical_scheme_B/include/ops_mpi_korobeinikov.hpp
@@ -0,0 +1,82 @@
// Copyright 2024 Korobeinikov Arseny
#pragma once

#include <gtest/gtest.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <cassert>  // assert() in Matrix::get_el
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace korobeinikov_a_test_task_mpi_lab_02 {

// Row-major matrix: element (row, col) is stored at data[row * count_cols + col].
struct Matrix {
  std::vector<int> data;
  int count_rows;
  int count_cols;

  Matrix() {
    count_rows = 0;
    count_cols = 0;
    data = std::vector<int>();
  }

  Matrix(int count_rows_, int count_cols_) {
    count_rows = count_rows_;
    count_cols = count_cols_;
    data = std::vector<int>(count_cols * count_rows);
  }

  int& get_el(int row, int col) {
    size_t index = row * count_cols + col;
    assert(index < data.size());
    return data[index];
  }

  const int& get_el(int row, int col) const {
    size_t index = row * count_cols + col;
    assert(index < data.size());
    return data[index];
  }
};

class TestMPITaskSequential : public ppc::core::Task {
 public:
  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  Matrix A;
  Matrix B;

  Matrix res;
};

class TestMPITaskParallel : public ppc::core::Task {
 public:
  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  std::vector<int> input_, local_input_;
  Matrix A;
  Matrix B;
  std::vector<int> local_A_rows;
  std::vector<int> local_B_cols;

  Matrix res;
  boost::mpi::communicator world;
};

}  // namespace korobeinikov_a_test_task_mpi_lab_02
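For reference, a minimal sketch of the "naive matrix multiplication" that the commit message names as the sequential algorithm, written against the Matrix struct above. multiply_naive is a hypothetical helper added here for illustration; the committed sequential and parallel implementations live in the .cpp files of this commit, which are not rendered on this page.

#include <cassert>

#include "mpi/korobeinikov_matrix_multiplication_horizontal_scheme_A_vertical_scheme_B/include/ops_mpi_korobeinikov.hpp"

using korobeinikov_a_test_task_mpi_lab_02::Matrix;

// Naive O(n^3) multiplication over the row-major Matrix struct:
// res(i, j) = sum over k of a(i, k) * b(k, j).
Matrix multiply_naive(const Matrix& a, const Matrix& b) {
  assert(a.count_cols == b.count_rows);
  Matrix res(a.count_rows, b.count_cols);
  for (int i = 0; i < a.count_rows; ++i) {
    for (int j = 0; j < b.count_cols; ++j) {
      int sum = 0;
      for (int k = 0; k < a.count_cols; ++k) {
        sum += a.get_el(i, k) * b.get_el(k, j);
      }
      res.get_el(i, j) = sum;
    }
  }
  return res;
}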
155 changes: 155 additions & 0 deletions
...rix_multiplication_horizontal_scheme_A_vertical_scheme_B/perf_tests/main_korobeinikov.cpp
@@ -0,0 +1,155 @@
// Copyright 2024 Korobeinikov Arseny
#include <gtest/gtest.h>

#include <boost/mpi/timer.hpp>

#include "core/perf/include/perf.hpp"
#include "mpi/korobeinikov_matrix_multiplication_horizontal_scheme_A_vertical_scheme_B/include/ops_mpi_korobeinikov.hpp"

// mpiexec -n 4 mpi_perf_tests

TEST(mpi_korobeinikov_perf_test_lab_02, test_pipeline_run) {
  boost::mpi::communicator world;

  // Create data
  std::vector<int> A;
  std::vector<int> B;
  int count_rows_A = 150;
  int count_cols_A = 150;
  int count_rows_B = 150;
  int count_cols_B = 150;

  std::vector<int> out;
  int count_rows_out = 0;
  int count_cols_out = 0;
  int count_rows_RA = 150;
  int count_cols_RA = 150;

  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    A = std::vector<int>(22500, 1);
    B = std::vector<int>(22500, 1);
    out = std::vector<int>(22500, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(A.data()));
    taskDataPar->inputs_count.emplace_back(A.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_A));
    taskDataPar->inputs_count.emplace_back(1);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_A));
    taskDataPar->inputs_count.emplace_back(1);

    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(B.data()));
    taskDataPar->inputs_count.emplace_back(B.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_B));
    taskDataPar->inputs_count.emplace_back(1);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_B));
    taskDataPar->inputs_count.emplace_back(1);

    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
    taskDataPar->outputs_count.emplace_back(out.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_out));
    taskDataPar->outputs_count.emplace_back(1);
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_out));
    taskDataPar->outputs_count.emplace_back(1);
  }

  auto testMpiTaskParallel = std::make_shared<korobeinikov_a_test_task_mpi_lab_02::TestMPITaskParallel>(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel->validation(), true);
  testMpiTaskParallel->pre_processing();
  testMpiTaskParallel->run();
  testMpiTaskParallel->post_processing();

  // Create Perf attributes
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };

  // Create and init perf results
  auto perfResults = std::make_shared<ppc::core::PerfResults>();

  // Create Perf analyzer
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
  perfAnalyzer->pipeline_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    for (size_t i = 0; i < out.size(); i++) {
      ASSERT_EQ(150, out[i]);
    }
    ASSERT_EQ(count_rows_out, count_rows_RA);
    ASSERT_EQ(count_cols_out, count_cols_RA);
  }
}

TEST(mpi_korobeinikov_perf_test_lab_02, test_task_run) {
  boost::mpi::communicator world;

  // Create data
  std::vector<int> A;
  std::vector<int> B;
  int count_rows_A = 150;
  int count_cols_A = 150;
  int count_rows_B = 150;
  int count_cols_B = 150;

  std::vector<int> out;
  int count_rows_out = 0;
  int count_cols_out = 0;
  int count_rows_RA = 150;
  int count_cols_RA = 150;

  // Create TaskData
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    A = std::vector<int>(22500, 1);
    B = std::vector<int>(22500, 1);
    out = std::vector<int>(22500, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(A.data()));
    taskDataPar->inputs_count.emplace_back(A.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_A));
    taskDataPar->inputs_count.emplace_back(1);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_A));
    taskDataPar->inputs_count.emplace_back(1);

    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(B.data()));
    taskDataPar->inputs_count.emplace_back(B.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_B));
    taskDataPar->inputs_count.emplace_back(1);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_B));
    taskDataPar->inputs_count.emplace_back(1);

    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
    taskDataPar->outputs_count.emplace_back(out.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&count_rows_out));
    taskDataPar->outputs_count.emplace_back(1);
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(&count_cols_out));
    taskDataPar->outputs_count.emplace_back(1);
  }

  auto testMpiTaskParallel = std::make_shared<korobeinikov_a_test_task_mpi_lab_02::TestMPITaskParallel>(taskDataPar);
  ASSERT_EQ(testMpiTaskParallel->validation(), true);
  testMpiTaskParallel->pre_processing();
  testMpiTaskParallel->run();
  testMpiTaskParallel->post_processing();

  // Create Perf attributes
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };

  // Create and init perf results
  auto perfResults = std::make_shared<ppc::core::PerfResults>();

  // Create Perf analyzer
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
  perfAnalyzer->task_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    for (size_t i = 0; i < out.size(); i++) {
      ASSERT_EQ(150, out[i]);
    }
    ASSERT_EQ(count_rows_out, count_rows_RA);
    ASSERT_EQ(count_cols_out, count_cols_RA);
  }
}
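Note on the expected values: both perf tests multiply two 150×150 matrices filled with ones, so each element of the product is a sum of 150 terms of 1·1, i.e. 150, which is exactly what the ASSERT_EQ(150, out[i]) loop on rank 0 checks; the output dimensions are then compared against the 150×150 reference sizes count_rows_RA and count_cols_RA.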