-
Notifications
You must be signed in to change notification settings - Fork 169
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Седова Ольга. Задача 2. Вариант 16. Ленточная вертикальная схема умножения матрицы на вектор #386
Changes from 56 commits
8b0ab78
7dce768
28bcc37
84812c1
f886942
1c4ad60
c154ea1
417c2c4
b6c3ba2
70a1ea0
c8079b7
5023aee
08cf145
bb6e666
cbe04fb
8858284
139fb33
c4a1c4a
f5d168d
ded3844
867ea87
6597ed1
78a8092
c7108d4
8154f0d
8ed32f3
e7cfc1c
83701fe
b53e3a4
2067c3f
ac7ec69
c3db3b3
ba75abf
c20007b
b36f06b
201fa4a
56f4e82
fbf1750
ad04917
281cf08
e676cdd
459c367
4fb7db1
ff428fd
82e218e
aae88e0
ef9ce44
a8c52d6
430b02c
8f18bde
db89208
c1b132f
c122be7
e2b33d3
b6abff7
8ff9488
b8a16d5
88a97d8
9d44188
7d6493f
05cdfc4
8d72524
3394b12
29fc2b7
86e6f99
fcf8fa7
0ae2afd
202d94c
b79c93f
99aaf4c
2e0d34a
f3b909d
e3059c1
4db4164
5b3ce50
9919138
8262df6
6f651b6
cb98697
bf706aa
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change | ||||
---|---|---|---|---|---|---|
@@ -0,0 +1,192 @@ | ||||||
#include <gtest/gtest.h> | ||||||
|
||||||
#include <boost/mpi/communicator.hpp> | ||||||
#include <boost/mpi/environment.hpp> | ||||||
#include <random> | ||||||
#include <vector> | ||||||
|
||||||
#include "mpi/sedova_o_vertical_ribbon_scheme/include/ops_mpi.hpp" | ||||||
|
||||||
// Distribution check: 5 rows over 5 processes -> one row (cols_ elements)
// per process, offsets advancing by cols_.
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_3) {
  const int rows_ = 5;
  const int cols_ = 3;
  const int count_proc = 5;
  std::vector<int> proc_(count_proc, 0);
  std::vector<int> off(count_proc, 0);
  if (count_proc > rows_) {
    // More processes than rows: one row each, surplus processes stay idle.
    for (int i = 0; i < rows_; ++i) {
      proc_[i] = cols_;
      off[i] = i * cols_;
    }
    for (int i = rows_; i < count_proc; ++i) {
      proc_[i] = 0;
      off[i] = -1;  // sentinel: no data for this process
    }
  } else {
    // Even split; the first (rows_ % count_proc) processes take one extra row.
    const int base_rows = rows_ / count_proc;
    int extra = rows_ % count_proc;
    int shift = 0;
    for (int i = 0; i < count_proc; ++i) {
      const int rows_here = base_rows + (extra > 0 ? 1 : 0);
      if (extra > 0) {
        --extra;
      }
      proc_[i] = rows_here * cols_;
      off[i] = shift;
      shift += proc_[i];
    }
  }
  const std::vector<int> expected_proc = {3, 3, 3, 3, 3};
  const std::vector<int> expected_off = {0, 3, 6, 9, 12};
  EXPECT_EQ(proc_, expected_proc);
  EXPECT_EQ(off, expected_off);
}
|
||||||
// Distribution check: 5 rows over 3 processes -> the first two processes
// take 2 rows (6 elements), the last takes 1 row (3 elements).
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_4) {
  const int rows_ = 5;
  const int cols_ = 3;
  const int count_proc = 3;
  std::vector<int> proc_(count_proc, 0);
  std::vector<int> off(count_proc, 0);
  if (count_proc > rows_) {
    // More processes than rows: one row each, surplus processes stay idle.
    for (int i = 0; i < rows_; ++i) {
      proc_[i] = cols_;
      off[i] = i * cols_;
    }
    for (int i = rows_; i < count_proc; ++i) {
      proc_[i] = 0;
      off[i] = -1;  // sentinel: no data for this process
    }
  } else {
    // Even split; the first (rows_ % count_proc) processes take one extra row.
    const int base_rows = rows_ / count_proc;
    int extra = rows_ % count_proc;
    int shift = 0;
    for (int i = 0; i < count_proc; ++i) {
      const int rows_here = base_rows + (extra > 0 ? 1 : 0);
      if (extra > 0) {
        --extra;
      }
      proc_[i] = rows_here * cols_;
      off[i] = shift;
      shift += proc_[i];
    }
  }
  const std::vector<int> expected_proc = {6, 6, 3};
  const std::vector<int> expected_off = {0, 6, 12};
  EXPECT_EQ(proc_, expected_proc);
  EXPECT_EQ(off, expected_off);
}
|
||||||
// Distribution check, more processes than rows: 5 rows over 6 processes ->
// first five processes get one row each, the sixth gets nothing (offset -1).
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_5) {
  const int rows_ = 5;
  const int cols_ = 4;
  const int count_proc = 6;
  std::vector<int> proc_(count_proc, 0);
  std::vector<int> off(count_proc, 0);
  if (count_proc > rows_) {
    // More processes than rows: one row each, surplus processes stay idle.
    for (int i = 0; i < rows_; ++i) {
      proc_[i] = cols_;
      off[i] = i * cols_;
    }
    for (int i = rows_; i < count_proc; ++i) {
      proc_[i] = 0;
      off[i] = -1;  // sentinel: no data for this process
    }
  } else {
    // Even split; the first (rows_ % count_proc) processes take one extra row.
    const int base_rows = rows_ / count_proc;
    int extra = rows_ % count_proc;
    int shift = 0;
    for (int i = 0; i < count_proc; ++i) {
      const int rows_here = base_rows + (extra > 0 ? 1 : 0);
      if (extra > 0) {
        --extra;
      }
      proc_[i] = rows_here * cols_;
      off[i] = shift;
      shift += proc_[i];
    }
  }
  const std::vector<int> expected_proc = {4, 4, 4, 4, 4, 0};
  const std::vector<int> expected_off = {0, 4, 8, 12, 16, -1};
  EXPECT_EQ(proc_, expected_proc);
  EXPECT_EQ(off, expected_off);
}
|
||||||
// Distribution check: 10 rows over 8 processes -> first two processes take
// 2 rows (8 elements) each, the remaining six take 1 row (4 elements).
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_6) {
  const int rows_ = 10;
  const int cols_ = 4;
  const int count_proc = 8;
  std::vector<int> proc_(count_proc, 0);
  std::vector<int> off(count_proc, 0);
  if (count_proc > rows_) {
    // More processes than rows: one row each, surplus processes stay idle.
    for (int i = 0; i < rows_; ++i) {
      proc_[i] = cols_;
      off[i] = i * cols_;
    }
    for (int i = rows_; i < count_proc; ++i) {
      proc_[i] = 0;
      off[i] = -1;  // sentinel: no data for this process
    }
  } else {
    // Even split; the first (rows_ % count_proc) processes take one extra row.
    const int base_rows = rows_ / count_proc;
    int extra = rows_ % count_proc;
    int shift = 0;
    for (int i = 0; i < count_proc; ++i) {
      const int rows_here = base_rows + (extra > 0 ? 1 : 0);
      if (extra > 0) {
        --extra;
      }
      proc_[i] = rows_here * cols_;
      off[i] = shift;
      shift += proc_[i];
    }
  }
  const std::vector<int> expected_proc = {8, 8, 4, 4, 4, 4, 4, 4};
  const std::vector<int> expected_off = {0, 8, 16, 20, 24, 28, 32, 36};
  EXPECT_EQ(proc_, expected_proc);
  EXPECT_EQ(off, expected_off);
}
|
||||||
// Validation test: 3 matrix elements cannot form a matrix consistent with a
// 2-element input vector and a 3-element result (2 * 3 != 3), so
// validation() must reject the task data.
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_7) {
  std::vector<int> matrix = {1, 2, 3};
  std::vector<int> vector = {7, 8};
  std::vector<int> result(3, 0);

  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(matrix.data()));
  taskDataSeq->inputs_count.emplace_back(matrix.size());
  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector.data()));
  taskDataSeq->inputs_count.emplace_back(vector.size());
  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(result.data()));
  taskDataSeq->outputs_count.emplace_back(result.size());

  sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI TestSequential(taskDataSeq);
  // Idiomatic gtest boolean check instead of ASSERT_EQ(..., false).
  EXPECT_FALSE(TestSequential.validation());
}
|
||||||
// Functional test of the sequential version: a 2x3 row-major matrix times a
// 2-element vector. Expected (column-wise products):
//   {1*7 + 4*8, 2*7 + 5*8, 3*7 + 6*8} = {39, 54, 69}.
TEST(sedova_o_vertical_ribbon_scheme_mpi, Test_8) {
  std::vector<int> matrix = {1, 2, 3, 4, 5, 6};
  std::vector<int> vector = {7, 8};
  std::vector<int> result(3, 0);

  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(matrix.data()));
  taskDataSeq->inputs_count.emplace_back(matrix.size());
  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(vector.data()));
  taskDataSeq->inputs_count.emplace_back(vector.size());
  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(result.data()));
  taskDataSeq->outputs_count.emplace_back(result.size());

  sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI TestSequential(taskDataSeq);
  // Idiomatic gtest boolean check instead of ASSERT_EQ(..., true); ASSERT
  // (not EXPECT) because the remaining steps are meaningless on failure.
  ASSERT_TRUE(TestSequential.validation());

  TestSequential.pre_processing();
  TestSequential.run();
  TestSequential.post_processing();

  std::vector<int> expected_result = {39, 54, 69};
  ASSERT_EQ(result, expected_result);
}
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Тут нужно все же чуть больше тестов добавить, с генерациями случайных входных данных, и сравнением результата работы mpi с seq версией (чтобы при запусках репозитория было ясно точно ли все корректно работает с случайными значениями) |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
// Copyright 2024 Sedova Olga | ||
#pragma once | ||
|
||
#include <gtest/gtest.h> | ||
|
||
#include <boost/mpi/collectives.hpp> | ||
#include <boost/mpi/communicator.hpp> | ||
#include <memory> | ||
#include <numeric> | ||
#include <string> | ||
#include <utility> | ||
#include <vector> | ||
|
||
#include "core/task/include/task.hpp" | ||
|
||
namespace sedova_o_vertical_ribbon_scheme_mpi { | ||
|
||
class ParallelMPI : public ppc::core::Task { | ||
public: | ||
explicit ParallelMPI(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {} | ||
bool pre_processing() override; | ||
bool validation() override; | ||
bool run() override; | ||
bool post_processing() override; | ||
|
||
private: | ||
int rows_{}; | ||
int cols_{}; | ||
std::vector<int> input_matrix_1; | ||
std::vector<int> input_vector_1; | ||
std::vector<int> result_vector_; | ||
std::vector<int> proc; | ||
std::vector<int> off; | ||
boost::mpi::communicator world; | ||
}; | ||
|
||
class SequentialMPI : public ppc::core::Task { | ||
public: | ||
explicit SequentialMPI(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {} | ||
bool pre_processing() override; | ||
bool validation() override; | ||
bool run() override; | ||
bool post_processing() override; | ||
|
||
private: | ||
int* matrix_; | ||
int* vector_; | ||
std::vector<int> result_vector_; | ||
int count; | ||
int rows_; | ||
int cols_; | ||
}; | ||
} // namespace sedova_o_vertical_ribbon_scheme_mpi |
Original file line number | Diff line number | Diff line change | ||||||||
---|---|---|---|---|---|---|---|---|---|---|
@@ -0,0 +1,134 @@ | ||||||||||
#include <gtest/gtest.h> | ||||||||||
|
||||||||||
#include <boost/mpi/timer.hpp> | ||||||||||
#include <random> | ||||||||||
|
||||||||||
#include "core/perf/include/perf.hpp" | ||||||||||
#include "mpi/sedova_o_vertical_ribbon_scheme/include/ops_mpi.hpp" | ||||||||||
|
||||||||||
// Perf test (pipeline_run): times the parallel task on a 2024x2024 random
// matrix and, on rank 0, cross-checks the MPI result against the sequential
// implementation.
TEST(sedova_o_vertical_ribbon_scheme_mpi, Performance_Pipeline_Run) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  std::vector<int> global_matrix;
  std::vector<int> global_vector;
  std::vector<int> global_result;
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  // Initialized so non-root ranks never hold indeterminate values.
  int rows_ = 0;
  int cols_ = 0;
  if (world.rank() == 0) {
    rows_ = 2024;
    cols_ = 2024;
    global_vector.resize(cols_);
    global_matrix.resize(rows_ * cols_);
    for (int j = 0; j < rows_; ++j) {
      for (int i = 0; i < cols_; ++i) {
        global_matrix[j * cols_ + i] = (rand() % 101) - 50;
      }
    }
    // Fix: the vector has cols_ elements, so iterate to cols_ (the original
    // used rows_, which was only safe because rows_ == cols_ here).
    for (int i = 0; i < cols_; ++i) {
      global_vector[i] = (rand() % 100) - 50;
    }
    global_result.resize(cols_, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataPar->inputs_count.emplace_back(global_matrix.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataPar->inputs_count.emplace_back(global_vector.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
    taskDataPar->outputs_count.emplace_back(global_result.size());
  }
  auto taskParallel = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::ParallelMPI>(taskDataPar);
  ASSERT_TRUE(taskParallel->validation());
  taskParallel->pre_processing();
  taskParallel->run();
  taskParallel->post_processing();
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(taskParallel);
  perfAnalyzer->pipeline_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    // Cross-check: the sequential task on the same inputs must produce the
    // same result as the parallel run.
    std::vector<int> seq_result(global_result.size(), 0);
    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataSeq->inputs_count.emplace_back(global_matrix.size());
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataSeq->inputs_count.emplace_back(global_vector.size());
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_result.data()));
    taskDataSeq->outputs_count.emplace_back(seq_result.size());
    auto taskSequential = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI>(taskDataSeq);
    ASSERT_TRUE(taskSequential->validation());
    taskSequential->pre_processing();
    taskSequential->run();
    taskSequential->post_processing();
    ASSERT_EQ(global_result.size(), seq_result.size());
    for (size_t i = 0; i < global_result.size(); ++i) {
      ASSERT_EQ(global_result[i], seq_result[i]);
    }
  }
}
// Perf test (task_run): times the parallel task on a 2000x2000 random matrix
// and, on rank 0, cross-checks the MPI result against the sequential
// implementation.
TEST(sedova_o_vertical_ribbon_scheme_mpi, Performance_Task_Run) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  std::vector<int> global_matrix;
  std::vector<int> global_vector;
  std::vector<int> global_result;
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  // Initialized so non-root ranks never hold indeterminate values.
  int rows_ = 0;
  int cols_ = 0;
  if (world.rank() == 0) {
    rows_ = 2000;
    cols_ = 2000;
    global_matrix.resize(rows_ * cols_);
    global_vector.resize(cols_);
    for (int j = 0; j < rows_; ++j) {
      for (int i = 0; i < cols_; ++i) {
        global_matrix[j * cols_ + i] = (rand() % 101) - 50;
      }
    }
    // Fix: the vector has cols_ elements, so iterate to cols_ (the original
    // used rows_, which was only safe because rows_ == cols_ here).
    for (int i = 0; i < cols_; ++i) {
      global_vector[i] = (rand() % 100) - 50;
    }
    global_result.resize(cols_, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataPar->inputs_count.emplace_back(global_matrix.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataPar->inputs_count.emplace_back(global_vector.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
    taskDataPar->outputs_count.emplace_back(global_result.size());
  }
  auto taskParallel = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::ParallelMPI>(taskDataPar);
  ASSERT_TRUE(taskParallel->validation());
  taskParallel->pre_processing();
  taskParallel->run();
  taskParallel->post_processing();
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(taskParallel);
  perfAnalyzer->task_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    // Cross-check: the sequential task on the same inputs must produce the
    // same result as the parallel run.
    std::vector<int> seq_result(global_result.size(), 0);
    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataSeq->inputs_count.emplace_back(global_matrix.size());
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataSeq->inputs_count.emplace_back(global_vector.size());
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_result.data()));
    taskDataSeq->outputs_count.emplace_back(seq_result.size());
    auto taskSequential = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI>(taskDataSeq);
    ASSERT_TRUE(taskSequential->validation());
    taskSequential->pre_processing();
    taskSequential->run();
    taskSequential->post_processing();
    ASSERT_EQ(global_result.size(), seq_result.size());
    for (size_t i = 0; i < global_result.size(); ++i) {
      ASSERT_EQ(global_result[i], seq_result[i]);
    }
  }
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Назовите тесты понятнее, что это на проверку функциональности вашего алгоритма распределения