Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Седова Ольга. Задача 2. Вариант 16. Ленточная вертикальная схема умножения матрицы на вектор #386

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
80 commits
Select commit Hold shift + click to select a range
8b0ab78
seq
Sedova-Olga Nov 25, 2024
7dce768
cf1
Sedova-Olga Nov 25, 2024
28bcc37
msvc
Sedova-Olga Nov 25, 2024
84812c1
macos
Sedova-Olga Nov 25, 2024
f886942
cf2
Sedova-Olga Nov 25, 2024
1c4ad60
cf3
Sedova-Olga Nov 25, 2024
c154ea1
cf4
Sedova-Olga Nov 25, 2024
417c2c4
cf5
Sedova-Olga Nov 25, 2024
b6c3ba2
macos1
Sedova-Olga Nov 25, 2024
70a1ea0
macos2
Sedova-Olga Nov 25, 2024
c8079b7
1
Sedova-Olga Nov 25, 2024
5023aee
cf6
Sedova-Olga Nov 25, 2024
08cf145
msvc
Sedova-Olga Nov 25, 2024
bb6e666
msvc
Sedova-Olga Nov 25, 2024
cbe04fb
fix_log
Sedova-Olga Nov 26, 2024
8858284
cf6
Sedova-Olga Nov 26, 2024
139fb33
cf6
Sedova-Olga Nov 26, 2024
c4a1c4a
test
Sedova-Olga Nov 27, 2024
f5d168d
seq
Sedova-Olga Nov 27, 2024
ded3844
seq
Sedova-Olga Nov 27, 2024
867ea87
mpi_hpp
Sedova-Olga Nov 27, 2024
6597ed1
mpi_hpp
Sedova-Olga Nov 27, 2024
78a8092
mpi_cpp
Sedova-Olga Nov 27, 2024
c7108d4
mpi_cpp
Sedova-Olga Nov 27, 2024
8154f0d
mpi_cpp_pt
Sedova-Olga Nov 27, 2024
8ed32f3
mpi_cpp_ft
Sedova-Olga Nov 27, 2024
e7cfc1c
mpi_cpp_ft
Sedova-Olga Nov 27, 2024
83701fe
mpi_cpp_test
Sedova-Olga Nov 27, 2024
b53e3a4
mpi_cpp_ft
Sedova-Olga Nov 27, 2024
2067c3f
1
Sedova-Olga Nov 27, 2024
ac7ec69
ft
Sedova-Olga Nov 27, 2024
c3db3b3
test_macos
Sedova-Olga Nov 27, 2024
ba75abf
test_macos
Sedova-Olga Nov 27, 2024
c20007b
test_macos
Sedova-Olga Nov 27, 2024
b36f06b
Merge branch 'learning-process:master' into sedova_o_vertical_ribbon_…
Sedova-Olga Nov 27, 2024
201fa4a
mpi
Sedova-Olga Nov 28, 2024
56f4e82
fix_file_valid
Sedova-Olga Nov 28, 2024
fbf1750
fix_tidy
Sedova-Olga Nov 28, 2024
ad04917
1
Sedova-Olga Nov 29, 2024
281cf08
2
Sedova-Olga Nov 29, 2024
e676cdd
func_test
Sedova-Olga Nov 29, 2024
459c367
seq_ft_pt
Sedova-Olga Dec 4, 2024
4fb7db1
mpi
Sedova-Olga Dec 6, 2024
ff428fd
mpi
Sedova-Olga Dec 6, 2024
82e218e
mpi
Sedova-Olga Dec 6, 2024
aae88e0
fix
Sedova-Olga Dec 7, 2024
ef9ce44
fix1
Sedova-Olga Dec 7, 2024
a8c52d6
clang-format
Sedova-Olga Dec 7, 2024
430b02c
ft
Sedova-Olga Dec 7, 2024
8f18bde
fix
Sedova-Olga Dec 7, 2024
db89208
fix
Sedova-Olga Dec 7, 2024
c1b132f
fix
Sedova-Olga Dec 7, 2024
c122be7
test11
Sedova-Olga Dec 8, 2024
e2b33d3
final
Sedova-Olga Dec 8, 2024
b6abff7
cf
Sedova-Olga Dec 8, 2024
8ff9488
cf
Sedova-Olga Dec 8, 2024
b8a16d5
fix_name_and_validation
Sedova-Olga Dec 8, 2024
88a97d8
Merge branch 'learning-process:master' into sedova_o_vertical_ribbon_…
Sedova-Olga Dec 8, 2024
9d44188
test
Sedova-Olga Dec 8, 2024
7d6493f
test
Sedova-Olga Dec 8, 2024
05cdfc4
test
Sedova-Olga Dec 8, 2024
8d72524
final
Sedova-Olga Dec 8, 2024
3394b12
src
Sedova-Olga Dec 9, 2024
29fc2b7
src
Sedova-Olga Dec 9, 2024
86e6f99
src
Sedova-Olga Dec 9, 2024
fcf8fa7
src
Sedova-Olga Dec 9, 2024
0ae2afd
src
Sedova-Olga Dec 9, 2024
202d94c
test
Sedova-Olga Dec 9, 2024
b79c93f
test
Sedova-Olga Dec 9, 2024
99aaf4c
test
Sedova-Olga Dec 9, 2024
2e0d34a
test
Sedova-Olga Dec 9, 2024
f3b909d
test
Sedova-Olga Dec 9, 2024
e3059c1
test
Sedova-Olga Dec 9, 2024
4db4164
test
Sedova-Olga Dec 9, 2024
5b3ce50
test
Sedova-Olga Dec 9, 2024
9919138
test_src
Sedova-Olga Dec 9, 2024
8262df6
test_src
Sedova-Olga Dec 9, 2024
6f651b6
test
Sedova-Olga Dec 10, 2024
cb98697
test
Sedova-Olga Dec 10, 2024
bf706aa
remame
Sedova-Olga Dec 10, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
209 changes: 209 additions & 0 deletions tasks/mpi/sedova_o_vertical_ribbon_scheme/func_tests/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
#include <gtest/gtest.h>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <random>
#include <vector>

#include "mpi/sedova_o_vertical_ribbon_scheme/include/ops_mpi.hpp"

TEST(sedova_o_vertical_ribbon_scheme_mpi, distribution1) {
  // Even split: 5 rows over 5 processes -> every process gets one row (cols elements).
  const int rows = 5;
  const int cols = 3;
  const int num_procs = 5;
  std::vector<int> counts(num_procs, 0);
  std::vector<int> offsets(num_procs, 0);
  if (num_procs > rows) {
    // More processes than rows: one row each for the first `rows` ranks, the rest idle.
    for (int p = 0; p < num_procs; ++p) {
      const bool has_work = p < rows;
      counts[p] = has_work ? cols : 0;
      offsets[p] = has_work ? p * cols : -1;
    }
  } else {
    // Block distribution: `surplus` leading ranks take one extra row.
    const int base = rows / num_procs;
    int surplus = rows % num_procs;
    int shift = 0;
    for (int p = 0; p < num_procs; ++p) {
      counts[p] = (surplus > 0 ? base + 1 : base) * cols;
      if (surplus > 0) {
        --surplus;
      }
      offsets[p] = shift;
      shift += counts[p];
    }
  }
  const std::vector<int> want_counts = {3, 3, 3, 3, 3};
  const std::vector<int> want_offsets = {0, 3, 6, 9, 12};
  EXPECT_EQ(counts, want_counts);
  EXPECT_EQ(offsets, want_offsets);
}
Comment on lines +10 to +44
Copy link
Contributor

@kmichaelk kmichaelk Dec 10, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What are you testing in those tests? If you want to test the distribution resolving functionality from the Task class, then test it, testing a copy-pasted code does not guarantee that the actual code you use in Task is still working correctly as it may've been changed.


TEST(sedova_o_vertical_ribbon_scheme_mpi, distribution2) {
  // Uneven split: 5 rows over 3 processes -> two ranks take 2 rows, the last takes 1.
  const int rows = 5;
  const int cols = 3;
  const int num_procs = 3;
  std::vector<int> counts(num_procs, 0);
  std::vector<int> offsets(num_procs, 0);
  if (num_procs > rows) {
    // More processes than rows: one row each for the first `rows` ranks, the rest idle.
    for (int p = 0; p < num_procs; ++p) {
      const bool has_work = p < rows;
      counts[p] = has_work ? cols : 0;
      offsets[p] = has_work ? p * cols : -1;
    }
  } else {
    // Block distribution: `surplus` leading ranks take one extra row.
    const int base = rows / num_procs;
    int surplus = rows % num_procs;
    int shift = 0;
    for (int p = 0; p < num_procs; ++p) {
      counts[p] = (surplus > 0 ? base + 1 : base) * cols;
      if (surplus > 0) {
        --surplus;
      }
      offsets[p] = shift;
      shift += counts[p];
    }
  }
  const std::vector<int> want_counts = {6, 6, 3};
  const std::vector<int> want_offsets = {0, 6, 12};
  EXPECT_EQ(counts, want_counts);
  EXPECT_EQ(offsets, want_offsets);
}

TEST(sedova_o_vertical_ribbon_scheme_mpi, distribution3) {
  // More processes than rows: the last rank gets no work (count 0, offset -1).
  const int rows = 5;
  const int cols = 4;
  const int num_procs = 6;
  std::vector<int> counts(num_procs, 0);
  std::vector<int> offsets(num_procs, 0);
  if (num_procs > rows) {
    // One row each for the first `rows` ranks, the remainder stay idle.
    for (int p = 0; p < num_procs; ++p) {
      const bool has_work = p < rows;
      counts[p] = has_work ? cols : 0;
      offsets[p] = has_work ? p * cols : -1;
    }
  } else {
    // Block distribution: `surplus` leading ranks take one extra row.
    const int base = rows / num_procs;
    int surplus = rows % num_procs;
    int shift = 0;
    for (int p = 0; p < num_procs; ++p) {
      counts[p] = (surplus > 0 ? base + 1 : base) * cols;
      if (surplus > 0) {
        --surplus;
      }
      offsets[p] = shift;
      shift += counts[p];
    }
  }
  const std::vector<int> want_counts = {4, 4, 4, 4, 4, 0};
  const std::vector<int> want_offsets = {0, 4, 8, 12, 16, -1};
  EXPECT_EQ(counts, want_counts);
  EXPECT_EQ(offsets, want_offsets);
}

TEST(sedova_o_vertical_ribbon_scheme_mpi, distribution4) {
  // 10 rows over 8 processes: the first two ranks take 2 rows, the rest take 1.
  const int rows = 10;
  const int cols = 4;
  const int num_procs = 8;
  std::vector<int> counts(num_procs, 0);
  std::vector<int> offsets(num_procs, 0);
  if (num_procs > rows) {
    // More processes than rows: one row each for the first `rows` ranks, the rest idle.
    for (int p = 0; p < num_procs; ++p) {
      const bool has_work = p < rows;
      counts[p] = has_work ? cols : 0;
      offsets[p] = has_work ? p * cols : -1;
    }
  } else {
    // Block distribution: `surplus` leading ranks take one extra row.
    const int base = rows / num_procs;
    int surplus = rows % num_procs;
    int shift = 0;
    for (int p = 0; p < num_procs; ++p) {
      counts[p] = (surplus > 0 ? base + 1 : base) * cols;
      if (surplus > 0) {
        --surplus;
      }
      offsets[p] = shift;
      shift += counts[p];
    }
  }
  const std::vector<int> want_counts = {8, 8, 4, 4, 4, 4, 4, 4};
  const std::vector<int> want_offsets = {0, 8, 16, 20, 24, 28, 32, 36};
  EXPECT_EQ(counts, want_counts);
  EXPECT_EQ(offsets, want_offsets);
}

TEST(sedova_o_vertical_ribbon_scheme_mpi, false_validation) {
  // A 3-element matrix cannot be paired with a 2-element vector,
  // so the sequential task must fail validation.
  std::vector<int> in_matrix = {1, 2, 3};
  std::vector<int> in_vector = {7, 8};
  std::vector<int> out(3, 0);

  auto task_data = std::make_shared<ppc::core::TaskData>();
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_matrix.data()));
  task_data->inputs_count.emplace_back(in_matrix.size());
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_vector.data()));
  task_data->inputs_count.emplace_back(in_vector.size());
  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
  task_data->outputs_count.emplace_back(out.size());

  sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI sequential_task(task_data);
  EXPECT_FALSE(sequential_task.validation());
}

TEST(sedova_o_vertical_ribbon_scheme_mpi, true_validation) {
  // 4 matrix elements with a 2-element vector is a consistent shape,
  // so the parallel task must pass validation.
  std::vector<int> in_matrix = {1, 2, 3, 4};
  std::vector<int> in_vector = {7, 8};
  std::vector<int> out(2, 0);

  auto task_data = std::make_shared<ppc::core::TaskData>();
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_matrix.data()));
  task_data->inputs_count.emplace_back(in_matrix.size());
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_vector.data()));
  task_data->inputs_count.emplace_back(in_vector.size());
  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
  task_data->outputs_count.emplace_back(out.size());

  sedova_o_vertical_ribbon_scheme_mpi::ParallelMPI parallel_task(task_data);
  EXPECT_TRUE(parallel_task.validation());
}

TEST(sedova_o_vertical_ribbon_scheme_mpi, correct_matrix_and_vector_seq) {
  // End-to-end check of the sequential product: a 6-element matrix times
  // vector {7, 8} must yield {39, 54, 69} (values fixed by the original test).
  std::vector<int> in_matrix = {1, 2, 3, 4, 5, 6};
  std::vector<int> in_vector = {7, 8};
  std::vector<int> out(3, 0);

  auto task_data = std::make_shared<ppc::core::TaskData>();
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_matrix.data()));
  task_data->inputs_count.emplace_back(in_matrix.size());
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in_vector.data()));
  task_data->inputs_count.emplace_back(in_vector.size());
  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
  task_data->outputs_count.emplace_back(out.size());

  sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI sequential_task(task_data);
  ASSERT_TRUE(sequential_task.validation());
  sequential_task.pre_processing();
  sequential_task.run();
  sequential_task.post_processing();

  const std::vector<int> expected = {39, 54, 69};
  ASSERT_EQ(out, expected);
}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Тут нужно все же чуть больше тестов добавить, с генерациями случайных входных данных, и сравнением результата работы mpi с seq версией (чтобы при запусках репозитория было ясно точно ли все корректно работает с случайными значениями)

52 changes: 52 additions & 0 deletions tasks/mpi/sedova_o_vertical_ribbon_scheme/include/ops_mpi.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
// Copyright 2024 Sedova Olga
#pragma once

#include <gtest/gtest.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace sedova_o_vertical_ribbon_scheme_mpi {

// Parallel (MPI) vertical ribbon matrix-vector multiplication task.
// Input 0 is the matrix (rows_ * cols_ ints), input 1 is the vector;
// the output buffer receives the product. NOTE(review): the functional
// test {1..6} x {7,8} -> {39,54,69} implies column-major (ribbon) layout —
// confirm against the .cpp implementation.
class ParallelMPI : public ppc::core::Task {
 public:
  explicit ParallelMPI(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  int rows_{};
  int cols_{};
  std::vector<int> input_matrix_1;  // matrix elements (full copy on root; presumably a band per rank)
  std::vector<int> input_vector_1;  // multiplier vector
  std::vector<int> result_vector_;  // product, written back in post_processing()
  std::vector<int> proc;            // per-rank element counts — presumably for scatter; confirm in .cpp
  std::vector<int> off;             // per-rank element offsets (-1 for idle ranks, per the tests)
  boost::mpi::communicator world;
};

// Sequential reference implementation used to cross-check the MPI results.
class SequentialMPI : public ppc::core::Task {
 public:
  explicit SequentialMPI(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  // Non-owning views into taskData->inputs. These were previously left
  // uninitialized, which is undefined behavior if any method reads them
  // before pre_processing() assigns them — default-initialize everything.
  int* matrix_{nullptr};
  int* vector_{nullptr};
  std::vector<int> result_vector_;
  int rows_{0};
  int cols_{0};
};
}  // namespace sedova_o_vertical_ribbon_scheme_mpi
130 changes: 130 additions & 0 deletions tasks/mpi/sedova_o_vertical_ribbon_scheme/perf_tests/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
#include <gtest/gtest.h>

#include <boost/mpi/timer.hpp>
#include <random>

#include "core/perf/include/perf.hpp"
#include "mpi/sedova_o_vertical_ribbon_scheme/include/ops_mpi.hpp"

TEST(sedova_o_vertical_ribbon_scheme_mpi, test_pipeline_run) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  std::vector<int> global_matrix;
  std::vector<int> global_vector;
  std::vector<int> global_result;
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    // Dimensions are only needed on the root rank, so declare them here
    // (they were previously uninitialized function-scope variables).
    const int rows_ = 2024;
    const int cols_ = 2024;
    global_matrix.resize(rows_ * cols_);
    global_vector.resize(cols_);
    for (int j = 0; j < rows_; ++j) {
      for (int i = 0; i < cols_; ++i) {
        global_matrix[j * cols_ + i] = (rand() % 101) - 50;
      }
    }
    // BUG FIX: the fill loop was bounded by rows_, but the vector holds
    // cols_ elements — out-of-bounds whenever rows_ > cols_ (masked here
    // only because the matrix is square).
    for (int i = 0; i < cols_; ++i) {
      global_vector[i] = (rand() % 100) - 50;
    }
    global_result.resize(cols_, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataPar->inputs_count.emplace_back(global_matrix.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataPar->inputs_count.emplace_back(global_vector.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
    taskDataPar->outputs_count.emplace_back(global_result.size());
  }
  // Run the parallel task once, then measure it through the perf pipeline.
  auto taskParallel = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::ParallelMPI>(taskDataPar);
  ASSERT_TRUE(taskParallel->validation());
  taskParallel->pre_processing();
  taskParallel->run();
  taskParallel->post_processing();
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(taskParallel);
  perfAnalyzer->pipeline_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    // Cross-check the parallel result against the sequential reference
    // on the same input data.
    std::vector<int> seq_result(global_result.size(), 0);
    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataSeq->inputs_count.emplace_back(global_matrix.size());
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataSeq->inputs_count.emplace_back(global_vector.size());
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_result.data()));
    taskDataSeq->outputs_count.emplace_back(seq_result.size());
    auto taskSequential = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI>(taskDataSeq);
    ASSERT_TRUE(taskSequential->validation());
    taskSequential->pre_processing();
    taskSequential->run();
    taskSequential->post_processing();
    ASSERT_EQ(global_result.size(), seq_result.size());
    EXPECT_EQ(global_result, seq_result);
  }
}
TEST(sedova_o_vertical_ribbon_scheme_mpi, test_task_run) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  std::vector<int> global_matrix;
  std::vector<int> global_vector;
  std::vector<int> global_result;
  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    // Dimensions are only needed on the root rank, so declare them here
    // (they were previously uninitialized function-scope variables).
    const int rows_ = 2000;
    const int cols_ = 2000;
    global_matrix.resize(rows_ * cols_);
    global_vector.resize(cols_);
    for (int j = 0; j < rows_; ++j) {
      for (int i = 0; i < cols_; ++i) {
        global_matrix[j * cols_ + i] = (rand() % 101) - 50;
      }
    }
    // BUG FIX: the fill loop was bounded by rows_, but the vector holds
    // cols_ elements — out-of-bounds whenever rows_ > cols_ (masked here
    // only because the matrix is square).
    for (int i = 0; i < cols_; ++i) {
      global_vector[i] = (rand() % 100) - 50;
    }
    global_result.resize(cols_, 0);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataPar->inputs_count.emplace_back(global_matrix.size());
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataPar->inputs_count.emplace_back(global_vector.size());
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
    taskDataPar->outputs_count.emplace_back(global_result.size());
  }
  // Run the parallel task once, then measure it with task_run.
  auto taskParallel = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::ParallelMPI>(taskDataPar);
  ASSERT_TRUE(taskParallel->validation());
  taskParallel->pre_processing();
  taskParallel->run();
  taskParallel->post_processing();
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const boost::mpi::timer current_timer;
  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(taskParallel);
  perfAnalyzer->task_run(perfAttr, perfResults);
  if (world.rank() == 0) {
    ppc::core::Perf::print_perf_statistic(perfResults);
    // Cross-check the parallel result against the sequential reference
    // on the same input data.
    std::vector<int> seq_result(global_result.size(), 0);
    auto taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
    taskDataSeq->inputs_count.emplace_back(global_matrix.size());
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vector.data()));
    taskDataSeq->inputs_count.emplace_back(global_vector.size());
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_result.data()));
    taskDataSeq->outputs_count.emplace_back(seq_result.size());
    auto taskSequential = std::make_shared<sedova_o_vertical_ribbon_scheme_mpi::SequentialMPI>(taskDataSeq);
    ASSERT_TRUE(taskSequential->validation());
    taskSequential->pre_processing();
    taskSequential->run();
    taskSequential->post_processing();
    ASSERT_EQ(global_result.size(), seq_result.size());
    EXPECT_EQ(global_result, seq_result);
  }
}
Loading
Loading