Revert "Гордеева Таисия. Задача 2. Вариант 4. Спящий брадобрей" #757

Closed
Changes from all commits (56 commits):
5bfddcd  add files  (Dec 2, 2024)
6225155  add files_2  (Dec 2, 2024)
e43a971  add files_3  (Dec 2, 2024)
c49553c  add_files_end  (Dec 4, 2024)
939c863  add_files_end_1  (Dec 4, 2024)
5e60842  add_files_end_2  (Dec 4, 2024)
3688101  add_file_enddd  (Dec 4, 2024)
1bdfcf7  addd  (Dec 4, 2024)
63cf693  addd_enddd  (Dec 5, 2024)
9bdc80a  addd_endddihope  (Dec 5, 2024)
df108ca  addd_endddihope0  (Dec 5, 2024)
6e0e6be  111  (Dec 5, 2024)
095e986  222  (Dec 5, 2024)
81634fb  without_perf  (Dec 5, 2024)
abf5b13  11  (Dec 5, 2024)
467051e  endfinal  (Dec 5, 2024)
775ba78  wefwepfjw  (Dec 5, 2024)
1cf8dbb  result  (Dec 5, 2024)
59388ab  res1  (Dec 5, 2024)
892fade  res_end  (Dec 5, 2024)
c819587  yrayra  (Dec 5, 2024)
20c5fe3  reviewer_1  (Dec 6, 2024)
018aaf5  reviewer_2  (Dec 6, 2024)
fad7e17  oh  (Dec 9, 2024)
fcb3e50  oh1  (Dec 9, 2024)
2a81fa3  reviewers_3_and_4  (Dec 9, 2024)
03f3516  reviewers_3_and_4_end  (Dec 9, 2024)
0ff6e09  reviewers_3_and_4_end_1  (Dec 9, 2024)
a423190  1  (Dec 18, 2024)
5c499cc  1  (Dec 23, 2024)
52c1ff4  1  (Dec 24, 2024)
0466116  revert2  (Dec 25, 2024)
7bbfcfc  revert_2  (Dec 25, 2024)
72f4c7c  without lab_1  (Dec 25, 2024)
aed037a  2  (Dec 26, 2024)
88fd67e  correct conflicts  (Dec 29, 2024)
45d3977  2  (Dec 29, 2024)
97edfb8  end  (Dec 29, 2024)
f1846cf  1  (Dec 29, 2024)
8f5011a  11  (Dec 29, 2024)
ea0c61c  yra?  (Dec 29, 2024)
f5d6668  yra?!  (Dec 29, 2024)
f2d6287  pls end  (Dec 29, 2024)
68094dc  correct end  (Dec 29, 2024)
1471e48  correct  (Dec 29, 2024)
5cccf69  correct  (Dec 29, 2024)
d7f824a  1  (Dec 29, 2024)
081d280  111  (Dec 29, 2024)
e96f2b9  Merge branch 'master' into gordeeva_sleeping_barber_test  (TayaGordeeva, Dec 29, 2024)
cc67844  plsplspls  (TayaGordeeva, Dec 29, 2024)
48cbbc8  doooone  (TayaGordeeva, Dec 29, 2024)
0d6efab  yees  (TayaGordeeva, Dec 29, 2024)
f20b21d  correeeend  (TayaGordeeva, Dec 29, 2024)
3447da6  Done result  (TayaGordeeva, Dec 29, 2024)
4244f4d  Update  (TayaGordeeva, Dec 29, 2024)
5df62a0  Update  (TayaGordeeva, Dec 29, 2024)
306 changes: 306 additions & 0 deletions tasks/mpi/gordeeva_t_shell_sort_batcher_merge/func_tests/main.cpp
@@ -0,0 +1,306 @@
#include <gtest/gtest.h>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <iostream>
#include <random>
#include <vector>

#include "mpi/gordeeva_t_shell_sort_batcher_merge/include/ops_mpi.hpp"

// Generates `size` random integers uniformly distributed in [down, upp].
std::vector<int> rand_vec(int size, int down = -100, int upp = 100) {
  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_int_distribution<> dis(down, upp);
  std::vector<int> v(size);
  for (auto &number : v) {
    number = dis(gen);
  }
  return v;
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_with_fixed) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  const int size = 10;
  std::vector<int> input_vec = {3, 4, 7, 1, 8, 9, 5, 2, 6, 0};
  std::vector<int> result_parallel(size);
  std::vector<int> result_seq(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  ASSERT_TRUE(testPar.validation());
  ASSERT_TRUE(testPar.pre_processing());
  ASSERT_TRUE(testPar.run());
  ASSERT_TRUE(testPar.post_processing());

  world.barrier();

  if (world.rank() == 0) {
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataSeq->inputs_count = {static_cast<size_t>(size)};
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_seq.data()));
    taskDataSeq->outputs_count = {static_cast<size_t>(size)};

    gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);

    ASSERT_TRUE(testMpiTaskSequential.validation());
    testMpiTaskSequential.pre_processing();
    testMpiTaskSequential.run();
    testMpiTaskSequential.post_processing();

    // Only rank 0 holds both results, so compare them there.
    ASSERT_EQ(result_parallel, result_seq);
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_Zero_Value) {
  boost::mpi::environment env;
  boost::mpi::communicator world;

  const int size = 0;
  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  if (world.rank() == 0) {
    // Validation must reject an empty input.
    ASSERT_FALSE(testPar.validation());
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_Empty_Output) {
  boost::mpi::environment env;
  boost::mpi::communicator world;
  const int size = 0;

  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  if (world.rank() == 0) {
    // Validation must reject a missing output buffer.
    ASSERT_FALSE(testPar.validation());
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_17_with_random) {
  boost::mpi::environment env;
  boost::mpi::communicator world;

  const int size = 17;
  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);
  std::vector<int> result_seq(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  ASSERT_TRUE(testPar.validation());
  ASSERT_TRUE(testPar.pre_processing());
  ASSERT_TRUE(testPar.run());
  ASSERT_TRUE(testPar.post_processing());

  world.barrier();

  if (world.rank() == 0) {
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataSeq->inputs_count = {static_cast<size_t>(size)};
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_seq.data()));
    taskDataSeq->outputs_count = {static_cast<size_t>(size)};

    gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);

    ASSERT_TRUE(testMpiTaskSequential.validation());
    testMpiTaskSequential.pre_processing();
    testMpiTaskSequential.run();
    testMpiTaskSequential.post_processing();

    // Only rank 0 holds both results, so compare them there.
    ASSERT_EQ(result_parallel, result_seq);
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_100_with_random) {
  boost::mpi::environment env;
  boost::mpi::communicator world;

  const int size = 100;
  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);
  std::vector<int> result_seq(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  ASSERT_TRUE(testPar.validation());
  ASSERT_TRUE(testPar.pre_processing());
  ASSERT_TRUE(testPar.run());
  ASSERT_TRUE(testPar.post_processing());

  world.barrier();

  if (world.rank() == 0) {
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataSeq->inputs_count = {static_cast<size_t>(size)};
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_seq.data()));
    taskDataSeq->outputs_count = {static_cast<size_t>(size)};

    gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);

    ASSERT_TRUE(testMpiTaskSequential.validation());
    testMpiTaskSequential.pre_processing();
    testMpiTaskSequential.run();
    testMpiTaskSequential.post_processing();

    // Only rank 0 holds both results, so compare them there.
    ASSERT_EQ(result_parallel, result_seq);
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_1000_with_random) {
  boost::mpi::environment env;
  boost::mpi::communicator world;

  const int size = 1000;
  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);
  std::vector<int> result_seq(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  ASSERT_TRUE(testPar.validation());
  ASSERT_TRUE(testPar.pre_processing());
  ASSERT_TRUE(testPar.run());
  ASSERT_TRUE(testPar.post_processing());

  world.barrier();

  if (world.rank() == 0) {
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataSeq->inputs_count = {static_cast<size_t>(size)};
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_seq.data()));
    taskDataSeq->outputs_count = {static_cast<size_t>(size)};

    gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);

    ASSERT_TRUE(testMpiTaskSequential.validation());
    testMpiTaskSequential.pre_processing();
    testMpiTaskSequential.run();
    testMpiTaskSequential.post_processing();
  }

  if (world.rank() == 0) {
    ASSERT_EQ(result_parallel, result_seq);
  }
}

TEST(gordeeva_t_shell_sort_batcher_merge_mpi, Shell_sort_5000_with_random) {
  boost::mpi::environment env;
  boost::mpi::communicator world;

  const int size = 5000;
  std::vector<int> input_vec;
  std::vector<int> result_parallel(size);
  std::vector<int> result_seq(size);

  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();

  if (world.rank() == 0) {
    input_vec = rand_vec(size, 0, 1000);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataPar->inputs_count = {static_cast<size_t>(size)};
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_parallel.data()));
    taskDataPar->outputs_count = {static_cast<size_t>(size)};
  }

  gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskParallel testPar(taskDataPar);

  ASSERT_TRUE(testPar.validation());
  ASSERT_TRUE(testPar.pre_processing());
  ASSERT_TRUE(testPar.run());
  ASSERT_TRUE(testPar.post_processing());

  world.barrier();

  if (world.rank() == 0) {
    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(input_vec.data()));
    taskDataSeq->inputs_count = {static_cast<size_t>(size)};
    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(result_seq.data()));
    taskDataSeq->outputs_count = {static_cast<size_t>(size)};

    gordeeva_t_shell_sort_batcher_merge_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);

    ASSERT_TRUE(testMpiTaskSequential.validation());
    testMpiTaskSequential.pre_processing();
    testMpiTaskSequential.run();
    testMpiTaskSequential.post_processing();
  }

  if (world.rank() == 0) {
    ASSERT_EQ(result_parallel, result_seq);
  }
}
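
For reference, the tests above exercise a Shell sort whose implementation (ops_mpi.cpp) is not shown in this excerpt. Below is a minimal sketch of a classic Shell sort over a std::vector<int>, assuming the common n/2, n/4, ..., 1 gap schedule; the PR's actual shellSort may use a different gap sequence.

// Minimal Shell sort sketch (illustrative only, not the PR's implementation).
#include <cstddef>
#include <vector>

void shell_sort_sketch(std::vector<int> &arr) {
  const std::size_t n = arr.size();
  for (std::size_t gap = n / 2; gap > 0; gap /= 2) {
    // Gapped insertion sort: move each element left in steps of `gap`
    // until the element `gap` positions before it is not larger.
    for (std::size_t i = gap; i < n; ++i) {
      const int value = arr[i];
      std::size_t j = i;
      while (j >= gap && arr[j - gap] > value) {
        arr[j] = arr[j - gap];
        j -= gap;
      }
      arr[j] = value;
    }
  }
}

Each pass leaves the array gap-sorted, so the final pass with gap 1 is an ordinary insertion sort over nearly sorted data.
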
46 changes: 46 additions & 0 deletions tasks/mpi/gordeeva_t_shell_sort_batcher_merge/include/ops_mpi.hpp
@@ -0,0 +1,46 @@
#pragma once

#include <gtest/gtest.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <memory>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace gordeeva_t_shell_sort_batcher_merge_mpi {

// Sequential Shell sort over the whole array.
void shellSort(std::vector<int>& arr);

class TestMPITaskSequential : public ppc::core::Task {
 public:
  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

 private:
  std::vector<int> input_;
  std::vector<int> res_;
};

class TestMPITaskParallel : public ppc::core::Task {
 public:
  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

  // Merge step between the two given ranks, used by the Batcher-style merge.
  void batcher_merge(size_t rank1, size_t rank2, std::vector<int>& local_input_local);

 private:
  std::vector<int> input_, local_input_;
  std::vector<int> res_;
  size_t sz_mpi = 0;
  boost::mpi::communicator world;
};

}  // namespace gordeeva_t_shell_sort_batcher_merge_mpi
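
The header declares batcher_merge(rank1, rank2, local_input_local), which suggests a pairwise merge-exchange between two ranks in a Batcher-style merging network. Its body is not part of this excerpt, so the following is only a sketch under that assumption; the function name merge_exchange and the keep-low/keep-high convention are illustrative, not the PR's code.

// Hypothetical sketch of a pairwise merge-exchange between two ranks.
#include <algorithm>
#include <cstddef>
#include <vector>

#include <boost/mpi/communicator.hpp>
#include <boost/mpi/request.hpp>

void merge_exchange(const boost::mpi::communicator &world, int low_rank, int high_rank,
                    std::vector<int> &local) {
  const int me = world.rank();
  if (me != low_rank && me != high_rank) {
    return;  // Ranks outside the pair do nothing in this step.
  }
  const int partner = (me == low_rank) ? high_rank : low_rank;

  // Exchange sorted blocks with the partner; a non-blocking send avoids a
  // deadlock when both ranks post their send first.
  std::vector<int> partner_data;
  boost::mpi::request req = world.isend(partner, 0, local);
  world.recv(partner, 0, partner_data);
  req.wait();

  // Merge both sorted blocks; the lower rank keeps the smaller half,
  // the higher rank keeps the larger half.
  std::vector<int> merged(local.size() + partner_data.size());
  std::merge(local.begin(), local.end(), partner_data.begin(), partner_data.end(), merged.begin());
  const auto keep = static_cast<std::ptrdiff_t>(local.size());
  if (me == low_rank) {
    local.assign(merged.begin(), merged.begin() + keep);
  } else {
    local.assign(merged.end() - keep, merged.end());
  }
}

In an odd-even Batcher network, such exchanges are applied to fixed rank pairs in successive rounds until every rank's block is ordered relative to its neighbors.
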