Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Лаганина Елена. Задача 3. Вариант 22. Поиск кратчайших путей из одной вершины (Алгоритм Дейкстры). С CRS графов. #811

Merged
merged 16 commits into from
Dec 29, 2024
472 changes: 472 additions & 0 deletions tasks/mpi/laganina_e_dejkstras_a/func_tests/main.cpp

Large diffs are not rendered by default.

112 changes: 112 additions & 0 deletions tasks/mpi/laganina_e_dejkstras_a/include/ops_mpi.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
#pragma once

#include <gtest/gtest.h>

#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>

#include <functional>
#include <limits>
#include <memory>
#include <queue>
#include <random>
#include <utility>
#include <vector>

#include "core/task/include/task.hpp"

namespace laganina_e_dejskras_a_mpi {

// Presumably builds a random graph with v vertices (adjacency-matrix form,
// judging by usage in the tests) — body not visible in this header; confirm in the .cpp.
std::vector<int> getRandomgraph(int v);
// Presumably returns the index of the vertex with minimal dist[] among those
// not yet finalized per marker[] — body not visible in this header; confirm in the .cpp.
int minDistanceVertex(const std::vector<int>& dist, const std::vector<int>& marker);

// Sequential reference implementation of single-source shortest paths
// (Dijkstra) over a graph stored in CRS (compressed row storage) form:
// row_ptr[v]..row_ptr[v + 1] delimits the slice of col_ind (target vertices)
// and data (edge weights) belonging to vertex v.
class TestMPITaskSequential : public ppc::core::Task {
 public:
  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
  bool pre_processing() override;
  bool validation() override;
  bool run() override;
  bool post_processing() override;

  // Appends to `neighbor` and `weight` (in matching positions) every outgoing
  // edge of `vertex`: the target vertex and the weight of the edge to it.
  static void get_children_with_weights(int vertex, const std::vector<int>& row_ptr, const std::vector<int>& col_ind,
                                        const std::vector<int>& data, std::vector<int>& neighbor,
                                        std::vector<int>& weight) {
    // Edge range [start, end) of this vertex inside col_ind/data.
    const int start = row_ptr[vertex];
    const int end = row_ptr[vertex + 1];
    // Reserve once so the push_back loop never reallocates.
    neighbor.reserve(neighbor.size() + static_cast<std::size_t>(end - start));
    weight.reserve(weight.size() + static_cast<std::size_t>(end - start));
    for (int i = start; i < end; ++i) {
      neighbor.push_back(col_ind[i]);  // neighboring vertex
      weight.push_back(data[i]);       // weight of the edge to it
    }
  }

  // Dijkstra's algorithm from `start_vertex` over a CRS graph with `v`
  // vertices and non-negative integer edge weights. On return, distances[i]
  // holds the shortest-path length from start_vertex to i, or INT_MAX if i is
  // unreachable.
  static void dijkstra(int start_vertex, const std::vector<int>& row_ptr, const std::vector<int>& col_ind,
                       const std::vector<int>& data, int v, std::vector<int>& distances) {
    // Unreached vertices keep "infinity".
    distances.resize(v, std::numeric_limits<int>::max());
    distances[start_vertex] = 0;

    // Tracks vertices whose distance is final.
    std::vector<bool> visited(v, false);

    // Min-heap of (distance, vertex): std::greater<> pops the smallest
    // tentative distance first.
    std::priority_queue<std::pair<int, int>, std::vector<std::pair<int, int>>, std::greater<>> priority_queue;
    priority_queue.emplace(0, start_vertex);

    while (!priority_queue.empty()) {
      const int current_distance = priority_queue.top().first;
      const int current_vertex = priority_queue.top().second;
      priority_queue.pop();

      // Stale queue entry: this vertex was already finalized via a shorter path.
      if (visited[current_vertex]) {
        continue;
      }
      visited[current_vertex] = true;

      // Relax every outgoing edge of the current vertex.
      const int start = row_ptr[current_vertex];
      const int end = row_ptr[current_vertex + 1];
      for (int i = start; i < end; ++i) {
        const int neighbor_vertex = col_ind[i];
        const int new_distance = current_distance + data[i];
        if (new_distance < distances[neighbor_vertex]) {
          distances[neighbor_vertex] = new_distance;
          priority_queue.emplace(new_distance, neighbor_vertex);
        }
      }
    }
  }

 private:
  // Graph in CRS form plus the computed result.
  std::vector<int> row_ptr;
  std::vector<int> col_ind;
  std::vector<int> data;
  int v{};  // number of vertices (matrix dimension)
  std::vector<int> distances;
};
// MPI-parallel counterpart of TestMPITaskSequential: same ppc::core::Task
// pipeline (validation -> pre_processing -> run -> post_processing). Method
// bodies are not visible in this header; see the implementation .cpp.
class TestMPITaskParallel : public ppc::core::Task {
public:
explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
bool pre_processing() override;
bool validation() override;
bool run() override;
bool post_processing() override;

private:
// Graph in CRS (compressed row storage) form plus the computed result.
std::vector<int> row_ptr;
std::vector<int> col_ind;
std::vector<int> data;
int v{};  // dimension
std::vector<int> distances;

// Communicator for the MPI collectives used by this task.
boost::mpi::communicator world;
};

} // namespace laganina_e_dejskras_a_mpi
117 changes: 117 additions & 0 deletions tasks/mpi/laganina_e_dejkstras_a/perf_tests/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
#include <gtest/gtest.h>

#include "core/perf/include/perf.hpp"
#include "mpi/laganina_e_dejkstras_a/include/ops_mpi.hpp"

TEST(laganina_e_dejkstras_a_mpi, test_pipeline_run) {
  boost::mpi::communicator world;
  const int n = 1000;

  // Dense adjacency matrix: every vertex except the last has a unit-weight
  // edge to every vertex except vertex 0; self-loops are excluded.
  std::vector<int> adjacency(n * n);
  for (int idx = 0; idx < n * n; ++idx) {
    const int row = idx / n;
    const int col = idx % n;
    adjacency[idx] = (row < n - 1 && col > 0 && row != col) ? 1 : 0;
  }

  // From vertex 0 every other vertex is reachable in exactly one hop.
  std::vector<int> expected(n, 1);
  expected[0] = 0;
  std::vector<int> actual(n, 0);

  // Only rank 0 owns the task's inputs/outputs.
  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(adjacency.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(actual.data()));
    taskDataPar->outputs_count.emplace_back(actual.size());
  }

  auto testTaskParallel = std::make_shared<laganina_e_dejskras_a_mpi::TestMPITaskParallel>(taskDataPar);
  ASSERT_TRUE(testTaskParallel->validation());
  testTaskParallel->pre_processing();
  testTaskParallel->run();
  testTaskParallel->post_processing();

  // Wall-clock timer shared by all perf iterations.
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const auto start = std::chrono::high_resolution_clock::now();
  perfAttr->current_timer = [&] {
    const auto elapsed =
        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - start)
            .count();
    return static_cast<double>(elapsed) * 1e-9;
  };

  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskParallel);
  perfAnalyzer->pipeline_run(perfAttr, perfResults);
  ppc::core::Perf::print_perf_statistic(perfResults);

  // Result lives only on the root rank.
  if (world.rank() == 0) {
    ASSERT_EQ(expected, actual);
  }
}

TEST(laganina_e_dejkstras_a_mpi, test_task_run) {
  boost::mpi::communicator world;
  const int n = 1000;

  // Dense adjacency matrix: every vertex except the last has a unit-weight
  // edge to every vertex except vertex 0; self-loops are excluded.
  std::vector<int> adjacency(n * n);
  for (int idx = 0; idx < n * n; ++idx) {
    const int row = idx / n;
    const int col = idx % n;
    adjacency[idx] = (row < n - 1 && col > 0 && row != col) ? 1 : 0;
  }

  // From vertex 0 every other vertex is reachable in exactly one hop.
  std::vector<int> expected(n, 1);
  expected[0] = 0;
  std::vector<int> actual(n, 0);

  // Only rank 0 owns the task's inputs/outputs.
  auto taskDataPar = std::make_shared<ppc::core::TaskData>();
  if (world.rank() == 0) {
    taskDataPar->inputs_count.emplace_back(n);
    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(adjacency.data()));
    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(actual.data()));
    taskDataPar->outputs_count.emplace_back(actual.size());
  }

  auto testTaskParallel = std::make_shared<laganina_e_dejskras_a_mpi::TestMPITaskParallel>(taskDataPar);
  ASSERT_TRUE(testTaskParallel->validation());
  testTaskParallel->pre_processing();
  testTaskParallel->run();
  testTaskParallel->post_processing();

  // Wall-clock timer shared by all perf iterations.
  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
  perfAttr->num_running = 10;
  const auto start = std::chrono::high_resolution_clock::now();
  perfAttr->current_timer = [&] {
    const auto elapsed =
        std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::high_resolution_clock::now() - start)
            .count();
    return static_cast<double>(elapsed) * 1e-9;
  };

  auto perfResults = std::make_shared<ppc::core::PerfResults>();
  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskParallel);
  perfAnalyzer->task_run(perfAttr, perfResults);
  ppc::core::Perf::print_perf_statistic(perfResults);

  // Result lives only on the root rank.
  if (world.rank() == 0) {
    ASSERT_EQ(expected, actual);
  }
}
Loading
Loading