diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 10fc78a3a0d..9d26069a9fd 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -5,6 +5,7 @@ on: pull_request: schedule: - cron: '0 12 * * *' + workflow_dispatch: jobs: ubuntu-gcc-build: @@ -44,9 +45,20 @@ jobs: env: CC: gcc-13 CXX: g++-13 - - name: Run func tests + - name: Run func tests (num_proc=2) run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=2 + source scripts/run.sh + - name: Run func tests (num_proc=3) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=3 + source scripts/run.sh + - name: Run func tests (num_proc=4) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh ubuntu-clang-build: runs-on: ubuntu-latest @@ -85,9 +97,20 @@ jobs: env: CC: clang-18 CXX: clang++-18 - - name: Run tests + - name: Run func tests (num_proc=2) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=2 + source scripts/run.sh + - name: Run func tests (num_proc=3) + run: | + export OMP_NUM_THREADS=4 + export PROC_COUNT=3 + source scripts/run.sh + - name: Run func tests (num_proc=4) run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh ubuntu-clang-sanitizer-build: runs-on: ubuntu-latest @@ -130,6 +153,7 @@ jobs: run: | export OMP_NUM_THREADS=4 export ASAN_RUN=1 + export PROC_COUNT=4 source scripts/run.sh macos-clang-build: runs-on: macOS-latest @@ -163,6 +187,7 @@ jobs: - name: Run tests run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=2 source scripts/run.sh windows-msvc-build: runs-on: windows-latest @@ -249,7 +274,7 @@ jobs: sudo apt-get install mpich libmpich* mpi* openmpi-bin sudo apt-get install libomp-dev sudo apt-get install valgrind - sudo apt-get install gcovr + sudo apt-get install gcovr lcov - name: CMake configure run: > cmake -S . -B build @@ -264,14 +289,26 @@ jobs: - name: Run tests run: | export OMP_NUM_THREADS=4 + export PROC_COUNT=4 source scripts/run.sh - - name: Generate Coverage Data + - name: Generate gcovr Coverage Data run: | cd build gcovr -r ../ --xml --output ../coverage.xml - cat ../coverage.xml - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v4.0.1 with: - token: ${{ secrets.CODECOV_TOKEN }} + token: 01474879-5a86-4c59-bd1d-6e29d3ed9550 files: coverage.xml + - name: Generate lcov Coverage Data + run: | + cd build + lcov --capture --directory . --output-file ../coverage.info + lcov --remove ../coverage.info '*/3rdparty/*' '/usr/*' '*/perf_tests/*' '*/func_tests/*' --output-file ../coverage.info + cd .. 
+ genhtml coverage.info --output-directory cov-report + - name: Upload coverage report artifact + uses: actions/upload-artifact@v4 + with: + name: cov-report + path: 'cov-report' diff --git a/.github/workflows/perf-statistic.yml b/.github/workflows/perf-statistic.yml index 86b37373bbc..bd26d5cf7d3 100644 --- a/.github/workflows/perf-statistic.yml +++ b/.github/workflows/perf-statistic.yml @@ -3,6 +3,7 @@ name: Collect performance statistic on: schedule: - cron: '0 12 * * *' + workflow_dispatch: jobs: ubuntu-gcc-build: diff --git a/.github/workflows/static-analysis-pr.yml b/.github/workflows/static-analysis-pr.yml index 1e824a306d0..4954a5e6e38 100644 --- a/.github/workflows/static-analysis-pr.yml +++ b/.github/workflows/static-analysis-pr.yml @@ -9,8 +9,6 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Update submodules - run: git submodule update --init --recursive - name: ccache uses: hendrikmuhs/ccache-action@v1.2 with: @@ -36,8 +34,6 @@ jobs: - uses: actions/checkout@v4 with: submodules: recursive - - name: Update submodules - run: git submodule update --init --recursive - name: ccache uses: hendrikmuhs/ccache-action@v1.2 with: @@ -53,4 +49,6 @@ jobs: split_workflow: true lgtm_comment_body: "" - if: steps.review.outputs.total_comments > 0 - run: exit 1 + run: | + echo "clang-tidy run has failed. See previous 'Run clang-tidy' stage logs" + exit 1 diff --git a/.gitignore b/.gitignore index e87fe912cdf..11a9324ee55 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,15 @@ build +out mpich cmake-build-release* cmake-build-debug* .idea/ +.vs/ .vscode/ scripts/variants.csv scripts/variants.xlsx venv* sln/ +CMakeSettings.json .DS_Store .cache diff --git a/README.md b/README.md index 3a767dc7e29..c015df52932 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ The following parallel programming technologies are considered in practice: ## 0. Download all submodules ``` - git submodule update --init --recursive + git submodule update --init --recursive --depth=1 ``` ## 1. Set up your environment ### Static analysis of project (optional) @@ -24,10 +24,16 @@ The following parallel programming technologies are considered in practice: Unsupported operating system! - * **Linux (`gcc` and `clang`)**: + * **Ubuntu / Debian (`gcc` and `clang`)**: ``` sudo apt install -y cppcheck ``` + + * **NixOS / Nix (with flakes enabled)**: + ``` + nix develop . + ``` + * **MacOS (apple clang)**: ``` brew install cppcheck @@ -44,10 +50,16 @@ Code style is checked using [clang-format](https://clang.llvm.org/docs/ClangForm [Installers link.](https://www.microsoft.com/en-us/download/details.aspx?id=105289) You have to install `msmpisdk.msi` and `msmpisetup.exe`. - * **Linux (`gcc` and `clang`)**: + * **Ubuntu / Debian (`gcc` and `clang`)**: ``` sudo apt install -y mpich openmpi-bin libopenmpi-dev ``` + + * **NixOS / Nix (with flakes enabled)**: + ``` + nix develop . + ``` + * **MacOS (apple clang)**: ``` brew install open-mpi @@ -57,10 +69,16 @@ Code style is checked using [clang-format](https://clang.llvm.org/docs/ClangForm `OpenMP` is included into `gcc` and `msvc`, but some components should be installed additionally: - * **Linux (`gcc` and `clang`)**: + * **Ubuntu / Debian (`gcc` and `clang`)**: ``` sudo apt install -y libomp-dev ``` + + * **NixOS / Nix (with flakes enabled)**: + ``` + nix develop . + ``` + * **MacOS (`llvm`)**: ``` brew install llvm @@ -81,7 +99,7 @@ Navigate to a source code folder. 
``` mkdir build && cd build - cmake -D USE_SEQ=ON -D USE_MPI=ON -D USE_OMP=ON -D USE_TBB=ON -D USE_STL=ON -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D USE_CPPCHECK=ON -D CMAKE_BUILD_TYPE=Release .. + cmake -D USE_SEQ=ON -D USE_MPI=ON -D USE_OMP=ON -D USE_TBB=ON -D USE_STL=ON -D USE_FUNC_TESTS=ON -D USE_PERF_TESTS=ON -D CMAKE_BUILD_TYPE=Release .. ``` *Help on CMake keys:* - `-D USE_SEQ=ON` enable `Sequential` labs (based on OpenMP's CMakeLists.txt). diff --git a/appveyor.yml b/appveyor.yml index 69d867d4f8f..99ff9fa8fce 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -18,7 +18,7 @@ install: - python -m pip install xlsxwriter build_script: - - cmd: git submodule update --init --recursive + - cmd: git submodule update --init --recursive --depth=1 - cmd: mkdir build - cmd: cmake -S . -B build ^ -D USE_SEQ=ON ^ diff --git a/cmake/boost.cmake b/cmake/boost.cmake index b95a2c63eaa..9042385fd58 100644 --- a/cmake/boost.cmake +++ b/cmake/boost.cmake @@ -11,8 +11,10 @@ ExternalProject_Add(ppc_boost BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/build" INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/install" CONFIGURE_COMMAND "${CMAKE_COMMAND}" -S "${CMAKE_SOURCE_DIR}/3rdparty/boost/" -B "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/build/" - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -G${CMAKE_GENERATOR} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DBOOST_ENABLE_MPI=ON + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -G${CMAKE_GENERATOR} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DBOOST_INCLUDE_LIBRARIES=mpi -DBOOST_ENABLE_MPI=ON -D MPI_INCLUDE_PATH=${MPI_INCLUDE_PATH} -D MPI_LIBRARIES=${MPI_LIBRARIES} -D MPI_COMPILE_FLAGS=${MPI_COMPILE_FLAGS} -D MPI_LINK_FLAGS=${MPI_LINK_FLAGS} + -DCMAKE_INSTALL_LIBDIR=lib BUILD_COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/build" --config ${CMAKE_BUILD_TYPE} INSTALL_COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/build" --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_boost/install" TEST_COMMAND "") diff --git a/cmake/configure.cmake b/cmake/configure.cmake index fe2e64f8cbb..5f5153965ea 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -59,7 +59,7 @@ MACRO(CPPCHECK_TEST ProjectId ALL_SOURCE_FILES) endif () endforeach () if (NOT APPLE) - find_program(CPPCHECK_EXEC /usr/bin/cppcheck) + find_program(CPPCHECK_EXEC cppcheck) add_custom_target( "${ProjectId}_cppcheck" ALL COMMAND ${CPPCHECK_EXEC} @@ -74,5 +74,5 @@ MACRO(CPPCHECK_TEST ProjectId ALL_SOURCE_FILES) ${ALL_SOURCE_FILES} ) ENDIF () - endif( UNIX ) + endif( UNIX AND USE_CPPCHECK) ENDMACRO() diff --git a/cmake/gtest.cmake b/cmake/gtest.cmake index dac97619ecf..11708107811 100644 --- a/cmake/gtest.cmake +++ b/cmake/gtest.cmake @@ -9,5 +9,6 @@ ExternalProject_Add(ppc_googletest CONFIGURE_COMMAND "${CMAKE_COMMAND}" -S "${CMAKE_SOURCE_DIR}/3rdparty/googletest/" -B "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build/" -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -G${CMAKE_GENERATOR} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -Dgtest_force_shared_crt=ON + -DCMAKE_INSTALL_LIBDIR=lib BUILD_COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --config ${CMAKE_BUILD_TYPE} INSTALL_COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/build" --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_googletest/install") diff --git a/cmake/onetbb.cmake b/cmake/onetbb.cmake index d1dd8a8d398..1cd614f09e1 100644 --- 
a/cmake/onetbb.cmake +++ b/cmake/onetbb.cmake @@ -21,6 +21,7 @@ else(MSVC) INSTALL_DIR "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install" CONFIGURE_COMMAND "${CMAKE_COMMAND}" -S "${CMAKE_SOURCE_DIR}/3rdparty/onetbb/" -B "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build/" -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -G${CMAKE_GENERATOR} -DTBB_TEST=OFF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + -DCMAKE_INSTALL_LIBDIR=lib BUILD_COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" --config ${CMAKE_BUILD_TYPE} INSTALL_COMMAND "${CMAKE_COMMAND}" --install "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/build" --prefix "${CMAKE_CURRENT_BINARY_DIR}/ppc_onetbb/install") endif(MSVC) \ No newline at end of file diff --git a/flake.lock b/flake.lock new file mode 100644 index 00000000000..9b3e22e70f9 --- /dev/null +++ b/flake.lock @@ -0,0 +1,96 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1726560853, + "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1728492678, + "narHash": "sha256-9UTxR8eukdg+XZeHgxW5hQA9fIKHsKCdOIUycTryeVw=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "5633bcff0c6162b9e4b5f1264264611e950c8ec7", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1726871744, + "narHash": "sha256-V5LpfdHyQkUF7RfOaDPrZDP+oqz88lTJrMT1+stXNwo=", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "a1d92660c6b3b7c26fb883500a80ea9d33321be2", + "type": "github" + }, + "original": { + "owner": "nixos", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs", + "treefmt-nix": "treefmt-nix" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "treefmt-nix": { + "inputs": { + "nixpkgs": "nixpkgs_2" + }, + "locked": { + "lastModified": 1727984844, + "narHash": "sha256-xpRqITAoD8rHlXQafYZOLvUXCF6cnZkPfoq67ThN0Hc=", + "owner": "numtide", + "repo": "treefmt-nix", + "rev": "4446c7a6fc0775df028c5a3f6727945ba8400e64", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "treefmt-nix", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/flake.nix b/flake.nix new file mode 100644 index 00000000000..67fde3df112 --- /dev/null +++ b/flake.nix @@ -0,0 +1,38 @@ +{ + inputs = { + nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + treefmt-nix.url = "github:numtide/treefmt-nix"; + }; + outputs = { + self, + nixpkgs, + flake-utils, + treefmt-nix, + }: + flake-utils.lib.eachDefaultSystem (system: let + pkgs = import nixpkgs { + inherit system; + }; + py3 = pkgs.python3.withPackages (ps: [ps.xlsxwriter]); + in { + devShells.default = pkgs.mkShell { + packages = with pkgs; [ + gcc + ninja + cmake + openmpi + 
py3 + cppcheck + ]; + }; + formatter = + (treefmt-nix.lib.evalModule pkgs { + projectRootFile = "flake.nix"; + programs.alejandra.enable = true; + }) + .config + .build + .wrapper; + }); +} diff --git a/modules/core/perf/include/perf.hpp b/modules/core/perf/include/perf.hpp index 1270aaac3a9..8ed56e7d767 100644 --- a/modules/core/perf/include/perf.hpp +++ b/modules/core/perf/include/perf.hpp @@ -24,7 +24,6 @@ struct PerfResults { double time_sec = 0.0; enum TypeOfRunning { PIPELINE, TASK_RUN, NONE } type_of_running = NONE; constexpr const static double MAX_TIME = 10.0; - constexpr const static double MIN_TIME = 0.05; }; class Perf { diff --git a/modules/core/perf/src/perf.cpp b/modules/core/perf/src/perf.cpp index 3f47feabb28..4d56b9946f9 100644 --- a/modules/core/perf/src/perf.cpp +++ b/modules/core/perf/src/perf.cpp @@ -78,14 +78,14 @@ void ppc::core::Perf::print_perf_statistic(const std::shared_ptr& p relative_path.erase(last_found_position, relative_path.length() - 1); std::stringstream perf_res_str; - if (time_secs > PerfResults::MIN_TIME && time_secs < PerfResults::MAX_TIME) { + if (time_secs < PerfResults::MAX_TIME) { perf_res_str << std::fixed << std::setprecision(10) << time_secs; } else { std::cerr << "Task execute time need to be: "; - std::cerr << PerfResults::MIN_TIME << " secs. < time < " << PerfResults::MAX_TIME << " secs." << std::endl; + std::cerr << " time < " << PerfResults::MAX_TIME << " secs." << std::endl; std::cerr << "Original time in secs: " << time_secs; perf_res_str << std::fixed << std::setprecision(10) << -1.0; - EXPECT_TRUE(time_secs > PerfResults::MIN_TIME && time_secs < PerfResults::MAX_TIME); + EXPECT_TRUE(time_secs < PerfResults::MAX_TIME); } std::cout << relative_path << ":" << type_test_name << ":" << perf_res_str.str() << std::endl; diff --git a/scripts/generate_perf_results.sh b/scripts/generate_perf_results.sh index e746fe66714..6e590dc6e76 100644 --- a/scripts/generate_perf_results.sh +++ b/scripts/generate_perf_results.sh @@ -1,3 +1,3 @@ mkdir build/perf_stat_dir -source scripts/run_perf_collector.sh &> build/perf_stat_dir/perf_log.txt +source scripts/run_perf_collector.sh 2>&1 | tee build/perf_stat_dir/perf_log.txt python3 scripts/create_perf_table.py --input build/perf_stat_dir/perf_log.txt --output build/perf_stat_dir diff --git a/scripts/run.sh b/scripts/run.sh index 48fb3b0a781..8e751e5c8fa 100644 --- a/scripts/run.sh +++ b/scripts/run.sh @@ -16,11 +16,11 @@ fi if [[ -z "$ASAN_RUN" ]]; then if [[ $OSTYPE == "linux-gnu" ]]; then - mpirun --oversubscribe -np 4 ./build/bin/sample_mpi - mpirun --oversubscribe -np 4 ./build/bin/sample_mpi_boost + mpirun --oversubscribe -np $PROC_COUNT ./build/bin/sample_mpi + mpirun --oversubscribe -np $PROC_COUNT ./build/bin/sample_mpi_boost elif [[ $OSTYPE == "darwin"* ]]; then - mpirun -np 2 ./build/bin/sample_mpi - mpirun -np 2 ./build/bin/sample_mpi_boost + mpirun -np $PROC_COUNT ./build/bin/sample_mpi + mpirun -np $PROC_COUNT ./build/bin/sample_mpi_boost fi fi ./build/bin/sample_omp @@ -37,13 +37,17 @@ fi #fi #echo "NUM_PROC: " $NUM_PROC -if [[ -z "$ASAN_RUN" ]]; then - if [[ $OSTYPE == "linux-gnu" ]]; then - mpirun --oversubscribe -np 4 ./build/bin/mpi_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating - elif [[ $OSTYPE == "darwin"* ]]; then - mpirun -np 2 ./build/bin/mpi_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating +# separate tests for debug +for test_item in $(./build/bin/mpi_func_tests 
--gtest_list_tests | awk '/\./{ SUITE=$1 } / / { print SUITE $1 }')
+do
+  if [[ -z "$ASAN_RUN" ]]; then
+    if [[ $OSTYPE == "linux-gnu" ]]; then
+      mpirun --oversubscribe -np 4 ./build/bin/mpi_func_tests --gtest_filter="$test_item" --gtest_repeat=10
+    elif [[ $OSTYPE == "darwin"* ]]; then
+      mpirun -np 2 ./build/bin/mpi_func_tests --gtest_filter="$test_item" --gtest_repeat=10
+    fi
+  fi
-fi
+done
 ./build/bin/omp_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating
 ./build/bin/seq_func_tests --gtest_also_run_disabled_tests --gtest_repeat=10 --gtest_recreate_environments_when_repeating
diff --git a/scripts/run_perf_collector.sh b/scripts/run_perf_collector.sh
index 124620fafe8..040f4eb0a52 100644
--- a/scripts/run_perf_collector.sh
+++ b/scripts/run_perf_collector.sh
@@ -1,12 +1,16 @@
 #!/bin/bash
-
-if [[ -z "$ASAN_RUN" ]]; then
-  if [[ $OSTYPE == "linux-gnu" ]]; then
-    mpirun --oversubscribe -np 4 ./build/bin/mpi_perf_tests
-  elif [[ $OSTYPE == "darwin"* ]]; then
-    mpirun -np 2 ./build/bin/mpi_perf_tests
+# separate tests for debug
+for test_item in $(./build/bin/mpi_perf_tests --gtest_list_tests | awk '/\./{ SUITE=$1 } / / { print SUITE $1 }')
+do
+  if [[ -z "$ASAN_RUN" ]]; then
+    if [[ $OSTYPE == "linux-gnu" ]]; then
+      mpirun --oversubscribe -np 4 ./build/bin/mpi_perf_tests --gtest_filter="$test_item"
+    elif [[ $OSTYPE == "darwin"* ]]; then
+      mpirun -np 2 ./build/bin/mpi_perf_tests --gtest_filter="$test_item"
+    fi
   fi
-fi
+done
+
 ./build/bin/omp_perf_tests
 ./build/bin/seq_perf_tests
 ./build/bin/stl_perf_tests
diff --git a/tasks/CMakeLists.txt b/tasks/CMakeLists.txt
index 2af1a3f6206..3fe79f18ec6 100644
--- a/tasks/CMakeLists.txt
+++ b/tasks/CMakeLists.txt
@@ -98,7 +98,7 @@ foreach(TASK_TYPE ${LIST_OF_TASKS})
         add_dependencies(${EXEC_FUNC} ppc_boost)
         target_link_directories(${EXEC_FUNC} PUBLIC ${CMAKE_BINARY_DIR}/ppc_boost/install/lib)
         if (NOT MSVC)
-            target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi)
+            target_link_libraries(${EXEC_FUNC} PUBLIC boost_mpi boost_serialization)
        endif ()
     elseif ("${MODULE_NAME}" STREQUAL "tbb")
         add_dependencies(${EXEC_FUNC} ppc_onetbb)
diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..c0c80de631e
--- /dev/null
+++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp
@@ -0,0 +1,264 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp"
+
+TEST(Shurygin_S_max_po_stolbam_matrix_mpi, EmptyInputs) {
+  boost::mpi::communicator world;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+  }
+}
+
+TEST(Shurygin_S_max_po_stolbam_matrix_mpi, EmptyOutputs) {
+  boost::mpi::communicator world;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs_count.push_back(3);
+    taskDataPar->inputs_count.push_back(4);
+    taskDataPar->inputs.push_back(reinterpret_cast<uint8_t*>(new int[12]));
+    ASSERT_FALSE(testMpiTaskParallel.validation());
+
+    delete[] reinterpret_cast<int*>(taskDataPar->inputs[0]);
+  }
+}
+
+TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectInputsCountSize) {
+  boost::mpi::communicator
world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(4); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectInputsCountValue) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(0); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(4); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectOutputsCountSize) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(4); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(3); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, IncorrectOutputsCountValue) { + boost::mpi::communicator world; + std::shared_ptr taskDataPar = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + + if (world.rank() == 0) { + taskDataPar->inputs_count.push_back(3); + taskDataPar->inputs_count.push_back(4); + taskDataPar->inputs.push_back(reinterpret_cast(new int[12])); + taskDataPar->outputs_count.push_back(5); + ASSERT_FALSE(testMpiTaskParallel.validation()); + + delete[] reinterpret_cast(taskDataPar->inputs[0]); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_10x10_matrix) { + boost::mpi::communicator world; + const int count_rows = 10; + const int count_columns = 10; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + 
taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_100x100_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 100; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_100x500_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 500; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 
0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_mpi, find_max_val_in_col_3000x3000_matrix) { + boost::mpi::communicator world; + const int count_rows = 3000; + const int count_columns = 3000; + std::vector> global_matrix; + std::vector global_max(count_columns, INT_MIN); + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + std::vector reference_max(count_columns, INT_MIN); + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + for (int j = 0; j < count_columns; j++) { + ASSERT_EQ(global_max[j], 200); + } + } +} \ No newline at end of file diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..f92e3eaed9f --- /dev/null +++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace Shurygin_S_max_po_stolbam_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + 
static std::vector<int> generate_random_vector(int size, int lower_bound = 0, int upper_bound = 50);
+  static std::vector<std::vector<int>> generate_random_matrix(int rows, int columns);
+
+ private:
+  std::vector<std::vector<int>> input_;
+  std::vector<int> res_;
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<std::vector<int>> input_;
+  std::vector<std::vector<int>> local_input_;
+  std::vector<int> res_;
+  boost::mpi::communicator world;
+};
+
+}  // namespace Shurygin_S_max_po_stolbam_matrix_mpi
diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp
new file mode 100644
index 00000000000..7692a301b68
--- /dev/null
+++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/perf_tests/main.cpp
@@ -0,0 +1,70 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <climits>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp"
+
+TEST(Shurygin_S_max_po_stolbam_matrix_mpi_perf_test, test_pipeline_run_max) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  int count_rows = 5000;
+  int count_columns = 5000;
+  if (world.rank() == 0) {
+    global_matrix =
+        Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns);
+    global_max.resize(count_columns, INT_MIN);
+    for (auto& row : global_matrix) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(row.data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  if (world.rank() == 0) {
+    for (size_t j = 0; j < global_max.size(); ++j) {
+      ASSERT_EQ(global_max[j], 200);
+    }
+  }
+}
+
+TEST(Shurygin_S_max_po_stolbam_matrix_mpi_perf_test, test_task_run_max) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_max;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  int count_rows = 4560;
+  int count_columns = 4560;
+  if (world.rank() == 0) {
+    global_matrix =
+        Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(count_rows, count_columns);
+    global_max.resize(count_columns, INT_MIN);
+    for (auto& row : global_matrix) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(row.data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+  auto testMpiTaskParallel = std::make_shared<Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  if (world.rank() == 0) {
+    for (size_t j = 0; j < global_max.size(); ++j) {
+      ASSERT_EQ(global_max[j], 200);
+    }
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..45b757260fe
--- /dev/null
+++ b/tasks/mpi/Shurygin_S_max_po_stolbam_matrix/src/ops_mpi.cpp
@@ -0,0 +1,170 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/Shurygin_S_max_po_stolbam_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <climits>
+#include <cstdlib>
+#include <functional>
+#include <thread>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  int rows = taskData->inputs_count[0];
+  int cols = taskData->inputs_count[1];
+  input_.resize(rows, std::vector<int>(cols));
+  for (int i = 0; i < rows; i++) {
+    int* input_matrix = reinterpret_cast<int*>(taskData->inputs[i]);
+    for (int j = 0; j < cols; j++) {
+      input_[i][j] = input_matrix[j];
+    }
+  }
+  res_.resize(cols);
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  if (taskData->inputs.empty() || taskData->outputs.empty()) {
+    return false;
+  }
+  if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) {
+    return false;
+  }
+  if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) {
+    return false;
+  }
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (size_t j = 0; j < input_[0].size(); j++) {
+    int max_val = input_[0][j];
+    for (size_t i = 1; i < input_.size(); i++) {
+      if (input_[i][j] > max_val) {
+        max_val = input_[i][j];
+      }
+    }
+    res_[j] = max_val;
+  }
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (size_t i = 0; i < res_.size(); i++) {
+    output_matrix[i] = res_[i];
+  }
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  int rows = 0;
+  int cols = 0;
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[0];
+    cols = taskData->inputs_count[1];
+  }
+  broadcast(world, rows, 0);
+  broadcast(world, cols, 0);
+  int delta = rows / world.size();
+  int extra = rows % world.size();
+  if (world.rank() == 0) {
+    input_.resize(rows, std::vector<int>(cols));
+    for (int i = 0; i < rows; i++) {
+      int* input_matrix = reinterpret_cast<int*>(taskData->inputs[i]);
+      input_[i].assign(input_matrix, input_matrix + cols);
+    }
+    for (int proc = 1; proc < world.size(); proc++) {
+      int start_row = proc * delta + std::min(proc, extra);
+      int num_rows = delta + (proc < extra ? 1 : 0);
+      for (int r = start_row; r < start_row + num_rows; r++) {
+        world.send(proc, 0, input_[r].data(), cols);
+      }
+    }
+  }
+  int local_rows = delta + (world.rank() < extra ? 1 : 0);
+  local_input_.resize(local_rows, std::vector<int>(cols));
+  if (world.rank() == 0) {
+    std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin());
+  } else {
+    for (int r = 0; r < local_rows; r++) {
+      world.recv(0, 0, local_input_[r].data(), cols);
+    }
+  }
+  res_.resize(cols);
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if (taskData->inputs.empty() || taskData->outputs.empty()) return false;
+    if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0)
+      return false;
+    if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) return false;
+  }
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  std::vector<int> local_maxes(local_input_[0].size(), INT_MIN);
+  for (size_t j = 0; j < local_input_[0].size(); j++) {
+    for (size_t i = 0; i < local_input_.size(); i++) {
+      local_maxes[j] = std::max(local_maxes[j], local_input_[i][j]);
+    }
+  }
+  if (world.rank() == 0) {
+    std::vector<int> global_maxes(res_.size(), INT_MIN);
+    std::copy(local_maxes.begin(), local_maxes.end(), global_maxes.begin());
+    for (int proc = 1; proc < world.size(); proc++) {
+      std::vector<int> proc_maxes(res_.size());
+      world.recv(proc, 0, proc_maxes.data(), res_.size());
+      for (size_t j = 0; j < res_.size(); j++) {
+        global_maxes[j] = std::max(global_maxes[j], proc_maxes[j]);
+      }
+    }
+    std::copy(global_maxes.begin(), global_maxes.end(), res_.begin());
+  } else {
+    world.send(0, 0, local_maxes.data(), local_maxes.size());
+  }
+  return true;
+}
+
+bool Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
+    std::copy(res_.begin(), res_.end(), output_matrix);
+  }
+  return true;
+}
+
+std::vector<int> Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_vector(int size,
+                                                                                                     int lower_bound,
+                                                                                                     int upper_bound) {
+  std::vector<int> v1(size);
+  for (auto& num : v1) {
+    num = lower_bound + std::rand() % (upper_bound - lower_bound + 1);
+  }
+  return v1;
+}
+
+std::vector<std::vector<int>> Shurygin_S_max_po_stolbam_matrix_mpi::TestMPITaskSequential::generate_random_matrix(
+    int rows, int columns) {
+  std::vector<std::vector<int>> matrix1(rows, std::vector<int>(columns));
+  for (int i = 0; i < rows; ++i) {
+    matrix1[i] = generate_random_vector(columns, 1, 100);
+  }
+  for (int j = 0; j < columns; ++j) {
+    int random_row = std::rand() % rows;
+    matrix1[random_row][j] = 200;
+  }
+  return matrix1;
+}
\ No newline at end of file
diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp
new file mode 100644
index 00000000000..714e9513fe6
--- /dev/null
+++ b/tasks/mpi/baranov_a_num_of_orderly_violations/func_tests/main.cpp
@@ -0,0 +1,283 @@
+#include <gtest/gtest.h>
+
+#include "mpi/baranov_a_num_of_orderly_violations/include/header.hpp"
+
+TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_0_int) {
+  const int N = 0;
+  // Create data
+  boost::mpi::communicator world;
+  std::vector<int> arr(N);
+  std::vector<int> out(1);
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    std::random_device rd;
+    std::default_random_engine reng(rd());
+    std::uniform_int_distribution<int> dist(0, arr.size());
+    std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+
data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_100_int) { + const int N = 100; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_10000_int) { + const int N = 10000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_0_double) { + const int N = 0; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_10000_double) { + const int N = 10000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if 
(world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_size_of_vec_is_equal_to_world_size) { + // Create data + boost::mpi::communicator world; + const int N = world.size(); + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_size_of_vec_is_less_than_world_size) { + // Create data + boost::mpi::communicator world; + const int N = world.size() - 1; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_100000_unsigned_int) { + const int N = 100000; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + 
int num = test1.seq_proc(arr); + ASSERT_EQ(num, out[0]); +} + +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_1) { + const int N = 78527; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_2) { + const int N = 2356895; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} +TEST(baranov_a_num_of_orderly_violations_mpi, Test_viol_odd_numbers_int_3) { + const int N = 17; + // Create data + boost::mpi::communicator world; + std::vector arr(N); + std::vector out(1); + std::shared_ptr data_seq = std::make_shared(); + if (world.rank() == 0) { + std::random_device rd; + std::default_random_engine reng(rd()); + std::uniform_int_distribution dist(0, arr.size()); + std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); }); + data_seq->inputs.emplace_back(reinterpret_cast(arr.data())); + data_seq->inputs_count.emplace_back(arr.size()); + data_seq->outputs.emplace_back(reinterpret_cast(out.data())); + data_seq->outputs_count.emplace_back(1); + } + baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations test1(data_seq); + ASSERT_EQ(test1.validation(), true); + test1.pre_processing(); + test1.run(); + test1.post_processing(); + int num = test1.seq_proc(arr); + ASSERT_EQ(out[0], num); +} diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp b/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp new file mode 100644 index 00000000000..bff2b076a02 --- /dev/null +++ b/tasks/mpi/baranov_a_num_of_orderly_violations/include/header.hpp @@ -0,0 +1,30 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace baranov_a_num_of_orderly_violations_mpi { +template +class num_of_orderly_violations : public ppc::core::Task { + public: + explicit num_of_orderly_violations(std::shared_ptr taskData_) : Task(taskData_) 
{}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+  cntype seq_proc(std::vector<iotype> vec);
+
+ private:
+  std::vector<iotype> input_;
+  std::vector<iotype> loc_vec_;
+  cntype num_;
+  boost::mpi::communicator world;
+  int my_loc_vec_size;
+};
+}  // namespace baranov_a_num_of_orderly_violations_mpi
diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp
new file mode 100644
index 00000000000..737b8f4a6ca
--- /dev/null
+++ b/tasks/mpi/baranov_a_num_of_orderly_violations/perf_tests/main.cpp
@@ -0,0 +1,77 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <random>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/baranov_a_num_of_orderly_violations/src/source.cpp"
+TEST(mpi_baranov_a_num_of_orderly_violations_perf_test, test_pipeline_run) {
+  const int count_size_vector = 10000000;
+  boost::mpi::communicator world;
+  std::vector<int> global_vec(count_size_vector);
+  std::vector<int> out(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    std::random_device rd;
+    std::default_random_engine reng(rd());
+    std::uniform_int_distribution<int> dist(0, global_vec.size());
+    std::generate(global_vec.begin(), global_vec.end(), [&dist, &reng] { return dist(reng); });
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+    taskDataPar->outputs_count.emplace_back(out.size());
+  }
+  auto testMpiTaskParallel =
+      std::make_shared<baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations<int, int>>(taskDataPar);
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    auto temp = testMpiTaskParallel->seq_proc(global_vec);
+    ASSERT_EQ(temp, out[0]);
+  }
+}
+TEST(mpi_baranov_a_num_of_orderly_violations_perf_test, test_task_run) {
+  const int count_size_vector = 10000000;
+  boost::mpi::communicator world;
+  std::vector<int> global_vec(count_size_vector);
+  std::vector<int> out(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    std::random_device rd;
+    std::default_random_engine reng(rd());
+    std::uniform_int_distribution<int> dist(0, global_vec.size());
+    std::generate(global_vec.begin(), global_vec.end(), [&dist, &reng] { return dist(reng); });
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+    taskDataPar->outputs_count.emplace_back(out.size());
+  }
+  auto testMpiTaskParallel =
+      std::make_shared<baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations<int, int>>(taskDataPar);
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank()
== 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    auto temp = testMpiTaskParallel->seq_proc(global_vec);
+    ASSERT_EQ(out[0], temp);
+  }
+}
diff --git a/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp b/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp
new file mode 100644
index 00000000000..dda48e2b4ee
--- /dev/null
+++ b/tasks/mpi/baranov_a_num_of_orderly_violations/src/source.cpp
@@ -0,0 +1,100 @@
+#include "mpi/baranov_a_num_of_orderly_violations/include/header.hpp"
+
+namespace baranov_a_num_of_orderly_violations_mpi {
+template <typename iotype, typename cntype>
+cntype num_of_orderly_violations<iotype, cntype>::seq_proc(std::vector<iotype> vec) {
+  cntype num = 0;
+  int n = vec.size();
+  for (int i = 0; i < n - 1; ++i) {
+    if (vec[i + 1] < vec[i]) {
+      num++;
+    }
+  }
+  return num;
+}
+template <typename iotype, typename cntype>
+bool num_of_orderly_violations<iotype, cntype>::pre_processing() {
+  internal_order_test();
+  int myid = world.rank();
+  int world_size = world.size();
+  int n;
+  if (myid == 0) {
+    n = taskData->inputs_count[0];
+    input_ = std::vector<iotype>(n + 1);
+    void* ptr_r = taskData->inputs[0];
+    void* ptr_d = input_.data();
+    memcpy(ptr_d, ptr_r,
+           sizeof(iotype) * n);  // there input_ is a vector of pure data not uint8 so we can scatter to loc_vectors
+    num_ = 0;
+  }
+  broadcast(world, n, 0);  // for each proc we calculate size and then scatter
+  int vec_send_size = n / world_size;
+  int overflow_size = n % world_size;
+  std::vector<int> send_counts(world_size, vec_send_size);
+  std::vector<int> displs(world_size, 0);
+  int loc_vec_size = 0;
+  if (myid == 0) {
+    for (int i = 0; i != world_size - 1; ++i) {
+      if (i < overflow_size) {
+        ++send_counts[i];
+      }
+      displs[i + 1] = ((send_counts[i] - 1) + displs[i]);
+      ++send_counts[i + 1];
+    }
+    loc_vec_size = send_counts[0];
+  } else {
+    if (myid < overflow_size) {
+      ++send_counts[myid];
+    }
+    ++send_counts[myid];
+    loc_vec_size = send_counts[myid];
+  }
+  loc_vec_.resize(loc_vec_size);
+  if (myid == 0) {
+    boost::mpi::scatterv(world, input_, send_counts, displs, loc_vec_.data(), loc_vec_size, 0);
+  } else {
+    boost::mpi::scatterv(world, loc_vec_.data(), loc_vec_size, 0);
+  }
+  my_loc_vec_size = loc_vec_size;
+  return true;
+}
+template <typename iotype, typename cntype>
+bool num_of_orderly_violations<iotype, cntype>::run() {
+  internal_order_test();
+  int loc_num = 0;
+  for (int i = 0; i < my_loc_vec_size - 1; ++i) {
+    if (loc_vec_[i + 1] < loc_vec_[i]) {
+      loc_num++;
+    }
+  }
+
+  reduce(world, loc_num, num_, std::plus<cntype>(), 0);
+  return true;
+}
+template <typename iotype, typename cntype>
+bool num_of_orderly_violations<iotype, cntype>::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    reinterpret_cast<cntype*>(taskData->outputs[0])[0] = num_;
+  }
+  return true;
+}
+template <typename iotype, typename cntype>
+bool num_of_orderly_violations<iotype, cntype>::validation() {
+  internal_order_test();
+  // Check count elements of output
+  if (world.rank() == 0) {
+    return (taskData->outputs_count[0] == 1 && taskData->inputs_count.size() == 1 && taskData->inputs_count[0] >= 0);
+  }
+  return true;
+}
+
+template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations<int, int>;
+
+template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations<double, int>;
+
+template class baranov_a_num_of_orderly_violations_mpi::num_of_orderly_violations<unsigned int, int>;
+}  // namespace baranov_a_num_of_orderly_violations_mpi
\ No newline at end of file
diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..385ff5877f2
--- /dev/null
+++
b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp @@ -0,0 +1,244 @@ +#include + +#include + +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp" + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100_with_equal_elements) { + boost::mpi::communicator world; + std::vector global_vec(100, 1); + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + 
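The double-typed tests in this file compare the parallel and reference outputs with ASSERT_NEAR rather than exact equality. A minimal illustration of why a tolerance is the right tool for floating-point results (self-contained, gtest only):

```
#include <gtest/gtest.h>

// Bit-exact comparison of doubles is fragile: 0.1 + 0.2 is not exactly 0.3
// in binary floating point, so results are compared with a tolerance instead.
TEST(floating_point_comparison_sketch, near_vs_exact) {
  const double sum = 0.1 + 0.2;
  EXPECT_NE(sum, 0.3);           // exact comparison fails
  EXPECT_NEAR(sum, 0.3, 1e-10);  // tolerance-based comparison passes
}
```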
ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_10000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_uneven_size_10001) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1001; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + 
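Every test here wires one input buffer and one output buffer into a TaskData with the same four emplace_back calls. A hypothetical helper could factor that out; make_task_data is our name, not part of the framework, and it assumes only the inputs/inputs_count/outputs/outputs_count layout used throughout this diff:

```
#include <cstdint>
#include <memory>
#include <vector>

#include "core/task/include/task.hpp"

// Hypothetical convenience helper: wires one input vector and one output
// vector into a TaskData exactly the way the tests above do by hand.
template <typename In, typename Out>
std::shared_ptr<ppc::core::TaskData> make_task_data(std::vector<In>& in, std::vector<Out>& out) {
  auto data = std::make_shared<ppc::core::TaskData>();
  data->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
  data->inputs_count.emplace_back(in.size());
  data->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
  data->outputs_count.emplace_back(out.size());
  return data;
}
```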
+ // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} + +TEST(beskhmelnova_k_most_different_neighbor_elements_mpi, Test_vector_size_100000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100000; + global_vec = beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(2); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential testMpiTaskSequential( + taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_out[0], global_out[0], 1e-10); + ASSERT_NEAR(reference_out[1], global_out[1], 1e-10); + } +} diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp new file mode 100644 index 00000000000..013a008c928 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp @@ -0,0 +1,53 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace beskhmelnova_k_most_different_neighbor_elements_mpi { + +template +std::vector getRandomVector(int sz); + +template +int position_of_first_neighbour_seq(std::vector vector); + +template +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + DataType res[2]; +}; + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool 
post_processing() override; + + private: + std::vector input_, local_input_; + int local_input_size; + DataType res[2]; + boost::mpi::communicator world; +}; +} // namespace beskhmelnova_k_most_different_neighbor_elements_mpi \ No newline at end of file diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..1f75b6a0218 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp @@ -0,0 +1,95 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp" + +TEST(mpi_beskhmelnova_k_most_different_neighbor_elements_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + int index = beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(global_vec); + ASSERT_EQ(global_vec[index], global_out[0]); + ASSERT_EQ(global_vec[index + 1], global_out[1]); + } +} + +TEST(mpi_beskhmelnova_k_most_different_neighbor_elements_perf_test, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_out(2); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 25000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init 
perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + int index = beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(global_vec); + ASSERT_EQ(global_vec[index], global_out[0]); + ASSERT_EQ(global_vec[index + 1], global_out[1]); + } +} diff --git a/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp new file mode 100644 index 00000000000..8a0d9d5a217 --- /dev/null +++ b/tasks/mpi/beskhmelnova_k_most_different_neighbor_elements/src/mpi.cpp @@ -0,0 +1,189 @@ +#include "mpi/beskhmelnova_k_most_different_neighbor_elements/include/mpi.hpp" + +template +std::vector beskhmelnova_k_most_different_neighbor_elements_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +template +int beskhmelnova_k_most_different_neighbor_elements_mpi::position_of_first_neighbour_seq(std::vector vector) { + int n = vector.size(); + if (n == 0 || n == 1) return -1; + DataType max_dif = abs(vector[0] - vector[1]); + DataType dif; + int index = 0; + for (int i = 1; i < n - 1; i++) { + dif = abs(vector[i] - vector[i + 1]); + if (dif > max_dif) { + max_dif = dif; + index = i; + } + } + return index; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input + int n = taskData->inputs_count[0]; + input_ = std::vector(n); + void* ptr_r = taskData->inputs[0]; + void* ptr_d = input_.data(); + memcpy(ptr_d, ptr_r, sizeof(DataType) * n); + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 2 && taskData->inputs_count[0] > 1; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::run() { + internal_order_test(); + int index = position_of_first_neighbour_seq(input_); + if (index == -1) { + res[0] = -1; + res[1] = -1; + return true; + } + res[0] = input_[index]; + res[1] = input_[index + 1]; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + reinterpret_cast(taskData->outputs[0])[1] = res[1]; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + broadcast(world, delta, 0); + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + for (int process = 1; process < world.size(); process++) { + world.send(process, 0, input_.data() + process * delta, delta); + } + } + local_input_ = std::vector(delta); + if (world.rank() == 0) { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + 
world.recv(0, 0, local_input_.data(), delta); + } + res[0] = 0; + res[1] = 1; + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 2 && taskData->inputs_count[0] > 1; + } + return true; +} + +// Struct of 2 most different neighbour elements +template +struct NeighborDifference { + DataType first; + DataType second; + DataType dif; +}; + +template +NeighborDifference find_max_difference(const std::vector& vector) { + int n = vector.size(); + if (n == 0 || n == 1) return {1, 1, -1}; + NeighborDifference max_dif = {vector[0], vector[1], std::abs(vector[1] - vector[0])}; + for (int i = 1; i < n - 1; ++i) { + DataType dif = std::abs(vector[i + 1] - vector[i]); + if (dif > max_dif.dif) { + max_dif = {vector[i], vector[i + 1], dif}; + } + } + return max_dif; +} + +template +void reduce_max_difference(const DataType* in_data, DataType* inout_data, int* len, MPI_Datatype* dptr) { + if (in_data[2] > inout_data[2]) { + inout_data[0] = in_data[0]; + inout_data[1] = in_data[1]; + inout_data[2] = in_data[2]; + } +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::run() { + internal_order_test(); + NeighborDifference local_result = find_max_difference(local_input_); + DataType last_element = local_input_.back(); + DataType first_element = local_input_.front(); + DataType next_first_element = 0; + DataType prev_last_element = 0; + if (world.rank() < world.size() - 1) { + world.send(world.rank() + 1, 0, last_element); + world.recv(world.rank() + 1, 0, next_first_element); + } + if (world.rank() > 0) { + world.send(world.rank() - 1, 0, first_element); + world.recv(world.rank() - 1, 0, prev_last_element); + } + if (world.rank() > 0) { + DataType dif = std::abs(first_element - prev_last_element); + if (dif > local_result.dif) local_result = {prev_last_element, first_element, dif}; + } + if (world.rank() < world.size() - 1) { + DataType dif = std::abs(next_first_element - last_element); + if (dif > local_result.dif) local_result = {last_element, next_first_element, dif}; + } + DataType local_data[3] = {local_result.first, local_result.second, local_result.dif}; + DataType global_data[3] = {0, 0, 0}; + MPI_Op custom_op; + MPI_Op_create(reinterpret_cast(&reduce_max_difference), 1, &custom_op); + if (typeid(DataType) == typeid(int)) MPI_Reduce(local_data, global_data, 3, MPI_INT, custom_op, 0, MPI_COMM_WORLD); + if (typeid(DataType) == typeid(double)) + MPI_Reduce(local_data, global_data, 3, MPI_DOUBLE, custom_op, 0, MPI_COMM_WORLD); + if (world.rank() == 0) { + res[0] = global_data[0]; + res[1] = global_data[1]; + } + MPI_Op_free(&custom_op); + return true; +} + +template +bool beskhmelnova_k_most_different_neighbor_elements_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res[0]; + reinterpret_cast(taskData->outputs[0])[1] = res[1]; + } + return true; +} diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..f5917c25762 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,336 @@ +#include + +#include +#include +#include + +#include "mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + 
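The beskhmelnova run() above folds per-rank {first, second, difference} triples with a user-defined reduction registered through MPI_Op_create. A minimal self-contained sketch of that pattern (plain MPI; max_pair and the {value, payload} layout are ours, not the task's):

```
#include <mpi.h>

// User-defined reduction: keep whichever {value, payload} pair has the
// larger value, the same shape of combine step as reduce_max_difference.
void max_pair(void* in, void* inout, int* len, MPI_Datatype* /*type*/) {
  int* a = static_cast<int*>(in);
  int* b = static_cast<int*>(inout);
  for (int i = 0; i + 1 < *len; i += 2) {
    if (a[i] > b[i]) {
      b[i] = a[i];
      b[i + 1] = a[i + 1];
    }
  }
}

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  int local[2] = {rank * rank, rank};  // {value, payload}
  int best[2] = {0, 0};
  MPI_Op op;
  MPI_Op_create(&max_pair, /*commute=*/1, &op);
  MPI_Reduce(local, best, 2, MPI_INT, op, 0, MPI_COMM_WORLD);  // root gets the max pair
  MPI_Op_free(&op);
  MPI_Finalize();
  return 0;
}
```

For int pairs specifically, the built-in MPI_MAXLOC operation over MPI_2INT covers the same use case without a custom op.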
+std::vector getRandomVector(size_t size) { + auto dev = std::random_device(); + auto gen = std::mt19937(dev()); + auto dist = std::uniform_int_distribution(-100'000, 100'000); + auto result = std::vector(size); + for (auto &val : result) { + val = dist(gen); + } + return result; +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, random_input) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(100'000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, large_random_input) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(1'000'000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, 
input_size_less_than_two_fails_validation) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_FALSE(seq_task.validation()); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, output_size_not_equal_one_fails_validation) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = getRandomVector(1000); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + if (world.rank() == 0) { + ASSERT_FALSE(par_task.validation()); + } + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_FALSE(seq_task.validation()); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, all_elements_are_equal) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if 
(world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, sign_change_at_borders_of_two_chunks) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}; + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); + seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, sign_change_at_borders_of_three_chunks) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector{1, 1, 1, -1, -1, -1, 1, 1, 1}; + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = chernykh_a_num_of_alternations_signs_mpi::ParallelTask(par_task_data); + + ASSERT_TRUE(par_task.validation()); + ASSERT_TRUE(par_task.pre_processing()); + ASSERT_TRUE(par_task.run()); + ASSERT_TRUE(par_task.post_processing()); + + if (world.rank() == 0) { + // Create data + auto seq_output = std::vector(1, 0); + + // Create TaskData + auto seq_task_data = std::make_shared(); + seq_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + seq_task_data->inputs_count.emplace_back(input.size()); 
+ seq_task_data->outputs.emplace_back(reinterpret_cast(seq_output.data())); + seq_task_data->outputs_count.emplace_back(seq_output.size()); + + // Create Task + auto seq_task = chernykh_a_num_of_alternations_signs_mpi::SequentialTask(seq_task_data); + + ASSERT_TRUE(seq_task.validation()); + ASSERT_TRUE(seq_task.pre_processing()); + ASSERT_TRUE(seq_task.run()); + ASSERT_TRUE(seq_task.post_processing()); + ASSERT_EQ(seq_output[0], par_output[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..727cd9e1869 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace chernykh_a_num_of_alternations_signs_mpi { + +class SequentialTask : public ppc::core::Task { + public: + explicit SequentialTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input; + int result{}; +}; + +class ParallelTask : public ppc::core::Task { + public: + explicit ParallelTask(std::shared_ptr task_data) : Task(std::move(task_data)) {} + bool validation() override; + bool pre_processing() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input, chunk; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace chernykh_a_num_of_alternations_signs_mpi \ No newline at end of file diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..6028f113f9f --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,359 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_10000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + 
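These perf tests feed all-zero vectors, so the expected alternation count is zero. For reference, the quantity being measured, adjacent pairs whose signs differ with zero treated as non-negative, can be pinned down in a few lines (a sketch, not the task's code; the equivalent XOR-based check appears in ops_mpi.cpp later in this diff):

```
#include <cassert>
#include <cstddef>
#include <vector>

// Number of sign alternations: adjacent pairs whose signs differ.
int count_alternations(const std::vector<int>& v) {
  int count = 0;
  for (std::size_t i = 0; i + 1 < v.size(); ++i) {
    if ((v[i] < 0) != (v[i + 1] < 0)) {
      ++count;
    }
  }
  return count;
}

int main() {
  assert(count_alternations({1, -2, 3, 4}) == 2);  // +,-,+,+ alternates twice
  assert(count_alternations({0, 0, 0}) == 0);      // all-zero input: no alternations
  return 0;
}
```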
+TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_100000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(100'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_1000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_pipeline_run_with_input_size_10000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + 
ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->pipeline_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_10000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_100000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(100'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_1000000) { + auto world = 
boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(1'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} + +TEST(chernykh_a_num_of_alternations_signs_mpi, test_task_run_with_input_size_10000000) { + auto world = boost::mpi::communicator(); + + // Create data + auto input = std::vector(); + auto par_output = std::vector(1, 0); + + // Create TaskData + auto par_task_data = std::make_shared(); + if (world.rank() == 0) { + input = std::vector(10'000'000, 0); + par_task_data->inputs.emplace_back(reinterpret_cast(input.data())); + par_task_data->inputs_count.emplace_back(input.size()); + par_task_data->outputs.emplace_back(reinterpret_cast(par_output.data())); + par_task_data->outputs_count.emplace_back(par_output.size()); + } + + // Create Task + auto par_task = std::make_shared(par_task_data); + + ASSERT_TRUE(par_task->validation()); + ASSERT_TRUE(par_task->pre_processing()); + ASSERT_TRUE(par_task->run()); + ASSERT_TRUE(par_task->post_processing()); + + // Create PerfAttributes + auto perf_attributes = std::make_shared(); + perf_attributes->num_running = 10; + auto start = boost::mpi::timer(); + perf_attributes->current_timer = [&] { return start.elapsed(); }; + + // Create PerfResults + auto perf_results = std::make_shared(); + + // Create Perf analyzer + auto perf_analyzer = std::make_shared(par_task); + + perf_analyzer->task_run(perf_attributes, perf_results); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_results); + ASSERT_EQ(0, par_output[0]); + } +} diff --git a/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..7826137dc30 --- /dev/null +++ b/tasks/mpi/chernykh_a_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1,87 @@ +#include "mpi/chernykh_a_num_of_alternations_signs/include/ops_mpi.hpp" + +#include +#include + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::validation() { + internal_order_test(); + return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::pre_processing() { + internal_order_test(); + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + auto input_size = taskData->inputs_count[0]; + input = std::vector(input_ptr, input_ptr + input_size); + result = 0; + 
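pre_processing above rebuilds a typed vector from the framework's byte-oriented buffer. A small sketch of that round trip, assuming only the convention visible throughout this diff (TaskData buffers are passed as uint8_t* and reinterpreted by agreement between producer and consumer):

```
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Producer side: expose a typed vector as a type-erased byte pointer.
  std::vector<int> source = {3, -1, 4};
  uint8_t* raw = reinterpret_cast<uint8_t*>(source.data());

  // Consumer side: reinterpret back to the agreed element type and copy out.
  int* typed = reinterpret_cast<int*>(raw);
  std::vector<int> copy(typed, typed + source.size());
  assert(copy == source);
  return 0;
}
```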
return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::run() { + internal_order_test(); + auto input_size = input.size(); + for (size_t i = 0; i < input_size - 1; i++) { + if ((input[i] ^ input[i + 1]) < 0) { + result++; + } + } + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::SequentialTask::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result; + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto input_size = taskData->inputs_count[0]; + auto chunk_size = input_size / world.size(); + auto* input_ptr = reinterpret_cast(taskData->inputs[0]); + input = std::vector(input_ptr, input_ptr + input_size); + chunk = std::vector(input_ptr, input_ptr + chunk_size + uint32_t(world.size() > 1)); + + for (int proc = 1; proc < world.size(); proc++) { + auto start = proc * chunk_size; + auto size = (proc == world.size() - 1) ? input_size - start : chunk_size + 1; + world.send(proc, 0, std::vector(input_ptr + start, input_ptr + start + size)); + } + } else { + world.recv(0, 0, chunk); + } + + result = 0; + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::run() { + internal_order_test(); + auto chunk_result = 0; + auto chunk_size = chunk.size(); + for (size_t i = 0; i < chunk_size - 1; i++) { + if ((chunk[i] ^ chunk[i + 1]) < 0) { + chunk_result++; + } + } + boost::mpi::reduce(world, chunk_result, result, std::plus(), 0); + return true; +} + +bool chernykh_a_num_of_alternations_signs_mpi::ParallelTask::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = result; + } + return true; +} diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..521e385b149 --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,255 @@ +#include + +#include +#include +#include + +#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp" + +TEST(chistov_a_sum_of_matrix_elements, test_wrong_validation_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(2, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int n = 3; + const int m = 4; + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel TestMPITaskParallel(taskDataPar); + ASSERT_EQ(TestMPITaskParallel.validation(), false); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_int_sum_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 3; + const int m = 4; + + if (world.rank() == 0) { + global_matrix = 
chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + chistov_a_sum_of_matrix_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_double_sum_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 3; + const int m = 4; + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + chistov_a_sum_of_matrix_elements::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_NEAR(reference_sum[0], global_sum[0], 1e-6); + } +} + +TEST(chistov_a_sum_of_matrix_elements, test_with_empty_matrix_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + const int n = 0; + const int m = 0; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); 
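chernykh's ParallelTask::pre_processing above sends every non-final rank chunk_size + 1 elements, so each chunk overlaps its successor by one element and an alternation sitting on a chunk border is counted exactly once. A sequential sketch of that invariant, using the same XOR sign test as run() (the concrete 9-element split is ours):

```
#include <cassert>
#include <cstddef>
#include <vector>

// Count sign alternations in one chunk with the XOR test from run():
// x ^ y is negative exactly when x and y have different sign bits.
int chunk_alternations(const std::vector<int>& chunk) {
  int count = 0;
  for (std::size_t i = 0; i + 1 < chunk.size(); ++i) {
    if ((chunk[i] ^ chunk[i + 1]) < 0) ++count;
  }
  return count;
}

int main() {
  const std::vector<int> input = {1, 1, -1, -1, 1, 1, -1, -1, 1};
  // 3 ranks, chunk_size = 3: non-final chunks carry one trailing overlap
  // element, mirroring the chunk_size + 1 sends in pre_processing.
  const std::vector<std::vector<int>> chunks = {
      {input[0], input[1], input[2], input[3]},  // overlap: input[3]
      {input[3], input[4], input[5], input[6]},  // overlap: input[6]
      {input[6], input[7], input[8]},            // last chunk: remainder only
  };
  int total = 0;
  for (const auto& c : chunks) total += chunk_alternations(c);
  assert(total == chunk_alternations(input));  // border alternations counted once
  return 0;
}
```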
+ taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_sum[0], 0); + } +} + +TEST(chistov_a_sum_of_matrix_elements, returns_empty_matrix_when_small_n_or_m_) { + auto matrix1 = chistov_a_sum_of_matrix_elements::get_random_matrix(0, 1); + EXPECT_TRUE(matrix1.empty()); + auto matrix2 = chistov_a_sum_of_matrix_elements::get_random_matrix(1, 0); + EXPECT_TRUE(matrix2.empty()); +} + +TEST(chistov_a_sum_of_matrix_elements, test_with_large_matrix_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + const int n = 1000; + const int m = 1000; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, short_and_thick_test_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + const int n = 1000000; + const int m = 1; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(chistov_a_sum_of_matrix_elements, long_and_thin_test_parallel) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + + const int n = 1; + const int m = 100000; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = 
chistov_a_sum_of_matrix_elements::get_random_matrix(n, m); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs_count.emplace_back(n); + taskDataPar->inputs_count.emplace_back(m); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + chistov_a_sum_of_matrix_elements::TestMPITaskParallel testMPITaskParallel(taskDataPar); + ASSERT_EQ(testMPITaskParallel.validation(), true); + testMPITaskParallel.pre_processing(); + testMPITaskParallel.run(); + testMPITaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + for (int val : global_matrix) { + reference_sum[0] += val; + } + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..0c36b36a86c --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp @@ -0,0 +1,74 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace chistov_a_sum_of_matrix_elements { +template +std::vector get_random_matrix(const int n, const int m) { + if (n <= 0 || m <= 0) { + return std::vector(); + } + + std::vector matrix(n * m); + for (int i = 0; i < n * m; ++i) { + matrix[i] = static_cast((std::rand() % 201) - 100); + } + return matrix; +} + +template +T classic_way(const std::vector matrix, const int n, const int m) { + T result = 0; + for (int i = 0; i < n; ++i) { + for (int j = 0; j < m; ++j) { + result += matrix[i * m + j]; + } + } + return result; +} + +template +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + T res{}; +}; + +template +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + T res{}; + int n{}; + int m{}; + boost::mpi::communicator world; +}; + +} // namespace chistov_a_sum_of_matrix_elements diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d78e59e527d --- /dev/null +++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp" + +TEST(chistov_a_sum_of_matrix_elements, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_sum(1, 0); + const int n = 4000; + const int m = 4000; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(n * m, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + 
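get_random_matrix and classic_way above keep the n×m matrix in one flat row-major vector, which is why these tests can check the task's answer with a plain accumulate over the buffer. A tiny illustration of the layout (values ours):

```
#include <cassert>
#include <numeric>
#include <vector>

int main() {
  const int n = 2, m = 3;              // 2 rows, 3 columns
  std::vector<int> matrix = {1, 2, 3,  // row 0
                             4, 5, 6}; // row 1
  // Row-major access: element (i, j) lives at index i * m + j.
  assert(matrix[1 * m + 2] == 6);

  // The nested-loop sum (classic_way) equals a flat accumulate, because
  // the flat buffer is just the rows laid out end to end.
  int nested = 0;
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < m; ++j) nested += matrix[i * m + j];
  assert(nested == std::accumulate(matrix.begin(), matrix.end(), 0));
  return 0;
}
```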
diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp
new file mode 100644
index 00000000000..0c36b36a86c
--- /dev/null
+++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp
@@ -0,0 +1,74 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <cstdlib>
+#include <memory>
+#include <numeric>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace chistov_a_sum_of_matrix_elements {
+template <typename T>
+std::vector<T> get_random_matrix(const int n, const int m) {
+  if (n <= 0 || m <= 0) {
+    return std::vector<T>();
+  }
+
+  std::vector<T> matrix(n * m);
+  for (int i = 0; i < n * m; ++i) {
+    matrix[i] = static_cast<T>((std::rand() % 201) - 100);
+  }
+  return matrix;
+}
+
+template <typename T>
+T classic_way(const std::vector<T> matrix, const int n, const int m) {
+  T result = 0;
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < m; ++j) {
+      result += matrix[i * m + j];
+    }
+  }
+  return result;
+}
+
+template <typename T>
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<T> input_;
+  T res{};
+};
+
+template <typename T>
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<T> input_, local_input_;
+  T res{};
+  int n{};
+  int m{};
+  boost::mpi::communicator world;
+};
+
+}  // namespace chistov_a_sum_of_matrix_elements
diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..d78e59e527d
--- /dev/null
+++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp
@@ -0,0 +1,89 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp"
+
+TEST(chistov_a_sum_of_matrix_elements, test_pipeline_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_matrix;
+  std::vector<int> global_sum(1, 0);
+  const int n = 4000;
+  const int m = 4000;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    global_matrix = std::vector<int>(n * m, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(global_matrix.size());
+    taskDataPar->inputs_count.emplace_back(n);
+    taskDataPar->inputs_count.emplace_back(m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<chistov_a_sum_of_matrix_elements::TestMPITaskParallel<int>>(taskDataPar);
+
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(std::accumulate(global_matrix.begin(), global_matrix.end(), 0), global_sum[0]);
+  }
+}
+
+TEST(chistov_a_sum_of_matrix_elements, test_task_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_matrix;
+  std::vector<int> global_sum(1, 0);
+  const int n = 6000;
+  const int m = 6000;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    global_matrix = std::vector<int>(n * m, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(global_matrix.size());
+    taskDataPar->inputs_count.emplace_back(n);
+    taskDataPar->inputs_count.emplace_back(m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<chistov_a_sum_of_matrix_elements::TestMPITaskParallel<int>>(taskDataPar);
+
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(std::accumulate(global_matrix.begin(), global_matrix.end(), 0), global_sum[0]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp
new file mode 100644
index 00000000000..f5b9d6cfbc4
--- /dev/null
+++ b/tasks/mpi/chistov_a_sum_of_matrix_elements/src/ops_mpi.cpp
@@ -0,0 +1,116 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/chistov_a_sum_of_matrix_elements/include/ops_mpi.hpp"
+
+namespace chistov_a_sum_of_matrix_elements {
+
+template <typename T>
+bool TestMPITaskSequential<T>::pre_processing() {
+  internal_order_test();
+
+  T* tmp_ptr = reinterpret_cast<T*>(taskData->inputs[0]);
+  input_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]);
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskSequential<T>::validation() {
+  internal_order_test();
+  return taskData->outputs_count[0] == 1;
+}
+
+template <typename T>
+bool TestMPITaskSequential<T>::run() {
+  internal_order_test();
+
+  res = std::accumulate(input_.begin(), input_.end(), T{});  // T{} keeps the accumulator in T, not int
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskSequential<T>::post_processing() {
+  internal_order_test();
+
+  reinterpret_cast<T*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskParallel<T>::pre_processing() {
+  internal_order_test();
+
+  int delta1 = 0;
+  int delta2 = 0;
+
+  if (world.rank() == 0) {
+    n = static_cast<int>(taskData->inputs_count[1]);
+    m = static_cast<int>(taskData->inputs_count[2]);
+    int total_elements = n * m;
+    delta1 = total_elements / world.size();
+    delta2 = total_elements % world.size();
+  }
+
+  boost::mpi::broadcast(world, delta1, 0);
+  boost::mpi::broadcast(world, delta2, 0);
+
+  if (world.rank() == 0) {
+    input_ = std::vector<T>(n * m);
+    auto* tmp_ptr = reinterpret_cast<T*>(taskData->inputs[0]);
+    for (int i = 0; i < static_cast<int>(taskData->inputs_count[0]); i++) {
+      input_[i] = tmp_ptr[i];
+    }
+
+    int start_index = delta1 + (delta2 > 0 ? 1 : 0);
+    for (int proc = 1; proc < world.size(); proc++) {
+      int current_delta = delta1 + (proc < delta2 ? 1 : 0);
+      world.send(proc, 0, input_.data() + start_index, current_delta);
+      start_index += current_delta;
+    }
+  }
+
+  int local_size = delta1 + (world.rank() < delta2 ? 1 : 0);
+  local_input_ = std::vector<T>(local_size);
+
+  if (world.rank() == 0) {
+    local_input_ = std::vector<T>(input_.begin(), input_.begin() + local_size);
+  } else {
+    world.recv(0, 0, local_input_.data(), local_size);
+  }
+
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskParallel<T>::validation() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    return (taskData->outputs_count[0] == 1 && !(taskData->inputs.empty()));
+  }
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskParallel<T>::run() {
+  internal_order_test();
+
+  T local_res = std::accumulate(local_input_.begin(), local_input_.end(), T{});
+  reduce(world, local_res, res, std::plus<T>(), 0);
+
+  return true;
+}
+
+template <typename T>
+bool TestMPITaskParallel<T>::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<T*>(taskData->outputs[0])[0] = res;
+  }
+  return true;
+}
+
+template class TestMPITaskSequential<int>;
+template class TestMPITaskSequential<double>;
+template class TestMPITaskParallel<int>;
+template class TestMPITaskParallel<double>;
+
+}  // namespace chistov_a_sum_of_matrix_elements
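Editor's note: `pre_processing()` above hand-rolls a block distribution with point-to-point `send`/`recv`, giving the first `total % size` ranks one extra element. The same split can be expressed with the `boost::mpi::scatterv` collective; a minimal standalone sketch (assuming a Boost version that ships `scatterv`, not the PR's code):

```
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <iostream>
#include <numeric>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int total = 10;  // n * m in the task's terms
  std::vector<int> sizes(world.size());
  for (int proc = 0; proc < world.size(); ++proc) {
    // first (total % size) ranks take one extra element, as in pre_processing()
    sizes[proc] = total / world.size() + (proc < total % world.size() ? 1 : 0);
  }

  std::vector<int> data;
  if (world.rank() == 0) data.assign(total, 1);

  std::vector<int> local(sizes[world.rank()]);
  if (world.rank() == 0) {
    boost::mpi::scatterv(world, data, sizes, local.data(), 0);
  } else {
    boost::mpi::scatterv(world, local.data(), sizes[world.rank()], 0);
  }

  int local_sum = std::accumulate(local.begin(), local.end(), 0);
  int global_sum = 0;
  boost::mpi::reduce(world, local_sum, global_sum, std::plus<int>(), 0);
  if (world.rank() == 0) std::cout << "sum = " << global_sum << '\n';  // prints 10
  return 0;
}
```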
diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..e27d4e974be
--- /dev/null
+++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
@@ -0,0 +1,276 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp"
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, EmptyMatrixTest) {
+  boost::mpi::communicator world;
+
+  int cols = 0;
+  int rows = 0;
+
+  // Create data
+  std::vector<int> matrix = {};
+  std::vector<int> expres_par(cols, 0);
+  std::vector<int> ans = {};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(expres_seq.size());
+
+    // Create Task
+    drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(expres_par, expres_seq);
+  }
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, RandomMatrixTest) {
+  boost::mpi::communicator world;
+
+  int cols = 200;
+  int rows = 500;
+
+  // Create data
+  std::vector<int> matrix = drozhdinov_d_sum_cols_matrix_mpi::getRandomVector(cols * rows);
+  std::vector<int> expres_par(cols, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(expres_seq.size());
+
+    // Create Task
+    drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(expres_par, expres_seq);
+  }
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest1) {
+  boost::mpi::communicator world;
+
+  int cols = 2;
+  int rows = 2;
+
+  // Create data
+  std::vector<int> matrix = {1, 0, 2, 1};
+  std::vector<int> expres_par(cols, 0);
+  std::vector<int> ans = {3, 1};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(expres_seq.size());
+
+    // Create Task
+    drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(expres_par, expres_seq);
+  }
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest2) {
+  boost::mpi::communicator world;
+
+  int cols = 1000;
+  int rows = 1000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres_par(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(expres_seq.size());
+
+    // Create Task
+    drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(expres_par, expres_seq);
+  }
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_mpi, ParallelTest3) {
+  boost::mpi::communicator world;
+
+  int cols = 2000;
+  int rows = 2000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres_par(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_par.data()));
+    taskDataPar->outputs_count.emplace_back(expres_par.size());
+  }
+
+  drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> expres_seq(cols, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataSeq->inputs_count.emplace_back(matrix.size());
+    taskDataSeq->inputs_count.emplace_back(cols);
+    taskDataSeq->inputs_count.emplace_back(rows);
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres_seq.data()));
+    taskDataSeq->outputs_count.emplace_back(expres_seq.size());
+
+    // Create Task
+    drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(expres_par, expres_seq);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp
new file mode 100644
index 00000000000..7f3076fecd4
--- /dev/null
+++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp
@@ -0,0 +1,52 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace drozhdinov_d_sum_cols_matrix_mpi {
+
+std::vector<int> getRandomVector(int sz);
+int makeLinCoords(int x, int y, int xSize);
+std::vector<int> calcMatSumSeq(const std::vector<int>& matrix, int xSize, int ySize, int fromX, int toX);
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input_;
+  std::vector<int> res;
+  int cols{};
+  int rows{};
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input_;
+  std::vector<int> res;
+  int cols{};
+  int rows{};
+  boost::mpi::communicator world;
+};
+
+}  // namespace drozhdinov_d_sum_cols_matrix_mpi
\ No newline at end of file
diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp
new file mode 100644
index 00000000000..1bafbed9243
--- /dev/null
+++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp
@@ -0,0 +1,106 @@
+// Copyright 2023 Nesterov Alexander
+// drozhdinov_d_sum_cols_matrix perf
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp"
+
+TEST(drozhdinov_d_sum_cols_matrix, test_pipeline_run) {
+  boost::mpi::communicator world;
+
+  int cols = 5000;
+  int rows = 5000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+    taskDataPar->outputs_count.emplace_back(expres.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(expres, ans);
+  }
+}
+
+TEST(drozhdinov_d_sum_cols_matrix, test_task_run) {
+  boost::mpi::communicator world;
+  int cols = 5000;
+  int rows = 5000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+    taskDataPar->inputs_count.emplace_back(matrix.size());
+    taskDataPar->inputs_count.emplace_back(cols);
+    taskDataPar->inputs_count.emplace_back(rows);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+    taskDataPar->outputs_count.emplace_back(expres.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(expres, ans);
+  }
+}
\ No newline at end of file
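Editor's note: the source file below stores the matrix flattened in row-major order, and `makeLinCoords(x, y, xSize)` maps a column index `x` and row index `y` to the flat index `y * xSize + x`. A tiny standalone check of that mapping and of the column sums `calcMatSumSeq` produces (hypothetical values, not the PR's code):

```
#include <cassert>
#include <vector>

int makeLinCoords(int x, int y, int xSize) { return y * xSize + x; }

int main() {
  const int xSize = 3;  // columns
  // 2x3 matrix laid out row by row: {row 0, row 1}
  std::vector<int> m = {1, 2, 3,
                        4, 5, 6};
  assert(m[makeLinCoords(0, 0, xSize)] == 1);  // top-left
  assert(m[makeLinCoords(2, 1, xSize)] == 6);  // bottom-right
  // column 0 sum as calcMatSumSeq computes it: 1 + 4
  int col0 = m[makeLinCoords(0, 0, xSize)] + m[makeLinCoords(0, 1, xSize)];
  assert(col0 == 5);
  return 0;
}
```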
diff --git a/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp b/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..45ba245b78c
--- /dev/null
+++ b/tasks/mpi/drozhdinov_d_sum_cols_matrix/src/ops_mpi.cpp
@@ -0,0 +1,134 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/drozhdinov_d_sum_cols_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <functional>
+#include <random>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+std::vector<int> drozhdinov_d_sum_cols_matrix_mpi::getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = static_cast<int>(gen() % 100) - 49;  // cast first so the subtraction happens in int
+  }
+  return vec;
+}
+
+int drozhdinov_d_sum_cols_matrix_mpi::makeLinCoords(int x, int y, int xSize) { return y * xSize + x; }
+
+std::vector<int> drozhdinov_d_sum_cols_matrix_mpi::calcMatSumSeq(const std::vector<int>& matrix, int xSize, int ySize,
+                                                                 int fromX, int toX) {
+  std::vector<int> result;
+  for (int x = fromX; x < toX; x++) {
+    int columnSum = 0;
+    for (int y = 0; y < ySize; y++) {
+      int linearizedCoordinate = makeLinCoords(x, y, xSize);
+      columnSum += matrix[linearizedCoordinate];
+    }
+    result.push_back(columnSum);
+  }
+  return result;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  auto* ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = ptr[i];
+  }
+  cols = taskData->inputs_count[1];
+  rows = taskData->inputs_count[2];
+  res = std::vector<int>(cols, 0);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[1] == taskData->outputs_count[0];
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  res = calcMatSumSeq(input_, cols, rows, 0, cols);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  for (int i = 0; i < cols; i++) {
+    reinterpret_cast<int*>(taskData->outputs[0])[i] = res[i];
+  }
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[2];
+    cols = taskData->inputs_count[1];
+  }
+  broadcast(world, cols, 0);
+  broadcast(world, rows, 0);
+  // the whole matrix is broadcast below, so no per-process delta is needed here
+  if (world.rank() == 0) {
+    // Init vectors
+    input_ = std::vector<int>(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+    for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+  } else {
+    input_ = std::vector<int>(cols * rows);
+  }
+  broadcast(world, input_.data(), cols * rows, 0);
+  // Init value for output
+  res = std::vector<int>(cols, 0);
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->outputs_count[0] == taskData->inputs_count[1];
+  }
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  int delta = cols / world.size();
+  delta += (cols % world.size() == 0) ? 0 : 1;
+  int lastCol = std::min(cols, delta * (world.rank() + 1));
+  auto localSum = calcMatSumSeq(input_, cols, rows, delta * world.rank(), lastCol);
+  localSum.resize(delta);  // zero-pad so every rank contributes exactly `delta` values
+  if (world.rank() == 0) {
+    std::vector<int> localRes(cols + delta * world.size());
+    std::vector<int> sizes(world.size(), delta);
+    boost::mpi::gatherv(world, localSum.data(), localSum.size(), localRes.data(), sizes, 0);
+    localRes.resize(cols);
+    res = localRes;
+  } else {
+    boost::mpi::gatherv(world, localSum.data(), localSum.size(), 0);
+  }
+  return true;
+}
+
+bool drozhdinov_d_sum_cols_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    for (int i = 0; i < cols; i++) {
+      reinterpret_cast<int*>(taskData->outputs[0])[i] = res[i];
+    }
+  }
+  return true;
+}
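Editor's note: `TestMPITaskParallel::run()` above uses a padded-gatherv trick: every rank sends exactly `delta` values (ranks past the last real column pad with zeros), and rank 0 truncates back to `cols`. A minimal standalone sketch of the same idea (hypothetical data, not the PR's code):

```
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <algorithm>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  const int cols = 5;
  int delta = cols / world.size() + (cols % world.size() == 0 ? 0 : 1);
  int from = delta * world.rank();
  int to = std::min(cols, delta * (world.rank() + 1));

  // pretend each column sum equals its column index; trailing ranks may own nothing
  std::vector<int> local;
  for (int c = from; c < to; ++c) local.push_back(c);
  local.resize(delta);  // zero-pad to a fixed chunk size

  if (world.rank() == 0) {
    std::vector<int> all(delta * world.size());
    std::vector<int> sizes(world.size(), delta);
    boost::mpi::gatherv(world, local.data(), delta, all.data(), sizes, 0);
    all.resize(cols);  // padding only ever lands after the real columns
    for (int v : all) std::cout << v << ' ';
    std::cout << '\n';  // 0 1 2 3 4 for any process count
  } else {
    boost::mpi::gatherv(world, local.data(), delta, 0);
  }
  return 0;
}
```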
diff --git a/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp b/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..9c1db410fc9
--- /dev/null
+++ b/tasks/mpi/ermolaev_v_min_matrix/func_tests/main.cpp
@@ -0,0 +1,224 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <climits>
+#include <vector>
+
+#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp"
+
+TEST(ermolaev_v_min_matrix_mpi, Test_Min_10x10) {
+  const int count_rows = 10;
+  const int count_columns = 10;
+  const int gen_min = -500;
+  const int gen_max = 500;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_min(1, INT_MAX);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataSeq->inputs_count.emplace_back(count_rows);
+    taskDataSeq->inputs_count.emplace_back(count_columns);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_min.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_min.size());
+
+    // Create Task
+    ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_min[0], global_min[0]);
+  }
+}
+
+TEST(ermolaev_v_min_matrix_mpi, Test_Min_10x100) {
+  const int count_rows = 10;
+  const int count_columns = 100;
+  const int gen_min = -500;
+  const int gen_max = 500;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_min(1, INT_MAX);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataSeq->inputs_count.emplace_back(count_rows);
+    taskDataSeq->inputs_count.emplace_back(count_columns);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_min.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_min.size());
+
+    // Create Task
+    ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_min[0], global_min[0]);
+  }
+}
+
+TEST(ermolaev_v_min_matrix_mpi, Test_Min_100x10) {
+  const int count_rows = 100;
+  const int count_columns = 10;
+  const int gen_min = -500;
+  const int gen_max = 500;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_min(1, INT_MAX);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataSeq->inputs_count.emplace_back(count_rows);
+    taskDataSeq->inputs_count.emplace_back(count_columns);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_min.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_min.size());
+
+    // Create Task
+    ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_min[0], global_min[0]);
+  }
+}
+
+TEST(ermolaev_v_min_matrix_mpi, Test_Min_100x100) {
+  const int count_rows = 100;
+  const int count_columns = 100;
+  const int gen_min = -500;
+  const int gen_max = 500;
+
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  ermolaev_v_min_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel.validation(), true);
+  testMpiTaskParallel.pre_processing();
+  testMpiTaskParallel.run();
+  testMpiTaskParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_min(1, INT_MAX);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataSeq->inputs_count.emplace_back(count_rows);
+    taskDataSeq->inputs_count.emplace_back(count_columns);
+
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_min.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_min.size());
+
+    // Create Task
+    ermolaev_v_min_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+
+    ASSERT_EQ(reference_min[0], global_min[0]);
+  }
+}
diff --git a/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp b/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp
new file mode 100644
index 00000000000..54e61fbfa67
--- /dev/null
+++ b/tasks/mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp
@@ -0,0 +1,48 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace ermolaev_v_min_matrix_mpi {
+
+std::vector<int> getRandomVector(int sz, int min = 0, int max = 100);
+std::vector<std::vector<int>> getRandomMatrix(int rows, int columns, int min = 0, int max = 100);
+
+class TestMPITaskSequential : public ppc::core::Task {
+ public:
+  explicit TestMPITaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<std::vector<int>> input_;
+  int res_{};
+};
+
+class TestMPITaskParallel : public ppc::core::Task {
+ public:
+  explicit TestMPITaskParallel(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input_, local_input_;
+  int res_{};
+  boost::mpi::communicator world;
+};
+
+}  // namespace ermolaev_v_min_matrix_mpi
\ No newline at end of file
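Editor's note: in this task the matrix reaches the `Task` as one `TaskData` input pointer per row, and the parallel implementation flattens those rows into a single buffer before slicing it. The flattening step in isolation (a sketch with hypothetical values, not the PR's code):

```
#include <cassert>
#include <vector>

// Flatten an R x C matrix stored as vector-of-rows into one contiguous,
// row-major buffer, as TestMPITaskParallel::pre_processing() does.
std::vector<int> flatten(const std::vector<std::vector<int>>& rows) {
  std::vector<int> flat;
  for (const auto& row : rows) flat.insert(flat.end(), row.begin(), row.end());
  return flat;
}

int main() {
  std::vector<std::vector<int>> m = {{3, 1}, {4, 1}, {5, 9}};
  std::vector<int> flat = flatten(m);
  assert(flat.size() == 6);
  assert(flat[2 * 2 + 1] == 9);  // row-major: element (row 2, col 1)
  return 0;
}
```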
diff --git a/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp b/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp
new file mode 100644
index 00000000000..c81a6b24e2e
--- /dev/null
+++ b/tasks/mpi/ermolaev_v_min_matrix/perf_tests/main.cpp
@@ -0,0 +1,119 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <climits>
+#include <random>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp"
+
+TEST(ermolaev_v_min_matrix_mpi, test_pipeline_run) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min(1, INT_MAX);
+  int ref = INT_MIN;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    std::random_device dev;
+    std::mt19937 gen(dev());
+
+    int count_rows = 4000;
+    int count_columns = 4000;
+    int gen_min = -500;
+    int gen_max = 500;
+
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    int index = gen() % (count_rows * count_columns);
+    global_matrix[index / count_columns][index % count_columns] = ref;  // column index taken modulo, not by division
+
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<ermolaev_v_min_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(ref, global_min[0]);
+  }
+}
+
+TEST(ermolaev_v_min_matrix_mpi, test_task_run) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_sum(1, INT_MAX);
+  int ref = INT_MIN;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    std::random_device dev;
+    std::mt19937 gen(dev());
+
+    int count_rows = 4000;
+    int count_columns = 4000;
+    int gen_min = -500;
+    int gen_max = 500;
+
+    global_matrix = ermolaev_v_min_matrix_mpi::getRandomMatrix(count_rows, count_columns, gen_min, gen_max);
+    int index = gen() % (count_rows * count_columns);
+    global_matrix[index / count_columns][index % count_columns] = ref;
+
+    for (unsigned int i = 0; i < global_matrix.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix[i].data()));
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<ermolaev_v_min_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(ref, global_sum[0]);
+  }
+}
diff --git a/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp b/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..fda1b0bbc2d
--- /dev/null
+++ b/tasks/mpi/ermolaev_v_min_matrix/src/ops_mpi.cpp
@@ -0,0 +1,133 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/ermolaev_v_min_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <climits>
+#include <random>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+std::vector<int> ermolaev_v_min_matrix_mpi::getRandomVector(int sz, int min, int max) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = min + static_cast<int>(gen() % (max - min + 1));
+  }
+  return vec;
+}
+
+std::vector<std::vector<int>> ermolaev_v_min_matrix_mpi::getRandomMatrix(int rows, int columns, int min, int max) {
+  std::vector<std::vector<int>> vec(rows);
+  for (int i = 0; i < rows; i++) {
+    vec[i] = ermolaev_v_min_matrix_mpi::getRandomVector(columns, min, max);
+  }
+  return vec;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init vectors
+  input_ = std::vector<std::vector<int>>(taskData->inputs_count[0], std::vector<int>(taskData->inputs_count[1]));
+
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[i]);
+    std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[1], input_[i].begin());
+  }
+
+  // Init value for output
+  res_ = INT_MAX;
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  std::vector<int> local_res(input_.size());
+
+  for (unsigned int i = 0; i < input_.size(); i++) {
+    local_res[i] = *std::min_element(input_[i].begin(), input_[i].end());
+  }
+
+  res_ = *std::min_element(local_res.begin(), local_res.end());
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  unsigned int delta = 0;
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[0] * taskData->inputs_count[1] / world.size();
+  }
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    // Init vectors
+
+    unsigned int rows = taskData->inputs_count[0];
+    unsigned int columns = taskData->inputs_count[1];
+    input_ = std::vector<int>(rows * columns);
+
+    for (unsigned int i = 0; i < rows; i++) {
+      auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[i]);
+      for (unsigned int j = 0; j < columns; j++) {
+        input_[i * columns + j] = tmp_ptr[j];
+      }
+    }
+
+    for (int proc = 1; proc < world.size(); proc++) {
+      world.send(proc, 0, input_.data() + delta * proc, delta);
+    }
+  }
+
+  local_input_ = std::vector<int>(delta);
+  if (world.rank() == 0) {
+    local_input_ = std::vector<int>(input_.begin(), input_.begin() + delta);
+    // the tail that does not divide evenly among processes stays on the root,
+    // so its elements are not silently dropped from the minimum search
+    local_input_.insert(local_input_.end(), input_.begin() + delta * world.size(), input_.end());
+  } else {
+    world.recv(0, 0, local_input_.data(), delta);
+  }
+
+  // Init value for output
+  res_ = INT_MAX;
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Check count elements of output
+    return taskData->outputs_count[0] == 1 && !taskData->inputs.empty();
+  }
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  // INT_MAX is the identity for minimum, covering ranks with an empty slice
+  int local_res = local_input_.empty() ? INT_MAX : *std::min_element(local_input_.begin(), local_input_.end());
+  reduce(world, local_res, res_, boost::mpi::minimum<int>(), 0);
+
+  return true;
+}
+
+bool ermolaev_v_min_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  }
+  return true;
+}
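Editor's note: the reduction pattern above, in isolation: each rank reduces its own slice, then `boost::mpi::minimum` combines the per-rank results at the root. A minimal standalone sketch with hypothetical per-rank values (not the PR's code):

```
#include <boost/mpi/collectives.hpp>
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>
#include <climits>
#include <iostream>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  // pretend this rank's slice had minimum (rank * 10 - 42);
  // an empty slice would contribute INT_MAX, the identity for minimum
  int local_min = world.rank() * 10 - 42;
  int global_min = INT_MAX;
  boost::mpi::reduce(world, local_min, global_min, boost::mpi::minimum<int>(), 0);
  if (world.rank() == 0) {
    std::cout << "min = " << global_min << '\n';  // -42 for any process count
  }
  return 0;
}
```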
diff --git a/tasks/mpi/example/src/ops_mpi.cpp b/tasks/mpi/example/src/ops_mpi.cpp
index 2066953d1e3..af08ff9fde2 100644
--- a/tasks/mpi/example/src/ops_mpi.cpp
+++ b/tasks/mpi/example/src/ops_mpi.cpp
@@ -112,7 +112,6 @@ bool nesterov_a_test_task_mpi::TestMPITaskParallel::run() {
   } else if (ops == "max") {
     reduce(world, local_res, res, boost::mpi::maximum<int>(), 0);
   }
-  std::this_thread::sleep_for(20ms);
   return true;
 }
diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..7ed222c75cc
--- /dev/null
+++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/func_tests/main.cpp
@@ -0,0 +1,293 @@
+// Filatev Vladislav Sum_of_matrix_elements
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <random>
+#include <vector>
+
+#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp"
+
+std::vector<std::vector<int>> getRandomMatrix(int size_n, int size_m) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<std::vector<int>> matrix(size_m, std::vector<int>(size_n));
+
+  for (int i = 0; i < size_m; ++i) {
+    for (int j = 0; j < size_n; ++j) {
+      matrix[i][j] = static_cast<int>(gen() % 200) - 100;  // cast before subtracting to stay in int
+    }
+  }
+  return matrix;
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_10_1) {
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count = 10;
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixparallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel.validation(), true);
+  sumMatrixparallel.pre_processing();
+  sumMatrixparallel.run();
+  sumMatrixparallel.post_processing();
+
+  if (world.rank() == 0) {
+    ASSERT_EQ(100, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_10_r) {
+  boost::mpi::communicator world;
+  const int count = 10;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+  std::vector<std::vector<int>> refIn;
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = getRandomMatrix(count, count);
+    refIn = in;
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixParallel.validation(), true);
+  sumMatrixParallel.pre_processing();
+  sumMatrixParallel.run();
+  sumMatrixParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<long long> refOut;
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> TaskDataSeq = std::make_shared<ppc::core::TaskData>();
+    refOut = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      TaskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(refIn[i].data()));
+    }
+    TaskDataSeq->inputs_count.emplace_back(count);
+    TaskDataSeq->inputs_count.emplace_back(count);
+    TaskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(refOut.data()));
+    TaskDataSeq->outputs_count.emplace_back(1);
+
+    filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq);
+    ASSERT_EQ(sumMatriSeq.validation(), true);
+    sumMatriSeq.pre_processing();
+    sumMatriSeq.run();
+    sumMatriSeq.post_processing();
+
+    ASSERT_EQ(out[0], refOut[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_10_20_r) {
+  boost::mpi::communicator world;
+  const int size_m = 10;
+  const int size_n = 20;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+  std::vector<std::vector<int>> refIn;
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = getRandomMatrix(size_n, size_m);
+    refIn = in;
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(size_n);
+    taskDataPar->inputs_count.emplace_back(size_m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixParallel.validation(), true);
+  sumMatrixParallel.pre_processing();
+  sumMatrixParallel.run();
+  sumMatrixParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<long long> refOut;
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> TaskDataSeq = std::make_shared<ppc::core::TaskData>();
+    refOut = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      TaskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(refIn[i].data()));
+    }
+    TaskDataSeq->inputs_count.emplace_back(size_n);
+    TaskDataSeq->inputs_count.emplace_back(size_m);
+    TaskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(refOut.data()));
+    TaskDataSeq->outputs_count.emplace_back(1);
+
+    filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq);
+    ASSERT_EQ(sumMatriSeq.validation(), true);
+    sumMatriSeq.pre_processing();
+    sumMatriSeq.run();
+    sumMatriSeq.post_processing();
+
+    ASSERT_EQ(out[0], refOut[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_20_10_r) {
+  boost::mpi::communicator world;
+  const int size_m = 20;
+  const int size_n = 10;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+  std::vector<std::vector<int>> refIn;
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = getRandomMatrix(size_n, size_m);
+    refIn = in;
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(size_n);
+    taskDataPar->inputs_count.emplace_back(size_m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixParallel.validation(), true);
+  sumMatrixParallel.pre_processing();
+  sumMatrixParallel.run();
+  sumMatrixParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<long long> refOut;
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> TaskDataSeq = std::make_shared<ppc::core::TaskData>();
+    refOut = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      TaskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(refIn[i].data()));
+    }
+    TaskDataSeq->inputs_count.emplace_back(size_n);
+    TaskDataSeq->inputs_count.emplace_back(size_m);
+    TaskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(refOut.data()));
+    TaskDataSeq->outputs_count.emplace_back(1);
+
+    filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq);
+    ASSERT_EQ(sumMatriSeq.validation(), true);
+    sumMatriSeq.pre_processing();
+    sumMatriSeq.run();
+    sumMatriSeq.post_processing();
+
+    ASSERT_EQ(out[0], refOut[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Sum_1_1_r) {
+  boost::mpi::communicator world;
+  const int size_m = 1;
+  const int size_n = 1;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+  std::vector<std::vector<int>> refIn;
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = getRandomMatrix(size_n, size_m);
+    refIn = in;
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(size_n);
+    taskDataPar->inputs_count.emplace_back(size_m);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixParallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixParallel.validation(), true);
+  sumMatrixParallel.pre_processing();
+  sumMatrixParallel.run();
+  sumMatrixParallel.post_processing();
+
+  if (world.rank() == 0) {
+    std::vector<long long> refOut;
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> TaskDataSeq = std::make_shared<ppc::core::TaskData>();
+    refOut = std::vector<long long>(1, 0);
+    for (int i = 0; i < size_m; i++) {
+      TaskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(refIn[i].data()));
+    }
+    TaskDataSeq->inputs_count.emplace_back(size_n);
+    TaskDataSeq->inputs_count.emplace_back(size_m);
+    TaskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(refOut.data()));
+    TaskDataSeq->outputs_count.emplace_back(1);
+
+    filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq sumMatriSeq(TaskDataSeq);
+    ASSERT_EQ(sumMatriSeq.validation(), true);
+    sumMatriSeq.pre_processing();
+    sumMatriSeq.run();
+    sumMatriSeq.post_processing();
+
+    ASSERT_EQ(out[0], refOut[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, Test_Empty_Matrix) {
+  boost::mpi::communicator world;
+  const int count = 0;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel sumMatrixparallel(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel.validation(), true);
+  sumMatrixparallel.pre_processing();
+  sumMatrixparallel.run();
+  sumMatrixparallel.post_processing();
+
+  if (world.rank() == 0) {
+    ASSERT_EQ(0, out[0]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp
new file mode 100644
index 00000000000..734168deffb
--- /dev/null
+++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp
@@ -0,0 +1,49 @@
+// Filatev Vladislav Sum_of_matrix_elements
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include <boost/mpi/collectives.hpp>
+#include <boost/mpi/communicator.hpp>
+#include <memory>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace filatev_v_sum_of_matrix_elements_mpi {
+
+class SumMatrixSeq : public ppc::core::Task {
+ public:
+  explicit SumMatrixSeq(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> matrix;
+  long long summ = 0;
+  int size_n, size_m;
+};
+
+class SumMatrixParallel : public ppc::core::Task {
+ public:
+  explicit SumMatrixParallel(std::shared_ptr<ppc::core::TaskData> taskData_, boost::mpi::communicator world)
+      : Task(std::move(taskData_)), world(std::move(world)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> matrix;
+  long long summ = 0;
+  std::vector<int> local_vector;
+  int size_n, size_m;
+  boost::mpi::communicator world;
+};
+
+}  // namespace filatev_v_sum_of_matrix_elements_mpi
\ No newline at end of file
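Editor's note: `SumMatrixParallel` (implemented further below) uses an unusual split: with `p` processes, the `p - 1` worker ranks each take `delta = total / (p - 1)` elements and the root keeps the remainder `ras = total % (p - 1)` for itself (everything when `p == 1`). A worked check of that arithmetic, as a hypothetical standalone helper:

```
#include <cassert>
#include <utility>

// Mirror of SumMatrixParallel::run()'s split: returns {delta, ras}.
std::pair<int, int> split(int total, int p) {
  if (p == 1) return {0, total};  // a single rank sums everything itself
  return {total / (p - 1), total % (p - 1)};
}

int main() {
  // a 10x20 matrix on 4 ranks: 3 workers get 66 elements each, root keeps 2
  auto [delta, ras] = split(200, 4);
  assert(delta == 66 && ras == 2);
  assert(delta * 3 + ras == 200);  // every element is assigned exactly once
  auto [d1, r1] = split(200, 1);
  assert(d1 == 0 && r1 == 200);
  return 0;
}
```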
diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..27f65019e1d
--- /dev/null
+++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp
@@ -0,0 +1,284 @@
+// Filatev Vladislav Sum_of_matrix_elements
+#include <gtest/gtest.h>
+
+#include <boost/mpi/timer.hpp>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp"
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_2000) {
+  const int count = 2000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_2000) {
+  const int count = 2000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 30;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_3000) {
+  const int count = 3000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_3000) {
+  const int count = 3000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 30;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_pipeline_run_4000) {
+  const int count = 4000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
+
+TEST(filatev_v_sum_of_matrix_elements_mpi, test_task_run_4000) {
+  const int count = 4000;
+  boost::mpi::communicator world;
+  std::vector<long long> out;
+  std::vector<std::vector<int>> in;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    in = std::vector<std::vector<int>>(count, std::vector<int>(count, 1));
+    out = std::vector<long long>(1, 0);
+    for (int i = 0; i < count; i++) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(in[i].data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->inputs_count.emplace_back(count);
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+    taskDataPar->outputs_count.emplace_back(1);
+  }
+
+  auto sumMatrixparallel =
+      std::make_shared<filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel>(taskDataPar, world);
+  ASSERT_EQ(sumMatrixparallel->validation(), true);
+  sumMatrixparallel->pre_processing();
+  sumMatrixparallel->run();
+  sumMatrixparallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 30;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(sumMatrixparallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(count * count, out[0]);
+  }
+}
\ No newline at end of file
Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrixparallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count * count, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp b/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp new file mode 100644 index 00000000000..bbcdc4b0779 --- /dev/null +++ b/tasks/mpi/filatev_v_sum_of_matrix_elements/src/ops_mpi.cpp @@ -0,0 +1,104 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include "mpi/filatev_v_sum_of_matrix_elements/include/ops_mpi.hpp" + +#include +#include +#include +#include + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::pre_processing() { + internal_order_test(); + + summ = 0; + size_n = taskData->inputs_count[0]; + size_m = taskData->inputs_count[1]; + + for (int i = 0; i < size_m; ++i) { + auto* temp = reinterpret_cast(taskData->inputs[i]); + + matrix.insert(matrix.end(), temp, temp + size_n); + } + + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::run() { + internal_order_test(); + + summ = std::accumulate(matrix.begin(), matrix.end(), 0); + + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixSeq::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = summ; + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + size_n = taskData->inputs_count[0]; + size_m = taskData->inputs_count[1]; + + for (int i = 0; i < size_m; ++i) { + auto* temp = reinterpret_cast(taskData->inputs[i]); + + matrix.insert(matrix.end(), temp, temp + size_n); + } + } + summ = 0; + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::run() { + internal_order_test(); + int delta = 0; + int ras = 0; + + if (world.rank() == 0 && world.size() > 1) { + ras = (size_n * size_m) % (world.size() - 1); + delta = (size_n * size_m) / (world.size() - 1); + } else if (world.rank() == 0 && world.size() == 1) { + ras = (size_n * size_m); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 0; proc < (world.size() - 1); proc++) { + world.send(proc + 1, 0, matrix.data() + proc * delta + ras, delta); + } + local_vector = std::vector(matrix.begin(), matrix.begin() + ras); + } else { + local_vector = std::vector(delta); + world.recv(0, 0, local_vector.data(), delta); + } + long long local_summ = std::accumulate(local_vector.begin(), local_vector.end(), 0); + reduce(world, local_summ, summ, std::plus(), 0); + return true; +} + +bool filatev_v_sum_of_matrix_elements_mpi::SumMatrixParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = summ; + } + return true; +} diff 
--git a/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp b/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp new file mode 100644 index 00000000000..e363d6b9cb7 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/func_tests/main.cpp @@ -0,0 +1,347 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include +#include +#include +#include + +#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp" + +std::string getRandomLine(int max_count) { + std::random_device dev; + std::mt19937 gen(dev()); + std::string line = "Hello world. How many words are in this sentence? The task of parallel programming!"; + int count = gen() % max_count; + for (int i = 0; i < count; ++i) { + line += line; + } + return line; +} + +TEST(filateva_e_number_sentences_line_mpi, Test_countSentences) { + std::string line = "Hello world. How many words are in this sentence? The task of parallel programming!"; + int count = filateva_e_number_sentences_line_mpi::countSentences(line); + ASSERT_EQ(3, count); +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_1) { + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_2) { + boost::mpi::communicator world; + std::string line = "Hello world"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_3) { + boost::mpi::communicator world; + std::string line = "Hello world!"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, one_sentence_line_4) { + boost::mpi::communicator world; + std::string line = "Hello world?"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = 
std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(1, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, empty_string) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(0, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_1) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_2) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(3); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); 
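+    // The sequential task serves as a reference oracle: it re-counts the same
+    // random line on rank 0, and the parallel result must match it exactly.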
+ taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_3) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(5); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, random_text_4) { + boost::mpi::communicator world; + std::string line; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + line = getRandomLine(10); + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + // // Create data + std::vector ref_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(ref_out.data())); + taskDataSeq->outputs_count.emplace_back(ref_out.size()); + + // Create Task + filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential NumSSeq(taskDataSeq); + ASSERT_EQ(NumSSeq.validation(), true); + NumSSeq.pre_processing(); + NumSSeq.run(); + NumSSeq.post_processing(); + + ASSERT_EQ(out[0], ref_out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, sentence_without_dot) { + boost::mpi::communicator world; + std::string line = "Hello world. Hello world! 
Hello world"; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel NumS(taskDataPar); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(3, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp b/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp new file mode 100644 index 00000000000..97a7a689ae5 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace filateva_e_number_sentences_line_mpi { + +int countSentences(std::string line); + +class NumberSentencesLineSequential : public ppc::core::Task { + public: + explicit NumberSentencesLineSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + int sentence_count; +}; + +class NumberSentencesLineParallel : public ppc::core::Task { + public: + explicit NumberSentencesLineParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + std::string local_line; + int sentence_count; + boost::mpi::communicator world; +}; + +} // namespace filateva_e_number_sentences_line_mpi \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp b/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp new file mode 100644 index 00000000000..d49eb9a7526 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/perf_tests/main.cpp @@ -0,0 +1,91 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp" + +TEST(filateva_e_number_sentences_line_mpi, test_pipeline_run) { + int count = 22; + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + for (int i = 0; i < count; i++) { + line += line; + } + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto NumS = std::make_shared(taskDataPar); + ASSERT_EQ(NumS->validation(), true); + NumS->pre_processing(); + NumS->run(); + NumS->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf 
results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4194304, out[0]); + } +} + +TEST(filateva_e_number_sentences_line_mpi, test_task_run) { + int count = 22; + boost::mpi::communicator world; + std::string line = "Hello world."; + std::vector out(1, 0); + // // Create TaskData + for (int i = 0; i < count; i++) { + line += line; + } + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataPar->outputs_count.emplace_back(out.size()); + } + + auto NumS = std::make_shared(taskDataPar); + ASSERT_EQ(NumS->validation(), true); + NumS->pre_processing(); + NumS->run(); + NumS->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4194304, out[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp b/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp new file mode 100644 index 00000000000..8453deb8568 --- /dev/null +++ b/tasks/mpi/filateva_e_number_sentences_line/src/ops_mpi.cpp @@ -0,0 +1,104 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include "mpi/filateva_e_number_sentences_line/include/ops_mpi.hpp" + +#include +#include +#include +#include + +int filateva_e_number_sentences_line_mpi::countSentences(std::string line) { + int count = 0; + for (long unsigned int i = 0; i < line.size(); ++i) { + if (line[i] == '.' || line[i] == '?' || line[i] == '!') { + ++count; + } + } + return count; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::pre_processing() { + internal_order_test(); + // Init vectors + line = std::string(std::move(reinterpret_cast(taskData->inputs[0]))); + sentence_count = 0; + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::run() { + internal_order_test(); + sentence_count = countSentences(line); + if (!line.empty() && line.back() != '.' && line.back() != '?' 
&& line.back() != '!') { + ++sentence_count; + } + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = sentence_count; + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::pre_processing() { + internal_order_test(); + if (world.rank() == 0) { + line = std::string(std::move(reinterpret_cast(taskData->inputs[0]))); + } + + sentence_count = 0; + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; + } + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::run() { + internal_order_test(); + unsigned int delta = 0; + unsigned int remains = 0; + int local_sentence_count; + if (world.rank() == 0 && world.size() > 1) { + delta = line.size() / (world.size() - 1); + remains = line.size() % (world.size() - 1); + } else if (world.rank() == 0 && world.size() == 1) { + remains = line.size(); + } + broadcast(world, delta, 0); + + if (world.rank() == 0) { + for (int proc = 0; proc < (world.size() - 1); proc++) { + world.send(proc + 1, 0, line.data() + proc * delta + remains, delta); + } + local_line = std::string(line.begin(), line.begin() + remains); + } else { + local_line = std::string(delta, '*'); + world.recv(0, 0, local_line.data(), delta); + } + + local_sentence_count = countSentences(local_line); + if (world.rank() == 0 && !line.empty() && line.back() != '.' && line.back() != '?' && line.back() != '!') { + ++local_sentence_count; + } + reduce(world, local_sentence_count, sentence_count, std::plus(), 0); + return true; +} + +bool filateva_e_number_sentences_line_mpi::NumberSentencesLineParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = sentence_count; + } + return true; +} diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp new file mode 100644 index 00000000000..a7c1de0ed2f --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/func_tests/main.cpp @@ -0,0 +1,372 @@ +#define _USE_MATH_DEFINES +#include + +#include +#include +#include +#include +#include + +#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp" + +TEST(gusev_n_trapezoidal_rule_mpi, ConstantFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 10.0; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(parallelTask.validation(), true); + 
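+  // The stages must be invoked in the fixed order validation ->
+  // pre_processing -> run -> post_processing; the internal_order_test()
+  // call inside each stage checks that this sequence is respected.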
parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, SquareFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 5.0; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, SineFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = M_PI; + int intervals = 1000000; + + if (world.rank() == 0) { + 
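+    // Only the root rank wires up TaskData; the other ranks receive a_, b_
+    // and n_ later through the broadcast inside run().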
taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, ExponentialFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 1.0; + int intervals = 1000000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + 
taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RemainderCaseTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + double lower_bound = 0.0; + double upper_bound = 5.0; + int intervals = 1000; + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RandomizedConstantFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0, 10.0); + int intervals = 1000000; + + double lower_bound = dis(gen); + double upper_bound = dis(gen); + if (lower_bound > upper_bound) std::swap(lower_bound, upper_bound); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + 
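+    // The random bounds were drawn independently on every rank, but only
+    // rank 0's values enter TaskData; run() broadcasts rank 0's parameters,
+    // so all ranks end up integrating the same interval.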
taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + sequentialTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, RandomizedSineFunctionTest) { + boost::mpi::communicator world; + std::vector result_global(1, 0); + auto taskDataParallel = std::make_shared(); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(0.0, M_PI); + int intervals = 1000000; + + double lower_bound = dis(gen); + double upper_bound = dis(gen); + if (lower_bound > upper_bound) std::swap(lower_bound, upper_bound); + + if (world.rank() == 0) { + taskDataParallel->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataParallel->inputs_count.emplace_back(1); + taskDataParallel->outputs.emplace_back(reinterpret_cast(result_global.data())); + taskDataParallel->outputs_count.emplace_back(result_global.size()); + } + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel parallelTask(taskDataParallel); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + auto taskDataSequential = std::make_shared(); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&lower_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&upper_bound)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->inputs.emplace_back(reinterpret_cast(&intervals)); + taskDataSequential->inputs_count.emplace_back(1); + taskDataSequential->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSequential->outputs_count.emplace_back(reference_result.size()); + + gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential sequentialTask(taskDataSequential); + 
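+    // The integrand is a std::function and is not carried in the TaskData
+    // buffers, so it has to be injected into each task separately via
+    // set_function().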
sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], result_global[0], 1e-3); + } +} \ No newline at end of file diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp b/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp new file mode 100644 index 00000000000..78a8459ba67 --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gusev_n_trapezoidal_rule_mpi { + +class TrapezoidalIntegrationSequential : public ppc::core::Task { + public: + explicit TrapezoidalIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +class TrapezoidalIntegrationParallel : public ppc::core::Task { + public: + explicit TrapezoidalIntegrationParallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + double parallel_integrate(const std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double global_result_{}; + std::function func_; + + boost::mpi::communicator world; +}; + +} // namespace gusev_n_trapezoidal_rule_mpi \ No newline at end of file diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp new file mode 100644 index 00000000000..1168c39d2bb --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/perf_tests/main.cpp @@ -0,0 +1,91 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp" + +TEST(gusev_n_trapezoidal_rule_mpi, test_pipeline_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 100000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) 
{ + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} + +TEST(gusev_n_trapezoidal_rule_mpi, test_task_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 100000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} diff --git a/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp b/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp new file mode 100644 index 00000000000..da4cd539a51 --- /dev/null +++ b/tasks/mpi/gusev_n_trapezoidal_rule/src/ops_mpi.cpp @@ -0,0 +1,123 @@ +#include "mpi/gusev_n_trapezoidal_rule/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::run() { + internal_order_test(); + result_ = integrate(func_, a_, b_, n_); + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +double gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double h = (b - a) / n; + double sum = 0.5 * (f(a) + f(b)); + + for (int i = 1; i < n; ++i) { + double x = a + i * h; + sum += f(x); + } + + return sum * h; +} + +void gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + } + + return true; +} + 
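+// Note on run() below: it broadcasts a_, b_ and n_ with a single raw-byte
+// MPI_Bcast of sizeof(a_) + sizeof(b_) + sizeof(n_) bytes starting at &a_,
+// which relies on the three members being laid out contiguously with no
+// padding between them. A layout-independent sketch (hypothetical
+// alternative, not part of this change) would broadcast each value on its
+// own:
+//   boost::mpi::broadcast(world, a_, 0);
+//   boost::mpi::broadcast(world, b_, 0);
+//   boost::mpi::broadcast(world, n_, 0);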
+bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::run() { + internal_order_test(); + MPI_Bcast(&a_, sizeof(a_) + sizeof(b_) + sizeof(n_), MPI_BYTE, 0, world); + double local_result = parallel_integrate(func_, a_, b_, n_); + reduce(world, local_result, global_result_, std::plus<>(), 0); + return true; +} + +bool gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = global_result_; + } + return true; +} + +double gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::parallel_integrate( + const std::function& f, double a, double b, int n) { + int rank = world.rank(); + int size = world.size(); + + double h = (b - a) / n; + double local_sum = 0.0; + + for (int i = rank; i < n; i += size) { + double x = a + i * h; + local_sum += f(x); + } + + if (rank == 0) { + local_sum += 0.5 * (f(a) + f(b)); + } + + return local_sum * h; +} + +void gusev_n_trapezoidal_rule_mpi::TrapezoidalIntegrationParallel::set_function( + const std::function& func) { + func_ = func; +} \ No newline at end of file diff --git a/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp b/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp new file mode 100644 index 00000000000..8a54212a5d0 --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/func_tests/main.cpp @@ -0,0 +1,168 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include +#include +#include + +#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp" + +TEST(kabalova_v_count_symbols_mpi, EmptyString) { + boost::mpi::communicator world; + std::string global_str; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(0, global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, FourSymbolStringNotLetter) { + boost::mpi::communicator world; + std::string global_str = "1234"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + 
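+    // "1234" contains no alphabetic characters, so both the parallel run and
+    // the sequential reference must report zero.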
// Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, FourSymbolStringLetter) { + boost::mpi::communicator world; + std::string global_str = "abcd"; + + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} + +TEST(kabalova_v_count_symbols_mpi, RandomString) { + boost::mpi::communicator world; + std::string global_str = kabalova_v_count_symbols_mpi::getRandomString(); + // Create data + std::vector global_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataMpi = std::make_shared(); + if (world.rank() == 0) { + taskDataMpi->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataMpi->inputs_count.emplace_back(global_str.size()); + taskDataMpi->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataMpi->outputs_count.emplace_back(global_out.size()); + } + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataMpi); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_out.data())); + 
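+    // getRandomString() mixes letters and digits, so this reference run
+    // exercises the isalpha() filter on a non-trivial input.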
taskDataSeq->outputs_count.emplace_back(reference_out.size()); + + // Create Task + kabalova_v_count_symbols_mpi::TestMPITaskSequential TestMPITaskSequential(taskDataSeq); + ASSERT_EQ(TestMPITaskSequential.validation(), true); + TestMPITaskSequential.pre_processing(); + TestMPITaskSequential.run(); + TestMPITaskSequential.post_processing(); + + ASSERT_EQ(reference_out[0], global_out[0]); + } +} diff --git a/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp b/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp new file mode 100644 index 00000000000..298c037752a --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2024 Kabalova Valeria +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kabalova_v_count_symbols_mpi { + +int getRandomNumber(int left, int right); +std::string getRandomString(); +int countSymbols(std::string& str); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}, local_input_{}; + int result{}; + boost::mpi::communicator world; +}; + +} // namespace kabalova_v_count_symbols_mpi \ No newline at end of file diff --git a/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp b/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp new file mode 100644 index 00000000000..ea37ba37357 --- /dev/null +++ b/tasks/mpi/kabalova_v_count_symbols/perf_tests/main.cpp @@ -0,0 +1,90 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp" + +TEST(kabalova_v_count_symbols_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::string string = "string"; + std::string global_str; + for (int i = 0; i < 20000; i++) { + global_str += string; + } + + std::vector global_out(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_out.data())); + taskDataPar->outputs_count.emplace_back(global_out.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, 
perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+  }
+}
+
+TEST(kabalova_v_count_symbols_mpi, test_task_run) {
+  boost::mpi::communicator world;
+  std::string string = "string";
+  std::string global_str;
+  for (int i = 0; i < 20000; i++) {
+    global_str += string;
+  }
+
+  std::vector<int> global_out(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_str.data()));
+    taskDataPar->inputs_count.emplace_back(global_str.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_out.data()));
+    taskDataPar->outputs_count.emplace_back(global_out.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<kabalova_v_count_symbols_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 5000;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp b/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp
new file mode 100644
index 00000000000..bfd4c62a79d
--- /dev/null
+++ b/tasks/mpi/kabalova_v_count_symbols/src/count_symbols_mpi.cpp
@@ -0,0 +1,142 @@
+// Copyright 2024 Kabalova Valeria
+#include "mpi/kabalova_v_count_symbols/include/count_symbols_mpi.hpp"
+
+#include <algorithm>
+#include <cctype>
+#include <chrono>
+#include <random>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+int kabalova_v_count_symbols_mpi::getRandomNumber(int left, int right) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  return ((gen() % (right - left + 1)) + left);
+}
+
+std::string kabalova_v_count_symbols_mpi::getRandomString() {
+  std::string str;
+  std::string alphabet = "abcdefghijklmnopqrstuvwxyz1234567890";
+  int strSize = getRandomNumber(1000, 20000);
+  for (int i = 0; i < strSize; i++) {
+    str += alphabet[getRandomNumber(0, alphabet.size() - 1)];
+  }
+  return str;
+}
+
+int kabalova_v_count_symbols_mpi::countSymbols(std::string& str) {
+  int result = 0;
+  for (size_t i = 0; i < str.size(); i++) {
+    if (isalpha(str[i]) != 0) {
+      result++;
+    }
+  }
+  return result;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::string(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+  result = 0;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // One string comes in, and exactly one number goes out: the count of alphabetic characters in the string.
+  bool flag1 = (taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1);
+  // Did we receive an array of chars?
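+  // Note: inputs[0] is stored as uint8_t*, so this typeid check holds by construction;
+  // it documents the expected element type rather than enforcing anything at runtime.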
+  bool flag2 = false;
+  if (typeid(*taskData->inputs[0]).name() == typeid(uint8_t).name()) {
+    flag2 = true;
+  }
+  return (flag1 && flag2);
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  result = countSymbols(input_);
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int *>(taskData->outputs[0])[0] = result;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  unsigned int delta = 0;
+  if (world.rank() == 0) {
+    // delta = string.size() / world.size(), rounded up
+    delta = taskData->inputs_count[0] % world.size() == 0 ? taskData->inputs_count[0] / world.size()
+                                                          : taskData->inputs_count[0] / world.size() + 1;
+  }
+  broadcast(world, delta, 0);
+  // Initialize the full string in the root process,
+  // then send one substring to every other process.
+  if (world.rank() == 0) {
+    input_ = std::string(reinterpret_cast<char *>(taskData->inputs[0]), taskData->inputs_count[0]);
+    for (int proc = 1; proc < world.size(); proc++) {
+      // input_.size() / world.size() is not always an integer, so the last process could otherwise
+      // read past the end of the buffer. bufDelta is the (negative) correction between input_.size()
+      // and proc * delta + delta: e.g. for 10 chars on 4 processes, delta = 3 and the last process
+      // is sent only one char. A process whose slice lies past the end effectively gets an empty string.
+      int bufDelta = 0;
+      if ((size_t)(proc * delta + delta) > input_.size() && (size_t)proc < input_.size()) {
+        bufDelta = input_.size() - proc * delta - delta;
+      }
+      world.send(proc, 0, input_.data() + proc * delta, delta + bufDelta);
+    }
+  }
+  // Initialize the root's own substring
+  if (world.rank() == 0)
+    local_input_ = input_.substr(0, delta);
+  else {
+    std::string buffer;
+    buffer.resize(delta);
+    // The other processes receive their substrings from the root
+    world.recv(0, 0, buffer.data(), delta);
+    local_input_ = std::string(buffer.data(), delta);
+  }
+  result = 0;
+  return true;
+}
+
+bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // 1 input string - 1 output number
+    bool flag1 = (taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 1);
+    // Did we get an array of chars?
+ bool flag2 = false; + if (typeid(*taskData->inputs[0]).name() == typeid(uint8_t).name()) { + flag2 = true; + } + return (flag1 && flag2); + } + return true; +} + +bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::run() { + internal_order_test(); + int local_result = 0; + // Count symbols in every substring + local_result = countSymbols(local_input_); + // Get sum and send it into result + reduce(world, local_result, result, std::plus(), 0); + return true; +} + +bool kabalova_v_count_symbols_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} \ No newline at end of file diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..9f05419e415 --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp @@ -0,0 +1,207 @@ +// Copyright 2023 Nesterov Alexander + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_large_random_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'x'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 1000000; + global_str.resize(count_size_str); + std::random_device rd; + std::mt19937 eng(rd()); + std::uniform_int_distribution<> distr(0, 25); + + std::generate(global_str.begin(), global_str.end(), [&]() { return 'a' + distr(eng); }); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_no_target_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'p'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 
0) { + const int count_size_str = 500; + global_str = std::vector(count_size_str, 'f'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_diff_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + 
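+    // Run the sequential counter over the same input to produce a reference value for the parallel result.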
ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_all_char_is_same) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'p'; + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 325; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} // namespace kazunin_n_count_freq_a_char_in_string_mpi diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp new file mode 100644 index 00000000000..b17d31d374a --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander + +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kazunin_n_count_freq_a_char_in_string_mpi { +class CharFreqCounterMPISequential : public ppc::core::Task { + public: + explicit CharFreqCounterMPISequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + size_t count_result_{0}; + char character_to_count_{}; + std::vector input_string_; +}; + +class CharFreqCounterMPIParallel : public ppc::core::Task { + public: + explicit CharFreqCounterMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + size_t total_count_{0}; + size_t local_count_{0}; + char character_to_count_{}; + std::vector input_string_; + std::vector local_segment_; + 
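+  // World communicator: rank 0 scatters input_string_ in segments, and every rank counts its local_segment_.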
boost::mpi::communicator global; +}; +} // namespace kazunin_n_count_freq_a_char_in_string_mpi \ No newline at end of file diff --git a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..81408bde99c --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + char target_char = 'p'; + + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_res = std::make_shared(); + auto perf_analyz = std::make_shared(testMpiTaskParallel); + perf_analyz->pipeline_run(perfAttr, perf_res); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_res); + ASSERT_EQ(count_size_str, global_count[0]); + } +} + +TEST(kazunin_n_count_freq_a_char_in_string_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + char target_char = 'p'; + + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'p'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perf_res = std::make_shared(); + auto perf_analyz = std::make_shared(testMpiTaskParallel); + perf_analyz->task_run(perfAttr, perf_res); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perf_res); + ASSERT_EQ(count_size_str, global_count[0]); + } +} diff --git 
a/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp new file mode 100644 index 00000000000..75af53da384 --- /dev/null +++ b/tasks/mpi/kazunin_n_count_freq_a_char_in_string/src/ops_mpi.cpp @@ -0,0 +1,99 @@ +// Copyright 2024 Nesterov Alexander +#include "mpi/kazunin_n_count_freq_a_char_in_string/include/ops_mpi.hpp" + +#include +#include +#include + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::pre_processing() { + internal_order_test(); + input_string_.assign(reinterpret_cast(taskData->inputs[0]), + reinterpret_cast(taskData->inputs[0]) + taskData->inputs_count[0]); + character_to_count_ = *reinterpret_cast(taskData->inputs[1]); + count_result_ = 0; + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::run() { + internal_order_test(); + count_result_ = std::count(input_string_.begin(), input_string_.end(), character_to_count_); + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPISequential::post_processing() { + *reinterpret_cast(taskData->outputs[0]) = count_result_; + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::pre_processing() { + internal_order_test(); + if (global.rank() == 0) { + character_to_count_ = *reinterpret_cast(taskData->inputs[1]); + } + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::validation() { + internal_order_test(); + return global.rank() != 0 || taskData->outputs_count[0] == 1; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::run() { + internal_order_test(); + + int my_rank = global.rank(); + auto world_size = global.size(); + int n = 0; + + if (my_rank == 0) { + n = taskData->inputs_count[0]; + input_string_.assign(reinterpret_cast(taskData->inputs[0]), + reinterpret_cast(taskData->inputs[0]) + n); + } + + boost::mpi::broadcast(global, n, 0); + boost::mpi::broadcast(global, character_to_count_, 0); + + auto base_segment_size = n / world_size; + auto extra = n % world_size; + std::vector send_counts(world_size, base_segment_size); + std::vector displacements(world_size, 0); + + for (auto i = 0; i < world_size; ++i) { + if (i < extra) { + ++send_counts[i]; + } + if (i > 0) { + displacements[i] = displacements[i - 1] + send_counts[i - 1]; + } + } + + local_segment_.resize(send_counts[my_rank]); + if (my_rank == 0) { + boost::mpi::scatterv(global, input_string_.data(), send_counts, displacements, local_segment_.data(), + send_counts[my_rank], 0); + } else { + std::vector empty_buffer(0); + boost::mpi::scatterv(global, empty_buffer.data(), send_counts, displacements, local_segment_.data(), + send_counts[my_rank], 0); + } + + local_count_ = std::count(local_segment_.begin(), local_segment_.end(), character_to_count_); + + boost::mpi::reduce(global, local_count_, total_count_, std::plus<>(), 0); + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_mpi::CharFreqCounterMPIParallel::post_processing() { + internal_order_test(); + + if (global.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = total_count_; + } + return true; +} diff --git a/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp b/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp new file mode 
100644 index 00000000000..07d8f56e61f --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/func_tests/main.cpp @@ -0,0 +1,126 @@ +#include +#include +#include +#include + +#include "../include/avg_mpi.hpp" +#include "core/task/include/task.hpp" +#include "gtest/gtest.h" + +//=========================================sequence========================================= + +#define FUNC_SEQ_TEST(InType, OutType, Size, Value) \ + \ + TEST(khasanyanov_k_average_vector_seq, test_seq_##InType##_##Size) { \ + std::vector in(Size, static_cast(Value)); \ + std::vector out(1, 0.0); \ + std::shared_ptr taskData = \ + khasanyanov_k_average_vector_mpi::create_task_data(in, out); \ + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential testTask(taskData); \ + RUN_TASK(testTask); \ + EXPECT_NEAR(out[0], static_cast(Value), 1e-5); \ + } + +#define RUN_FUNC_SEQ_TESTS(Size, Value) \ + FUNC_SEQ_TEST(int8_t, double, Size, Value) \ + FUNC_SEQ_TEST(int16_t, double, Size, Value) \ + FUNC_SEQ_TEST(int32_t, double, Size, Value) \ + FUNC_SEQ_TEST(int64_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint8_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint16_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint32_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint64_t, double, Size, Value) \ + FUNC_SEQ_TEST(double, double, Size, Value) \ + FUNC_SEQ_TEST(float, double, Size, Value) + +TEST(khasanyanov_k_average_vector_seq, test_random) { + std::vector in = khasanyanov_k_average_vector_mpi::get_random_vector(15); + std::vector out(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_mpi::create_task_data(in, out); + + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential testTask(taskData); + RUN_TASK(testTask); + + double expect_res = std::accumulate(in.begin(), in.end(), 0.0, std::plus()) / in.size(); + EXPECT_NEAR(out[0], expect_res, 1e-5); +} + +//=========================================parallel========================================= + +namespace mpi = boost::mpi; + +TEST(khasanyanov_k_average_vector_seq, test_displacement) { + auto displacement = khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::displacement(18, 4); + auto sizes = displacement.first; + auto displacements = displacement.second; + std::vector pattern_sizes{5, 5, 4, 4}; + std::vector pattern_displacements{0, 5, 10, 14}; + EXPECT_EQ(sizes, pattern_sizes); + EXPECT_EQ(displacements, pattern_displacements); +} + +TEST(khasanyanov_k_average_vector_mpi, test_wrong_input) { + mpi::communicator world; + std::vector in; + std::vector out; + std::shared_ptr taskData = std::make_shared(); + if (world.rank() == 0) { + taskData = khasanyanov_k_average_vector_mpi::create_task_data(in, out); + } + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel testTask(taskData); + if (world.rank() == 0) { + ASSERT_FALSE(testTask.validation()); + } +} + +#define FUNC_MPI_TEST(InType, OutType, Size) \ + TEST(khasanyanov_k_average_vector_mpi, test_mpi_##InType##_##Size) { \ + mpi::communicator world; \ + std::vector in = khasanyanov_k_average_vector_mpi::get_random_vector(Size); \ + std::vector out(1, 0.0); \ + std::shared_ptr taskData = std::make_shared(); \ + if (world.rank() == 0) { \ + taskData = khasanyanov_k_average_vector_mpi::create_task_data(in, out); \ + } \ + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel testTask(taskData); \ + RUN_TASK(testTask); \ + if (world.rank() == 0) { \ + std::vector seq_out(1, 0.0); \ + std::shared_ptr taskDataSeq = \ + khasanyanov_k_average_vector_mpi::create_task_data(in, seq_out); \ + \ + 
khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential testMpiTaskSequential( \ + taskDataSeq); \ + \ + RUN_TASK(testMpiTaskSequential); \ + EXPECT_NEAR(seq_out[0], out[0], 1e-5); \ + } \ + } + +#define RUN_FUNC_MPI_TESTS(Size) \ + FUNC_MPI_TEST(int8_t, double, Size) \ + FUNC_MPI_TEST(int16_t, double, Size) \ + FUNC_MPI_TEST(int32_t, double, Size) \ + FUNC_MPI_TEST(int64_t, double, Size) \ + FUNC_MPI_TEST(uint8_t, double, Size) \ + FUNC_MPI_TEST(uint16_t, double, Size) \ + FUNC_MPI_TEST(uint32_t, double, Size) \ + FUNC_MPI_TEST(uint64_t, double, Size) \ + FUNC_MPI_TEST(double, double, Size) \ + FUNC_MPI_TEST(float, double, Size) + +#define RUN_FUNC_TESTS(Size, Value) \ + RUN_FUNC_SEQ_TESTS(Size, Value) \ + RUN_FUNC_MPI_TESTS(Size) + +#define RUN_ALL_FUNC_TESTS() \ + RUN_FUNC_TESTS(1234, 7.7) \ + RUN_FUNC_TESTS(2000, 10) \ + RUN_FUNC_TESTS(9, 77) \ + RUN_FUNC_TESTS(3011, 111) \ + RUN_FUNC_TESTS(2, 23) + +//=======run============= +RUN_ALL_FUNC_TESTS() \ No newline at end of file diff --git a/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp b/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp new file mode 100644 index 00000000000..96ce6952257 --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp @@ -0,0 +1,205 @@ +#ifndef _AVG_MPI_HPP_ +#define _AVG_MPI_HPP_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +#ifndef RUN_TASK +#define RUN_TASK(task) \ + ASSERT_TRUE((task).validation()); \ + (task).pre_processing(); \ + (task).run(); \ + (task).post_processing(); + +#endif +namespace khasanyanov_k_average_vector_mpi { + +template +std::vector get_random_vector(size_t size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = static_cast(gen() % 1000 + (gen() % 100) / 100.0); + } + return vec; +} + +template +std::shared_ptr create_task_data(std::vector& in, std::vector& out) { + auto taskData = std::make_shared(); + taskData->inputs.emplace_back(reinterpret_cast(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast(out.data())); + taskData->outputs_count.emplace_back(out.size()); + return taskData; +} + +//=========================================sequential========================================= + +template +class AvgVectorMPITaskSequential : public ppc::core::Task { + std::vector input_; + Out avg = 0.0; + + public: + explicit AvgVectorMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; +}; + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + avg = 0.0; + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::run() { + internal_order_test(); + avg = static_cast(std::accumulate(input_.begin(), input_.end(), 0.0, std::plus())); + avg /= static_cast(taskData->inputs_count[0]); 
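+  // avg now holds sum / N; the accumulation is seeded with 0.0 so integer element types are
+  // summed in double, avoiding overflow and truncation before the division.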
+ // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = avg; + return true; +} + +//=========================================parallel========================================= + +namespace mpi = boost::mpi; +template +class AvgVectorMPITaskParallel : public ppc::core::Task { + std::vector input_, local_input_; + Out avg = 0.0; + mpi::communicator world; + + public: + explicit AvgVectorMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + static std::pair, std::vector> displacement(size_t, size_t); + static int size_for_rank(int, int, int); +}; + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; + } + return true; +} + +template +int khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::size_for_rank(int rank, int count, int size) { + int average = count / size; + int mod = count % size; + return average + ((rank < mod) ? 1 : 0); +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::pre_processing() { + internal_order_test(); + size_t input_size; + if (world.rank() == 0) { + input_size = taskData->inputs_count[0]; + } + + mpi::broadcast(world, input_size, 0); + + if (world.rank() == 0) { + std::pair, std::vector> disp = displacement(input_size, world.size()); + auto& displacements = disp.second; + auto& sizes = disp.first; + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast(taskData->inputs[0]); + + input_.clear(); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + + local_input_.resize(sizes[0]); + mpi::scatterv(world, input_, sizes, displacements, local_input_.data(), sizes[0], 0); + + } else { + auto size = size_for_rank(world.rank(), input_size, world.size()); + local_input_.resize(size); + mpi::scatterv(world, local_input_.data(), size, 0); + } + avg = 0.0; + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::run() { + internal_order_test(); + Out local_sum{}; + local_sum = static_cast(std::accumulate(local_input_.begin(), local_input_.end(), 0.0, std::plus())); + mpi::reduce(world, local_sum, avg, std::plus(), 0); + // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template +bool khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = avg / input_.size(); + } + return true; +} + +template +std::pair, std::vector> +khasanyanov_k_average_vector_mpi::AvgVectorMPITaskParallel::displacement(size_t input_size, size_t n) { + const size_t capacity = n; + size_t count = input_size / capacity; + size_t mod = input_size % capacity; + std::vector sizes(capacity, count); + std::transform(sizes.cbegin(), sizes.cbegin() + mod, sizes.begin(), [](auto i) { return i + 1; }); + std::vector disp(capacity); + disp[0] = 0; + std::generate(disp.begin() + 1, disp.end(), [&, i = 0]() mutable { + ++i; + return disp[i - 1] + sizes[i - 1]; + }); + + return {sizes, disp}; +} + +} // namespace khasanyanov_k_average_vector_mpi 
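+
+// Illustrative example (values taken from the accompanying func test): displacement(18, 4)
+// yields sizes {5, 5, 4, 4} and offsets {0, 5, 10, 14}. The first 18 % 4 = 2 ranks each
+// take one extra element, so scatterv covers the whole input with no gaps and no overlap.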
+ +#endif // !_AVG_MPI_HPP_ diff --git a/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp b/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp new file mode 100644 index 00000000000..25e167b5024 --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/perf_tests/main.cpp @@ -0,0 +1,147 @@ +#include +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp" + +//=========================================sequence========================================= + +const int SIZE = 2220000; + +TEST(khasanyanov_k_average_vector_seq, test_pipeline_run) { + std::vector global_vec(SIZE, 4); + std::vector average(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testAvgVectorSequence); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(4, average[0], 1e-5); +} + +TEST(khasanyanov_k_average_vector_seq, test_task_run) { + std::vector global_vec(SIZE, 4); + std::vector average(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testAvgVectorSequence); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(4, average[0], 1e-5); +} + +//=========================================parallel========================================= + +TEST(khasanyanov_k_average_vector_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector average_par(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = khasanyanov_k_average_vector_mpi::get_random_vector(SIZE); + taskDataPar = khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_par); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + RUN_TASK(*testMpiTaskParallel); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector average_seq(1, 0.0); + std::shared_ptr taskDataSeq = + 
khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_seq); + auto testMpiTaskSequential = + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential(taskDataSeq); + RUN_TASK(testMpiTaskSequential); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(average_seq[0], average_par[0], 1e-5); + } +} + +TEST(khasanyanov_k_average_vector_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector average_par(1, 0.0); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = khasanyanov_k_average_vector_mpi::get_random_vector(SIZE); + taskDataPar = khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_par); + } + + auto testMpiTaskParallel = + std::make_shared>(taskDataPar); + + RUN_TASK(*testMpiTaskParallel); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + std::vector average_seq(1, 0.0); + std::shared_ptr taskDataSeq = + khasanyanov_k_average_vector_mpi::create_task_data(global_vec, average_seq); + auto testMpiTaskSequential = + khasanyanov_k_average_vector_mpi::AvgVectorMPITaskSequential(taskDataSeq); + RUN_TASK(testMpiTaskSequential); + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_NEAR(average_seq[0], average_par[0], 1e-5); + } +} \ No newline at end of file diff --git a/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp b/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp new file mode 100644 index 00000000000..1e73d54ad1a --- /dev/null +++ b/tasks/mpi/khasanyanov_k_average_vector/src/avg_mpi.cpp @@ -0,0 +1,3 @@ +#include "mpi/khasanyanov_k_average_vector/include/avg_mpi.hpp" + +/* nothing to realization*/ diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..38795889ad1 --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/func_tests/main.cpp @@ -0,0 +1,439 @@ +#include + +#include +#include +#include + +#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp" + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max1) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 3; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = 
std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max2) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 5; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max3) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 10; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + 
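+  // Only rank 0 fills the TaskData; the parallel task broadcasts the chunk size and distributes the matrix in equal slices.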
kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max4) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 15; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max5) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = world.size(); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * 20; // 
size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max6) { + boost::mpi::communicator world; + int size = world.size(); + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = 10; + int count_column = 5; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * count_column; + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); 
i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Row1) { + boost::mpi::communicator world; + int size = world.size(); + int rank = world.rank(); + std::vector global_matrix; + std::vector global_max(size, 0); + int count_rows = 1; + int count_column = 10; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (rank == 0) { + const int count_size_vector = count_rows * count_column; // size of vector + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (rank == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Column1) { + boost::mpi::communicator world; + int size = world.size(); + int rank = world.rank(); + std::vector global_matrix; + std::vector global_max(size, 0); + int count_rows = 10; + int count_column = 1; + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (rank == 0) { + const int count_size_vector = count_rows * count_column; // size of vector + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector * size); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (rank == 0) { + // Create data + std::vector reference_max(world.size(), 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&size)); + 
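+    // The row count is passed as a second scalar input so the sequential task can derive the column count.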
taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < int(reference_max.size()); i++) { + ASSERT_EQ(reference_max[i], global_max[i]); + } + } +} + +TEST(kolokolova_d_max_of_row_matrix_mpi, Test_Parallel_Max_Empty_Matrix) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector global_max(world.size(), 0); + int count_rows = 0; + int count_column = 0; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = count_rows * count_column; // size of rows + global_matrix = kolokolova_d_max_of_row_matrix_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + if (world.rank() == 0) { + kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} \ No newline at end of file diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..8c65fc8e0c2 --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace kolokolova_d_max_of_row_matrix_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector res; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::vector res; + boost::mpi::communicator world; + unsigned int delta = 0; +}; + +} // namespace kolokolova_d_max_of_row_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..b540664dcdd --- /dev/null +++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp" + +TEST(kolokolova_d_max_of_row_matrix_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_matrix; + std::vector 
global_max(world.size(), 0);
+  int count_rows = world.size();
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  const int count_size_vector = count_rows * 2000000;
+  if (world.rank() == 0) {
+    global_matrix = std::vector<int>(count_size_vector, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(global_matrix.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(1, global_max[0]);
+  }
+}
+
+TEST(kolokolova_d_max_of_row_matrix_mpi, test_task_run) {
+  boost::mpi::communicator world;
+  std::vector<int> global_matrix;
+  std::vector<int> global_max(world.size(), 0);
+  int count_rows = world.size();
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  const int count_size_vector = count_rows * 8500000;
+  if (world.rank() == 0) {
+    global_matrix = std::vector<int>(count_size_vector, 1);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_matrix.data()));
+    taskDataPar->inputs_count.emplace_back(global_matrix.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_max.data()));
+    taskDataPar->outputs_count.emplace_back(global_max.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(1, global_max[0]);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp b/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..60baade22d2
--- /dev/null
+++ b/tasks/mpi/kolokolova_d_max_of_row_matrix/src/ops_mpi.cpp
@@ -0,0 +1,135 @@
+#include "mpi/kolokolova_d_max_of_row_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <climits>
+#include <functional>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+std::vector<int> kolokolova_d_max_of_row_matrix_mpi::getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  std::uniform_int_distribution<int> dist(-100, 99);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = dist(gen);  // use the declared distribution; gen() % 100 ignored it and never produced negatives
+  }
+  return vec;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  // Init values for input and output
+  auto row_count = static_cast<size_t>(*taskData->inputs[1]);
+  size_t col_count = taskData->inputs_count[0] / row_count;
+
+  input_.resize(row_count, std::vector<int>(col_count));
+
+  int* input_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+  for (size_t i = 0; i < row_count; ++i) {
+    for (size_t j = 0; j < col_count; ++j) {
+      input_[i][j] = input_ptr[i * col_count + j];
+    }
+  }
+  res.resize(row_count);
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  // Check that the output has one slot per row
+  return *taskData->inputs[1] == taskData->outputs_count[0];
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_.size(); ++i) {
+    int max_value = input_[i][0];
+    for (size_t j = 1; j < input_[i].size(); ++j) {
+      if (input_[i][j] > max_value) {
+        max_value = input_[i][j];
+      }
+    }
+    res[i] = max_value;
+  }
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  int* output_ptr = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (size_t i = 0; i < res.size(); ++i) {
+    output_ptr[i] = res[i];
+  }
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  int proc_rank = world.rank();
+
+  if (proc_rank == 0) {
+    // NOTE: assumes inputs_count[0] is divisible by world.size(); every func/perf
+    // test guarantees this by sizing the matrix as a multiple of the process count.
+    delta = taskData->inputs_count[0] / world.size();
+
+    // Init vectors
+    input_ = std::vector<int>(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+    for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+  }
+  // Init value for output
+  res.resize(world.size());
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    // Reject empty input or output buffers
+    if (taskData->outputs_count[0] == 0 || taskData->inputs_count[0] == 0) return false;
+  }
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  int proc_rank = world.rank();
+
+  broadcast(world, delta, 0);
+
+  if (proc_rank == 0) {
+    for (int proc = 1; proc < world.size(); proc++) {
+      world.send(proc, 0, input_.data() + proc * delta, delta);
+    }
+  }
+
+  local_input_ = std::vector<int>(delta);
+
+  if (proc_rank == 0) {
+    local_input_ = std::vector<int>(input_.begin(), input_.begin() + delta);
+  } else {
+    world.recv(0, 0, local_input_.data(), delta);
+  }
+  // Start from INT_MIN (not 0) so chunks consisting entirely of negative
+  // values still yield the correct maximum.
+  int local_res = INT_MIN;
+  for (int i = 0; i < int(local_input_.size()); i++) {
+    if (local_res < local_input_[i]) local_res = local_input_[i];
+  }
+  gather(world, local_res, res, 0);
+  return true;
+}
+
+bool kolokolova_d_max_of_row_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    for (int i = 0; i < world.size(); i++) {
+      reinterpret_cast<int*>(taskData->outputs[0])[i] = res[i];
+    }
+  }
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp b/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp
new file mode 100644
index 00000000000..5ccf20946d6
--- /dev/null
+++ b/tasks/mpi/korablev_v_rect_int_mpi/func_tests/main.cpp
@@ -0,0 +1,261 @@
+#define _USE_MATH_DEFINES
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <cmath>
+#include <vector>
+
+#include
"mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +TEST(korablev_v_rect_int, test_constant_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 10.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return 5.0; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_square_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 5.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + 
taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_sine_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = M_PI; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return std::sin(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_exponential_function) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 1.0; + int n = 1000000; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + 
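+    // The integrand is installed locally on every rank via set_function();
+    // the task itself only broadcasts the scalars a, b, and n - the functor
+    // is never serialized over MPI.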
taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return std::exp(x); }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} + +TEST(korablev_v_rect_int, test_remainder_case) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = 0.0; + double b = 5.0; + int n = 10; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&n)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + taskDataPar->outputs_count.emplace_back(global_result.size()); + } + + korablev_v_rect_int_mpi::RectangularIntegrationParallel parallelTask(taskDataPar); + parallelTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&n)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + taskDataSeq->outputs_count.emplace_back(reference_result.size()); + + korablev_v_rect_int_mpi::RectangularIntegrationSequential sequentialTask(taskDataSeq); + sequentialTask.set_function([](double x) { return x * x; }); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + ASSERT_NEAR(reference_result[0], global_result[0], 1e-3); + } +} diff --git a/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp b/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp new file mode 100644 index 00000000000..086c55eac1c --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp @@ -0,0 +1,62 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korablev_v_rect_int_mpi { + +class RectangularIntegrationSequential : public ppc::core::Task { + public: + explicit RectangularIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const 
std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +class RectangularIntegrationParallel : public ppc::core::Task { + public: + explicit RectangularIntegrationParallel(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + double parallel_integrate(const std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double global_result_{}; + std::function func_; + + boost::mpi::communicator world; +}; + +} // namespace korablev_v_rect_int_mpi \ No newline at end of file diff --git a/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp b/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp new file mode 100644 index 00000000000..2c10b720b9f --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/perf_tests/main.cpp @@ -0,0 +1,89 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +TEST(korablev_v_rect_int, test_pipeline_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 1000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} + +TEST(korablev_v_rect_int, test_task_run) { + boost::mpi::communicator world; + double a = 0.0; + double b = 1.0; + int n = 1000000; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(&a)); + taskDataPar->inputs.push_back(reinterpret_cast(&b)); + taskDataPar->inputs.push_back(reinterpret_cast(&n)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + testMpiTaskParallel->set_function([](double x) { return x * x; }); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto 
perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + double exact = 1.0 / 3.0; + EXPECT_NEAR(output, exact, 1e-4); + } +} \ No newline at end of file diff --git a/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp b/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp new file mode 100644 index 00000000000..3416d8ed454 --- /dev/null +++ b/tasks/mpi/korablev_v_rect_int_mpi/src/ops_mpi.cpp @@ -0,0 +1,126 @@ +#include "mpi/korablev_v_rect_int_mpi/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::run() { + internal_order_test(); + result_ = integrate(func_, a_, b_, n_); + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = result_; + return true; +} + +double korablev_v_rect_int_mpi::RectangularIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double h = (b - a) / n; + double sum = 0.0; + + for (int i = 0; i < n; ++i) { + double x = a + i * h; + sum += f(x) * h; + } + + return sum; +} + +void korablev_v_rect_int_mpi::RectangularIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::pre_processing() { + internal_order_test(); + + if (world.rank() == 0) { + auto* tmp_ptr_a = reinterpret_cast(taskData->inputs[0]); + auto* tmp_ptr_b = reinterpret_cast(taskData->inputs[1]); + auto* tmp_ptr_n = reinterpret_cast(taskData->inputs[2]); + + a_ = *tmp_ptr_a; + b_ = *tmp_ptr_b; + n_ = *tmp_ptr_n; + } + + broadcast(world, a_, 0); + broadcast(world, b_, 0); + broadcast(world, n_, 0); + + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::run() { + internal_order_test(); + double local_result_{}; + local_result_ = parallel_integrate(func_, a_, b_, n_); + reduce(world, local_result_, global_result_, std::plus<>(), 0); + return true; +} + +bool korablev_v_rect_int_mpi::RectangularIntegrationParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + *reinterpret_cast(taskData->outputs[0]) = global_result_; + } + return true; +} + +double korablev_v_rect_int_mpi::RectangularIntegrationParallel::parallel_integrate( + const std::function& f, double a, double b, int n) { + int rank = world.rank(); + int size = world.size(); + + double h = (b - a) / n; + double local_sum = 0.0; + + for (int i = rank; i < n; i += size) { + double x = a + i * h; + local_sum += f(x) * h; 
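+    // Cyclic (round-robin) partitioning: rank r evaluates indices r, r + size,
+    // r + 2 * size, ..., so all n sub-rectangles are covered exactly once even
+    // when size does not divide n; reduce() then sums the partial results.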
+ } + + return local_sum; +} + +void korablev_v_rect_int_mpi::RectangularIntegrationParallel::set_function(const std::function& func) { + func_ = func; +} diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..b034cfd33ab --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp @@ -0,0 +1,251 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include +#include +#include + +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +TEST(max_elements_in_rows_of_matrix_mpi, Test_1_const__matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 4; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_2_random_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 10; // not const, because reinterpret_cast does not work with const + int size_rows = 20; + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = korobeinikov_a_test_task_mpi::getRandomVector(count_rows * size_rows); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + 
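+  // The tasks in this PR follow the same four-phase protocol: validation() ->
+  // pre_processing() -> run() -> post_processing(). Every rank must enter each
+  // phase (run() contains MPI collectives), which is why only the TaskData
+  // setup above is restricted to rank 0.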
korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_3_false_validation) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 10; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_4_empty_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 0; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + 
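+    // The sequential task serves as the oracle for the randomly generated
+    // matrix: both implementations read the same global_matrix buffer created
+    // once on rank 0, so the element-wise comparison below stays deterministic
+    // within a run even though the data is random.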
taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} + +TEST(max_elements_in_rows_of_matrix_mpi, Test_5_one_row_matrix) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 1; // not const, because reinterpret_cast does not work with const + std::vector global_matrix{1, 3, 2}; + std::vector mpi_res(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + korobeinikov_a_test_task_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(right_answer.data())); + taskDataSeq->outputs_count.emplace_back(right_answer.size()); + + // Create Task + korobeinikov_a_test_task_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(right_answer, mpi_res); + } +} \ No newline at end of file diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp new file mode 100644 index 00000000000..fad4ba17edb --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp @@ -0,0 +1,52 @@ +// Copyright 2024 Korobeinikov Arseny +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korobeinikov_a_test_task_mpi { + +std::vector getRandomVector(int sz); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int count_rows; + int size_rows; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : 
Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + std::vector res; + int count_rows; + int size_rows; + int num_use_proc; + boost::mpi::communicator world; +}; + +} // namespace korobeinikov_a_test_task_mpi \ No newline at end of file diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..96e90a1c879 --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp @@ -0,0 +1,100 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +TEST(mpi_korobeinikov_a_max_elements_in_rows_of_matrix_perf_test, test_pipeline_run) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 100; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + std::vector right_answer(count_rows, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(count_rows * 500000, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(mpi_res, right_answer); + } +} + +TEST(mpi_korobeinikov_a_max_elements_in_rows_of_matrix_perf_test, test_task_run) { + boost::mpi::communicator world; + + // Create data + + int count_rows = 200; // not const, because reinterpret_cast does not work with const + std::vector global_matrix; + std::vector mpi_res(count_rows, 0); + std::vector right_answer(count_rows, 1); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + global_matrix = std::vector(count_rows * 500000, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataPar->inputs_count.emplace_back(global_matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataPar->inputs_count.emplace_back(1); + + taskDataPar->outputs.emplace_back(reinterpret_cast(mpi_res.data())); + taskDataPar->outputs_count.emplace_back(mpi_res.size()); + } + + auto testMpiTaskParallel = 
std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(mpi_res, right_answer); + } +} diff --git a/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp new file mode 100644 index 00000000000..2dd94fefb7d --- /dev/null +++ b/tasks/mpi/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_mpi_korobeinikov.cpp @@ -0,0 +1,192 @@ +// Copyright 2024 Korobeinikov Arseny +#include "mpi/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_mpi_korobeinikov.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector korobeinikov_a_test_task_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + + count_rows = (int)*taskData->inputs[1]; + if (count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + res = std::vector(count_rows, 0); + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + res[i] = *std::max_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + unsigned int delta = 0; + + if (world.rank() == 0) { + count_rows = (int)*taskData->inputs[1]; + if 
(count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + if (count_rows != 0) { + num_use_proc = std::min(world.size(), count_rows * size_rows); + } else { + num_use_proc = world.size(); + } + delta = taskData->inputs_count[0] / num_use_proc; + } + broadcast(world, delta, 0); + broadcast(world, count_rows, 0); + if (count_rows == 0) { + return true; + } + broadcast(world, size_rows, 0); + broadcast(world, num_use_proc, 0); + + if (world.rank() == 0) { + // Init vectors + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + + for (int proc = 1; proc < num_use_proc - 1; proc++) { + world.send(proc, 0, input_.data() + proc * delta, delta); + } + if (num_use_proc != 1) { + int proc = num_use_proc - 1; + world.send(proc, 0, input_.data() + proc * delta, delta + taskData->inputs_count[0] % num_use_proc); + } + } + + if (world.rank() == 0) { + local_input_ = std::vector(delta); + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } else { + if (world.rank() == num_use_proc - 1 && num_use_proc != 0) { + local_input_ = std::vector(delta + (count_rows * size_rows) % num_use_proc); + world.recv(0, 0, local_input_.data(), delta + (count_rows * size_rows) % num_use_proc); + } else { + if (world.rank() < num_use_proc) { + local_input_ = std::vector(delta); + world.recv(0, 0, local_input_.data(), delta); + } + } + } + + res = std::vector(count_rows, 0); + + size_t default_local_size = 0; + if (world.rank() == 0) { + default_local_size = local_input_.size(); + } + broadcast(world, default_local_size, 0); + + if (world.rank() < num_use_proc) { + unsigned int ind = (world.rank() * default_local_size) / size_rows; + for (unsigned int i = 0; i < ind; ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + + unsigned int near_end = std::min(local_input_.size(), size_rows - (world.rank() * default_local_size) % size_rows); + int local_res; + + local_res = *std::max_element(local_input_.begin(), local_input_.begin() + near_end); + reduce(world, local_res, res[ind], boost::mpi::maximum(), 0); + ++ind; + + unsigned int k = 0; + while (local_input_.begin() + near_end + k * size_rows < local_input_.end()) { + local_res = + *std::max_element(local_input_.begin() + near_end + k * size_rows, + std::min(local_input_.end(), local_input_.begin() + near_end + (k + 1) * size_rows)); + reduce(world, local_res, res[ind], boost::mpi::maximum(), 0); + ++k; + ++ind; + } + + for (unsigned int i = ind; i < res.size(); ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + } else { + for (unsigned int i = 0; i < res.size(); ++i) { + reduce(world, INT_MIN, res[i], boost::mpi::maximum(), 0); + } + } + return true; +} + +bool korobeinikov_a_test_task_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + } + return true; +} \ No newline at end of file diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..1d1f282d360 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,373 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include 
"mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp" + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_10x10_matrix) { + boost::mpi::communicator world; + const int count_rows = 10; + const int count_columns = 10; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x100_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 100; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential 
testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_100x500_matrix) { + boost::mpi::communicator world; + const int count_rows = 100; + const int count_columns = 500; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, find_min_val_in_row_5000x5000_matrix) { + boost::mpi::communicator world; + const int count_rows = 5000; + const int count_columns = 5000; + + std::vector> global_matrix; + std::vector global_min(count_rows, INT_MAX); + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_matrix = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns); + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + taskDataPar->inputs_count = {count_rows, count_columns}; + + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_min(count_rows, INT_MAX); + std::shared_ptr taskDataSeq = std::make_shared(); + + for (unsigned int i = 0; i < global_matrix.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + } + 
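+    // korovin's tasks use a row-pointer layout (one inputs[] entry per row plus
+    // inputs_count = {rows, cols}), unlike the flat single-buffer layout of the
+    // kolokolova and korobeinikov tasks above.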
taskDataSeq->inputs_count = {count_rows, count_columns}; + + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + for (int i = 0; i < count_rows; i++) { + ASSERT_EQ(global_min[i], INT_MIN); + } + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_input_empty_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_output_empty_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_less_two_cols_100x100_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_0x10_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 0; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_find_min_val_in_row_10x10_cols_0_matrix) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(0); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} + +TEST(korovin_n_min_val_row_matrix_mpi, validation_fails_on_invalid_output_size) { + boost::mpi::communicator world; + if (world.rank() == 0) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows - 1, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testMpiTaskSequential.validation(), false); + } +} diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp b/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..863a412fa35 --- /dev/null +++ b/tasks/mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace korovin_n_min_val_row_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); + static std::vector> generate_rnd_matrix(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector> local_input_; + std::vector res_; + 
+  boost::mpi::communicator world;
+};
+
+}  // namespace korovin_n_min_val_row_matrix_mpi
\ No newline at end of file
diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp
new file mode 100644
index 00000000000..435f0ecba63
--- /dev/null
+++ b/tasks/mpi/korovin_n_min_val_row_matrix/perf_tests/main.cpp
@@ -0,0 +1,86 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <climits>
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp"
+
+TEST(korovin_n_min_val_row_matrix_mpi, test_pipeline_run_min) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  int count_rows;
+  int count_columns;
+
+  if (world.rank() == 0) {
+    count_rows = 5000;
+    count_columns = 5000;
+    global_matrix =
+        korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns);
+    global_min.resize(count_rows, INT_MAX);
+
+    for (auto& row : global_matrix) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(row.data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  if (world.rank() == 0) {
+    for (size_t i = 0; i < global_min.size(); ++i) {
+      ASSERT_EQ(global_min[i], INT_MIN);
+    }
+  }
+}
+
+TEST(korovin_n_min_val_row_matrix_mpi_perf_test, test_task_run_min) {
+  boost::mpi::communicator world;
+  std::vector<std::vector<int>> global_matrix;
+  std::vector<int> global_min;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  int count_rows;
+  int count_columns;
+
+  if (world.rank() == 0) {
+    count_rows = 5000;
+    count_columns = 5000;
+    global_matrix =
+        korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(count_rows, count_columns);
+    global_min.resize(count_rows, INT_MAX);
+
+    for (auto& row : global_matrix) {
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(row.data()));
+    }
+    taskDataPar->inputs_count.emplace_back(count_rows);
+    taskDataPar->inputs_count.emplace_back(count_columns);
+
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  auto testMpiTaskParallel = std::make_shared<korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  if (world.rank() == 0) {
+    for (size_t i = 0; i < global_min.size(); ++i) {
+      ASSERT_EQ(global_min[i], INT_MIN);
+    }
+  }
+}
diff --git a/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp b/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..832b8910d91
--- /dev/null
+++ b/tasks/mpi/korovin_n_min_val_row_matrix/src/ops_mpi.cpp
@@ -0,0 +1,182 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/korovin_n_min_val_row_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <climits>
+#include <cstdlib>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
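+// TaskData convention used by both tasks below: inputs[i] points at row i of the matrix,
+// inputs_count = {rows, cols}, and outputs[0] is a buffer of `rows` ints for the row minima.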
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+
+  int rows = taskData->inputs_count[0];
+  int cols = taskData->inputs_count[1];
+
+  input_.resize(rows, std::vector<int>(cols));
+
+  for (int i = 0; i < rows; i++) {
+    int* input_matrix = reinterpret_cast<int*>(taskData->inputs[i]);
+    for (int j = 0; j < cols; j++) {
+      input_[i][j] = input_matrix[j];
+    }
+  }
+  res_.resize(rows);
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+          (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+          (taskData->outputs_count[0] == taskData->inputs_count[0]));
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+
+  for (size_t i = 0; i < input_.size(); i++) {
+    int min_val = input_[i][0];
+    for (size_t j = 1; j < input_[i].size(); j++) {
+      if (input_[i][j] < min_val) {
+        min_val = input_[i][j];
+      }
+    }
+    res_[i] = min_val;
+  }
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+
+  int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
+  for (size_t i = 0; i < res_.size(); i++) {
+    output_matrix[i] = res_[i];
+  }
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  int rows = 0;
+  int cols = 0;
+
+  if (world.rank() == 0) {
+    rows = taskData->inputs_count[0];
+    cols = taskData->inputs_count[1];
+  }
+
+  broadcast(world, rows, 0);
+  broadcast(world, cols, 0);
+
+  int delta = rows / world.size();
+  int extra = rows % world.size();
+
+  if (world.rank() == 0) {
+    input_.resize(rows, std::vector<int>(cols));
+    for (int i = 0; i < rows; i++) {
+      int* input_matrix = reinterpret_cast<int*>(taskData->inputs[i]);
+      input_[i].assign(input_matrix, input_matrix + cols);
+    }
+
+    for (int proc = 1; proc < world.size(); proc++) {
+      int start_row = proc * delta + std::min(proc, extra);
+      int num_rows = delta + (proc < extra ? 1 : 0);
+      for (int r = start_row; r < start_row + num_rows; r++) {
+        world.send(proc, 0, input_[r].data(), cols);
+      }
+    }
+  }
+
+  int local_rows = delta + (world.rank() < extra ? 1 : 0);
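+  // Worked example of the distribution above: rows = 10 on 4 processes gives delta = 2 and
+  // extra = 2, so ranks 0..3 own 3, 3, 2 and 2 rows starting at rows 0, 3, 6 and 8 respectively.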
+
+  local_input_.resize(local_rows, std::vector<int>(cols));
+
+  if (world.rank() == 0) {
+    std::copy(input_.begin(), input_.begin() + local_rows, local_input_.begin());
+  } else {
+    for (int r = 0; r < local_rows; r++) {
+      world.recv(0, 0, local_input_[r].data(), cols);
+    }
+  }
+
+  res_.resize(rows);
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    return ((!taskData->inputs.empty() && !taskData->outputs.empty()) &&
+            (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) &&
+            (taskData->outputs_count[0] == taskData->inputs_count[0]));
+  }
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+
+  std::vector<int> local_mins(local_input_.size(), INT_MAX);
+  for (size_t i = 0; i < local_input_.size(); i++) {
+    for (const auto& val : local_input_[i]) {
+      local_mins[i] = std::min(local_mins[i], val);
+    }
+  }
+
+  if (world.rank() == 0) {
+    int current_ind = 0;
+    std::copy(local_mins.begin(), local_mins.end(), res_.begin());
+    current_ind += local_mins.size();
+    for (int proc = 1; proc < world.size(); proc++) {
+      int loc_size;
+      world.recv(proc, 0, &loc_size, 1);
+      std::vector<int> loc_res(loc_size);
+      world.recv(proc, 0, loc_res.data(), loc_size);
+      std::copy(loc_res.begin(), loc_res.end(), res_.data() + current_ind);
+      current_ind += loc_res.size();
+    }
+  } else {
+    int loc_res_size = static_cast<int>(local_mins.size());
+    world.send(0, 0, &loc_res_size, 1);
+    world.send(0, 0, local_mins.data(), loc_res_size);
+  }
+  return true;
+}
+
+bool korovin_n_min_val_row_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    int* output_matrix = reinterpret_cast<int*>(taskData->outputs[0]);
+    std::copy(res_.begin(), res_.end(), output_matrix);
+  }
+
+  return true;
+}
+
+std::vector<int> korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_vector(int size, int lower_bound,
+                                                                                              int upper_bound) {
+  std::vector<int> v1(size);
+  for (auto& num : v1) {
+    num = lower_bound + std::rand() % (upper_bound - lower_bound + 1);
+  }
+  return v1;
+}
+
+std::vector<std::vector<int>> korovin_n_min_val_row_matrix_mpi::TestMPITaskSequential::generate_rnd_matrix(int rows,
+                                                                                                           int cols) {
+  std::vector<std::vector<int>> matrix1(rows, std::vector<int>(cols));
+  for (auto& row : matrix1) {
+    row = generate_rnd_vector(cols, -1000, 1000);
+    int rnd_index = std::rand() % cols;
+    row[rnd_index] = INT_MIN;
+  }
+  return matrix1;
+}
diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp
new file mode 100644
index 00000000000..99e411dada6
--- /dev/null
+++ b/tasks/mpi/krylov_m_num_of_alternations_signs/func_tests/main.cpp
@@ -0,0 +1,196 @@
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <boost/mpi/communicator.hpp>
+#include <cstdint>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <tuple>
+#include <vector>
+
+#include "../include/ops_mpi.hpp"
+
+#define EXPAND(x) x
+
+#define T_DEF(macro, ...)
\ + EXPAND(macro(int16_t, __VA_ARGS__)) \ + EXPAND(macro(int32_t, __VA_ARGS__)) \ + EXPAND(macro(int64_t, __VA_ARGS__)) \ + EXPAND(macro(float, __VA_ARGS__)) + +using CountType = uint32_t; + +class krylov_m_num_of_alternations_signs_mpi_test : public ::testing::Test { + protected: + template + void run_generic_test(const boost::mpi::communicator &world, const CountType count, std::vector &in, + const std::vector &shift_indices, CountType &out, + std::shared_ptr &taskDataPar) { + if (world.rank() == 0) { + in = std::vector(count); + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + // + krylov_m_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel( + taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + } + + template + std::vector get_random_vector(T size, T min, T max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::uniform_int_distribution<> distr(min, max); // inclusive + + std::vector v(size); + std::transform(v.cbegin(), v.cend(), v.begin(), [&](auto) { return distr(gen); }); + + return v; + } + + // + + template + void T_fails_validation() { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + krylov_m_num_of_alternations_signs_mpi::TestMPITaskParallel testMpiTaskParallel( + taskDataPar); + + if (world.rank() == 0) { + taskDataPar->outputs_count.emplace_back(0); + EXPECT_FALSE(testMpiTaskParallel.validation()); + } else { + EXPECT_TRUE(testMpiTaskParallel.validation()); + } + } +}; + +// clang-format off +using PrecalcOpts = std::tuple< + CountType /* count */, + std::vector /* shift_indices */, + CountType /* num */ +>; +// clang-format on +class krylov_m_num_of_alternations_signs_mpi_test_precalc : public krylov_m_num_of_alternations_signs_mpi_test, + public ::testing::WithParamInterface { + protected: + template + void PT_yields_correct_result() { + boost::mpi::communicator world; + const auto &[count, shift_indices, num] = GetParam(); + + std::vector in; + CountType out = 0; + // + std::shared_ptr taskDataPar = std::make_shared(); + + run_generic_test(world, count, in, shift_indices, out, taskDataPar); + + if (world.rank() == 0) { + ASSERT_EQ(out, num); + } + } +}; + +class krylov_m_num_of_alternations_signs_mpi_test_random : public krylov_m_num_of_alternations_signs_mpi_test, + public ::testing::WithParamInterface { + protected: + template + void PT_yields_correct_result_random() { + boost::mpi::communicator world; + const auto count = GetParam(); + + std::vector in; + CountType out = 0; + std::vector shift_indices; + // + if (world.rank() == 0) { + const auto shift_indices_count = get_random_vector(1, 0, count - 1)[0]; + shift_indices = get_random_vector(shift_indices_count, 0, count - 1); + } + // + std::shared_ptr taskDataPar = std::make_shared(); + + run_generic_test(world, count, in, shift_indices, out, taskDataPar); + + if (world.rank() == 0) { + CountType reference_num = 0; + + // + std::shared_ptr taskDataSeq = std::make_shared(*taskDataPar); + taskDataSeq->outputs[0] = reinterpret_cast(&reference_num); + + // + krylov_m_num_of_alternations_signs_mpi::TestMPITaskSequential testMpiTaskSequential( + 
taskDataSeq); + ASSERT_TRUE(testMpiTaskSequential.validation()); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(out, reference_num); + } + } +}; + +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST(TypeParam, Fixture, TestName, ...) \ + TEST_P(Fixture, TestName##__##TypeParam) { PT_##TestName(__VA_ARGS__); } +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(Fixture, TestName, ...) \ + T_DEF(DECL_TYPE_VALUE_PARAMETRIZED_TEST, Fixture, TestName, __VA_ARGS__) + +#define DECL_TYPE_PARAMETRIZED_TEST(TypeParam, Fixture, TestName, ...) \ + TEST_F(Fixture, TestName##__##TypeParam) { T_##TestName(__VA_ARGS__); } +#define DECL_TYPE_PARAMETRIZED_TEST_ALL(Fixture, TestName, ...) \ + T_DEF(DECL_TYPE_PARAMETRIZED_TEST, Fixture, TestName, __VA_ARGS__) + +INSTANTIATE_TEST_SUITE_P( + krylov_m_num_of_alternations_signs_mpi_test, krylov_m_num_of_alternations_signs_mpi_test_precalc, + // clang-format off + ::testing::Values( + std::make_tuple(129, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(129, std::vector{0, /* . */}, 1), + std::make_tuple(129, std::vector{/* . */ 128}, 1), + std::make_tuple(129, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */}, 0), + std::make_tuple(128, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(128, std::vector{0, /* . */}, 1), + std::make_tuple(128, std::vector{/* . */ 127}, 1), + std::make_tuple(128, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */ 42, /* . */ 84, /* . */}, 4), + std::make_tuple(128, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . */ 2 /* . */}, 2), + std::make_tuple(1, std::vector{/* . */}, 0), + std::make_tuple(1, std::vector{0}, 0), + std::make_tuple(0, std::vector{/* . 
*/}, 0) + ) + // clang-format on +); + +INSTANTIATE_TEST_SUITE_P(krylov_m_num_of_alternations_signs_mpi_test, + krylov_m_num_of_alternations_signs_mpi_test_random, + ::testing::Values(1, 2, 3, 4, 5, 128, 129)); + +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test_precalc, yields_correct_result); +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test_random, yields_correct_result_random); +DECL_TYPE_PARAMETRIZED_TEST_ALL(krylov_m_num_of_alternations_signs_mpi_test, fails_validation); \ No newline at end of file diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp new file mode 100644 index 00000000000..516dbd7b6a6 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/include/ops_mpi.hpp @@ -0,0 +1,207 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace krylov_m_num_of_alternations_signs_mpi { + +using namespace std::chrono_literals; + +template +class TestMPITaskParallel : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + static bool distribute(std::vector& distribution, std::vector& displacement, int amount, int world_size) { + const int average = amount / world_size; + if (average < world_size) { + distribution.resize(world_size, 0); + distribution[0] = amount; + displacement.resize(world_size, 0); + return false; + } + + distribution.resize(world_size, average); + displacement.resize(world_size); + + const int leftover = amount % world_size; + + int pos = 0; + for (int i = 0; i < world_size; i++) { + if (i < leftover) { + distribution[i]++; + } + displacement[i] = pos; + pos += distribution[i]; + } + + return true; + } + + static int calc_distribution(int world_rank, int amount, int world_size) { + const int average = amount / world_size; + const int leftover = amount % world_size; + if (average < world_size && world_rank != 0) { + return 0; + } + return average + ((world_rank < leftover) ? 
1 : 0); + } + + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + res = 0; + + unsigned int amount = 0; + if (world.rank() == 0) { + amount = taskData->inputs_count[0]; + } + boost::mpi::broadcast(world, amount, 0); + + if (world.rank() == 0) { + std::vector distribution; + std::vector displacement; + if (distribute(distribution, displacement, amount, world.size())) { + std::transform(distribution.cbegin(), distribution.cend() - 1, distribution.begin(), + [](auto x) { return x + 1; }); + } + + partial_input_.resize(distribution[0]); + + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + boost::mpi::scatterv(world, in_p, distribution, displacement, partial_input_.data(), distribution[0], 0); + } else { + int distribution = calc_distribution(world.rank(), amount, world.size()); + if (distribution > 0) { + if (world.rank() != world.size() - 1) { + distribution++; + } + partial_input_.resize(distribution); + boost::mpi::scatterv(world, partial_input_.data(), distribution, 0); + } + } + + return true; + } + + bool validation() override { + internal_order_test(); + + return world.rank() != 0 || (taskData->outputs_count[0] == 1); + } + + bool run() override { + internal_order_test(); + + CountType partial_res = 0; + + const std::size_t size = partial_input_.size(); + if (size > 0) { + bool neg = partial_input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = partial_input_[i] < 0; + if (neg == cur) { + continue; + } + partial_res++; + neg = cur; + } + } + + boost::mpi::reduce(world, partial_res, res, std::plus(), 0); + + return true; + } + + bool post_processing() override { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + + return true; + } + + private: + std::vector partial_input_{}; + CountType res{}; + boost::mpi::communicator world; +}; + +template +class TestMPITaskSequential : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + const auto count = taskData->inputs_count[0]; + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + input_.resize(count); + std::copy(in_p, in_p + count, std::begin(input_)); + // + res = 0; + + return true; + } + + bool validation() override { + internal_order_test(); + + return taskData->outputs_count[0] == 1; + } + + bool run() override { + internal_order_test(); + + const std::size_t size = input_.size(); + if (size > 1) { + bool neg = input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = input_[i] < 0; + if (neg == cur) { + continue; + } + res++; + neg = cur; + } + } + + return true; + } + + bool post_processing() override { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + + return true; + } + + private: + std::vector input_{}; + CountType res{}; +}; + +} // namespace krylov_m_num_of_alternations_signs_mpi diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..13cc214c998 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/perf_tests/main.cpp @@ 
-0,0 +1,82 @@ +#include + +#include +#include + +#include "../include/ops_mpi.hpp" +#include "core/perf/include/perf.hpp" + +class krylov_m_num_of_alternations_signs_mpi_perf_test : public ::testing::Test { + using ElementType = int32_t; + using CountType = uint32_t; + // + const CountType in_count = 128; + const std::vector shift_indices{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}; + // + const CountType num = 7; + + protected: + void run_perf_test( + const std::function &perfAttr, + const std::shared_ptr &perfResults)> &runner) { + boost::mpi::communicator world; + + std::shared_ptr taskDataPar = std::make_shared(); + + // + std::vector in; + CountType out = 0; + if (world.rank() == 0) { + in = std::vector(in_count); + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + taskDataPar->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataPar->inputs_count.emplace_back(in.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(&out)); + taskDataPar->outputs_count.emplace_back(1); + } + + // + auto testMpiTaskParallel = + std::make_shared>( + taskDataPar); + + // + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + ppc::core::Perf perfAnalyzer(testMpiTaskParallel); + runner(perfAnalyzer, perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + EXPECT_EQ(num, out); + } + } +}; + +TEST_F(krylov_m_num_of_alternations_signs_mpi_perf_test, test_pipeline_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST_F(krylov_m_num_of_alternations_signs_mpi_perf_test, test_task_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp b/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp new file mode 100644 index 00000000000..a9cf15bbf66 --- /dev/null +++ b/tasks/mpi/krylov_m_num_of_alternations_signs/src/ops_mpi.cpp @@ -0,0 +1 @@ +#include "../include/ops_mpi.hpp" diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp new file mode 100644 index 00000000000..588a41c4553 --- /dev/null +++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp @@ -0,0 +1,260 @@ +#include + +#include +#include +#include +#include + +#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_1) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = -1.45; + double b = 0.0; + double epsilon = 0.000001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + 
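+    // The integrand is hard-coded to f(x) = x * x (function_square in ops_mpi.hpp); the exact
+    // integral over [-1.45, 0] is 1.45^3 / 3 ~= 1.0162, and the parallel result is checked
+    // against the sequential reference with a 1e-1 tolerance below.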
taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_2) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + double a = 0.0; + double b = 1.45; + double epsilon = 0.000001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_3) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -10.0; + double b = 65.0; + double epsilon = 0.000001; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = 
std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_4) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + + double a = -5.0; + double b = 5.0; + double epsilon = 0.000001; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_TRUE(testMpiTaskParallel.validation()); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, Test_Integration_mpi_random) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + std::shared_ptr taskDataPar = std::make_shared(); + std::random_device dev; + std::mt19937 gen(dev()); + double a = (gen() % 100) / 100.0; + double b = (gen() % 100) / 100.0; + if (a == b) b += 0.1; + double epsilon = 0.0001; + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + 
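+    // Reference run: the sequential task re-integrates the same random (a, b) pair so the
+    // parallel result can be validated against it within the 1e-1 ASSERT_NEAR tolerance.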
lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_NEAR(reference_result[0], global_result[0], 1e-1); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_InputSizeLessThan3) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + double result = 0.0; + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_InputSizeMoreThan3) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + double extra_input = 5.0; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&extra_input)); + double result = 0.0; + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_OutputSizeMoreThan1) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + double result1 = 0.0; + double result2 = 0.0; + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result1)); + taskDataMPIParallel->outputs.emplace_back(reinterpret_cast(&result2)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} + +TEST(lysov_i_integration_the_trapezoid_method_mpi, TaskMpi_OutputSizeLessThan1) { + std::shared_ptr taskDataMPIParallel = std::make_shared(); + boost::mpi::communicator world; + if (world.rank() == 0) { + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&a)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&b)); + taskDataMPIParallel->inputs.emplace_back(reinterpret_cast(&epsilon)); + lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel testTaskMPIParallel(taskDataMPIParallel); + ASSERT_EQ(testTaskMPIParallel.validation(), false); + } +} diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp new file mode 100644 index 00000000000..006a91b740e --- /dev/null +++ 
b/tasks/mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp @@ -0,0 +1,59 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace lysov_i_integration_the_trapezoid_method_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a = 0.0; + double b = 0.0; + double h = 0.0; + int cnt_of_splits = 0; + double epsilon; + double static function_square(double x) { return x * x; } + + private: + std::vector input_; + double res{}; + std::string ops; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a = 0.0; + double b = 0.0; + double h = 0.0; + int cnt_of_splits = 0; + double local_a; + int local_cnt_of_splits; + static double function_square(double x) { return x * x; } + + private: + std::vector input_, local_input_; + double res; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace lysov_i_integration_the_trapezoid_method_mpi \ No newline at end of file diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..5dd51ff1716 --- /dev/null +++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,83 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_mpi, test_integration_pipeline_run) { + boost::mpi::communicator world; + std::vector global_result(1, 0.0); + double a = -1.45; + double b = 1.45; + double epsilon = 0.0000001; + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(&a)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&b)); + taskDataPar->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_result.data())); + } + auto testMpiTaskParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + std::vector reference_result(1, 0.0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_result.data())); + 
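+    // After the timed pipeline run, rank 0 recomputes the integral sequentially and uses it
+    // as the reference value for the ASSERT_NEAR check below.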
+    lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq);
+    ASSERT_EQ(testMpiTaskSequential.validation(), true);
+    testMpiTaskSequential.pre_processing();
+    testMpiTaskSequential.run();
+    testMpiTaskSequential.post_processing();
+    ASSERT_NEAR(reference_result[0], global_result[0], 1e-1);
+  }
+}
+
+TEST(lysov_i_integration_the_trapezoid_method_mpi, test_integration_task_run) {
+  boost::mpi::communicator world;
+  std::vector<double> global_result(1, 0.0);
+  double a = -1.45;
+  double b = 1.45;
+  double epsilon = 0.0000001;
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&a));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&b));
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(&epsilon));
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_result.data()));
+  }
+  auto testMpiTaskParallel =
+      std::make_shared<lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    double reference_result = 2.0;
+    ASSERT_NEAR(reference_result, global_result[0], 1e-1);
+  }
+}
diff --git a/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp b/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp
new file mode 100644
index 00000000000..08f8c9c6734
--- /dev/null
+++ b/tasks/mpi/lysov_i_integration_the_trapezoid_method/src/ops_mpi.cpp
@@ -0,0 +1,99 @@
+#include "mpi/lysov_i_integration_the_trapezoid_method/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <cstddef>
+#include <functional>
+#include <string>
+#include <vector>
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1);
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  a = *reinterpret_cast<double*>(taskData->inputs[0]);
+  b = *reinterpret_cast<double*>(taskData->inputs[1]);
+  epsilon = *reinterpret_cast<double*>(taskData->inputs[2]);
+  cnt_of_splits = static_cast<int>(std::abs(b - a) / epsilon);
+  h = (b - a) / cnt_of_splits;
+  input_.resize(cnt_of_splits + 1);
+  for (int i = 0; i <= cnt_of_splits; ++i) {
+    double x = a + i * h;
+    input_[i] = function_square(x);
+  }
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  double result = 0.0;
+  result += 0.5 * (function_square(a) + function_square(b));
+  for (int i = 1; i < cnt_of_splits; ++i) {
+    double x = a + i * h;
+    result += function_square(x);
+  }
+  result *= h;
+  res = result;
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<double*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
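+// Parallel scheme: rank 0 reads (a, b, epsilon), derives n = |b - a| / epsilon subintervals,
+// and broadcasts a, b and n. Each rank applies the composite trapezoid rule
+//   T_local = h * (f(x_s) / 2 + f(x_{s+1}) + ... + f(x_{s+k-1}) + f(x_{s+k}) / 2)
+// to its own block of k subintervals, and the partial sums are combined on rank 0 with
+// boost::mpi::reduce(std::plus). Shared block boundaries carry weight 1/2 from each side.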
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    if ((taskData->inputs.size() != 3) || (taskData->outputs.size() != 1)) {
+      return false;
+    }
+    double epsilon = *reinterpret_cast<double*>(taskData->inputs[2]);
+    if (epsilon <= 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    a = *reinterpret_cast<double*>(taskData->inputs[0]);
+    b = *reinterpret_cast<double*>(taskData->inputs[1]);
+    double epsilon = *reinterpret_cast<double*>(taskData->inputs[2]);
+    cnt_of_splits = static_cast<int>(std::abs(b - a) / epsilon);
+  }
+
+  boost::mpi::broadcast(world, a, 0);
+  boost::mpi::broadcast(world, b, 0);
+  boost::mpi::broadcast(world, cnt_of_splits, 0);
+
+  h = (b - a) / cnt_of_splits;
+  // Split the n subintervals into contiguous blocks; the first n % size ranks take one extra.
+  const int base_cnt = cnt_of_splits / world.size();
+  const int leftover = cnt_of_splits % world.size();
+  local_cnt_of_splits = base_cnt + (world.rank() < leftover ? 1 : 0);
+  // The block offset must account for the extra split owned by each lower-ranked process,
+  // otherwise blocks overlap whenever world.size() does not divide cnt_of_splits.
+  const int start_split = world.rank() * base_cnt + std::min(world.rank(), leftover);
+  local_a = a + start_split * h;
+  local_input_.resize(local_cnt_of_splits + 1);
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  double local_res = 0.0;
+  local_res += 0.5 * (function_square(local_a) + function_square(local_a + local_cnt_of_splits * h));
+  // Interior nodes of this rank's block enter with weight 1; i starts at 1 so the left
+  // endpoint is not counted twice on top of its 0.5 weight above.
+  for (int i = 1; i < local_cnt_of_splits; i++) {
+    double x = local_a + i * h;
+    local_res += function_square(x);
+  }
+  local_res *= h;
+  boost::mpi::reduce(world, local_res, res, std::plus<>(), 0);
+  return true;
+}
+
+bool lysov_i_integration_the_trapezoid_method_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<double*>(taskData->outputs[0]) = res;
+  }
+  return true;
+}
diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..796d40091ec
--- /dev/null
+++ b/tasks/mpi/muhina_m_min_of_vector_elements/func_tests/main.cpp
@@ -0,0 +1,203 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <random>
+#include <vector>
+
+#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp"
+
+std::vector<int> GetRandomVector(int sz, int min_value, int max_value) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = min_value + gen() % (max_value - min_value + 1);
+  }
+  return vec;
+}
+
+TEST(muhina_m_min_of_vector_elements, Test_Min) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int> global_min(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 120;
+    const int min_val = 0;
+    const int max_val = 100;
+    global_vec = GetRandomVector(count_size_vector, min_val, max_val);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_min.data()));
+    taskDataPar->outputs_count.emplace_back(global_min.size());
+  }
+
+  muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParallel(taskDataPar);
+  ASSERT_EQ(minOfVectorMPIParallel.validation(), true);
+  minOfVectorMPIParallel.pre_processing();
+  minOfVectorMPIParallel.run();
+  minOfVectorMPIParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int> reference_min(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_LargeVector) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 10000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_NegativeValues) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + const int min_val = -100; + const int max_val = -10; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + 
taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} + +TEST(muhina_m_min_of_vector_elements, Test_Min_RepeatingValues) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_min(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = {10, 10, 10, 10, 10, 10, 10, 10, 10, 10}; + global_vec.resize(count_size_vector, 10); + + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataPar->outputs_count.emplace_back(global_min.size()); + } + + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel minOfVectorMPIParalle(taskDataPar); + ASSERT_EQ(minOfVectorMPIParalle.validation(), true); + minOfVectorMPIParalle.pre_processing(); + minOfVectorMPIParalle.run(); + minOfVectorMPIParalle.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_min(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_min.data())); + taskDataSeq->outputs_count.emplace_back(reference_min.size()); + + // Create Task + muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential minOfVectorMPISequential(taskDataSeq); + ASSERT_EQ(minOfVectorMPISequential.validation(), true); + minOfVectorMPISequential.pre_processing(); + minOfVectorMPISequential.run(); + minOfVectorMPISequential.post_processing(); + + ASSERT_EQ(reference_min[0], global_min[0]); + } +} diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp b/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp new file mode 100644 index 00000000000..8772dc545ef --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace muhina_m_min_of_vector_elements_mpi { +int vectorMin(std::vector> v); + +class MinOfVectorMPISequential : public ppc::core::Task { + public: + explicit MinOfVectorMPISequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; + +class MinOfVectorMPIParallel : public ppc::core::Task { + public: + explicit MinOfVectorMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res_{}; + boost::mpi::communicator world_; +}; + +} // namespace 
muhina_m_min_of_vector_elements_mpi diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..53d1f87927a --- /dev/null +++ b/tasks/mpi/muhina_m_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,105 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + global_vec[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MinOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MinOfVectorMPIParallel->validation(), true); + MinOfVectorMPIParallel->pre_processing(); + MinOfVectorMPIParallel->run(); + MinOfVectorMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(-100, global_res[0]); + } +} + +TEST(muhina_m_min_of_vector_elements_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 10000000; + const int min_val = 0; + const int max_val = 100; + global_vec = GetRandomVector(count_size_vector, min_val, max_val); + global_vec[0] = -100; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto MinOfVectorMPIParallel = + std::make_shared(taskDataPar); + ASSERT_EQ(MinOfVectorMPIParallel->validation(), true); + MinOfVectorMPIParallel->pre_processing(); + MinOfVectorMPIParallel->run(); + MinOfVectorMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = 
std::make_shared<ppc::core::Perf>(MinOfVectorMPIParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(-100, global_res[0]);
+  }
+}
diff --git a/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp b/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp
new file mode 100644
index 00000000000..78112ee7361
--- /dev/null
+++ b/tasks/mpi/muhina_m_min_of_vector_elements/src/ops_mpi.cpp
@@ -0,0 +1,112 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/muhina_m_min_of_vector_elements/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <climits>
+#include <functional>
+#include <random>
+#include <string>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+int muhina_m_min_of_vector_elements_mpi::vectorMin(std::vector<int, std::allocator<int>> vect) {
+  int mini = vect[0];
+
+  for (size_t i = 1; i < vect.size(); i++) {
+    if (vect[i] < mini) {
+      mini = vect[i];
+    }
+  }
+  return mini;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::pre_processing() {
+  internal_order_test();
+  // Init vectors
+  input_ = std::vector<int>(taskData->inputs_count[0]);
+  auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->outputs_count[0] == 1;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::run() {
+  internal_order_test();
+  if (input_.empty()) {
+    // Handle the case when the input vector is empty
+    return true;
+  }
+  res_ = muhina_m_min_of_vector_elements_mpi::vectorMin(input_);
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPISequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::pre_processing() {
+  internal_order_test();
+  unsigned int delta = 0;
+  unsigned int remainder = 0;
+  if (world_.rank() == 0) {
+    delta = taskData->inputs_count[0] / world_.size();
+    remainder = taskData->inputs_count[0] % world_.size();
+  }
+  broadcast(world_, delta, 0);
+
+  if (world_.rank() == 0) {
+    // Init vectors
+    input_ = std::vector<int>(taskData->inputs_count[0]);
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]);
+    for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+      input_[i] = tmp_ptr[i];
+    }
+    // Rank 0 keeps the first delta + remainder elements, so the tail that does not divide
+    // evenly across the processes is not silently dropped.
+    for (int proc = 1; proc < world_.size(); proc++) {
+      world_.send(proc, 0, input_.data() + remainder + proc * delta, delta);
+    }
+    local_input_ = std::vector<int>(input_.begin(), input_.begin() + delta + remainder);
+  } else {
+    local_input_ = std::vector<int>(delta);
+    world_.recv(0, 0, local_input_.data(), delta);
+  }
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::validation() {
+  internal_order_test();
+  if (world_.rank() == 0) {
+    // Check count elements of output
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::run() {
+  internal_order_test();
+  // Every rank must take part in the reduction, so an empty local block contributes
+  // INT_MAX instead of skipping the collective call.
+  int local_min = local_input_.empty() ? INT_MAX : muhina_m_min_of_vector_elements_mpi::vectorMin(local_input_);
+
+  reduce(world_, local_min, res_, boost::mpi::minimum<int>(), 0);
+  return true;
+}
+
+bool muhina_m_min_of_vector_elements_mpi::MinOfVectorMPIParallel::post_processing() {
+  internal_order_test();
+  if (world_.rank() == 0) {
reinterpret_cast(taskData->outputs[0])[0] = res_; + } + return true; +} diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp new file mode 100644 index 00000000000..5a08a249f45 --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/func_tests/main.cpp @@ -0,0 +1,301 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp" + +static int offset = 0; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_mpi, can_scalar_multiply_vec_size_125) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + const int count_size_vector = 125; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, can_scalar_multiply_vec_size_300) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 300; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + 
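Review note on the muhina_m_min_of_vector_elements parallel `pre_processing` above: `delta` is computed as `inputs_count[0] / world_.size()` and only `world_.size() * delta` elements are ever distributed, so whenever the process count does not divide the vector length, the last `count % size` elements are never scanned. The perf tests mask this by planting the expected minimum at index 0. A minimal remainder-aware partition, assuming nothing beyond the standard library (`PartitionCounts` is an illustrative name, not part of this PR):

```cpp
#include <vector>

// Per-rank element counts for a block partition of `total` elements over
// `num_procs` ranks; the first `total % num_procs` ranks take one extra
// element, so every element is assigned exactly once.
std::vector<int> PartitionCounts(int total, int num_procs) {
  std::vector<int> counts(num_procs, total / num_procs);
  for (int i = 0; i < total % num_procs; ++i) {
    ++counts[i];
  }
  return counts;
}
```

The rezantseva and shvedova tasks later in this diff already use exactly this scheme (their `counts_` and `send_counts` vectors).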
taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataSeq->inputs_count.emplace_back(global_vec[0].size()); + taskDataSeq->inputs_count.emplace_back(global_vec[1].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_vectors_not_equal) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector + 5); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), false); + } + // Create Task +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_vectors_equal_true) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + } + // Create Task +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_vectorDotProduct_right) { + // Create data + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + ASSERT_EQ(58, 
rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2)); +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_5) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5, 6, 3}; + std::vector v2 = {4, 7, 8, 9, 5}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_3) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(58, res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_size_7) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {1, 2, 5, 14, 21, 16, 11}; + std::vector v2 = {4, 7, 8, 12, 31, 25, 9}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, check_mpi_run_right_empty) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = {0, 
0, 0}; + std::vector v2 = {0, 0, 0}; + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_vec = {v1, v2}; + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + if (world.rank() == 0) { + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2), res[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp b/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp new file mode 100644 index 00000000000..40945cf4f8b --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp @@ -0,0 +1,48 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace rezantseva_a_vector_dot_product_mpi { +int vectorDotProduct(const std::vector& v1, const std::vector& v2); + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_{}; + std::vector local_input1_{}, local_input2_{}; + std::vector counts_{}; + size_t num_processes_ = 0; + int res{}; + boost::mpi::communicator world; +}; + +} // namespace rezantseva_a_vector_dot_product_mpi \ No newline at end of file diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..fef7888a861 --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,110 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp" + +static int offset = 0; +const int count_size_vector = 49000000; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector> global_vec; + + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + + std::vector res(1, 0); + global_vec = {v1, v2}; + // Create TaskData + std::shared_ptr taskDataPar = 
std::make_shared(); + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + // Create and init perf results + auto perfResults = std::make_shared(); + int answer = rezantseva_a_vector_dot_product_mpi::vectorDotProduct(v1, v2); + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(answer, res[0]); + } +} + +TEST(rezantseva_a_vector_dot_product_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector> global_vec; + std::vector res(1, 0); + std::vector v1 = createRandomVector(count_size_vector); + std::vector v2 = createRandomVector(count_size_vector); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + global_vec = {v1, v2}; + + if (world.rank() == 0) { + for (size_t i = 0; i < global_vec.size(); i++) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec[i].data())); + } + taskDataPar->inputs_count.emplace_back(global_vec[0].size()); + taskDataPar->inputs_count.emplace_back(global_vec[1].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + // int answer = res[0]; + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(rezantseva_a_vector_dot_product_mpi::vectorDotProduct(global_vec[0], global_vec[1]), res[0]); + } +} diff --git a/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp b/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp new file mode 100644 index 00000000000..8f6acb58a94 --- /dev/null +++ b/tasks/mpi/rezantseva_a_vector_dot_product/src/ops_mpi.cpp @@ -0,0 +1,141 @@ +// Copyright 2024 Nesterov Alexander +#include "mpi/rezantseva_a_vector_dot_product/include/ops_mpi.hpp" + +int rezantseva_a_vector_dot_product_mpi::vectorDotProduct(const std::vector& v1, const std::vector& v2) { + long long result = 0; + for (size_t i = 0; i < v1.size(); i++) result += v1[i] * v2[i]; + return result; +} + +bool 
rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 && + taskData->outputs_count[0] == 1; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res = 0; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_[0].size(); i++) { + res += input_[0][i] * input_[1][i]; + } + + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) && + (taskData->inputs_count[0] == taskData->inputs_count[1]) && + (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 && + taskData->outputs_count[0] == 1; + } + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::pre_processing() { + internal_order_test(); + + size_t total_elements = 0; + size_t delta = 0; + size_t remainder = 0; + + if (world.rank() == 0) { + total_elements = taskData->inputs_count[0]; + num_processes_ = world.size(); + delta = total_elements / num_processes_; // Calculate base size for each process + remainder = total_elements % num_processes_; // Calculate remaining elements + } + boost::mpi::broadcast(world, num_processes_, 0); + + counts_.resize(num_processes_); // Vector to store counts for each process + + if (world.rank() == 0) { + // Distribute sizes to each process + for (unsigned int i = 0; i < num_processes_; ++i) { + counts_[i] = delta + (i < remainder ? 
1 : 0); // Assign 1 additional element to the first 'remainder' processes + } + } + boost::mpi::broadcast(world, counts_.data(), num_processes_, 0); + + if (world.rank() == 0) { + input_ = std::vector>(taskData->inputs.size()); + for (size_t i = 0; i < input_.size(); i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + input_[i] = std::vector(taskData->inputs_count[i]); + for (size_t j = 0; j < taskData->inputs_count[i]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + } + + res = 0; + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::run() { + internal_order_test(); + + if (world.rank() == 0) { + size_t offset_remainder = counts_[0]; + for (unsigned int proc = 1; proc < num_processes_; proc++) { + size_t current_count = counts_[proc]; + world.send(proc, 0, input_[0].data() + offset_remainder, current_count); + world.send(proc, 1, input_[1].data() + offset_remainder, current_count); + offset_remainder += current_count; + } + } + + local_input1_ = std::vector(counts_[world.rank()]); + local_input2_ = std::vector(counts_[world.rank()]); + + if (world.rank() > 0) { + world.recv(0, 0, local_input1_.data(), counts_[world.rank()]); + world.recv(0, 1, local_input2_.data(), counts_[world.rank()]); + } else { + local_input1_ = std::vector(input_[0].begin(), input_[0].begin() + counts_[0]); + local_input2_ = std::vector(input_[1].begin(), input_[1].begin() + counts_[0]); + } + + int local_res = 0; + + for (size_t i = 0; i < local_input1_.size(); i++) { + local_res += local_input1_[i] * local_input2_[i]; + } + boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); + return true; +} + +bool rezantseva_a_vector_dot_product_mpi::TestMPITaskParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp b/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp new file mode 100644 index 00000000000..ef0f1f421ff --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/func_tests/main.cpp @@ -0,0 +1,323 @@ +#include + +#include +#include +#include +#include + +#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp" + +TEST(shvedova_v_char_freq_mpi, test_all_same_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + 
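Review note on the rezantseva_a_vector_dot_product sources above: `vectorDotProduct` accumulates into a `long long` but returns `int`, and the parallel path reduces 32-bit partial sums into an `int` result. The perf tests multiply two 49,000,000-element vectors of values below 100, so the expected dot product is on the order of 49e6 * 2.4e3, roughly 1.2e11, which does not fit in 32 bits. A 64-bit-clean sketch (the function name is illustrative, not the PR's API):

```cpp
#include <cstdint>
#include <vector>

// Dot product with a 64-bit accumulator and a 64-bit return type; each
// product is widened before the multiply so it cannot overflow in int.
int64_t VectorDotProduct64(const std::vector<int>& v1, const std::vector<int>& v2) {
  int64_t result = 0;
  for (size_t i = 0; i < v1.size(); i++) {
    result += static_cast<int64_t>(v1[i]) * v2[i];
  }
  return result;
}
```

The MPI side would need the same widening: `boost::mpi::reduce` over `int64_t` partial sums, and an `int64_t` slot behind `taskData->outputs[0]`.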
taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_no_occurrences) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'z'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_mixed_characters) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'b'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_str = 240; + global_str = std::vector(count_size_str, 'a'); + for (int i = 0; i < count_size_str; i += 3) { + global_str[i] = 'b'; + } + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_count(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataSeq->inputs_count.emplace_back(global_str.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_count.data())); + taskDataSeq->outputs_count.emplace_back(reference_count.size()); + + shvedova_v_char_freq_mpi::CharFrequencySequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_count[0], global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_empty_string) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 0); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_1) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector(1, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_2) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + 
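Review note on the shvedova_v_char_freq functional tests: every test repeats the same six-line TaskData wiring. A small helper would keep each test focused on its input and expected count; a sketch under the TaskData API visible in this diff (`MakeCharFreqTaskData` is illustrative, not part of the PR):

```cpp
#include <cstdint>
#include <memory>
#include <vector>

#include "core/task/include/task.hpp"

// Wires one string input, one target character and one int output slot into
// a TaskData in the layout the CharFrequency tasks expect.
std::shared_ptr<ppc::core::TaskData> MakeCharFreqTaskData(std::vector<char>& str, char& target,
                                                          std::vector<int>& out) {
  auto task_data = std::make_shared<ppc::core::TaskData>();
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t*>(str.data()));
  task_data->inputs_count.emplace_back(str.size());
  task_data->inputs.emplace_back(reinterpret_cast<uint8_t*>(&target));
  task_data->inputs_count.emplace_back(1);
  task_data->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
  task_data->outputs_count.emplace_back(out.size());
  return task_data;
}
```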
testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_3) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 1); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_5) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'a', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 2); + } +} + +TEST(shvedova_v_char_freq_mpi, test_string_length_7) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + global_str = std::vector{'a', 'b', 'c', 'a', 'b', 'c', 'b'}; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + shvedova_v_char_freq_mpi::CharFrequencyParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_count[0], 2); + } +} \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp b/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp new file mode 100644 index 00000000000..1bce11fc9ef --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/include/ops_mpi.hpp @@ -0,0 +1,53 @@ +#pragma once + 
+#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace shvedova_v_char_freq_mpi { + +std::vector getRandomVector(int sz); + +class CharFrequencySequential : public ppc::core::Task { + public: + explicit CharFrequencySequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + char target_char_; + int res{}; +}; + +class CharFrequencyParallel : public ppc::core::Task { + public: + explicit CharFrequencyParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_str_; + std::vector local_input_; + char target_char_; + int res{}; + int local_res{}; + + boost::mpi::communicator world; +}; + +} // namespace shvedova_v_char_freq_mpi \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp b/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..5e25bffd1a0 --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp" + +TEST(shvedova_v_char_freq_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} + +TEST(shvedova_v_char_freq_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_str; + std::vector global_count(1, 0); + char target_char = 'a'; + + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_str; + if (world.rank() == 0) { + count_size_str = 120; + global_str = std::vector(count_size_str, 'a'); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_str.data())); + taskDataPar->inputs_count.emplace_back(global_str.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&target_char)); + 
taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_count.data())); + taskDataPar->outputs_count.emplace_back(global_count.size()); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + ASSERT_EQ(testMpiTaskParallel->validation(), true); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_str, global_count[0]); + } +} \ No newline at end of file diff --git a/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp b/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp new file mode 100644 index 00000000000..1cdb04cdf41 --- /dev/null +++ b/tasks/mpi/shvedova_v_char_freq/src/ops_mpi.cpp @@ -0,0 +1,110 @@ +#include "mpi/shvedova_v_char_freq/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::pre_processing() { + internal_order_test(); + + input_str_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_str_[i] = tmp_ptr[i]; + } + + target_char_ = *reinterpret_cast(taskData->inputs[1]); + res = 0; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::run() { + internal_order_test(); + + res = std::count(input_str_.begin(), input_str_.end(), target_char_); + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencySequential::post_processing() { + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::pre_processing() { + internal_order_test(); + + int myid = world.rank(); + int world_size = world.size(); + unsigned int n = 0; + + if (myid == 0) { + n = taskData->inputs_count[0]; + input_str_ = std::vector(n); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + memcpy(input_str_.data(), tmp_ptr, sizeof(char) * n); + target_char_ = *reinterpret_cast(taskData->inputs[1]); + } + + boost::mpi::broadcast(world, n, 0); + boost::mpi::broadcast(world, target_char_, 0); + + unsigned int vec_send_size = n / world_size; + unsigned int overflow_size = n % world_size; + std::vector send_counts(world_size, vec_send_size); + std::vector displs(world_size, 0); + + for (unsigned int i = 0; i < static_cast(world_size); ++i) { + if (i < static_cast(overflow_size)) { + ++send_counts[i]; + } + if (i > 0) { + displs[i] = displs[i - 1] + send_counts[i - 1]; + } + } + + auto loc_vec_size = static_cast(send_counts[myid]); + local_input_.resize(loc_vec_size); + + boost::mpi::scatterv(world, input_str_.data(), send_counts, displs, local_input_.data(), loc_vec_size, 0); + + local_res = 0; + res = 0; + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + return taskData->outputs_count[0] == 1; + } + 
return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::run() { + internal_order_test(); + local_res = std::count(local_input_.begin(), local_input_.end(), target_char_); + + boost::mpi::reduce(world, local_res, res, std::plus<>(), 0); + return true; +} + +bool shvedova_v_char_freq_mpi::CharFrequencyParallel::post_processing() { + internal_order_test(); + + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + + return true; +} \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp new file mode 100644 index 00000000000..ba20c58cfc1 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/func_tests/main.cpp @@ -0,0 +1,107 @@ +#include + +#include +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, Test_Max) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + std::cerr << "1 " << world.rank() << std::endl; + if (world.rank() == 0) { + const int count_size_vector = 240; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + std::cerr << "2 " << world.rank() << std::endl; + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + std::cerr << "3 " << world.rank() << std::endl; + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_2) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_max(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 120; + global_vec = getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_max.data())); + taskDataPar->outputs_count.emplace_back(global_max.size()); + } + + solovyev_d_vector_max_mpi::VectorMaxMPIParallel VectorMaxMPIParallel(taskDataPar); + 
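Review note: unlike the hand-rolled send/recv loops in the other tasks of this PR, shvedova_v_char_freq's `pre_processing` above distributes work with `boost::mpi::scatterv` over explicit counts and displacements, which handles the remainder without special cases. The same scheme in a standalone program (the harness and the sizes are assumptions for illustration, not code from this PR):

```cpp
#include <boost/mpi.hpp>

#include <vector>

int main(int argc, char** argv) {
  boost::mpi::environment env(argc, argv);
  boost::mpi::communicator world;

  std::vector<char> data;
  unsigned int n = 0;
  if (world.rank() == 0) {
    data.assign(10, 'a');  // e.g. 10 elements over 3 ranks -> counts 4, 3, 3
    n = data.size();
  }
  boost::mpi::broadcast(world, n, 0);

  std::vector<int> send_counts(world.size(), n / world.size());
  std::vector<int> displs(world.size(), 0);
  for (int i = 0; i < world.size(); ++i) {
    if (i < static_cast<int>(n % world.size())) ++send_counts[i];  // spread the remainder
    if (i > 0) displs[i] = displs[i - 1] + send_counts[i - 1];
  }

  std::vector<char> local(send_counts[world.rank()]);
  boost::mpi::scatterv(world, data.data(), send_counts, displs, local.data(),
                       static_cast<int>(local.size()), 0);
  return 0;
}
```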
ASSERT_EQ(VectorMaxMPIParallel.validation(), true); + VectorMaxMPIParallel.pre_processing(); + VectorMaxMPIParallel.run(); + VectorMaxMPIParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_max(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_max.data())); + taskDataSeq->outputs_count.emplace_back(reference_max.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxMPISequential(taskDataSeq); + ASSERT_EQ(VectorMaxMPISequential.validation(), true); + VectorMaxMPISequential.pre_processing(); + VectorMaxMPISequential.run(); + VectorMaxMPISequential.post_processing(); + + ASSERT_EQ(reference_max[0], global_max[0]); + } +} diff --git a/tasks/mpi/solovyev_d_vector_max/include/header.hpp b/tasks/mpi/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..0b49b459cad --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { + +int vectorMax(std::vector> v); + +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +class VectorMaxMPIParallel : public ppc::core::Task { + public: + explicit VectorMaxMPIParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data, localData; + int result{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..36f24830de3 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,98 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, run_pipeline) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + 
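Review note on the perf harness used throughout these tests: `perfAttr->current_timer` is installed as a lambda over a local `boost::mpi::timer` captured by reference. Capturing the timer by value makes the hook self-contained; a sketch of the same pattern, assuming `current_timer` is a `std::function<double()>` polled by `ppc::core::Perf` (which matches its usage here):

```cpp
#include <boost/mpi/timer.hpp>

#include <functional>

// Returns a clock hook measuring seconds since the call; the timer is
// captured by value, so the hook stays valid however long it is stored.
std::function<double()> MakeMpiTimerHook() {
  boost::mpi::timer timer;  // boost::mpi::timer starts counting at construction
  return [timer] { return timer.elapsed(); };
}
```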
ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} + +TEST(solovyev_d_vector_max_mpi, run_task) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 12000000; + global_vec = getRandomVector(count_size_vector); + global_vec[count_size_vector / 2] = 1024; + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_res.data())); + taskDataPar->outputs_count.emplace_back(global_res.size()); + } + + auto VectorMaxMPIParallel = std::make_shared(taskDataPar); + ASSERT_EQ(VectorMaxMPIParallel->validation(), true); + VectorMaxMPIParallel->pre_processing(); + VectorMaxMPIParallel->run(); + VectorMaxMPIParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(VectorMaxMPIParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, global_res[0]); + } +} diff --git a/tasks/mpi/solovyev_d_vector_max/src/source.cpp b/tasks/mpi/solovyev_d_vector_max/src/source.cpp new file mode 100644 index 00000000000..76213313933 --- /dev/null +++ b/tasks/mpi/solovyev_d_vector_max/src/source.cpp @@ -0,0 +1,119 @@ +#include +#include +#include +#include +#include +#include + +#include "mpi/solovyev_d_vector_max/include/header.hpp" + +using namespace std::chrono_literals; + +int solovyev_d_vector_max_mpi::vectorMax(std::vector> v) { + int m = -214748364; + for (std::string::size_type i = 0; i < v.size(); i++) { + if (v[i] > m) { + m = v[i]; + } + } + return m; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::pre_processing() { + internal_order_test(); + + // Determine number of vector elements per process + unsigned int delta = 0; + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + } + + // Share delta between all processes + broadcast(world, delta, 0); + + if (world.rank() == 0) { + // Convert input data to vector + int* input_ = reinterpret_cast(taskData->inputs[0]); + data = std::vector(input_, input_ + taskData->inputs_count[0]); + + // Send each of processes their portion of data + for (int process = 1; process < world.size(); process++) { + world.send(process, 0, data.data() + process * delta, delta); + } + } + + // Initialize local vector + localData = std::vector(delta); + if (world.rank() == 0) { + // Getting data directly if we in zero process + localData = 
std::vector(data.begin(), data.begin() + delta); + } else { + // Otherwise, recieving data + world.recv(0, 0, localData.data(), delta); + } + + // Init result value + result = 0; + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); + } + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::run() { + internal_order_test(); + int localResult; + + // Search for maximum vector element in current process data + localResult = vectorMax(localData); + + // Search for maximum vector element using all processes data + reduce(world, localResult, result, boost::mpi::maximum(), 0); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxMPIParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = result; + } + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() { + internal_order_test(); + + // Init data vector + int* input_ = reinterpret_cast(taskData->inputs[0]); + data = std::vector(input_, input_ + taskData->inputs_count[0]); + + // Init result value + result = 0; + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() { + internal_order_test(); + // Check count elements of output + return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0); +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() { + internal_order_test(); + + // Determine maximum value of data vector + result = vectorMax(data); + return true; +} + +bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = result; + return true; +} \ No newline at end of file diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp new file mode 100644 index 00000000000..b5345df0607 --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp @@ -0,0 +1,237 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_1_characters) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'p', 'p', 'p'}, {'b', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i 
= 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_1_characters_res1) { + boost::mpi::communicator world; + std::vector> strs = {{'c', 'p', 'p', 'p'}, {'b', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_3_characters_res1) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'a', 'p', 'p'}, {'a', 'a', 'g', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + 
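Review note on solovyev_d_vector_max's source.cpp above: the sentinel `int m = -214748364;` drops the final digit of INT_MIN (-2147483648), so any vector whose true maximum lies below -214748364 would be misreported as the sentinel. The vector is also taken by value, copying up to 12,000,000 elements per call in the perf tests, and the functional test Test_Max leaves three `std::cerr` rank-tracing lines that look like leftover debugging. A hedged rewrite of the helper:

```cpp
#include <vector>

// Maximum via a const reference (no multi-million-element copy), seeded
// from a real element instead of a hand-typed sentinel. Non-empty input is
// assumed here, since validation() already rejects inputs_count[0] == 0.
int VectorMax(const std::vector<int>& v) {
  int m = v.front();
  for (size_t i = 1; i < v.size(); i++) {
    if (v[i] > m) {
      m = v[i];
    }
  }
  return m;
}
```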
if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(1, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_4_characters) { + boost::mpi::communicator world; + std::vector> strs = {{'a', 'p', 'p', 'a'}, {'a', 'p', 'p', 'p'}}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], res[0]); + ASSERT_EQ(0, res[0]); + } +} +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, Equal_strings) { + boost::mpi::communicator world; + std::vector str1; + std::vector str2; + std::vector> strs = {str1, str2}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + taskDataPar->inputs_count.emplace_back(strs[0].size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(res.data())); + taskDataPar->outputs_count.emplace_back(res.size()); + } + + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel testMpiTaskParallel(taskDataPar); + 
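+  // The task methods below are collective: every rank constructs the task and
+  // calls them, even though only rank 0 populated taskDataPar above.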
ASSERT_EQ(testMpiTaskParallel.validation(), true); + testMpiTaskParallel.pre_processing(); + testMpiTaskParallel.run(); + testMpiTaskParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_res(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < strs.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataSeq->inputs_count.emplace_back(strs.size()); + taskDataSeq->inputs_count.emplace_back(strs[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_res.data())); + taskDataSeq->outputs_count.emplace_back(reference_res.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential testMpiTaskSequential(taskDataSeq); + ASSERT_EQ(testMpiTaskSequential.validation(), true); + testMpiTaskSequential.pre_processing(); + testMpiTaskSequential.run(); + testMpiTaskSequential.post_processing(); + + ASSERT_EQ(reference_res[0], 0); + ASSERT_EQ(2, res[0]); + } +} diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp new file mode 100644 index 00000000000..15ba3230fbe --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sorokin_a_check_lexicographic_order_of_strings_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + std::vector local_input1_, local_input2_; + int res_{}; + boost::mpi::communicator world; +}; + +} // namespace sorokin_a_check_lexicographic_order_of_strings_mpi diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..cd4fd75c53f --- /dev/null +++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp @@ -0,0 +1,94 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_20000000_characters) { + boost::mpi::communicator world; + std::vector str1(20000000, 'a'); + std::vector str2(19999999, 'a'); + str2.push_back('b'); + std::vector> strs = {str1, str2}; + std::vector res(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + for (unsigned int i = 0; i < strs.size(); i++) + taskDataPar->inputs.emplace_back(reinterpret_cast(strs[i].data())); + taskDataPar->inputs_count.emplace_back(strs.size()); + 
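+    // inputs_count[0] is the number of strings (always 2) and inputs_count[1]
+    // their common length; the implementation reads them in that order.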
taskDataPar->inputs_count.emplace_back(strs[0].size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
+    taskDataPar->outputs_count.emplace_back(res.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(res[0], 0);
+  }
+}
+
+TEST(sorokin_a_check_lexicographic_order_of_strings_mpi, The_difference_is_in_20000000_characters_res1) {
+  boost::mpi::communicator world;
+  std::vector<char> str1(20000000, 'b');
+  std::vector<char> str2(19999999, 'b');
+  str2.push_back('a');
+  std::vector<std::vector<char>> strs = {str1, str2};
+  std::vector<int> res(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    for (unsigned int i = 0; i < strs.size(); i++)
+      taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(strs[i].data()));
+    taskDataPar->inputs_count.emplace_back(strs.size());
+    taskDataPar->inputs_count.emplace_back(strs[0].size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(res.data()));
+    taskDataPar->outputs_count.emplace_back(res.size());
+  }
+
+  auto testMpiTaskParallel =
+      std::make_shared<sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel>(taskDataPar);
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    ASSERT_EQ(res[0], 1);
+  }
+}
diff --git a/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp
new file mode 100644
index 00000000000..58bd3d8f46c
--- /dev/null
+++ b/tasks/mpi/sorokin_a_check_lexicographic_order_of_strings/src/ops_mpi.cpp
@@ -0,0 +1,130 @@
+// Copyright 2023 Nesterov Alexander
+#include "mpi/sorokin_a_check_lexicographic_order_of_strings/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <functional>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<std::vector<char>>(taskData->inputs_count[0], std::vector<char>(taskData->inputs_count[1]));
+
+  for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+    auto* tmp_ptr = reinterpret_cast<char*>(taskData->inputs[i]);
+    for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) {
+      input_[i][j] = tmp_ptr[j];
+    }
+  }
+  res_ = 0;
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < std::min(input_[0].size(), input_[1].size()); ++i) {
+    if (static_cast<int>(input_[0][i]) > static_cast<int>(input_[1][i])) {
+      res_ = 1;
+      break;
+    }
+    if (static_cast<int>(input_[0][i]) < static_cast<int>(input_[1][i])) {
+      break;
+    }
+  }
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+  unsigned int delta = 0;
+  unsigned int remainder = 0;
+  if (world.rank() == 0) {
+    delta = taskData->inputs_count[1] / world.size();
+    remainder = taskData->inputs_count[1] % world.size();
+  }
+  broadcast(world, delta, 0);
+
+  if (world.rank() == 0) {
+    input_ = std::vector<std::vector<char>>(taskData->inputs_count[0], std::vector<char>(taskData->inputs_count[1]));
+
+    for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) {
+      auto* tmp_ptr = reinterpret_cast<char*>(taskData->inputs[i]);
+      for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) {
+        input_[i][j] = tmp_ptr[j];
+      }
+    }
+    for (int proc = 1; proc < world.size(); proc++) {
+      world.send(proc, 0, input_[0].data() + delta * proc + remainder, delta);
+      world.send(proc, 1, input_[1].data() + delta * proc + remainder, delta);
+    }
+  }
+  local_input1_ = std::vector<char>(delta);
+  local_input2_ = std::vector<char>(delta);
+  if (world.rank() == 0) {
+    local_input1_ = std::vector<char>(input_[0].begin(), input_[0].begin() + delta + remainder);
+    local_input2_ = std::vector<char>(input_[1].begin(), input_[1].begin() + delta + remainder);
+  } else {
+    world.recv(0, 0, local_input1_.data(), delta);
+    world.recv(0, 1, local_input2_.data(), delta);
+  }
+  res_ = 2;
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  int local_res = 2;
+  for (size_t i = 0; i < local_input1_.size(); ++i) {
+    if (static_cast<int>(local_input1_[i]) > static_cast<int>(local_input2_[i])) {
+      local_res = 1;
+      break;
+    }
+    if (static_cast<int>(local_input1_[i]) < static_cast<int>(local_input2_[i])) {
+      local_res = 0;
+      break;
+    }
+  }
+  std::vector<int> all_res;
+  boost::mpi::gather(world, local_res, all_res, 0);
+
+  if (world.rank() == 0) {
+    for (int result : all_res) {
+      if (result != 2) {
+        res_ = result;
+        break;
+      }
+    }
+  }
+  return true;
+}
+
+bool sorokin_a_check_lexicographic_order_of_strings_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    reinterpret_cast<int*>(taskData->outputs[0])[0] = res_;
+  }
+  return true;
+}
diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..a3fff0e94d1
--- /dev/null
+++ b/tasks/mpi/sotskov_a_sum_element_matrix/func_tests/main.cpp
@@ -0,0 +1,365 @@
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <numeric>
+#include <vector>
+
+#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp"
+
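The parallel lexicographic check above gathers one verdict per rank and keeps the first definitive one. Chunks are distributed in rank order, which matches character order in the strings, so this is exactly lexicographic comparison. A minimal, MPI-free sketch of that combination rule (the `combine_verdicts` helper is hypothetical, not part of this PR):

```
#include <cassert>
#include <vector>

// Per-chunk verdicts as produced on each rank by run():
//   2 = chunk equal, 1 = first string greater here, 0 = first string smaller.
int combine_verdicts(const std::vector<int>& per_chunk) {
  for (int v : per_chunk) {
    if (v != 2) {
      return v;  // earliest chunk with a difference decides
    }
  }
  return 2;  // every chunk equal => strings are equal
}

int main() {
  assert(combine_verdicts({1, 1}) == 1);  // difference already in the first chunk
  assert(combine_verdicts({2, 0}) == 0);  // equal prefix, first string smaller later
  assert(combine_verdicts({2, 2}) == 2);  // fully equal strings
  return 0;
}
```

With the functional tests above, e.g. {'c','p','p','p'} vs {'b','a','g','p'}, the first chunk already reports 1 ('c' > 'b'), matching the expected result.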
+TEST(sotskov_a_sum_element_matrix, test_constant_matrix) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 1000; + int cols = 1000; + std::vector matrix(rows * cols, 5.0); + double output = 0.0; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&output)); + taskDataSeq->outputs_count.emplace_back(1); + + sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + double exact = 5.0 * rows * cols; + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_random_matrix) { + boost::mpi::communicator world; + std::vector global_result(1, 0); + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 1000; + int cols = 1000; + std::vector matrix(rows * cols); + for (int i = 0; i < rows * cols; ++i) { + matrix[i] = static_cast(rand()) / RAND_MAX; + } + double output = 0.0; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + std::vector reference_result(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&output)); + taskDataSeq->outputs_count.emplace_back(1); + + 
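+    // The sequential reference writes into the same `output` variable as the
+    // parallel run, so the EXPECT_NEAR below checks the sequential result;
+    // `reference_result` is declared but never read.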
sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential sequentialTask(taskDataSeq); + ASSERT_EQ(sequentialTask.validation(), true); + sequentialTask.pre_processing(); + sequentialTask.run(); + sequentialTask.post_processing(); + + double exact = std::accumulate(matrix.begin(), matrix.end(), 0.0); + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_empty_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 0; + int cols = 0; + std::vector matrix; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 0.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_single_element_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 1; + int cols = 1; + std::vector matrix = {7.0}; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 7.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_zero_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 10; + int cols = 10; + std::vector matrix(rows * cols, 0.0); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + EXPECT_NEAR(output, 0.0, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_mixed_values_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int 
rows = 5; + int cols = 5; + std::vector matrix = {1.0, -1.0, 2.0, -2.0, 3.0, -3.0, 4.0, -4.0, 5.0, -5.0, 6.0, -6.0, 7.0, + -7.0, 8.0, -8.0, 9.0, -9.0, 10.0, -10.0, 11.0, -11.0, 12.0, -12.0, 13.0}; + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + double exact = std::accumulate(matrix.begin(), matrix.end(), 0.0); + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_large_values_matrix) { + boost::mpi::communicator world; + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + + int rows = 10; + int cols = 10; + std::vector matrix(rows * cols, 1e6); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs_count.emplace_back(matrix.size()); + taskDataPar->inputs.emplace_back(reinterpret_cast(&rows)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->inputs.emplace_back(reinterpret_cast(&cols)); + taskDataPar->inputs_count.emplace_back(1); + taskDataPar->outputs.emplace_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.emplace_back(1); + } + + sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel parallelTask(taskDataPar); + ASSERT_EQ(parallelTask.validation(), true); + parallelTask.pre_processing(); + parallelTask.run(); + parallelTask.post_processing(); + + if (world.rank() == 0) { + double exact = 1e6 * rows * cols; + EXPECT_NEAR(output, exact, 1e-6); + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 4; + int total_cols = 3; + std::vector matrix(total_rows * total_cols); + + if (rank == 0) { + for (int i = 0; i < total_rows; ++i) { + for (int j = 0; j < total_cols; ++j) { + matrix[i * total_cols + j] = static_cast(i * total_cols + j + 1); + } + } + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 
1 : 0); + + for (int i = start_idx; i < end_idx; ++i) { + double expected_value = i + 1; + EXPECT_EQ(matrix[i], expected_value) << "Process " << rank << " has incorrect value at index " << i; + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution_single_element_matrix) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 1; + int total_cols = 1; + std::vector matrix(total_rows * total_cols, 1.0); + + if (rank == 0) { + matrix[0] = 42.0; + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 1 : 0); + + if (start_idx < end_idx) { + EXPECT_EQ(matrix[start_idx], 42.0) << "Process " << rank << " should have value 42."; + } +} + +TEST(sotskov_a_sum_element_matrix, test_data_distribution_2x3_matrix) { + boost::mpi::communicator world; + int rank = world.rank(); + int size = world.size(); + + int total_rows = 2; + int total_cols = 3; + std::vector matrix(total_rows * total_cols); + + if (rank == 0) { + for (int i = 0; i < total_rows; ++i) { + for (int j = 0; j < total_cols; ++j) { + matrix[i * total_cols + j] = static_cast(i * total_cols + j + 1); + } + } + } + + boost::mpi::broadcast(world, matrix.data(), matrix.size(), 0); + + int base_elements_per_process = total_rows * total_cols / size; + int remainder = (total_rows * total_cols) % size; + + int start_idx = rank * base_elements_per_process + std::min(rank, remainder); + int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 
1 : 0); + + for (int i = start_idx; i < end_idx; ++i) { + double expected_value = i + 1; + EXPECT_EQ(matrix[i], expected_value) << "Process " << rank << " has incorrect value at index " << i; + } +} \ No newline at end of file diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp b/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp new file mode 100644 index 00000000000..a52b879d7ed --- /dev/null +++ b/tasks/mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp @@ -0,0 +1,59 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace sotskov_a_sum_element_matrix_mpi { + +class TestMPITaskSequential : public ppc::core::Task { + public: + explicit TestMPITaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_matrix(const std::vector& matrix, int rows, int cols); + + private: + double sum_elements(const std::vector& matrix); + + std::vector matrix_; + int rows_{}; + int cols_{}; + double result_{}; +}; + +class TestMPITaskParallel : public ppc::core::Task { + public: + explicit TestMPITaskParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_matrix(const std::vector& matrix, int rows, int cols); + + private: + double parallel_sum_elements(const std::vector& matrix); + + std::vector matrix_; + int rows_{}; + int cols_{}; + double local_result_{}; + double global_result_{}; + + boost::mpi::communicator world; +}; + +} // namespace sotskov_a_sum_element_matrix_mpi \ No newline at end of file diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..349dff7971a --- /dev/null +++ b/tasks/mpi/sotskov_a_sum_element_matrix/perf_tests/main.cpp @@ -0,0 +1,87 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp" + +TEST(sotskov_a_sum_element_matrix, test_pipeline_run) { + boost::mpi::communicator world; + int rows = 1000; + int cols = 1000; + std::vector matrix(rows * cols, 1.0); + double output = 0.0; + + std::shared_ptr taskDataPar = std::make_shared(); + if (world.rank() == 0) { + taskDataPar->inputs.push_back(reinterpret_cast(matrix.data())); + taskDataPar->inputs.push_back(reinterpret_cast(&rows)); + taskDataPar->inputs.push_back(reinterpret_cast(&cols)); + taskDataPar->outputs.push_back(reinterpret_cast(&output)); + taskDataPar->outputs_count.push_back(1); + } + + auto testMpiTaskParallel = std::make_shared(taskDataPar); + + ASSERT_TRUE(testMpiTaskParallel->validation()); + testMpiTaskParallel->pre_processing(); + testMpiTaskParallel->run(); + testMpiTaskParallel->post_processing(); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testMpiTaskParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + auto exact = static_cast(rows * cols); + EXPECT_NEAR(output, exact, 1e-4); + } +} + 
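The index arithmetic in the data-distribution tests above, and in parallel_sum_elements() further below, follows the classic scheme where the first `remainder` ranks get one extra element. As a sanity check, the following standalone sketch (the `check_tiling` helper is hypothetical, not part of this PR) asserts that the computed ranges tile [0, n) exactly for any process count:

```
#include <algorithm>
#include <cassert>

// Verifies that start/end indices computed as in parallel_sum_elements()
// partition [0, n) contiguously: no gaps, no overlap, full coverage.
void check_tiling(int n, int size) {
  int prev_end = 0;
  for (int rank = 0; rank < size; ++rank) {
    int base = n / size;
    int rem = n % size;
    int start = rank * base + std::min(rank, rem);
    int end = start + base + (rank < rem ? 1 : 0);
    assert(start == prev_end);  // contiguous with the previous rank's range
    prev_end = end;
  }
  assert(prev_end == n);  // every element assigned exactly once
}

int main() {
  for (int n : {0, 1, 6, 12, 1000000}) {
    for (int size = 1; size <= 8; ++size) {
      check_tiling(n, size);
    }
  }
  return 0;
}
```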
+TEST(sotskov_a_sum_element_matrix, test_task_run) {
+  boost::mpi::communicator world;
+  int rows = 10000;
+  int cols = 10000;
+  std::vector<double> matrix(rows * cols, 1.0);
+  double output = 0.0;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+  if (world.rank() == 0) {
+    taskDataPar->inputs.push_back(reinterpret_cast<uint8_t*>(matrix.data()));
+    taskDataPar->inputs.push_back(reinterpret_cast<uint8_t*>(&rows));
+    taskDataPar->inputs.push_back(reinterpret_cast<uint8_t*>(&cols));
+    taskDataPar->outputs.push_back(reinterpret_cast<uint8_t*>(&output));
+    taskDataPar->outputs_count.push_back(1);
+  }
+
+  auto testMpiTaskParallel = std::make_shared<sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel>(taskDataPar);
+
+  ASSERT_EQ(testMpiTaskParallel->validation(), true);
+  testMpiTaskParallel->pre_processing();
+  testMpiTaskParallel->run();
+  testMpiTaskParallel->post_processing();
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const boost::mpi::timer current_timer;
+  perfAttr->current_timer = [&] { return current_timer.elapsed(); };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testMpiTaskParallel);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+
+  if (world.rank() == 0) {
+    ppc::core::Perf::print_perf_statistic(perfResults);
+    auto exact = static_cast<double>(rows * cols);
+    EXPECT_NEAR(output, exact, 1e-4);
+  }
+}
\ No newline at end of file
diff --git a/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp b/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp
new file mode 100644
index 00000000000..23751002e38
--- /dev/null
+++ b/tasks/mpi/sotskov_a_sum_element_matrix/src/ops_mpi.cpp
@@ -0,0 +1,106 @@
+#include "mpi/sotskov_a_sum_element_matrix/include/ops_mpi.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <functional>
+#include <iostream>
+#include <numeric>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+using namespace std::chrono_literals;
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::pre_processing() {
+  internal_order_test();
+  auto* tmp_ptr_matrix = reinterpret_cast<double*>(taskData->inputs[0]);
+  auto* tmp_ptr_rows = reinterpret_cast<int*>(taskData->inputs[1]);
+  auto* tmp_ptr_cols = reinterpret_cast<int*>(taskData->inputs[2]);
+  matrix_.assign(tmp_ptr_matrix, tmp_ptr_matrix + (*tmp_ptr_rows) * (*tmp_ptr_cols));
+  rows_ = *tmp_ptr_rows;
+  cols_ = *tmp_ptr_cols;
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::validation() {
+  internal_order_test();
+  return taskData->outputs_count[0] == 1;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::run() {
+  internal_order_test();
+  result_ = std::accumulate(matrix_.begin(), matrix_.end(), 0.0);
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<double*>(taskData->outputs[0]) = result_;
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::pre_processing() {
+  internal_order_test();
+
+  if (world.rank() == 0) {
+    auto* tmp_ptr_matrix = reinterpret_cast<double*>(taskData->inputs[0]);
+    auto* tmp_ptr_rows = reinterpret_cast<int*>(taskData->inputs[1]);
+    auto* tmp_ptr_cols = reinterpret_cast<int*>(taskData->inputs[2]);
+    matrix_.assign(tmp_ptr_matrix, tmp_ptr_matrix + (*tmp_ptr_rows) * (*tmp_ptr_cols));
+    rows_ = *tmp_ptr_rows;
+    cols_ = *tmp_ptr_cols;
+  }
+  broadcast(world, rows_, 0);
+  broadcast(world, cols_, 0);
+  if (world.rank() != 0) {
+    matrix_.resize(rows_ * cols_);
+  }
+  broadcast(world, matrix_.data(), matrix_.size(), 0);
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::validation() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    return taskData->outputs_count[0] == 1;
+  }
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::run() {
+  internal_order_test();
+  local_result_ = parallel_sum_elements(matrix_);
+  reduce(world, local_result_, global_result_, std::plus<>(), 0);
+  return true;
+}
+
+bool sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::post_processing() {
+  internal_order_test();
+  if (world.rank() == 0) {
+    *reinterpret_cast<double*>(taskData->outputs[0]) = global_result_;
+  }
+  return true;
+}
+
+double sotskov_a_sum_element_matrix_mpi::TestMPITaskParallel::parallel_sum_elements(const std::vector<double>& matrix) {
+  int rank = world.rank();
+  int size = world.size();
+  int total_elements = matrix.size();
+
+  int base_elements_per_process = total_elements / size;
+  int remainder = total_elements % size;
+
+  int start_idx = rank * base_elements_per_process + std::min(rank, remainder);
+  int end_idx = start_idx + base_elements_per_process + (rank < remainder ? 1 : 0);
+
+  double local_sum = 0.0;
+  for (int i = start_idx; i < end_idx; ++i) {
+    local_sum += matrix[i];
+  }
+
+  double global_sum = 0.0;
+  MPI_Reduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, 0, world);
+
+  return global_sum;
+}
diff --git a/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
new file mode 100644
index 00000000000..c4fe34b01ba
--- /dev/null
+++ b/tasks/mpi/titov_s_vector_sum/func_tests/main.cpp
@@ -0,0 +1,241 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <boost/mpi/communicator.hpp>
+#include <boost/mpi/environment.hpp>
+#include <vector>
+
+#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp"
+
+TEST(titov_s_vector_sum_mpi, Test_Sum_100) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int32_t> global_sum(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
+    const int count_size_vector = 100;
+    global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector);
+    taskDataPar->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataPar->inputs_count.emplace_back(global_vec.size());
+    taskDataPar->outputs.emplace_back(reinterpret_cast<uint8_t*>(global_sum.data()));
+    taskDataPar->outputs_count.emplace_back(global_sum.size());
+  }
+
+  titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar);
+  ASSERT_TRUE(MPIVectorSumParallel.validation());
+  MPIVectorSumParallel.pre_processing();
+  MPIVectorSumParallel.run();
+  MPIVectorSumParallel.post_processing();
+
+  if (world.rank() == 0) {
+    // Create data
+    std::vector<int32_t> reference_sum(1, 0);
+
+    // Create TaskData
+    std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_vec.data()));
+    taskDataSeq->inputs_count.emplace_back(global_vec.size());
+    taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(reference_sum.data()));
+    taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+    // Create Task
+    titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq);
+    ASSERT_TRUE(MPIVectorSumSequential.validation());
+    MPIVectorSumSequential.pre_processing();
+    MPIVectorSumSequential.run();
+    MPIVectorSumSequential.post_processing();
+
+    ASSERT_EQ(reference_sum[0], global_sum[0]);
+  }
+}
+
+TEST(titov_s_vector_sum_mpi, Test_Sum_EmptyArray) {
+  boost::mpi::communicator world;
+  std::vector<int> global_vec;
+  std::vector<int32_t> global_sum(1, 0);
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataPar = std::make_shared<ppc::core::TaskData>();
+
+  if (world.rank() == 0) {
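+    // Empty input: inputs_count[0] is 0, so delta and remainder are both 0,
+    // every rank ends up with an empty local chunk, and the reduced sum
+    // stays 0 (asserted below).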
taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + ASSERT_EQ(global_sum[0], 0); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_1000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_100000) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 100000; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + // Create data + std::vector reference_sum(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + // Create 
Task + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_1) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + const int count_size_vector = 1; + global_vec = titov_s_vector_sum_mpi::getRandomVector(count_size_vector); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, Test_Sum_SmallArray_0) { + boost::mpi::communicator world; + std::vector global_vec(1, 0); + std::vector global_sum(1, 0); + std::shared_ptr taskDataPar = std::make_shared(); + + if (world.rank() == 0) { + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + titov_s_vector_sum_mpi::MPIVectorSumParallel MPIVectorSumParallel(taskDataPar); + ASSERT_TRUE(MPIVectorSumParallel.validation()); + MPIVectorSumParallel.pre_processing(); + MPIVectorSumParallel.run(); + MPIVectorSumParallel.post_processing(); + + if (world.rank() == 0) { + std::vector reference_sum(1, 0); + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataSeq->inputs_count.emplace_back(global_vec.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + titov_s_vector_sum_mpi::MPIVectorSumSequential MPIVectorSumSequential(taskDataSeq); + ASSERT_TRUE(MPIVectorSumSequential.validation()); + MPIVectorSumSequential.pre_processing(); + MPIVectorSumSequential.run(); + MPIVectorSumSequential.post_processing(); + + ASSERT_EQ(reference_sum[0], global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp new file mode 100644 index 00000000000..3319bd016ec --- /dev/null +++ 
b/tasks/mpi/titov_s_vector_sum/include/ops_mpi.hpp @@ -0,0 +1,49 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_mpi { + +std::vector getRandomVector(int sz); + +class MPIVectorSumSequential : public ppc::core::Task { + public: + explicit MPIVectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res{}; + std::string ops; +}; + +class MPIVectorSumParallel : public ppc::core::Task { + public: + explicit MPIVectorSumParallel(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_, local_input_; + int res{}; + std::string ops; + boost::mpi::communicator world; +}; + +} // namespace titov_s_vector_sum_mpi diff --git a/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..c565240f793 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,88 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +TEST(titov_s_vector_sum_mpi, test_pipeline_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} + +TEST(titov_s_vector_sum_mpi, test_task_run) { + boost::mpi::communicator world; + std::vector global_vec; + std::vector global_sum(1, 0); + // Create TaskData + std::shared_ptr taskDataPar = std::make_shared(); + int count_size_vector; + if (world.rank() == 0) { + count_size_vector = 100000000; + global_vec = std::vector(count_size_vector, 1); + taskDataPar->inputs.emplace_back(reinterpret_cast(global_vec.data())); + taskDataPar->inputs_count.emplace_back(global_vec.size()); + taskDataPar->outputs.emplace_back(reinterpret_cast(global_sum.data())); + 
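+    // 100'000'000 ones sum to count_size_vector exactly; the int accumulator
+    // in the task stays well below INT_MAX, so the ASSERT_EQ below is safe.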
taskDataPar->outputs_count.emplace_back(global_sum.size()); + } + + auto MPIVectorSumParallel = std::make_shared(taskDataPar); + ASSERT_EQ(MPIVectorSumParallel->validation(), true); + MPIVectorSumParallel->pre_processing(); + MPIVectorSumParallel->run(); + MPIVectorSumParallel->post_processing(); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const boost::mpi::timer current_timer; + perfAttr->current_timer = [&] { return current_timer.elapsed(); }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MPIVectorSumParallel); + perfAnalyzer->task_run(perfAttr, perfResults); + if (world.rank() == 0) { + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(count_size_vector, global_sum[0]); + } +} diff --git a/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp new file mode 100644 index 00000000000..5f5b744c068 --- /dev/null +++ b/tasks/mpi/titov_s_vector_sum/src/ops_mpi.cpp @@ -0,0 +1,116 @@ +// Copyright 2023 Nesterov Alexander +#include "mpi/titov_s_vector_sum/include/ops_mpi.hpp" + +#include +#include +#include +#include +#include +#include + +using namespace std::chrono_literals; + +std::vector titov_s_vector_sum_mpi::getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + // Init value for output + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->outputs_count[0] == 1; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::run() { + internal_order_test(); + res = std::accumulate(input_.begin(), input_.end(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::pre_processing() { + internal_order_test(); + unsigned int delta = 0; + unsigned int remainder = 0; + + if (world.rank() == 0) { + delta = taskData->inputs_count[0] / world.size(); + remainder = taskData->inputs_count[0] % world.size(); + } + + broadcast(world, delta, 0); + broadcast(world, remainder, 0); + + if (world.rank() == 0) { + input_ = std::vector(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tmp_ptr[i]; + } + + for (int proc = 1; proc < world.size(); proc++) { + unsigned int send_size = (proc == world.size() - 1) ? delta + remainder : delta; + world.send(proc, 0, input_.data() + proc * delta, send_size); + } + } + local_input_ = std::vector((world.rank() == world.size() - 1) ? delta + remainder : delta); + + if (world.rank() != 0) { + unsigned int recv_size = (world.rank() == world.size() - 1) ? 
delta + remainder : delta; + world.recv(0, 0, local_input_.data(), recv_size); + } else { + local_input_ = std::vector(input_.begin(), input_.begin() + delta); + } + + res = 0; + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::validation() { + internal_order_test(); + if (world.rank() == 0) { + // Check count elements of output + return taskData->outputs_count[0] == 1; + } + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::run() { + internal_order_test(); + int local_res; + local_res = std::accumulate(local_input_.begin(), local_input_.end(), 0); + reduce(world, local_res, res, std::plus(), 0); + return true; +} + +bool titov_s_vector_sum_mpi::MPIVectorSumParallel::post_processing() { + internal_order_test(); + if (world.rank() == 0) { + reinterpret_cast(taskData->outputs[0])[0] = res; + } + return true; +} diff --git a/tasks/omp/example/src/ops_omp.cpp b/tasks/omp/example/src/ops_omp.cpp index 77fae981e09..6fa84eb99bc 100644 --- a/tasks/omp/example/src/ops_omp.cpp +++ b/tasks/omp/example/src/ops_omp.cpp @@ -50,7 +50,6 @@ bool nesterov_a_test_task_omp::TestOMPTaskSequential::run() { } else if (ops == "*") { res = std::accumulate(input_.begin(), input_.end(), 1, std::multiplies<>()); } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..b0c59511a2f --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/func_tests/main.cpp @@ -0,0 +1,209 @@ +// Copyright 2023 Nesterov Alexander + +#include + +#include + +#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, EmptyInputs) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, EmptyOutputs) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectInputsCountSize) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(4); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectInputsCountValue) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(0); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(4); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectOutputsCountSize) { + std::shared_ptr taskDataSeq = 
std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(3); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, IncorrectOutputsCountValue) { + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + taskDataSeq->inputs_count.push_back(3); + taskDataSeq->inputs_count.push_back(4); + taskDataSeq->inputs.push_back(reinterpret_cast(new int[12])); + taskDataSeq->outputs_count.push_back(5); + + ASSERT_FALSE(testTaskSequential.validation()); + + delete[] reinterpret_cast(taskDataSeq->inputs[0]); +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_10x10_matrix) { + const int rows = 10; + const int cols = 10; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], max_val); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_100x100_matrix) { + const int rows = 100; + const int cols = 100; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int j = 0; j < cols; j++) { + int max_val = matrix_rnd[0][j]; + for (int i = 1; i < rows; i++) { + if (matrix_rnd[i][j] > max_val) { + max_val = matrix_rnd[i][j]; + } + } + ASSERT_EQ(v_res[j], 200); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_100x500_matrix) { + const int rows = 100; + const int cols = 500; + std::shared_ptr taskDataSeq = std::make_shared(); + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential 
+  std::vector<std::vector<int>> matrix_rnd =
+      Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols);
+  for (auto& row : matrix_rnd) {
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(row.data()));
+  }
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->inputs_count.emplace_back(cols);
+  std::vector<int> v_res(cols, 0);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(v_res.data()));
+  taskDataSeq->outputs_count.emplace_back(v_res.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+
+  for (int j = 0; j < cols; j++) {
+    int max_val = matrix_rnd[0][j];
+    for (int i = 1; i < rows; i++) {
+      if (matrix_rnd[i][j] > max_val) {
+        max_val = matrix_rnd[i][j];
+      }
+    }
+    ASSERT_EQ(v_res[j], 200);
+  }
+}
+
+TEST(Shurygin_S_max_po_stolbam_matrix_seq, find_max_val_in_columns_3000x3000_matrix) {
+  const int rows = 3000;
+  const int cols = 3000;
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  std::vector<std::vector<int>> matrix_rnd =
+      Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols);
+  for (auto& row : matrix_rnd) {
+    taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(row.data()));
+  }
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->inputs_count.emplace_back(cols);
+  std::vector<int> v_res(cols, 0);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(v_res.data()));
+  taskDataSeq->outputs_count.emplace_back(v_res.size());
+
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  ASSERT_TRUE(testTaskSequential.pre_processing());
+  ASSERT_TRUE(testTaskSequential.run());
+  ASSERT_TRUE(testTaskSequential.post_processing());
+
+  for (int j = 0; j < cols; j++) {
+    int max_val = matrix_rnd[0][j];
+    for (int i = 1; i < rows; i++) {
+      if (matrix_rnd[i][j] > max_val) {
+        max_val = matrix_rnd[i][j];
+      }
+    }
+    ASSERT_EQ(v_res[j], 200);
+  }
+}
diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp
new file mode 100644
index 00000000000..f1efe718b50
--- /dev/null
+++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp
@@ -0,0 +1,25 @@
+// Copyright 2023 Nesterov Alexander
+#pragma once
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace Shurygin_S_max_po_stolbam_matrix_seq {
+
+class TestTaskSequential : public ppc::core::Task {
+ public:
+  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+  static std::vector<int> generating_random_vector(int size, int lower_bound = 0, int upper_bound = 10);
+  static std::vector<std::vector<int>> generate_random_matrix(int rows, int columns);
+
+ private:
+  std::vector<std::vector<int>> input_;
+  std::vector<int> res_;
+};
+
+}  // namespace Shurygin_S_max_po_stolbam_matrix_seq
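All of the tests in this patch pass buffers through `ppc::core::TaskData` the same way: raw pointers go into `inputs`/`outputs` as `uint8_t *`, and element counts into `inputs_count`/`outputs_count`. A small helper makes the convention explicit; `pack_io` is hypothetical and shown only to illustrate the layout, not part of this repository:

```
// Hypothetical helper illustrating the TaskData convention used in these tests.
#include <cstdint>
#include <memory>
#include <vector>

#include "core/task/include/task.hpp"

template <typename T>
void pack_io(const std::shared_ptr<ppc::core::TaskData>& data, std::vector<T>& in, std::vector<T>& out) {
  data->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));  // borrowed, not owned
  data->inputs_count.emplace_back(in.size());
  data->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
  data->outputs_count.emplace_back(out.size());
}
```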
"seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +TEST(Shurygin_S_max_po_stolbam_matrix_seq_perf, test_pipeline_run) { + const int rows = 5000; + const int cols = 5000; + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(v_res[j], 200); + } +} + +TEST(Shurygin_S_max_po_stolbam_matrix_seq_perf, test_task_run) { + const int rows = 4560; + const int cols = 4560; + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + std::vector> matrix_rnd = + Shurygin_S_max_po_stolbam_matrix_seq::TestTaskSequential::generate_random_matrix(rows, cols); + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + std::vector v_res(cols, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int j = 0; j < cols; j++) { + ASSERT_EQ(v_res[j], 200); + } +} diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..29c74b03456 --- /dev/null +++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp @@ -0,0 +1,80 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp" + +#include +using namespace std::chrono_literals; + +namespace Shurygin_S_max_po_stolbam_matrix_seq { + +bool TestTaskSequential::pre_processing() { + internal_order_test(); + int rows = taskData->inputs_count[0]; + int columns = taskData->inputs_count[1]; + input_.resize(rows, std::vector(columns)); + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int 
diff --git a/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp
new file mode 100644
index 00000000000..29c74b03456
--- /dev/null
+++ b/tasks/seq/Shurygin_S_max_po_stolbam_matrix/src/ops_seq.cpp
@@ -0,0 +1,80 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/Shurygin_S_max_po_stolbam_matrix/include/ops_seq.hpp"
+
+#include <thread>
+using namespace std::chrono_literals;
+
+namespace Shurygin_S_max_po_stolbam_matrix_seq {
+
+bool TestTaskSequential::pre_processing() {
+  internal_order_test();
+  int rows = taskData->inputs_count[0];
+  int columns = taskData->inputs_count[1];
+  input_.resize(rows, std::vector<int>(columns));
+  for (int i = 0; i < rows; i++) {
+    int* input_matrix = reinterpret_cast<int *>(taskData->inputs[i]);
+    for (int j = 0; j < columns; j++) {
+      input_[i][j] = input_matrix[j];
+    }
+  }
+  res_.resize(columns);
+  return true;
+}
+
+bool TestTaskSequential::validation() {
+  internal_order_test();
+  if (taskData->inputs.empty() || taskData->outputs.empty()) {
+    return false;
+  }
+  if (taskData->inputs_count.size() < 2 || taskData->inputs_count[0] <= 0 || taskData->inputs_count[1] <= 0) {
+    return false;
+  }
+  if (taskData->outputs_count.size() != 1 || taskData->outputs_count[0] != taskData->inputs_count[1]) {
+    return false;
+  }
+  return true;
+}
+
+bool TestTaskSequential::run() {
+  internal_order_test();
+  for (size_t j = 0; j < input_[0].size(); j++) {
+    int max_val = input_[0][j];
+    for (size_t i = 1; i < input_.size(); i++) {
+      if (input_[i][j] > max_val) {
+        max_val = input_[i][j];
+      }
+    }
+    res_[j] = max_val;
+  }
+  return true;
+}
+
+bool TestTaskSequential::post_processing() {
+  internal_order_test();
+  int* output_matrix = reinterpret_cast<int *>(taskData->outputs[0]);
+  for (size_t i = 0; i < res_.size(); i++) {
+    output_matrix[i] = res_[i];
+  }
+  return true;
+}
+
+std::vector<int> TestTaskSequential::generating_random_vector(int size, int lower_bound, int upper_bound) {
+  std::vector<int> v1(size);
+  for (auto& num : v1) {
+    num = lower_bound + std::rand() % (upper_bound - lower_bound + 1);
+  }
+  return v1;
+}
+
+std::vector<std::vector<int>> TestTaskSequential::generate_random_matrix(int rows, int columns) {
+  std::vector<std::vector<int>> matrix1(rows, std::vector<int>(columns));
+  for (int i = 0; i < rows; ++i) {
+    matrix1[i] = generating_random_vector(columns, 1, 100);
+  }
+  for (int j = 0; j < columns; ++j) {
+    int random_row = std::rand() % rows;
+    matrix1[random_row][j] = 200;
+  }
+  return matrix1;
+}
+}  // namespace Shurygin_S_max_po_stolbam_matrix_seq
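`generate_random_matrix` above plants a sentinel value 200 in a random row of every column (all other entries are at most 100), which is what lets the large functional tests assert `v_res[j] == 200` without recomputing the maximum. Note that it draws from unseeded `std::rand()`; a `<random>`-based equivalent would look like this (a sketch, not the repository's helper):

```
// Sketch: seeded <random> replacement for generating_random_vector.
#include <random>
#include <vector>

std::vector<int> random_vector(int size, int lo = 1, int hi = 100) {
  std::mt19937 gen(std::random_device{}());     // seeded engine, no global state
  std::uniform_int_distribution<int> dist(lo, hi);
  std::vector<int> v(size);
  for (auto& x : v) x = dist(gen);
  return v;
}
```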
diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp
new file mode 100644
index 00000000000..0f874e379d3
--- /dev/null
+++ b/tasks/seq/baranov_a_num_of_orderly_violations/func_tests/main.cpp
@@ -0,0 +1,158 @@
+#include <gtest/gtest.h>
+
+#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp"
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_0_int) {
+  const int N = 0;
+  // Create data
+  std::vector<int> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_10_int) {
+  const int N = 10;
+  // Create data
+  std::vector<int> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_100_int) {
+  const int N = 100;
+  // Create data
+  std::vector<int> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_0_double) {
+  const int N = 0;
+  // Create data
+  std::vector<double> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_100_double) {
+  const int N = 100;
+  // Create data
+  std::vector<double> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_1000_double) {
+  const int N = 1000;
+  // Create data
+  std::vector<double> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
+TEST(baranov_a_num_of_orderly_violations_seq, Test_viol_10000_double) {
+  const int N = 10000;
+  // Create data
+  std::vector<double> arr(N);
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, N);
+  std::generate(arr.begin(), arr.end(), [&dist, &reng] { return dist(reng); });
+  std::shared_ptr<ppc::core::TaskData> data_seq = std::make_shared<ppc::core::TaskData>();
+  data_seq->inputs.emplace_back(reinterpret_cast<uint8_t *>(arr.data()));
+  data_seq->inputs_count.emplace_back(arr.size());
+  std::vector<int> out(1);
+  data_seq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  data_seq->outputs_count.emplace_back(1);
+  baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int> test1(data_seq);
+  ASSERT_EQ(test1.validation(), true);
+  test1.pre_processing();
+  test1.run();
+  test1.post_processing();
+  int num = test1.seq_proc(arr);
+  ASSERT_EQ(num, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp b/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp
new file mode 100644
index 00000000000..74c14417e4a
--- /dev/null
+++ b/tasks/seq/baranov_a_num_of_orderly_violations/include/header.hpp
@@ -0,0 +1,28 @@
+#pragma once
+#include <algorithm>
+#include <cstring>
+#include <random>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+namespace baranov_a_num_of_orderly_violations_seq {
+template <class iotype, class cntype>
+class num_of_orderly_violations : public ppc::core::Task {
+ public:
+  explicit num_of_orderly_violations(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(taskData_) {}
+  bool pre_processing() override;
+
+  bool validation() override;
+
+  bool run() override;
+
+  bool post_processing() override;
+
+  cntype seq_proc(std::vector<iotype> vec);
+
+ private:
+  std::vector<iotype> input_;
+  cntype num_;
+};
+
+}  // namespace baranov_a_num_of_orderly_violations_seq
diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp
new file mode 100644
index 00000000000..d34695946ed
--- /dev/null
+++ b/tasks/seq/baranov_a_num_of_orderly_violations/perf_tests/main.cpp
@@ -0,0 +1,94 @@
+
+#include <gtest/gtest.h>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp"
+
+TEST(sequential_baranov_a_num_of_orderly_violations_perf_test, test_pipeline_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(1, 0);
+
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, in.size());
+  std::generate(in.begin(), in.end(), [&dist, &reng] { return dist(reng); });
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  auto temp = testTaskSequential->seq_proc(in);
+
+  ASSERT_EQ(temp, out[0]);
+}
+
+TEST(sequential_baranov_a_num_of_orderly_violations_perf_test, test_task_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::random_device rd;
+  std::default_random_engine reng(rd());
+  std::uniform_int_distribution<int> dist(0, in.size());
+  std::generate(in.begin(), in.end(), [&dist, &reng] { return dist(reng); });
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  auto temp = testTaskSequential->seq_proc(in);
+
+  ASSERT_EQ(temp, out[0]);
+}
\ No newline at end of file
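The `source.cpp` below defines the `num_of_orderly_violations` member templates out of line and closes with explicit instantiations for `<int, int>` and `<double, int>`; any other type combination would fail at link time. The idiom in miniature, with a hypothetical `Counter` shown purely for illustration:

```
// counter.hpp -- declaration only.
#include <vector>
template <typename T>
struct Counter {
  int count(const std::vector<T>& v);
};

// counter.cpp -- out-of-line definition plus explicit instantiations.
template <typename T>
int Counter<T>::count(const std::vector<T>& v) { return static_cast<int>(v.size()); }
template struct Counter<int>;     // code emitted here
template struct Counter<double>;  // code emitted here; no other T will link
```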
diff --git a/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp b/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp
new file mode 100644
index 00000000000..c90a5422e84
--- /dev/null
+++ b/tasks/seq/baranov_a_num_of_orderly_violations/src/source.cpp
@@ -0,0 +1,53 @@
+#include "seq/baranov_a_num_of_orderly_violations/include/header.hpp"
+namespace baranov_a_num_of_orderly_violations_seq {
+
+template <class iotype, class cntype>
+cntype num_of_orderly_violations<iotype, cntype>::seq_proc(std::vector<iotype> vec) {
+  cntype num = 0;
+  int n = vec.size();
+  for (int i = 0; i < n - 1; ++i) {
+    if (vec[i] < vec[i + 1]) {
+      ++num;
+    }
+  }
+  return num;
+}
+
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::pre_processing() {
+  internal_order_test();
+  // Init vectors
+  int n = taskData->inputs_count[0];
+  input_ = std::vector<iotype>(n);
+  void* ptr_r = taskData->inputs[0];
+  void* ptr_d = input_.data();
+  memcpy(ptr_d, ptr_r, sizeof(iotype) * n);
+  // Init value for output
+  num_ = 0;
+  return true;
+}
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::validation() {
+  internal_order_test();
+  // Check count elements of output
+
+  return (taskData->outputs_count[0] == 1);
+}
+template <class iotype, class cntype>
+bool num_of_orderly_violations<iotype, cntype>::run() {
+  internal_order_test();
+  num_ = seq_proc(input_);
+
+  return true;
+}
+template <class iotype, class cntype>
+bool baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<iotype, cntype>::post_processing() {
+  internal_order_test();
+  reinterpret_cast<cntype *>(taskData->outputs[0])[0] = num_;
+  return true;
+}
+
+template class baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<int, int>;
+
+template class baranov_a_num_of_orderly_violations_seq::num_of_orderly_violations<double, int>;
+}  // namespace baranov_a_num_of_orderly_violations_seq
\ No newline at end of file
diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..4bcd3caa5e7
--- /dev/null
+++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/func_tests/main.cpp
@@ -0,0 +1,154 @@
+#include <gtest/gtest.h>
+
+#include "seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp"
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_100) {
+  const int count = 100;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(2);
+
+  in = beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector<int>(count);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  int index = testTaskSequential.position_of_first_neighbour_seq(in);
+  ASSERT_EQ(in[index], out[0]);
+  ASSERT_EQ(in[index + 1], out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_10000) {
+  const int count = 10000;
+
+  // Create data
+  std::vector<int> in(count, 5);
+  std::vector<int> out(2);
+
+  in = beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector<int>(count);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  int index = testTaskSequential.position_of_first_neighbour_seq(in);
+  ASSERT_EQ(in[index], out[0]);
+  ASSERT_EQ(in[index + 1], out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_vector_int_100_equal_elements) {
+  const int count = 1000;
+  const int elem = 7;
+
+  // Create data
+  std::vector<int> in(count, elem);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(elem, out[0]);
+  ASSERT_EQ(elem, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_1_size_vector_int) {
+  const int count = 1;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(-1, out[0]);
+  ASSERT_EQ(-1, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_0_size_vector_int) {
+  const int count = 0;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(-1, out[0]);
+  ASSERT_EQ(-1, out[1]);
+}
+
+TEST(beskhmelnova_k_most_different_neighbor_elements_seq, Test_2_size_vector_int) {
+  const int count = 2;
+
+  // Create data
+  std::vector<int> in(count);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int> testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(in[0], out[0]);
+  ASSERT_EQ(in[1], out[1]);
+}
diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp
new file mode 100644
index 00000000000..83258e17f96
--- /dev/null
+++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp
@@ -0,0 +1,31 @@
+#pragma once
+
+#include <cstring>
+#include <random>
+#include <utility>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace beskhmelnova_k_most_different_neighbor_elements_seq {
+
+template <typename DataType>
+std::vector<DataType> getRandomVector(int sz);
+
+template <typename DataType>
+class TestTaskSequential : public ppc::core::Task {
+ public:
+  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+  int position_of_first_neighbour_seq(std::vector<DataType> vector);
+
+ private:
+  std::vector<DataType> input_;
+  DataType res[2];
+};
+
+}  // namespace beskhmelnova_k_most_different_neighbor_elements_seq
\ No newline at end of file
diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..cc58280d96c
--- /dev/null
+++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/perf_tests/main.cpp
@@ -0,0 +1,84 @@
+#include <gtest/gtest.h>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp"
+
+TEST(sequential_beskhmelnova_k_most_different_neighbor_element_perf_test, test_pipeline_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count, 1);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  int index = testTaskSequential->position_of_first_neighbour_seq(in);
+  ASSERT_EQ(in[index], out[0]);
+  ASSERT_EQ(in[index + 1], out[1]);
+}
+
+TEST(sequential_beskhmelnova_k_most_different_neighbor_element_perf_test, test_task_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count, 1);
+  std::vector<int> out(2);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential =
+      std::make_shared<beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  int index = testTaskSequential->position_of_first_neighbour_seq(in);
+  ASSERT_EQ(in[index], out[0]);
+  ASSERT_EQ(in[index + 1], out[1]);
+}
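Both beskhmelnova test files `#include` `src/seq.cpp` rather than the header. That works because it makes the template definitions visible at the point of instantiation (the `.cpp` has no explicit instantiations), at the cost of recompiling the implementation into each test binary. The usual alternative is to define the templates in the header itself, sketched here with a hypothetical `most_different` helper:

```
// seq.hpp -- template defined inline in the header, so any DataType links.
#include <vector>

template <typename DataType>
int most_different(const std::vector<DataType>& v) {
  return v.size() < 2 ? -1 : 0;  // placeholder body for illustration
}
```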
diff --git a/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp
new file mode 100644
index 00000000000..376f5d85b0d
--- /dev/null
+++ b/tasks/seq/beskhmelnova_k_most_different_neighbor_elements/src/seq.cpp
@@ -0,0 +1,71 @@
+#include "seq/beskhmelnova_k_most_different_neighbor_elements/include/seq.hpp"
+
+template <typename DataType>
+std::vector<DataType> beskhmelnova_k_most_different_neighbor_elements_seq::getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<DataType> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = gen() % 100;
+  }
+  return vec;
+}
+
+template <typename DataType>
+int beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::position_of_first_neighbour_seq(
+    std::vector<DataType> vector) {
+  int n = vector.size();
+  if (n == 0 || n == 1) return -1;
+  DataType max_dif = abs(vector[0] - vector[1]);
+  DataType dif;
+  int index = 0;
+  for (int i = 1; i < n - 1; i++) {
+    dif = abs(vector[i] - vector[i + 1]);
+    if (dif > max_dif) {
+      max_dif = dif;
+      index = i;
+    }
+  }
+  return index;
+}
+
+template <typename DataType>
+bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::pre_processing() {
+  internal_order_test();
+  // Init value for input
+  int n = taskData->inputs_count[0];
+  input_ = std::vector<DataType>(n);
+  void* ptr_r = taskData->inputs[0];
+  void* ptr_d = input_.data();
+  memcpy(ptr_d, ptr_r, sizeof(DataType) * n);
+  return true;
+}
+
+template <typename DataType>
+bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count.size() == 1 && taskData->inputs_count[0] >= 0 && taskData->outputs_count[0] == 2;
+}
+
+template <typename DataType>
+bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::run() {
+  internal_order_test();
+  int index = position_of_first_neighbour_seq(input_);
+  if (index == -1) {
+    res[0] = -1;
+    res[1] = -1;
+    return true;
+  }
+  res[0] = input_[index];
+  res[1] = input_[index + 1];
+  return true;
+}
+
+template <typename DataType>
+bool beskhmelnova_k_most_different_neighbor_elements_seq::TestTaskSequential<DataType>::post_processing() {
+  internal_order_test();
+  reinterpret_cast<DataType *>(taskData->outputs[0])[0] = res[0];
+  reinterpret_cast<DataType *>(taskData->outputs[0])[1] = res[1];
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp
new file mode 100644
index 00000000000..d74d50c11c1
--- /dev/null
+++ b/tasks/seq/chernykh_a_num_of_alternations_signs/func_tests/main.cpp
@@ -0,0 +1,87 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp"
+
+TEST(chernykh_a_num_of_alternations_signs_seq, correct_alternating_signs_count) {
+  // Create data
+  auto input = std::vector<int>{3, -2, 4, -5, -1, 6};
+  auto output = std::vector<int>(1, 0);
+  auto want = 4;
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data);
+
+  ASSERT_TRUE(task.validation());
+  ASSERT_TRUE(task.pre_processing());
+  ASSERT_TRUE(task.run());
+  ASSERT_TRUE(task.post_processing());
+  ASSERT_EQ(want, output[0]);
+}
+
+TEST(chernykh_a_num_of_alternations_signs_seq, input_size_less_than_two_fails_validation) {
+  // Create data
+  auto input = std::vector<int>();
+  auto output = std::vector<int>(1, 0);
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data);
+
+  ASSERT_FALSE(task.validation());
+}
+
+TEST(chernykh_a_num_of_alternations_signs_seq, output_size_not_equal_one_fails_validation) {
+  // Create data
+  auto input = std::vector<int>{3, -2, 4, -5, -1, 6};
+  auto output = std::vector<int>();
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data);
+
+  ASSERT_FALSE(task.validation());
+}
+
+TEST(chernykh_a_num_of_alternations_signs_seq, all_elements_are_equal) {
+  // Create data
+  auto input = std::vector<int>(5, 0);
+  auto output = std::vector<int>(1, 0);
+  auto want = 0;
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = chernykh_a_num_of_alternations_signs_seq::Task(task_data);
+
+  ASSERT_TRUE(task.validation());
+  ASSERT_TRUE(task.pre_processing());
+  ASSERT_TRUE(task.run());
+  ASSERT_TRUE(task.post_processing());
+  ASSERT_EQ(want, output[0]);
+}
diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp
new file mode 100644
index 00000000000..e029d9e5017
--- /dev/null
+++ b/tasks/seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace chernykh_a_num_of_alternations_signs_seq {
+
+class Task : public ppc::core::Task {
+ public:
+  explicit Task(std::shared_ptr<ppc::core::TaskData> task_data) : ppc::core::Task(std::move(task_data)) {}
+  bool validation() override;
+  bool pre_processing() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<int> input;
+  int result{};
+};
+
+}  // namespace chernykh_a_num_of_alternations_signs_seq
\ No newline at end of file
diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp
new file mode 100644
index 00000000000..d1f45630c8f
--- /dev/null
+++ b/tasks/seq/chernykh_a_num_of_alternations_signs/perf_tests/main.cpp
@@ -0,0 +1,90 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp"
+
+TEST(chernykh_a_num_of_alternations_signs_seq, test_pipeline_run) {
+  // Create data
+  auto input = std::vector<int>(10'000'000, 0);
+  auto output = std::vector<int>(1, 0);
+  auto want = 0;
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = std::make_shared<chernykh_a_num_of_alternations_signs_seq::Task>(task_data);
+
+  ASSERT_TRUE(task->validation());
+  ASSERT_TRUE(task->pre_processing());
+  ASSERT_TRUE(task->run());
+  ASSERT_TRUE(task->post_processing());
+
+  // Create PerfAttributes
+  auto perf_attributes = std::make_shared<ppc::core::PerfAttr>();
+  perf_attributes->num_running = 10;
+  auto start = std::chrono::high_resolution_clock::now();
+  perf_attributes->current_timer = [&] {
+    auto current = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current - start).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create PerfResults
+  auto perf_results = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perf_analyzer = std::make_shared<ppc::core::Perf>(task);
+
+  perf_analyzer->pipeline_run(perf_attributes, perf_results);
+  ppc::core::Perf::print_perf_statistic(perf_results);
+  ASSERT_EQ(want, output[0]);
+}
+
+TEST(chernykh_a_num_of_alternations_signs_seq, test_task_run) {
+  // Create data
+  auto input = std::vector<int>(10'000'000, 0);
+  auto output = std::vector<int>(1, 0);
+  auto want = 0;
+
+  // Create TaskData
+  auto task_data = std::make_shared<ppc::core::TaskData>();
+  task_data->inputs.emplace_back(reinterpret_cast<uint8_t *>(input.data()));
+  task_data->inputs_count.emplace_back(input.size());
+  task_data->outputs.emplace_back(reinterpret_cast<uint8_t *>(output.data()));
+  task_data->outputs_count.emplace_back(output.size());
+
+  // Create Task
+  auto task = std::make_shared<chernykh_a_num_of_alternations_signs_seq::Task>(task_data);
+
+  ASSERT_TRUE(task->validation());
+  ASSERT_TRUE(task->pre_processing());
+  ASSERT_TRUE(task->run());
+  ASSERT_TRUE(task->post_processing());
+
+  // Create PerfAttributes
+  auto perf_attributes = std::make_shared<ppc::core::PerfAttr>();
+  perf_attributes->num_running = 10;
+  auto start = std::chrono::high_resolution_clock::now();
+  perf_attributes->current_timer = [&] {
+    auto current = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current - start).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create PerfResults
+  auto perf_results = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perf_analyzer = std::make_shared<ppc::core::Perf>(task);
+
+  perf_analyzer->task_run(perf_attributes, perf_results);
+  ppc::core::Perf::print_perf_statistic(perf_results);
+  ASSERT_EQ(want, output[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp
new file mode 100644
index 00000000000..b568b80e053
--- /dev/null
+++ b/tasks/seq/chernykh_a_num_of_alternations_signs/src/ops_seq.cpp
@@ -0,0 +1,32 @@
+#include "seq/chernykh_a_num_of_alternations_signs/include/ops_seq.hpp"
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] >= 2 && taskData->outputs_count[0] == 1;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::pre_processing() {
+  internal_order_test();
+  auto *input_ptr = reinterpret_cast<int *>(taskData->inputs[0]);
+  auto input_size = taskData->inputs_count[0];
+  input = std::vector<int>(input_ptr, input_ptr + input_size);
+  result = 0;
+  return true;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::run() {
+  internal_order_test();
+  auto input_size = input.size();
+  for (size_t i = 0; i < input_size - 1; i++) {
+    if ((input[i] ^ input[i + 1]) < 0) {
+      result++;
+    }
+  }
+  return true;
+}
+
+bool chernykh_a_num_of_alternations_signs_seq::Task::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int *>(taskData->outputs[0]) = result;
+  return true;
+}
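`run()` above counts sign alternations with `(input[i] ^ input[i + 1]) < 0`: on two's-complement integers the XOR has its sign bit set exactly when the two operands' sign bits differ, so the comparison is true only across a sign change, and zero counts as non-negative. A quick demonstration:

```
// Sign-change test: (a ^ b) < 0 iff a and b have opposite sign bits.
#include <cassert>

int main() {
  assert((3 ^ -2) < 0);    // opposite signs
  assert(!((3 ^ 4) < 0));  // same sign
  assert(!((0 ^ 7) < 0));  // zero behaves as non-negative
  return 0;
}
```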
diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp
new file mode 100644
index 00000000000..c9413ee86a9
--- /dev/null
+++ b/tasks/seq/chistov_a_sum_of_matrix_elements/func_tests/main.cpp
@@ -0,0 +1,113 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp"
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_int_sum_sequential) {
+  const int n = 3;
+  const int m = 4;
+  std::vector<int> global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<int>(n, m);
+  std::vector<int> reference_sum(1, 0);
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(global_matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+  taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+  chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int> TestTaskSequential(taskDataSeq);
+  ASSERT_EQ(TestTaskSequential.validation(), true);
+  TestTaskSequential.pre_processing();
+  TestTaskSequential.run();
+  TestTaskSequential.post_processing();
+
+  int sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m);
+  ASSERT_EQ(reference_sum[0], sum);
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_double_sum_sequential) {
+  const int n = 3;
+  const int m = 4;
+  std::vector<double> global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<double>(n, m);
+  std::vector<double> reference_sum(1, 0.0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(global_matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+  taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+  chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<double> TestTaskSequential(taskDataSeq);
+
+  ASSERT_EQ(TestTaskSequential.validation(), true);
+  TestTaskSequential.pre_processing();
+  TestTaskSequential.run();
+  TestTaskSequential.post_processing();
+  double sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m);
+
+  ASSERT_NEAR(reference_sum[0], sum, 1e-6);
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_sum_with_empty_matrix_sequential) {
+  std::vector<int> reference_sum(1, 0);
+  std::vector<int> empty_matrix;
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(empty_matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(empty_matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+  taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+  chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int> TestTaskSequential(taskDataSeq);
+  ASSERT_EQ(TestTaskSequential.validation(), true);
+  TestTaskSequential.pre_processing();
+  TestTaskSequential.run();
+  TestTaskSequential.post_processing();
+
+  ASSERT_EQ(reference_sum[0], 0);
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_sum_with_single_element_matrix_sequential) {
+  const int n = 1;
+  const int m = 1;
+  std::vector<int> global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<int>(n, m);
+  std::vector<int> reference_sum(1, 0);
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(global_matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(reference_sum.data()));
+  taskDataSeq->outputs_count.emplace_back(reference_sum.size());
+
+  chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int> TestTaskSequential(taskDataSeq);
+  ASSERT_EQ(TestTaskSequential.validation(), true);
+  TestTaskSequential.pre_processing();
+  TestTaskSequential.run();
+  TestTaskSequential.post_processing();
+
+  int sum = chistov_a_sum_of_matrix_elements_seq::classic_way_seq(global_matrix, n, m);
+  ASSERT_EQ(reference_sum[0], sum);
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, returns_empty_matrix_when_small_n_or_m_sequential) {
+  auto matrix1 = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<int>(0, 1);
+  EXPECT_TRUE(matrix1.empty());
+  auto matrix2 = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<int>(1, 0);
+  EXPECT_TRUE(matrix2.empty());
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_wrong_validation_sequential) {
+  std::vector<int> global_matrix;
+  std::vector<int> global_sum(2, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  const int n = 3;
+  const int m = 4;
+  global_matrix = chistov_a_sum_of_matrix_elements_seq::get_random_matrix_seq<int>(n, m);
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(global_matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(global_matrix.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(global_sum.data()));
+  taskDataSeq->outputs_count.emplace_back(global_sum.size());
+  chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int> TestTaskSequential(taskDataSeq);
+  ASSERT_EQ(TestTaskSequential.validation(), false);
+}
diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp b/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp
new file mode 100644
index 00000000000..60ae08fd01e
--- /dev/null
+++ b/tasks/seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp
@@ -0,0 +1,48 @@
+#pragma once
+#include <numeric>
+#include <vector>
+
+#include "core/task/include/task.hpp"
+
+namespace chistov_a_sum_of_matrix_elements_seq {
+
+template <typename T>
+std::vector<T> get_random_matrix_seq(const int n, const int m) {
+  if (n <= 0 || m <= 0) {
+    return std::vector<T>();
+  }
+
+  std::vector<T> matrix(n * m);
+  for (int i = 0; i < n * m; ++i) {
+    matrix[i] = static_cast<T>((std::rand() % 201) - 100);
+  }
+  return matrix;
+}
+
+template <typename T>
+T classic_way_seq(const std::vector<T> matrix, const int n, const int m) {
+  T result = 0;
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < m; ++j) {
+      result += matrix[i * m + j];
+    }
+  }
+  return result;
+}
+
+template <typename T>
+class TestTaskSequential : public ppc::core::Task {
+ public:
+  explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {}
+
+  bool pre_processing() override;
+  bool validation() override;
+  bool run() override;
+  bool post_processing() override;
+
+ private:
+  std::vector<T> input_;
+  T res{};
+};
+
+}  // namespace chistov_a_sum_of_matrix_elements_seq
diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp
new file mode 100644
index 00000000000..c0c75db9363
--- /dev/null
+++ b/tasks/seq/chistov_a_sum_of_matrix_elements/perf_tests/main.cpp
@@ -0,0 +1,81 @@
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "core/perf/include/perf.hpp"
+#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp"
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_pipeline_run_seq) {
+  const int n = 4000;
+  const int m = 3500;
+
+  // Create data
+  std::vector<int> in(n * m, 1);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential =
+      std::make_shared<chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]);
+}
+
+TEST(chistov_a_sum_of_matrix_elements_seq, test_task_run_seq) {
+  const int n = 6000;
+  const int m = 6000;
+
+  std::vector<int> in(n * m, 1);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto testTaskSequential =
+      std::make_shared<chistov_a_sum_of_matrix_elements_seq::TestTaskSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp b/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp
new file mode 100644
index 00000000000..6f2a540345b
--- /dev/null
+++ b/tasks/seq/chistov_a_sum_of_matrix_elements/src/ops_seq.cpp
@@ -0,0 +1,43 @@
+#include "seq/chistov_a_sum_of_matrix_elements/include/ops_seq.hpp"
+
+namespace chistov_a_sum_of_matrix_elements_seq {
+
+template <typename T>
+bool TestTaskSequential<T>::pre_processing() {
+  internal_order_test();
+
+  T* tmp_ptr = reinterpret_cast<T *>(taskData->inputs[0]);
+  input_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]);
+  return true;
+}
+
+template <typename T>
+bool TestTaskSequential<T>::validation() {
+  internal_order_test();
+
+  return taskData->outputs_count[0] == 1;
+}
+
+template <typename T>
+bool TestTaskSequential<T>::run() {
+  internal_order_test();
+
+  res = std::accumulate(input_.begin(), input_.end(), T{});
+  return true;
+}
+
+template <typename T>
+bool TestTaskSequential<T>::post_processing() {
+  internal_order_test();
+
+  if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) {
+    reinterpret_cast<T *>(taskData->outputs[0])[0] = res;
+    return true;
+  }
+  return false;
+}
+
+template class TestTaskSequential<int>;
+template class TestTaskSequential<double>;
+
+}  // namespace chistov_a_sum_of_matrix_elements_seq
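A note on the `std::accumulate` calls above, since this task is instantiated for both `int` and `double`: `std::accumulate` does its arithmetic in the type of the initial value, so summing a `std::vector<double>` with a literal `0` truncates every partial sum to `int`. That is why `run()` accumulates from `T{}` rather than a bare `0`. A quick demonstration of the difference:

```
// std::accumulate sums in the type of its init argument.
#include <cassert>
#include <numeric>
#include <vector>

int main() {
  std::vector<double> v{0.5, 0.5, 0.5};
  double as_int = std::accumulate(v.begin(), v.end(), 0);    // int math: each step truncates, yields 0
  double as_dbl = std::accumulate(v.begin(), v.end(), 0.0);  // double math: yields 1.5
  assert(as_int == 0.0 && as_dbl == 1.5);
  return 0;
}
```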
diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
new file mode 100644
index 00000000000..1b46665baa5
--- /dev/null
+++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/func_tests/main.cpp
@@ -0,0 +1,232 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <random>
+#include <vector>
+
+#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp"
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, EmptyMatrixTest) {
+  int cols = 0;
+  int rows = 0;
+
+  // Create data
+  std::vector<int> matrix = {};
+  std::vector<int> expres;
+  std::vector<int> ans = {};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests1) {
+  int cols = 2;
+  int rows = 2;
+
+  // Create data
+  std::vector<int> matrix = {1, 0, 2, 1};
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans = {3, 1};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  // taskDataSeq->inputs_count.emplace_back((size_t)1);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests2) {
+  int cols = 2000;
+  int rows = 2000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, SquareMatrixTests3) {
+  int cols = 3500;
+  int rows = 3500;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests1) {
+  int cols = 4;
+  int rows = 1;
+
+  // Create data
+  std::vector<int> matrix = {1, 0, 2, 1};
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans = {1, 0, 2, 1};
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests2) {
+  int cols = 1;
+  int rows = 100;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[0] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
+
+TEST(drozhdinov_d_sum_cols_matrix_seq, RectangleMatrixTests3) {
+  int cols = 2000;
+  int rows = 1000;
+
+  // Create data
+  std::vector<int> matrix(cols * rows, 0);
+  matrix[1] = 1;
+  std::vector<int> expres(cols, 0);
+  std::vector<int> ans(cols, 0);
+  ans[1] = 1;
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(matrix.data()));
+  taskDataSeq->inputs_count.emplace_back(matrix.size());
+  taskDataSeq->inputs_count.emplace_back(cols);
+  taskDataSeq->inputs_count.emplace_back(rows);
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(expres.data()));
+  taskDataSeq->outputs_count.emplace_back(expres.size());
+
+  // Create Task
+  drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq);
+  ASSERT_EQ(testTaskSequential.validation(), true);
+  testTaskSequential.pre_processing();
+  testTaskSequential.run();
+  testTaskSequential.post_processing();
+  ASSERT_EQ(expres, ans);
+}
ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, WrongValidationTest) { + int cols = 2; + int rows = 2; + + // Create data + std::vector matrix = {1, 0, 2, 1}; + std::vector expres(cols, 0); + std::vector ans = {3, 1}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(matrix.size()); + + // Create Task + drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(expres, ans); +} \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..37626dc5ff3 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +int makeLinCoords(int x, int y, int xSize); +std::vector calcMatrixSumSeq(const std::vector& matrix, int xSize, int ySize, int fromX, int toX); +namespace drozhdinov_d_sum_cols_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int rows{}; + int cols{}; + std::vector input_; + std::vector res; +}; + +} // namespace drozhdinov_d_sum_cols_matrix_seq \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..2b5e0c2f808 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/perf_tests/main.cpp @@ -0,0 +1,96 @@ +// Copyright 2023 Nesterov Alexander +// seq drozhdinov_d_sum_cols_matrix perf +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp" + +TEST(drozhdinov_d_sum_cols_matrix_seq, test_pipeline_run) { + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = 
std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); +} + +TEST(drozhdinov_d_sum_cols_matrix_seq, test_task_run) { + int cols = 5000; + int rows = 5000; + + // Create data + std::vector matrix(cols * rows, 0); + matrix[1] = 1; + std::vector expres(cols, 0); + std::vector ans(cols, 0); + ans[1] = 1; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs_count.emplace_back(cols); + taskDataSeq->inputs_count.emplace_back(rows); + // taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(expres.data())); + taskDataSeq->outputs_count.emplace_back(expres.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(expres, ans); +} \ No newline at end of file diff --git a/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp b/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..fe7ca169fd0 --- /dev/null +++ b/tasks/seq/drozhdinov_d_sum_cols_matrix/src/ops_seq.cpp @@ -0,0 +1,55 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/drozhdinov_d_sum_cols_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +int makeLinCoords(int x, int y, int xSize) { return y * xSize + x; } + +std::vector calcMatrixSumSeq(const std::vector& matrix, int xSize, int ySize, int fromX, int toX) { + std::vector result; + for (int x = fromX; x < toX; x++) { + int columnSum = 0; + for (int y = 0; y < ySize; y++) { + int linearizedCoordinate = makeLinCoords(x, y, xSize); + columnSum += matrix[linearizedCoordinate]; + } + result.push_back(columnSum); + } + return result; +} + +bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::vector(taskData->inputs_count[0]); + auto* ptr = reinterpret_cast(taskData->inputs[0]); + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = ptr[i]; + } + cols = taskData->inputs_count[1]; + rows = taskData->inputs_count[2]; + res = std::vector(cols, 0); + return true; +} + +bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[1] == taskData->outputs_count[0]; +} + +bool 
drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + res = calcMatrixSumSeq(input_, cols, rows, 0, cols); + return true; +} + +bool drozhdinov_d_sum_cols_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < cols; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} diff --git a/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp b/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..8cc451adfcd --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/func_tests/main.cpp @@ -0,0 +1,159 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include + +#include "seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +TEST(ermolaev_v_min_matrix_seq, test_min_10x10) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 10; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_10x100) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 10; + const int count_columns = 50; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_100x10) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 100; + const int count_columns = 10; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = 
+ ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} + +TEST(ermolaev_v_min_matrix_seq, test_min_100x100) { + std::random_device dev; + std::mt19937 gen(dev()); + + const int count_rows = 100; + const int count_columns = 100; + const int gen_min = -500; + const int gen_max = 500; + int ref = INT_MIN; + + // Create data + std::vector out(1, INT_MAX); + std::vector> in = + ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + + int index = gen() % (count_rows * count_columns); + in[index / count_columns][index / count_rows] = ref; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + ermolaev_v_min_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ref, out[0]); +} diff --git a/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp b/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..1077c2182db --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace ermolaev_v_min_matrix_seq { + +std::vector getRandomVector(int sz, int min = 0, int max = 100); +std::vector> getRandomMatrix(int rows, int columns, int min = 0, int max = 100); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace ermolaev_v_min_matrix_seq \ No newline at end of file diff --git a/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp b/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..76b96d0072e --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/perf_tests/main.cpp @@ -0,0 +1,108 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include 
"seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +TEST(ermolaev_v_min_matrix_seq, test_pipeline_run) { + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataSeq->outputs_count.emplace_back(global_min.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, global_min[0]); +} + +TEST(sequential_ermolaev_v_min_matrix_seq, test_task_run) { + std::vector> global_matrix; + std::vector global_min(1, INT_MAX); + int ref = INT_MIN; + + std::random_device dev; + std::mt19937 gen(dev()); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + int count_rows = 4000; + int count_columns = 4000; + int gen_min = -500; + int gen_max = 500; + + global_matrix = ermolaev_v_min_matrix_seq::getRandomMatrix(count_rows, count_columns, gen_min, gen_max); + int index = gen() % (count_rows * count_columns); + global_matrix[index / count_columns][index / count_rows] = ref; + + for (unsigned int i = 0; i < global_matrix.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix[i].data())); + taskDataSeq->inputs_count.emplace_back(count_rows); + taskDataSeq->inputs_count.emplace_back(count_columns); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_min.data())); + taskDataSeq->outputs_count.emplace_back(global_min.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(ref, 
global_min[0]); +} \ No newline at end of file diff --git a/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp b/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..57d1fba2b9e --- /dev/null +++ b/tasks/seq/ermolaev_v_min_matrix/src/ops_seq.cpp @@ -0,0 +1,67 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/ermolaev_v_min_matrix/include/ops_seq.hpp" + +#include +#include + +using namespace std::chrono_literals; + +std::vector ermolaev_v_min_matrix_seq::getRandomVector(int sz, int min, int max) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min + gen() % (max - min + 1); + } + return vec; +} + +std::vector> ermolaev_v_min_matrix_seq::getRandomMatrix(int rows, int columns, int min, int max) { + std::vector> vec(rows); + + for (int i = 0; i < rows; i++) { + vec[i] = ermolaev_v_min_matrix_seq::getRandomVector(columns, min, max); + } + return vec; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init vectors + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + + // Init value for output + res_ = INT_MAX; + return true; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] > 0 && taskData->inputs_count[1] > 0 && taskData->outputs_count[0] == 1; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); i++) { + for (size_t j = 0; j < input_[i].size(); j++) { + if (input_[i][j] < res_) { + res_ = input_[i][j]; + } + } + } + return true; +} + +bool ermolaev_v_min_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} diff --git a/tasks/seq/example/src/ops_seq.cpp b/tasks/seq/example/src/ops_seq.cpp index c1a5d4997f7..085ae82f5bf 100644 --- a/tasks/seq/example/src/ops_seq.cpp +++ b/tasks/seq/example/src/ops_seq.cpp @@ -24,7 +24,6 @@ bool nesterov_a_test_task_seq::TestTaskSequential::run() { for (int i = 0; i < input_; i++) { res++; } - std::this_thread::sleep_for(20ms); return true; } diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp new file mode 100644 index 00000000000..4a8f0621500 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/func_tests/main.cpp @@ -0,0 +1,178 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include + +#include + +#include "seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_10_1) { + const int count = 10; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + 
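The min-matrix `run()` above walks the matrix with an explicit double loop. An equivalent formulation reduces each row with `std::min_element`; a sketch under the same assumptions the task's `validation()` already enforces (non-empty rows and columns):

```
#include <algorithm>
#include <climits>
#include <vector>

// Reference minimum over a matrix stored as rows of equal length.
int matrixMin(const std::vector<std::vector<int>>& m) {
  int best = INT_MAX;
  for (const auto& row : m) {
    best = std::min(best, *std::min_element(row.begin(), row.end()));
  }
  return best;
}
```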
filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(100, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_20_1) { + const int size_m = 10; + const int size_n = 20; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(200, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_20_10_1) { + const int size_m = 20; + const int size_n = 10; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(200, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_1_1_1) { + const int size_m = 1; + const int size_n = 1; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(1, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Sum_10_20_different) { + const int size_m = 10; + const int size_n = 20; + + // Create data + std::vector> in(size_m, std::vector(size_n, 1)); + std::vector out(1, 0); + + for (int i = 0; i < size_m; ++i) { + for (int j = 0; j < size_n; ++j) { + in[i][j] = (i * size_n + j + 1); + } + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < size_m; ++i) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(size_n); + taskDataSeq->inputs_count.emplace_back(size_m); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + 
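The expected value `20100` in `Test_Sum_10_20_different` follows from the fill pattern `in[i][j] = i * size_n + j + 1`, i.e. the values 1..200, whose total is the arithmetic series 200 * 201 / 2. A compile-time sanity check of that identity (illustrative only):

```
constexpr long long seriesSum(long long n) { return n * (n + 1) / 2; }
static_assert(seriesSum(200) == 20100, "total of 1..200 for the 10x20 test");
```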
filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(20100, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements_seq, Test_Empty_Matrix) { + const int count = 0; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + filatev_v_sum_of_matrix_elements_seq::SumMatrix sumMatrix(taskDataSeq); + ASSERT_EQ(sumMatrix.validation(), true); + sumMatrix.pre_processing(); + sumMatrix.run(); + sumMatrix.post_processing(); + + ASSERT_EQ(0, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp b/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..83df1ea99c6 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp @@ -0,0 +1,29 @@ +// Filatev Vladislav Sum_of_matrix_elements +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace filatev_v_sum_of_matrix_elements_seq { + +long long sumVector(std::vector vector); +std::vector> getRandomMatrix(int size_n, int size_m); + +class SumMatrix : public ppc::core::Task { + public: + explicit SumMatrix(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector matrix; + long long summ = 0; + int size_n, size_m; +}; + +} // namespace filatev_v_sum_of_matrix_elements_seq \ No newline at end of file diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp new file mode 100644 index 00000000000..d32c3b2d1b3 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/perf_tests/main.cpp @@ -0,0 +1,87 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +TEST(filatev_v_sum_of_matrix_elements, test_pipeline_run) { + const int count = 10000; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto sumMatrix = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return 
static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrix); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(100000000, out[0]); +} + +TEST(filatev_v_sum_of_matrix_elements, test_task_run) { + const int count = 10000; + + // Create data + std::vector> in(count, std::vector(count, 1)); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (int i = 0; i < count; i++) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + } + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->inputs_count.emplace_back(count); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(1); + + // Create Task + auto sumMatrix = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(sumMatrix); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(100000000, out[0]); +} diff --git a/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp b/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..2f12ec2e3a6 --- /dev/null +++ b/tasks/seq/filatev_v_sum_of_matrix_elements/src/ops_seq.cpp @@ -0,0 +1,44 @@ +// Filatev Vladislav Sum_of_matrix_elements +#include "seq/filatev_v_sum_of_matrix_elements/include/ops_seq.hpp" + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::pre_processing() { + internal_order_test(); + + summ = 0; + size_n = taskData->inputs_count[0]; + size_m = taskData->inputs_count[1]; + matrix = std::vector(size_m * size_n); + + for (int i = 0; i < size_m; ++i) { + auto* temp = reinterpret_cast(taskData->inputs[i]); + + for (int j = 0; j < size_n; ++j) { + matrix[i * size_n + j] = temp[j]; + } + } + + return true; +} + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::validation() { + internal_order_test(); + + return taskData->inputs_count[0] >= 0 && taskData->inputs_count[1] >= 0 && taskData->outputs_count[0] == 1; +} + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::run() { + internal_order_test(); + + for (long unsigned int i = 0; i < matrix.size(); ++i) { + summ += matrix[i]; + } + + return true; +} + +bool filatev_v_sum_of_matrix_elements_seq::SumMatrix::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = summ; + return true; +} diff --git a/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp b/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp new file mode 100644 index 00000000000..a189d6d3df2 --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/func_tests/main.cpp @@ -0,0 +1,153 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include + +#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp" + +TEST(filateva_e_number_sentences_line_seq, 
one_sentence_line_1) { + // Create data + std::string line = "Hello world."; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, one_sentence_line_2) { + // Create data + std::string line = "Hello world"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, one_sentence_line_3) { + // Create data + std::string line = "Hello world!"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, one_sentence_line_4) { + // Create data + std::string line = "Hello world?"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, several_sentence_line_1) { + // Create data + std::string line = "Hello world. How many words are in this sentence? 
The task of parallel programming."; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(3, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, several_sentence_line_2) { + // Create data + std::string line = "Hello world. How many words are in this sentence? The task of parallel programming"; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(3, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, empty_string) { + // Create data + std::string line; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + filateva_e_number_sentences_line_seq::NumberSentencesLine NumS(taskDataSeq); + ASSERT_EQ(NumS.validation(), true); + NumS.pre_processing(); + NumS.run(); + NumS.post_processing(); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp b/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp new file mode 100644 index 00000000000..68e316f25c1 --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/include/ops_seq.hpp @@ -0,0 +1,23 @@ +// Filateva Elizaveta Number_of_sentences_per_line + +#include +#include + +#include "core/task/include/task.hpp" + +namespace filateva_e_number_sentences_line_seq { + +class NumberSentencesLine : public ppc::core::Task { + public: + explicit NumberSentencesLine(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string line; + int sentence_count; +}; + +} // namespace filateva_e_number_sentences_line_seq \ No newline at end of file diff --git a/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp b/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp new file mode 100644 index 00000000000..a4e943b6bbe --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Filateva Elizaveta Number_of_sentences_per_line +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp" + +TEST(filateva_e_number_sentences_line_seq, test_pipeline_run) { + const int count = 20; + + // Create data + std::string line("Helo world."); + std::vector out(1, 0); + + for 
(int i = 0; i < count; ++i) { + line += line; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto NumS = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1048576, out[0]); +} + +TEST(filateva_e_number_sentences_line_seq, test_task_run) { + const int count = 20; + + // Create data + std::string line("Helo world."); + std::vector out(1, 0); + + for (int i = 0; i < count; ++i) { + line += line; + } + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(line.data())); + taskDataSeq->inputs_count.emplace_back(1); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto NumS = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(NumS); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1048576, out[0]); +} diff --git a/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp b/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp new file mode 100644 index 00000000000..158019e4d26 --- /dev/null +++ b/tasks/seq/filateva_e_number_sentences_line/src/ops_seq.cpp @@ -0,0 +1,38 @@ +// Filateva Elizaveta Number_of_sentences_per_line + +#include "seq/filateva_e_number_sentences_line/include/ops_seq.hpp" + +#include + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::pre_processing() { + internal_order_test(); + // Init value for input and output + line = std::string(std::move(reinterpret_cast(taskData->inputs[0]))); + sentence_count = 0; + return true; +} + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::validation() { + internal_order_test(); + // Check count elements of output + return taskData->inputs_count[0] == 1 && taskData->outputs_count[0] == 1; +} + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::run() { + internal_order_test(); + for (long unsigned int i = 0; i < line.size(); ++i) { + if (line[i] == '.' || line[i] == '?' 
|| line[i] == '!') { + ++sentence_count; + } + } + if (!line.empty() && line.back() != '.' && line.back() != '?' && line.back() != '!') { + ++sentence_count; + } + return true; +} + +bool filateva_e_number_sentences_line_seq::NumberSentencesLine::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = sentence_count; + return true; +} diff --git a/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp b/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp new file mode 100644 index 00000000000..f6628b0e1e6 --- /dev/null +++ b/tasks/seq/gusev_n_trapezoidal_rule/func_tests/main.cpp @@ -0,0 +1,147 @@ +// Copyright 2023 Nesterov Alexander +#define _USE_MATH_DEFINES +#include + +#include +#include + +#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp" + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_x_squared) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + ASSERT_TRUE(testTaskSequential->validation()); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = 0.5; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x; }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_sin_x) { + const double a = 0.0; + const double b = M_PI; + const int n = 1000; + + const double expected_result = 2.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::sin(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + 
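For reference, the expected values in these integration tests come from the composite trapezoidal rule that the task implements; on a uniform grid it reads

$$\int_a^b f(x)\,dx \;\approx\; \frac{h}{2}\Big(f(a) + 2\sum_{i=1}^{n-1} f(a+ih) + f(b)\Big), \qquad h = \frac{b-a}{n},$$

with error of order $O(h^2)$ for twice-differentiable integrands, which is why $n = 1000$ comfortably meets the `1e-3` tolerances asserted here.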
testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_integration_exp_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = std::exp(1.0) - 1.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::exp(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_set_function) { + std::vector in = {0.0, 1.0, 1000}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential.set_function(func); + + double x = 2.0; + double expected_result = 4.0; + ASSERT_EQ(func(x), expected_result); +} diff --git a/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp b/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp new file mode 100644 index 00000000000..978d61fa031 --- /dev/null +++ b/tasks/seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp @@ -0,0 +1,34 @@ +// Copyright 2024 Nesterov Alexander +#pragma once + +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace gusev_n_trapezoidal_rule_seq { + +class TrapezoidalIntegrationSequential : public ppc::core::Task { + public: + explicit TrapezoidalIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +} // namespace gusev_n_trapezoidal_rule_seq \ No newline at end of file diff --git a/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp b/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp new file mode 100644 index 00000000000..f8aa8d4adb7 --- /dev/null +++ b/tasks/seq/gusev_n_trapezoidal_rule/perf_tests/main.cpp @@ -0,0 +1,92 @@ +// Copyright 2024 Nesterov Alexander +#define _USE_MATH_DEFINES +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp" + +TEST(gusev_n_trapezoidal_rule_seq, test_pipeline_run) { + const double a = 0.0; + const double b = 1.0; + const int n = 10000000; + + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + 
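Every perf test in this diff rebuilds the same elapsed-seconds closure inline. A self-contained sketch of the pattern (the factory name `makeElapsedTimer` is hypothetical; `std::chrono::steady_clock` appears here because it is monotonic, whereas the tests in this diff use `high_resolution_clock`):

```
#include <chrono>

// Returns a callable reporting seconds elapsed since its creation.
auto makeElapsedTimer() {
  const auto t0 = std::chrono::steady_clock::now();
  return [t0] {
    return std::chrono::duration<double>(std::chrono::steady_clock::now() - t0).count();
  };
}
// Usage, assuming current_timer accepts any double() callable:
// perfAttr->current_timer = makeElapsedTimer();
```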
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(gusev_n_trapezoidal_rule_seq, test_task_run) { + const double a = 0.0; + const double b = 1.0; + const int n = 10000000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = + std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + + perfAnalyzer->task_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} diff --git a/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp b/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp new file mode 100644 index 00000000000..251891b2fe3 --- /dev/null +++ b/tasks/seq/gusev_n_trapezoidal_rule/src/ops_seq.cpp @@ -0,0 +1,56 @@ +#include "seq/gusev_n_trapezoidal_rule/include/ops_seq.hpp" + +#include +#include + +bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* inputs = reinterpret_cast(taskData->inputs[0]); + + a_ = inputs[0]; + b_ = inputs[1]; + n_ = static_cast(inputs[2]); + + result_ = 0.0; + return true; +} + +bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1; +} + +bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::run() { + internal_order_test(); + + result_ = integrate(func_, a_, b_, n_); + + return true; +} + +bool gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = result_; 
+ return true; +} + +double gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double step = (b - a) / n; + double area = 0.0; + + for (int i = 0; i < n; ++i) { + double x0 = a + i * step; + double x1 = a + (i + 1) * step; + area += (f(x0) + f(x1)) * step / 2.0; + } + + return area; +} + +void gusev_n_trapezoidal_rule_seq::TrapezoidalIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp b/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp new file mode 100644 index 00000000000..11843e79221 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/func_tests/main.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include + +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +TEST(kabalova_v_count_symbols_seq, EmptyString) { + std::string str; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, OneSymbolStringNotLetter) { + std::string str = "1"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, OneSymbolStringLetter) { + std::string str = "a"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(kabalova_v_count_symbols_seq, string1) { + std::string str = "string;"; + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + 
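The `integrate` above evaluates the integrand twice per subinterval: `f(x1)` of step `i` is recomputed as `f(x0)` of step `i + 1`. A mathematically equivalent sketch that touches each node once — an optimization idea, not the task's actual code:

```
#include <functional>

double integrateOnce(const std::function<double(double)>& f, double a, double b, int n) {
  double h = (b - a) / n;
  double sum = 0.5 * (f(a) + f(b));  // endpoints carry weight 1/2
  for (int i = 1; i < n; ++i) {
    sum += f(a + i * h);  // interior nodes carry weight 1
  }
  return sum * h;
}
```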
taskDataSeq->outputs_count.emplace_back(out.size()); + // Create Task + kabalova_v_count_symbols_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(6, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp b/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp new file mode 100644 index 00000000000..fe95ea8339a --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/include/count_symbols.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Kabalova Valeria +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace kabalova_v_count_symbols_seq { + +int countSymbols(std::string& str); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_{}; + int result{}; +}; + +} // namespace kabalova_v_count_symbols_seq \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp b/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp new file mode 100644 index 00000000000..6fe51aa19a8 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/perf_tests/main.cpp @@ -0,0 +1,85 @@ +// Copyright 2024 Kabalova Valeria +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +TEST(kabalova_v_count_symbols_seq_perf_test, test_pipeline_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} + +TEST(kabalova_v_count_symbols_seq_perf_test, test_task_run) { + std::string string = "string"; + std::string str; + for (int i = 0; i < 20000; i++) { + str += string; + } + + // Create data + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(str.data())); + taskDataSeq->inputs_count.emplace_back(str.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto 
testTaskSequential = std::make_shared<kabalova_v_count_symbols_seq::TestTaskSequential>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 5000; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); +} \ No newline at end of file diff --git a/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp b/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp new file mode 100644 index 00000000000..3d07c9f2423 --- /dev/null +++ b/tasks/seq/kabalova_v_count_symbols/src/count_symbols.cpp @@ -0,0 +1,44 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/kabalova_v_count_symbols/include/count_symbols.hpp" + +#include <cctype> +#include <chrono> +#include <string> + +using namespace std::chrono_literals; + +int kabalova_v_count_symbols_seq::countSymbols(std::string& str) { + int result = 0; + for (size_t i = 0; i < str.size(); i++) { + if (isalpha(str[i]) != 0) { + result++; + } + } + return result; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + input_ = std::string(reinterpret_cast<char*>(taskData->inputs[0]), taskData->inputs_count[0]); + result = 0; + return true; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::validation() { + internal_order_test(); + // Input: a single string; output: a single number - the count of alphabetic characters in the string. 
+ return taskData->outputs_count[0] == 1; // inputs_count[0] is unsigned, so checking it >= 0 was a tautology; an empty input string is allowed +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::run() { + internal_order_test(); + result = countSymbols(input_); + return true; +} + +bool kabalova_v_count_symbols_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int*>(taskData->outputs[0])[0] = result; + return true; +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp new file mode 100644 index 00000000000..5676576392d --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/func_tests/main.cpp @@ -0,0 +1,138 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <string> +#include <vector> + +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_numeric_characters) { + std::string test_string = "1122334455"; + + char target_character = '2'; + int expected_count = 2; + + std::vector<std::string> input_strings(1, test_string); + std::vector<char> target_characters(1, target_character); + std::vector<int> output(1, 0); + + auto taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + 
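// The string and the target character are each wrapped in a one-element + // vector so their addresses can be handed to TaskData as input buffers. +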
taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_absent_character_in_repeated_string) { + std::string test_string(500, 'x'); + + char target_character = 'y'; + int expected_count = 0; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + ASSERT_EQ(expected_count, output[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_special_characters) { + std::string test_string = "@@##!!&&"; + + char target_character = '#'; + int expected_count = 2; + + std::vector input_strings(1, test_string); + std::vector target_characters(1, target_character); + std::vector output(1, 0); + + auto taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(input_strings.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(target_characters.data())); + taskDataSeq->inputs_count.emplace_back(input_strings.size()); + taskDataSeq->inputs_count.emplace_back(target_characters.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(output.data())); + taskDataSeq->outputs_count.emplace_back(output.size()); + + kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential freq_char_task(taskDataSeq); + ASSERT_TRUE(freq_char_task.validation()); + freq_char_task.pre_processing(); + freq_char_task.run(); + freq_char_task.post_processing(); + + ASSERT_EQ(expected_count, output[0]); +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp new file mode 100644 index 00000000000..584ec2c3352 --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace kazunin_n_count_freq_a_char_in_string_seq { + +class CountFreqCharTaskSequential : public ppc::core::Task { + public: + explicit CountFreqCharTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool run() override; + bool validation() override; + bool pre_processing() override; + bool post_processing() override; + + private: + char target_character_{}; + int frequency_count_ = 0; + std::string input_string_; +}; +} // namespace kazunin_n_count_freq_a_char_in_string_seq diff --git 
a/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp new file mode 100644 index 00000000000..fd72abfffa3 --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/perf_tests/main.cpp @@ -0,0 +1,84 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_pipeline_run) { + std::string input_str(95000, 'o'); + char target_char = 'o'; + int expected_frequency = 95000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(kazunin_n_count_freq_a_char_in_string_seq, test_task_run) { + std::string input_str(95000, 'o'); + char target_char = 'o'; + int expected_frequency = 95000; + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = + std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 1; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(charFrequencyTask); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(expected_frequency, out[0]); +} diff --git a/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp b/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp new file mode 100644 index 00000000000..99ae27c5b39 --- /dev/null +++ b/tasks/seq/kazunin_n_count_freq_a_char_in_string/src/ops_seq.cpp @@ -0,0 +1,37 @@ +// Copyright 2024 Nesterov 
Alexander +#include "seq/kazunin_n_count_freq_a_char_in_string/include/ops_seq.hpp" + +#include +#include +#include + +namespace kazunin_n_count_freq_a_char_in_string_seq { +bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::pre_processing() { + internal_order_test(); + input_string_ = *reinterpret_cast(taskData->inputs[0]); + target_character_ = *reinterpret_cast(taskData->inputs[1]); + frequency_count_ = 0; + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1; +} + +bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::run() { + internal_order_test(); + for (const auto& ch : input_string_) { + if (ch == target_character_) { + ++frequency_count_; + } + } + return true; +} + +bool kazunin_n_count_freq_a_char_in_string_seq::CountFreqCharTaskSequential::post_processing() { + internal_order_test(); + *reinterpret_cast(taskData->outputs[0]) = frequency_count_; + return true; +} +} // namespace kazunin_n_count_freq_a_char_in_string_seq diff --git a/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp b/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp new file mode 100644 index 00000000000..760942f7ca7 --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/func_tests/main.cpp @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +#include "../include/avg_seq.hpp" +#include "core/task/include/task.hpp" +#include "gtest/gtest.h" + +#define FUNC_SEQ_TEST(InType, OutType, Size, Value) \ + \ + TEST(khasanyanov_k_average_vector_seq, test_seq_##InType##_##Size) { \ + std::vector in(Size, static_cast(Value)); \ + std::vector out(1, 0.0); \ + std::shared_ptr taskData = \ + khasanyanov_k_average_vector_seq::create_task_data(in, out); \ + khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential testTask(taskData); \ + RUN_TASK(testTask); \ + EXPECT_NEAR(out[0], static_cast(Value), 1e-5); \ + } + +#define RUN_FUNC_SEQ_TESTS(Size, Value) \ + FUNC_SEQ_TEST(int8_t, double, Size, Value) \ + FUNC_SEQ_TEST(int16_t, double, Size, Value) \ + FUNC_SEQ_TEST(int32_t, double, Size, Value) \ + FUNC_SEQ_TEST(int64_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint8_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint16_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint32_t, double, Size, Value) \ + FUNC_SEQ_TEST(uint64_t, double, Size, Value) \ + FUNC_SEQ_TEST(double, double, Size, Value) \ + FUNC_SEQ_TEST(float, double, Size, Value) + +TEST(khasanyanov_k_average_vector_seq, test_random) { + std::vector in = khasanyanov_k_average_vector_seq::get_random_vector(15); + std::vector out(1, 0.0); + + std::shared_ptr taskData = + khasanyanov_k_average_vector_seq::create_task_data(in, out); + + khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential testTask(taskData); + RUN_TASK(testTask); + + double expect_res = std::accumulate(in.begin(), in.end(), 0.0, std::plus()) / in.size(); + EXPECT_NEAR(out[0], expect_res, 1e-5); +} + +#define RUN_ALL_FUNC_TESTS() \ + RUN_FUNC_SEQ_TESTS(1234, 7.7) \ + RUN_FUNC_SEQ_TESTS(2000, 10) \ + RUN_FUNC_SEQ_TESTS(9, 77) \ + RUN_FUNC_SEQ_TESTS(3011, 111) + +RUN_ALL_FUNC_TESTS() \ No newline at end of file diff --git a/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp b/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp new file mode 100644 index 00000000000..be6fa872032 --- /dev/null +++ 
b/tasks/seq/khasanyanov_k_average_vector/include/avg_seq.hpp @@ -0,0 +1,96 @@ +#ifndef _AVG_SEQ_HPP_ +#define _AVG_SEQ_HPP_ + +#include <gtest/gtest.h> + +#include <algorithm> +#include <functional> +#include <iterator> +#include <memory> +#include <numeric> +#include <random> +#include <vector> + +#include "core/task/include/task.hpp" + +#ifndef RUN_TASK +#define RUN_TASK(task) \ + ASSERT_TRUE((task).validation()); \ + (task).pre_processing(); \ + (task).run(); \ + (task).post_processing(); + +#endif +namespace khasanyanov_k_average_vector_seq { + +template <class T> +std::vector<T> get_random_vector(size_t size) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector<T> vec(size); + for (size_t i = 0; i < size; i++) { + vec[i] = static_cast<T>(gen() % 1000 + (gen() % 100) / 100.0); + } + return vec; +} + +template <class In, class Out> +std::shared_ptr<ppc::core::TaskData> create_task_data(std::vector<In>& in, std::vector<Out>& out) { + auto taskData = std::make_shared<ppc::core::TaskData>(); + taskData->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data())); + taskData->inputs_count.emplace_back(in.size()); + taskData->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data())); + taskData->outputs_count.emplace_back(out.size()); + return taskData; +} + +//=========================================sequential========================================= + +template <class In, class Out> +class AvgVectorSEQTaskSequential : public ppc::core::Task { + std::vector<In> input_; + Out avg = 0.0; + + public: + explicit AvgVectorSEQTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; +}; + +template <class In, class Out> +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<In, Out>::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1 && taskData->inputs_count[0] > 0; +} + +template <class In, class Out> +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<In, Out>::pre_processing() { + internal_order_test(); + // reserve (not resize) so back_inserter does not append after N default-constructed elements + input_.clear(); + input_.reserve(taskData->inputs_count[0]); + auto* tmp = reinterpret_cast<In*>(taskData->inputs[0]); + std::copy(tmp, tmp + taskData->inputs_count[0], std::back_inserter(input_)); + avg = 0.0; + return true; +} + +template <class In, class Out> +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<In, Out>::run() { + internal_order_test(); + avg = static_cast<Out>(std::accumulate(input_.begin(), input_.end(), 0.0, std::plus<>())); + avg /= static_cast<Out>(taskData->inputs_count[0]); + // std::this_thread::sleep_for(std::chrono::milliseconds(5)); + return true; +} + +template <class In, class Out> +bool khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<In, Out>::post_processing() { + internal_order_test(); + reinterpret_cast<Out*>(taskData->outputs[0])[0] = avg; + return true; +} + +} // namespace khasanyanov_k_average_vector_seq + +#endif // !_AVG_SEQ_HPP_ diff --git a/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp b/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp new file mode 100644 index 00000000000..7d798b0f8cb --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/perf_tests/main.cpp @@ -0,0 +1,68 @@ +#include <gtest/gtest.h> + +#include <memory> +#include <vector> + +#include "core/perf/include/perf.hpp" +#include "seq/khasanyanov_k_average_vector/include/avg_seq.hpp" + +//=========================================sequence========================================= + +const int SIZE = 1220000; + +TEST(khasanyanov_k_average_vector_seq, test_pipeline_run) { + std::vector<int> global_vec(SIZE, 4); + std::vector<double> average(1, 0.0); + + std::shared_ptr<ppc::core::TaskData> taskData = + khasanyanov_k_average_vector_seq::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared<khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<int, double>>(taskData); + + RUN_TASK(*testAvgVectorSequence); 
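+ // RUN_TASK above validates and executes the task once before timing; the + // perf attributes below drive the timed repetitions (num_running presumably + // sets how many times Perf re-runs the task).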
+ + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testAvgVectorSequence); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4, average[0]); +} + +TEST(khasanyanov_k_average_vector_seq, test_task_run) { + std::vector<int> global_vec(SIZE, 4); + std::vector<double> average(1, 0.0); + + std::shared_ptr<ppc::core::TaskData> taskData = + khasanyanov_k_average_vector_seq::create_task_data(global_vec, average); + + auto testAvgVectorSequence = + std::make_shared<khasanyanov_k_average_vector_seq::AvgVectorSEQTaskSequential<int, double>>(taskData); + + RUN_TASK(*testAvgVectorSequence); + + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testAvgVectorSequence); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(4, average[0]); +} \ No newline at end of file diff --git a/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp b/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp new file mode 100644 index 00000000000..0569ed917a8 --- /dev/null +++ b/tasks/seq/khasanyanov_k_average_vector/src/avg_seq.cpp @@ -0,0 +1,3 @@ +#include "seq/khasanyanov_k_average_vector/include/avg_seq.hpp" + +/* nothing to implement: the task is header-only */ diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp b/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..db125844bf8 --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/func_tests/main.cpp @@ -0,0 +1,87 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <vector> + +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows1) { + int count_rows = 3; + // Create data + std::vector<int> global_mat = {2, 5, 4, 7, 9, 3, 5, 6, 7, 9, 2, 4, 2, 5, 0}; + std::vector<int> seq_max_vec(count_rows, 0); + std::vector<int> ans = {9, 9, 5}; + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows2) { + int count_rows = 4; + // Create data + std::vector<int> global_mat = {1, 2, 6, 11, 3, 5, 6, 3, 5, 4, 10, 
12, 20, 4, 8, 2}; + std::vector<int> seq_max_vec(count_rows, 0); + std::vector<int> ans = {11, 6, 12, 20}; + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} + +TEST(kolokolova_d_max_of_row_matrix_seq, Test_Max_For_Rows3) { + int count_rows = 5; + // Create data + std::vector<int> global_mat = {10, 4, 3, 9, 7, 9, 13, 4, 6, 7, 5, 9, 12, 4, 2, 1, 10, 9, 0, 8}; + std::vector<int> seq_max_vec(count_rows, 0); + std::vector<int> ans = {10, 13, 9, 12, 10}; + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + kolokolova_d_max_of_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_EQ(ans, seq_max_vec); +} diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp b/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..dc8dd42cc7e --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include <memory> +#include <vector> + +#include "core/task/include/task.hpp" + +namespace kolokolova_d_max_of_row_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr<ppc::core::TaskData> taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector<std::vector<int>> input_; + std::vector<int> res; +}; + +} // namespace kolokolova_d_max_of_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp b/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..5a20fadac16 --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <vector> + +#include "core/perf/include/perf.hpp" +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +TEST(kolokolova_d_max_of_row_matrix_seq, test_pipeline_run) { + int count_rows = 200; + int size_rows = 90000; + + // Create data (an array with varying values) + std::vector<int> global_mat; + for (int i = 0; i < count_rows; ++i) { + for (int j = 0; j < size_rows; ++j) { + 
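// Fill row i with the values i + j, so the maximum of row i is + // i + size_rows - 1; the result check below relies on this. +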
global_mat.push_back(i + j); // use i + j to produce varying values + } + } + + std::vector<int> seq_max_vec(count_rows, 0); // vector that stores the per-row maxima + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared<kolokolova_d_max_of_row_matrix_seq::TestTaskSequential>(taskDataSeq); + + // Create performance attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; // number of runs + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; // convert to seconds + }; + + // Create and initialize performance results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + // Print performance statistics + ppc::core::Perf::print_perf_statistic(perfResults); + + // Check results + for (size_t i = 0; i < seq_max_vec.size(); i++) { + EXPECT_EQ(seq_max_vec[i], + int(size_rows + i - 1)); // check that each row's maximum matches the expected value + } +} + +TEST(kolokolova_d_max_of_row_matrix_seq, test_task_run) { + int count_rows = 3000; + int size_rows = 6000; + + std::vector<int> global_mat(count_rows * size_rows, 0); + std::vector<int> seq_max_vec(count_rows, 0); // vector that stores the per-row maxima + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(global_mat.data())); + taskDataSeq->inputs_count.emplace_back(global_mat.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back((size_t)1); + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_max_vec.data())); + taskDataSeq->outputs_count.emplace_back(seq_max_vec.size()); + + // Create Task + auto testTaskSequential = std::make_shared<kolokolova_d_max_of_row_matrix_seq::TestTaskSequential>(taskDataSeq); + + // Create performance attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; // number of runs + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; // convert to seconds + }; + + // Create and initialize performance results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create performance analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); // run the task + + // Print performance statistics + ppc::core::Perf::print_perf_statistic(perfResults); + + // Check results + for (size_t i = 0; i < seq_max_vec.size(); i++) { + EXPECT_EQ(0, seq_max_vec[i]); // check that the maximum value in each row equals 
0 + } +} \ No newline at end of file diff --git a/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp b/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..a52c2c38b6f --- /dev/null +++ b/tasks/seq/kolokolova_d_max_of_row_matrix/src/ops_seq.cpp @@ -0,0 +1,51 @@ +#include "seq/kolokolova_d_max_of_row_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + auto row_count = static_cast(*taskData->inputs[1]); + size_t col_count = taskData->inputs_count[0] / row_count; + + input_.resize(row_count, std::vector(col_count)); + + int* input_ptr = reinterpret_cast(taskData->inputs[0]); + for (size_t i = 0; i < row_count; ++i) { + for (size_t j = 0; j < col_count; ++j) { + input_[i][j] = input_ptr[i * col_count + j]; + } + } + res.resize(row_count); + return true; +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + return *taskData->inputs[1] == taskData->outputs_count[0]; +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < input_.size(); ++i) { + int max_value = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); ++j) { + if (input_[i][j] > max_value) { + max_value = input_[i][j]; + } + } + res[i] = max_value; + } + return true; +} + +bool kolokolova_d_max_of_row_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + int* output_ptr = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res.size(); ++i) { + output_ptr[i] = res[i]; + } + return true; +} diff --git a/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp b/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp new file mode 100644 index 00000000000..4c60614be6a --- /dev/null +++ b/tasks/seq/korablev_v_rect_int_seq/func_tests/main.cpp @@ -0,0 +1,146 @@ +#define _USE_MATH_DEFINES +#include + +#include +#include +#include + +#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp" + +TEST(korablev_v_rectangular_integration_seq, test_integration_x_squared) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + ASSERT_TRUE(testTaskSequential->validation()); + testTaskSequential->pre_processing(); + testTaskSequential->run(); + testTaskSequential->post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = 0.5; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + 
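// The bounds a and b plus the subdivision count n are packed into a single + // input buffer of three doubles; pre_processing converts n back to int. +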
taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x; }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_sin_x) { + const double a = 0.0; + const double b = M_PI; + const int n = 1000; + + const double expected_result = 2.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::sin(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_integration_exp_x) { + const double a = 0.0; + const double b = 1.0; + const int n = 1000; + + const double expected_result = std::exp(1.0) - 1.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return std::exp(x); }; + testTaskSequential.set_function(func); + + ASSERT_TRUE(testTaskSequential.validation()); + + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rectangular_integration_seq, test_set_function) { + std::vector in = {0.0, 1.0, 1000}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + korablev_v_rect_int_seq::RectangularIntegrationSequential testTaskSequential(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential.set_function(func); + + double x = 2.0; + double expected_result = 4.0; + ASSERT_EQ(func(x), expected_result); +} \ No newline at end of file diff --git a/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp b/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp new file mode 100644 index 00000000000..da77c9c628e --- /dev/null +++ b/tasks/seq/korablev_v_rect_int_seq/include/ops_seq.hpp @@ -0,0 +1,31 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" + +namespace 
korablev_v_rect_int_seq { + +class RectangularIntegrationSequential : public ppc::core::Task { + public: + explicit RectangularIntegrationSequential(std::shared_ptr taskData_) + : Task(std::move(taskData_)) {} + + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + void set_function(const std::function& func); + + private: + static double integrate(const std::function& f, double a, double b, int n); + + double a_{}; + double b_{}; + int n_{}; + double result_{}; + std::function func_; +}; + +} // namespace korablev_v_rect_int_seq \ No newline at end of file diff --git a/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp b/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp new file mode 100644 index 00000000000..c2390fc5b41 --- /dev/null +++ b/tasks/seq/korablev_v_rect_int_seq/perf_tests/main.cpp @@ -0,0 +1,88 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp" + +TEST(korablev_v_rect_int_seq, test_pipeline_run) { + const double a = 0.0; + const double b = 1.0; + const int n = 10000; + + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + + perfAnalyzer->pipeline_run(perfAttr, perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} + +TEST(korablev_v_rect_int_seq, test_task_run) { + const double a = 0.0; + const double b = 1.0; + const int n = 10000; + const double expected_result = 1.0 / 3.0; + + std::vector in = {a, b, static_cast(n)}; + std::vector out(1, 0.0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::function func = [](double x) { return x * x; }; + testTaskSequential->set_function(func); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = std::make_shared(testTaskSequential); + + perfAnalyzer->task_run(perfAttr, 
perfResults); + + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_NEAR(out[0], expected_result, 1e-3); +} diff --git a/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp b/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp new file mode 100644 index 00000000000..e8d45c7853a --- /dev/null +++ b/tasks/seq/korablev_v_rect_int_seq/src/ops_seq.cpp @@ -0,0 +1,58 @@ +#include "seq/korablev_v_rect_int_seq/include/ops_seq.hpp" + +#include +#include +#include + +using namespace std::chrono_literals; + +bool korablev_v_rect_int_seq::RectangularIntegrationSequential::pre_processing() { + internal_order_test(); + + auto* inputs = reinterpret_cast(taskData->inputs[0]); + + a_ = inputs[0]; + b_ = inputs[1]; + n_ = static_cast(inputs[2]); + + result_ = 0.0; + return true; +} + +bool korablev_v_rect_int_seq::RectangularIntegrationSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 3 && taskData->outputs_count[0] == 1; +} + +bool korablev_v_rect_int_seq::RectangularIntegrationSequential::run() { + internal_order_test(); + + result_ = integrate(func_, a_, b_, n_); + + return true; +} + +bool korablev_v_rect_int_seq::RectangularIntegrationSequential::post_processing() { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; +} + +double korablev_v_rect_int_seq::RectangularIntegrationSequential::integrate(const std::function& f, + double a, double b, int n) { + double step = (b - a) / n; + double area = 0.0; + + for (int i = 0; i < n; ++i) { + double x = a + (i + 0.5) * step; + area += f(x) * step; + } + + return area; +} + +void korablev_v_rect_int_seq::RectangularIntegrationSequential::set_function( + const std::function& func) { + func_ = func; +} diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..ed6c5957f54 --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/func_tests/main_korobeinikov.cpp @@ -0,0 +1,161 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include + +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +TEST(max_elements_in_rows_of_matrix_seq, Test_1_without_negative_max_elemet) { + // Create data + int count_rows = 4; // not const, because reinterpret_cast does not work with const + std::vector matrix{3, 17, 5, -1, 2, -3, 11, 12, 13, -7, 4, 9}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {17, 2, 13, 9}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_2_with_negative_max_elemet) { + // Create data + int count_rows = 4; // not const, because reinterpret_cast does not work with const + 
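// 4 rows of 3 elements each, flattened row by row; right_answer below lists + // the expected maximum of every row. +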
std::vector matrix{3, 7, 5, -6, -10, -8, 15, 12, 21, -7, 0, 9}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {7, -6, 21, 9}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_3_only_zero) { + // Create data + int count_rows = 2; // not const, because reinterpret_cast does not work with const + std::vector matrix{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_4_empty_matrix) { + // Create data + int count_rows = 0; // not const, because reinterpret_cast does not work with const + std::vector matrix; + + std::vector seq_res(count_rows, 0); + std::vector right_answer(count_rows, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(right_answer, seq_res); +} + +TEST(max_elements_in_rows_of_matrix_seq, Test_5_Unequal_number_of_elements_in_rows_exeption) { + // Create data + int count_rows = 2; // not const, because reinterpret_cast does not work with const + std::vector matrix{1, 2, 3, 4, 5}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + 
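// The row count travels as a second input buffer: a pointer to a single int + // with an element count of 1. +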
taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(max_elements_in_rows_of_matrix_seq, + Test_6_number_of_elements_in_the_output_is_not_equal_to_number_of_rows_exeption) { + // Create data + int count_rows = 2; // not const, because reinterpret_cast does not work with const + std::vector matrix{1, 2, 3, 4, 5}; + + std::vector seq_res(count_rows, 0); + std::vector right_answer = {0, 0}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + korobeinikov_a_test_task_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp new file mode 100644 index 00000000000..261cb6d7e1c --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp @@ -0,0 +1,26 @@ +// Copyright 2024 Korobeinikov Arseny +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace korobeinikov_a_test_task_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + std::vector res; + int count_rows{}; + int size_rows{}; +}; + +} // namespace korobeinikov_a_test_task_seq \ No newline at end of file diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp new file mode 100644 index 00000000000..679fb730dbd --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/perf_tests/main_korobeinikov.cpp @@ -0,0 +1,93 @@ +// Copyright 2024 Korobeinikov Arseny +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +TEST(sequential_korobeinikov_perf_test, test_pipeline_run) { + // Create data + int count_rows = 500; // not const, because reinterpret_cast does not work with const + std::vector matrix(count_rows * 10000, 10); + + std::vector seq_res(count_rows, 0); + std::vector right_answer = std::vector(count_rows, 10); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(seq_res.data())); + 
taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + auto testTaskSequential = std::make_shared<korobeinikov_a_test_task_seq::TestTaskSequential>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < seq_res.size(); i++) { + EXPECT_EQ(10, seq_res[i]); + } +} + +TEST(sequential_korobeinikov_perf_test, test_task_run) { + // Create data + int count_rows = 500; // not const, because reinterpret_cast does not work with const + std::vector<int> matrix(count_rows * 100000, 10); + + std::vector<int> seq_res(count_rows, 0); + std::vector<int> right_answer = std::vector<int>(count_rows, 10); + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(matrix.data())); + taskDataSeq->inputs_count.emplace_back(matrix.size()); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(&count_rows)); + taskDataSeq->inputs_count.emplace_back(1); + + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(seq_res.data())); + taskDataSeq->outputs_count.emplace_back(seq_res.size()); + + // Create Task + auto testTaskSequential = std::make_shared<korobeinikov_a_test_task_seq::TestTaskSequential>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared<ppc::core::PerfAttr>(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count(); + return static_cast<double>(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared<ppc::core::PerfResults>(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + for (unsigned i = 0; i < seq_res.size(); i++) { + EXPECT_EQ(10, seq_res[i]); + } +} diff --git a/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp new file mode 100644 index 00000000000..44c1e2a92a3 --- /dev/null +++ b/tasks/seq/korobeinikov_a_max_elements_in_rows_of_matrix/src/ops_seq_korobeinikov.cpp @@ -0,0 +1,51 @@ +// Copyright 2024 Korobeinikov Arseny +#include "seq/korobeinikov_a_max_elements_in_rows_of_matrix/include/ops_seq_korobeinikov.hpp" + +#include <algorithm> +#include <iterator> + +using namespace std::chrono_literals; + +bool korobeinikov_a_test_task_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + // Init value for input and output + + input_.reserve(taskData->inputs_count[0]); + auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[0]); + std::copy(tmp_ptr, tmp_ptr + taskData->inputs_count[0], std::back_inserter(input_)); + count_rows = (int)*taskData->inputs[1]; // note: dereferences a uint8_t*, so row counts above 255 are truncated + if (count_rows != 0) { + size_rows = (int)(taskData->inputs_count[0] / (*taskData->inputs[1])); + } else { + size_rows = 0; + } + + res = std::vector<int>(count_rows, 0); + 
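// res has one slot per row; run() fills each slot with that row's maximum + // via std::max_element. +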
return true; +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::validation() { + internal_order_test(); + + if ((*taskData->inputs[1]) == 0) { + return true; + } + return (*taskData->inputs[1] == taskData->outputs_count[0] && + (taskData->inputs_count[0] % (*taskData->inputs[1])) == 0); +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::run() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + res[i] = *std::max_element(input_.begin() + i * size_rows, input_.begin() + (i + 1) * size_rows); + } + return true; +} + +bool korobeinikov_a_test_task_seq::TestTaskSequential::post_processing() { + internal_order_test(); + for (int i = 0; i < count_rows; i++) { + reinterpret_cast(taskData->outputs[0])[i] = res[i]; + } + return true; +} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp new file mode 100644 index 00000000000..e40afea053b --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/func_tests/main.cpp @@ -0,0 +1,292 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_10x10_matrix) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_100x500_matrix) { + const int rows = 100; + const int cols = 500; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + 
korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, find_min_val_in_row_5000x5000_matrix) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), true); + ASSERT_TRUE(testTaskSequential.pre_processing()); + ASSERT_TRUE(testTaskSequential.run()); + ASSERT_TRUE(testTaskSequential.post_processing()); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_input_empty_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_output_empty_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + 
korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_less_two_cols_100x100_matrix) { + const int rows = 100; + const int cols = 100; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_0x10_matrix) { + const int rows = 0; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_find_min_val_in_row_10x10_cols_0_matrix) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(0); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(korovin_n_min_val_row_matrix_seq, validation_fails_on_invalid_output_size) { + const int rows = 10; + const int cols = 10; + + std::shared_ptr taskDataSeq = std::make_shared(); + + korovin_n_min_val_row_matrix_seq::TestTaskSequential testTaskSequential(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows - 1, 0); + 
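// Editor's note: the output buffer is deliberately sized rows - 1 here, so
// validation() must reject it via its outputs_count[0] == inputs_count[0]
// check; the assertion below relies on exactly that comparison.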
taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + ASSERT_EQ(testTaskSequential.validation(), false); +} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp b/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..49ce7c430a7 --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp @@ -0,0 +1,28 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace korovin_n_min_val_row_matrix_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) { + std::srand(std::time(nullptr)); + } + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + static std::vector generate_rnd_vector(int size, int lower_bound = 0, int upper_bound = 50); + static std::vector> generate_rnd_matrix(int rows, int cols); + + private: + std::vector> input_; + std::vector res_; +}; + +} // namespace korovin_n_min_val_row_matrix_seq \ No newline at end of file diff --git a/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp b/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..301f425150c --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/perf_tests/main.cpp @@ -0,0 +1,95 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +TEST(korovin_n_min_val_row_matrix_seq, test_pipeline_run) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; // Set the number of runs as needed + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} + +TEST(korovin_n_min_val_row_matrix_seq, test_task_run) { + const int rows = 5000; + const int cols = 5000; + + std::shared_ptr taskDataSeq = std::make_shared(); + auto testTaskSequential = std::make_shared(taskDataSeq); + + std::vector> matrix_rnd = + korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(rows, cols); + + for (auto& row : matrix_rnd) { + 
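// Editor's note: each row is passed as its own inputs[] pointer, so the task
// receives `rows` separate buffers rather than one flat array;
// pre_processing() later re-reads them row by row via taskData->inputs[i].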
taskDataSeq->inputs.emplace_back(reinterpret_cast(row.data())); + } + + taskDataSeq->inputs_count.emplace_back(rows); + taskDataSeq->inputs_count.emplace_back(cols); + + std::vector v_res(rows, 0); + taskDataSeq->outputs.emplace_back(reinterpret_cast(v_res.data())); + taskDataSeq->outputs_count.emplace_back(v_res.size()); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + for (int i = 0; i < rows; i++) { + ASSERT_EQ(v_res[i], INT_MIN); + } +} diff --git a/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp b/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..7d80ba6374c --- /dev/null +++ b/tasks/seq/korovin_n_min_val_row_matrix/src/ops_seq.cpp @@ -0,0 +1,77 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/korovin_n_min_val_row_matrix/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + + int rows = taskData->inputs_count[0]; + int cols = taskData->inputs_count[1]; + + input_.resize(rows, std::vector(cols)); + + for (int i = 0; i < rows; i++) { + int* input_matrix = reinterpret_cast(taskData->inputs[i]); + for (int j = 0; j < cols; j++) { + input_[i][j] = input_matrix[j]; + } + } + res_.resize(rows); + return true; +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::validation() { + internal_order_test(); + + return ((!taskData->inputs.empty() && !taskData->outputs.empty()) && + (taskData->inputs_count.size() >= 2 && taskData->inputs_count[0] != 0 && taskData->inputs_count[1] != 0) && + (taskData->outputs_count[0] == taskData->inputs_count[0])); +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::run() { + internal_order_test(); + + for (size_t i = 0; i < input_.size(); i++) { + int min_val = input_[i][0]; + for (size_t j = 1; j < input_[i].size(); j++) { + if (input_[i][j] < min_val) { + min_val = input_[i][j]; + } + } + res_[i] = min_val; + } + return true; +} + +bool korovin_n_min_val_row_matrix_seq::TestTaskSequential::post_processing() { + internal_order_test(); + + int* output_matrix = reinterpret_cast(taskData->outputs[0]); + for (size_t i = 0; i < res_.size(); i++) { + output_matrix[i] = res_[i]; + } + return true; +} + +std::vector korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_vector(int size, int lower_bound, + int upper_bound) { + std::vector v1(size); + for (auto& num : v1) { + num = lower_bound + std::rand() % (upper_bound - lower_bound + 1); + } + return v1; +} + +std::vector> korovin_n_min_val_row_matrix_seq::TestTaskSequential::generate_rnd_matrix(int rows, + int cols) { + std::vector> matrix1(rows, std::vector(cols)); + for (auto& row : matrix1) { + row = generate_rnd_vector(cols, -1000, 1000); + int rnd_index = std::rand() % cols; + row[rnd_index] = INT_MIN; + } + return matrix1; +} \ No newline at end of file diff --git 
a/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp new file mode 100644 index 00000000000..4ef58303194 --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/func_tests/main.cpp @@ -0,0 +1,104 @@ +#include + +#include +#include +#include + +#include "../include/ops_seq.hpp" + +#define EXPAND(x) x + +#define T_DEF(macro, ...) \ + EXPAND(macro(int16_t, __VA_ARGS__)) \ + EXPAND(macro(int32_t, __VA_ARGS__)) \ + EXPAND(macro(int64_t, __VA_ARGS__)) \ + EXPAND(macro(float, __VA_ARGS__)) + +using CountType = uint32_t; + +// clang-format off +using PredefParam = std::tuple< + CountType /* count */, + std::vector /* shift_indices */, + CountType /* num */ +>; +// clang-format on + +class krylov_m_num_of_alternations_signs_seq_test : public ::testing::TestWithParam { + protected: + template + void PT_yields_correct_result() { + const auto &[count, shift_indices, num] = GetParam(); + + // + std::vector in(count); + CountType out = 0; + + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + // + krylov_m_num_of_alternations_signs_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_TRUE(testTaskSequential.validation()); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(num, out); + } + + template + void T_fails_validation() { + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->outputs_count.emplace_back(0); + + krylov_m_num_of_alternations_signs_seq::TestTaskSequential testTaskSequential(taskDataSeq); + EXPECT_FALSE(testTaskSequential.validation()); + } +}; + +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST(TypeParam, TestName) \ + TEST_P(krylov_m_num_of_alternations_signs_seq_test, TestName##__##TypeParam) { TestName(); } +#define DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(TestName) T_DEF(DECL_TYPE_PARAMETRIZED_TEST, PT_##TestName) + +#define DECL_TYPE_PARAMETRIZED_TEST(TypeParam, TestName) \ + TEST_P(krylov_m_num_of_alternations_signs_seq_test, TestName##__##TypeParam) { TestName(); } +#define DECL_TYPE_PARAMETRIZED_TEST_ALL(TestName) T_DEF(DECL_TYPE_PARAMETRIZED_TEST, T_##TestName) + +INSTANTIATE_TEST_SUITE_P( + krylov_m_num_of_alternations_signs_seq_test, krylov_m_num_of_alternations_signs_seq_test, + // clang-format off + ::testing::Values( + std::make_tuple(129, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(129, std::vector{0, /* . */}, 1), + std::make_tuple(129, std::vector{/* . */ 128}, 1), + std::make_tuple(129, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */}, 0), + std::make_tuple(128, std::vector{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . */}, 7), + std::make_tuple(128, std::vector{0, /* . */}, 1), + std::make_tuple(128, std::vector{/* . */ 127}, 1), + std::make_tuple(128, std::vector{/* . */ 64 /* . */}, 2), + std::make_tuple(129, std::vector{/* . */ 43, /* . */ 86, /* . */}, 4), + std::make_tuple(129, std::vector{/* . */ 42, /* . */ 84, /* . */}, 4), + std::make_tuple(128, std::vector{/* . */}, 0), + std::make_tuple(4, std::vector{/* . 
*/}, 0), + std::make_tuple(4, std::vector{/* . */ 2 /* . */}, 2), + std::make_tuple(1, std::vector{/* . */}, 0), + std::make_tuple(1, std::vector{0}, 0), + std::make_tuple(0, std::vector{/* . */}, 0) + ) + // clang-format on +); + +DECL_TYPE_VALUE_PARAMETRIZED_TEST_ALL(yields_correct_result); +DECL_TYPE_PARAMETRIZED_TEST_ALL(fails_validation); diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp b/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp new file mode 100644 index 00000000000..d5adbd772af --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/include/ops_seq.hpp @@ -0,0 +1,77 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace krylov_m_num_of_alternations_signs_seq { + +using namespace std::chrono_literals; + +template +class TestTaskSequential : public ppc::core::Task { + static_assert(sizeof(CountType) <= + sizeof(typename decltype(std::declval().inputs_count)::value_type), + "There's no sense in providing CountType that exceeds TaskData capabilities"); + + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + + bool pre_processing() override { + internal_order_test(); + + const auto count = taskData->inputs_count[0]; + const auto* in_p = reinterpret_cast(taskData->inputs[0]); + input_.resize(count); + std::copy(in_p, in_p + count, std::begin(input_)); + // + res = 0; + + return true; + } + + bool validation() override { + internal_order_test(); + + return taskData->outputs_count[0] == 1; + } + + bool run() override { + internal_order_test(); + + const std::size_t size = input_.size(); + if (size > 1) { + bool neg = input_[0] < 0; + for (std::size_t i = 1; i < size; i++) { + bool cur = input_[i] < 0; + if (neg == cur) { + continue; + } + res++; + neg = cur; + } + } + + return true; + } + + bool post_processing() override { + internal_order_test(); + + reinterpret_cast(taskData->outputs[0])[0] = res; + + return true; + } + + private: + std::vector input_{}; + CountType res{}; +}; + +} // namespace krylov_m_num_of_alternations_signs_seq diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp new file mode 100644 index 00000000000..ca7c7a29f5e --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/perf_tests/main.cpp @@ -0,0 +1,76 @@ +#include + +#include +#include +#include + +#include "../include/ops_seq.hpp" +#include "core/perf/include/perf.hpp" + +class krylov_m_num_of_alternations_signs_seq_perf_test : public ::testing::Test { + using ElementType = int32_t; + using CountType = uint32_t; + // + const CountType in_count = 128; + const std::vector shift_indices{0, 1, /* . */ 3, /* . */ 5, 6, 7, /* . */ 12 /* . 
*/}; + // + const CountType num = 7; + + protected: + void run_perf_test( + const std::function &perfAttr, + const std::shared_ptr &perfResults)> &runner) { + // + std::vector in(in_count); + CountType out = 0; + + std::iota(in.begin(), in.end(), 1); + + for (auto idx : shift_indices) { + in[idx] *= -1; + } + + // + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&out)); + taskDataSeq->outputs_count.emplace_back(1); + + // + auto testTaskSequential = + std::make_shared>( + taskDataSeq); + + // + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + ppc::core::Perf perfAnalyzer(testTaskSequential); + runner(perfAnalyzer, perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(num, out); + } +}; + +TEST_F(krylov_m_num_of_alternations_signs_seq_perf_test, test_pipeline_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.pipeline_run(perfAttr, perfResults); + }); +} + +TEST_F(krylov_m_num_of_alternations_signs_seq_perf_test, test_task_run) { + run_perf_test([](auto &perfAnalyzer, const auto &perfAttr, const auto &perfResults) { + perfAnalyzer.task_run(perfAttr, perfResults); + }); +} diff --git a/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp b/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp new file mode 100644 index 00000000000..ab96e22e641 --- /dev/null +++ b/tasks/seq/krylov_m_num_of_alternations_signs/src/ops_seq.cpp @@ -0,0 +1 @@ +#include "../include/ops_seq.hpp" diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp new file mode 100644 index 00000000000..28c90b06220 --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/func_tests/main.cpp @@ -0,0 +1,196 @@ +#include + +#include +#include + +#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp" + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest) { + double a = 0.0; + double b = 1.45; + double epsilon = 1e-2; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest2) { + double a = -1.45; + double b = 0.0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + 
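// Editor's check: for f(x) = x^2 the exact value here is
// \int_{-1.45}^{0} x^2 dx = 1.45^3 / 3 ~= 1.0162, which matches
// expected_result = 1.016 below within epsilon.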
taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest3) { + double a = -1.45; + double b = 1.45; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 2.03; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest4) { + double a = 1.45; + double b = 0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = -1.016; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest5) { + double a = 0.0; + double b = 100.0; + double epsilon = 0.001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 333333.333510; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest6) { + double a = -10.0; + double b = 65.0; + double epsilon = 0.01; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + double expected_result = 91875.001; + ASSERT_NEAR(output, expected_result, epsilon); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, BasicTest7) { + double a = -10.0; + double b = 10.0; + double epsilon = 0.001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 0.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential task(taskData); + 
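// Editor's check: \int_{-10}^{10} x^2 dx = 2 * 10^3 / 3 = 2000/3 ~= 666.667,
// which is the expected_result asserted below with tolerance 1e-2.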
ASSERT_TRUE(task.validation()); + task.pre_processing(); + task.run(); + task.post_processing(); + std::cout << output << std::endl; + double expected_result = 666.66666; + ASSERT_NEAR(output, expected_result, 1e-2); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, InputSizeLessThan3) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + double result = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, InputSizeMoreThan3) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + double extra_input = 5.0; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&extra_input)); + double result = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, OutputSizeLessThan1) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, OutputSizeMoreThan1) { + std::shared_ptr taskDataSeq = std::make_shared(); + double a = -1.0; + double b = 1.0; + double epsilon = 0.01; + taskDataSeq->inputs.emplace_back(reinterpret_cast(&a)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&b)); + taskDataSeq->inputs.emplace_back(reinterpret_cast(&epsilon)); + double result1 = 0.0; + double result2 = 0.0; + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result1)); + taskDataSeq->outputs.emplace_back(reinterpret_cast(&result2)); + lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_FALSE(testTaskSequential.validation()); +} diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp new file mode 100644 index 00000000000..e889c5a868f --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp @@ -0,0 +1,21 @@ +#pragma once +#include +#include + +#include "core/task/include/task.hpp" +namespace lysov_i_integration_the_trapezoid_method_seq { +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + double a, b, cnt_of_splits, epsilon, h; + static double function_square(double x) { return x * x; } + + private: + std::vector input_; + double res{}; +}; +} 
// namespace lysov_i_integration_the_trapezoid_method_seq \ No newline at end of file diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp new file mode 100644 index 00000000000..26bbd7e8054 --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/perf_tests/main.cpp @@ -0,0 +1,61 @@ +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp" +TEST(lysov_i_integration_the_trapezoid_method_seq, test_pipeline_run) { + double a = 0.0; + double b = 1.45; + double epsilon = 0.0000001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 1.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + auto testTaskSequential = + std::make_shared(taskData); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 1.0; + ASSERT_NEAR(output, expected_result, 1e-1); +} + +TEST(lysov_i_integration_the_trapezoid_method_seq, test_task_run) { + double a = 0.0; + double b = 1.45; + double epsilon = 0.0000001; + auto taskData = std::make_shared(); + taskData->inputs.push_back(reinterpret_cast(&a)); + taskData->inputs.push_back(reinterpret_cast(&b)); + taskData->inputs.push_back(reinterpret_cast(&epsilon)); + double output = 1.0; + taskData->outputs.push_back(reinterpret_cast(&output)); + auto testTaskSequential = + std::make_shared(taskData); + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + double expected_result = 1.0; + ASSERT_NEAR(output, expected_result, 1e-1); +} diff --git a/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp b/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp new file mode 100644 index 00000000000..633c87631fc --- /dev/null +++ b/tasks/seq/lysov_i_integration_the_trapezoid_method/src/ops_seq.cpp @@ -0,0 +1,42 @@ +#include "seq/lysov_i_integration_the_trapezoid_method/include/ops_seq.hpp" + +#include +using namespace std::chrono_literals; +bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::validation() { + internal_order_test(); + return (taskData->inputs.size() == 3 && taskData->outputs.size() == 1); +} + +bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + a = 
*reinterpret_cast(taskData->inputs[0]); + b = *reinterpret_cast(taskData->inputs[1]); + epsilon = *reinterpret_cast(taskData->inputs[2]); + cnt_of_splits = static_cast(std::abs((b - a)) / epsilon); + h = (b - a) / cnt_of_splits; + input_.resize(cnt_of_splits + 1); + for (int i = 0; i <= cnt_of_splits; ++i) { + double x = a + i * h; + input_[i] = function_square(x); + } + return true; +} + +bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::run() { + internal_order_test(); + double result = 0.0; + result += 0.5 * (function_square(a) + function_square(b)); + for (int i = 1; i < cnt_of_splits; ++i) { + double x = a + i * h; + result += function_square(x); + } + result *= h; + res = result; + return true; +} + +bool lysov_i_integration_the_trapezoid_method_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res; + return true; +} \ No newline at end of file diff --git a/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp b/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp new file mode 100644 index 00000000000..97a02a08b37 --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/func_tests/main.cpp @@ -0,0 +1,142 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_10) { + const int count = 10; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_20) { + const int count = 20; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_50) { + const int count = 50; + const int min_val = 0; + const int max_val = 100; + // Create 
data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_70) { + const int count = 70; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements_seq, Test_Min_100) { + const int count = 100; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[1] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + muhina_m_min_of_vector_elements_seq::MinOfVectorSequential MinOfVectorSequential(taskDataSeq); + ASSERT_EQ(MinOfVectorSequential.validation(), true); + MinOfVectorSequential.pre_processing(); + MinOfVectorSequential.run(); + MinOfVectorSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp b/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp new file mode 100644 index 00000000000..815628780cf --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace muhina_m_min_of_vector_elements_seq { +int vectorMin(std::vector> v); + +class MinOfVectorSequential : public ppc::core::Task { + public: + explicit MinOfVectorSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + int res_{}; +}; +} // namespace muhina_m_min_of_vector_elements_seq \ No newline at end of file diff --git a/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp b/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp new file mode 
100644 index 00000000000..5d72650790b --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/perf_tests/main.cpp @@ -0,0 +1,99 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +std::vector GetRandomVector(int sz, int min_value, int max_value) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = min_value + gen() % (max_value - min_value + 1); + } + return vec; +} + +TEST(muhina_m_min_of_vector_elements, test_pipeline_run) { + const int count = 2000000; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MinOfVectorSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(muhina_m_min_of_vector_elements, test_task_run) { + const int count = 10000000; + const int min_val = 0; + const int max_val = 100; + // Create data + std::vector in = GetRandomVector(count, min_val, max_val); + in[0] = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto MinOfVectorSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(MinOfVectorSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(0, out[0]); +} diff --git a/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp b/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp new file mode 100644 index 00000000000..a8f831431ee --- /dev/null +++ b/tasks/seq/muhina_m_min_of_vector_elements/src/ops_seq.cpp @@ -0,0 +1,53 @@ +// 
Copyright 2024 Nesterov Alexander +#include "seq/muhina_m_min_of_vector_elements/include/ops_seq.hpp" + +#include <chrono> +#include <vector> + +using namespace std::chrono_literals; + +int muhina_m_min_of_vector_elements_seq::vectorMin(std::vector<int, std::allocator<int>> vect) { + if (vect.empty()) { + return 0; // editor's fix: guard the empty case so vect[0] below is never read out of bounds + } + int mini = vect[0]; + + for (size_t i = 1; i < vect.size(); i++) { + if (vect[i] < mini) { + mini = vect[i]; + } + } + return mini; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::pre_processing() { + internal_order_test(); + + // Init data vector + input_ = std::vector<int>(taskData->inputs_count[0]); + auto* tempPtr = reinterpret_cast<int *>(taskData->inputs[0]); + for (unsigned i = 0; i < taskData->inputs_count[0]; i++) { + input_[i] = tempPtr[i]; + } + + return true; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::validation() { + internal_order_test(); + // Handle empty input vector + if (taskData->inputs_count[0] == 0) { + return taskData->outputs_count[0] == 0; + } + return taskData->outputs_count[0] == 1; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::run() { + internal_order_test(); + // Iterate through the vector + res_ = muhina_m_min_of_vector_elements_seq::vectorMin(input_); + return true; +} + +bool muhina_m_min_of_vector_elements_seq::MinOfVectorSequential::post_processing() { + internal_order_test(); + reinterpret_cast<int *>(taskData->outputs[0])[0] = res_; + return true; +} diff --git a/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp b/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp new file mode 100644 index 00000000000..ccbf17a7dc7 --- /dev/null +++ b/tasks/seq/rezantseva_a_vector_dot_product/func_tests/main.cpp @@ -0,0 +1,225 @@ +// Copyright 2023 Nesterov Alexander +#include <gtest/gtest.h> + +#include <ctime> +#include <random> + +#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp" +static int offset = 0; + +std::vector<int> createRandomVector(int v_size) { + std::vector<int> vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_seq, can_scalar_multiply_vec_size_10) { + const int count = 10; + // Create data + std::vector<int> out(1, 0); + std::vector<int> v1 = createRandomVector(count); + std::vector<int> v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t *>(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, can_scalar_multiply_vec_size_100) { + const int count = 100; + // Create data + std::vector<int> out(1, 0); + + std::vector<int> v1 = createRandomVector(count); + std::vector<int> v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t *>(v1.data())); + 
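// Editor's note: as in the size-10 test above, the result is checked against
// an independent vectorDotProduct(v1, v2) computation rather than a
// hard-coded value, since the inputs are randomly generated.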
taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_none_equal_size_of_vec) { + const int count = 10; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count + 1); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), false); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_equal_size_of_vec) { + const int count = 10; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_empty_vec_product_func) { + const int count = 0; + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(0, answer); +} + +TEST(rezantseva_a_vector_dot_product_seq, check_empty_vec_product_run) { + const int count = 0; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + 
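// Editor's note: with count = 0 both vectors are empty, run() performs zero
// loop iterations, and res stays 0, matching vectorDotProduct() on empty
// inputs in the assertion below.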
testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, v1_dot_product_v2_equal_v2_dot_product_v1) { + const int count = 50; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v2, v1); + ASSERT_EQ(answer, out[0]); +} +TEST(rezantseva_a_vector_dot_product_seq, check_run_right) { + // Create data + std::vector out(1, 0); + + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + rezantseva_a_vector_dot_product_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(58, out[0]); +} +TEST(rezantseva_a_vector_dot_product_seq, check_vectorDotProduct_right) { + // Create data + std::vector v1 = {1, 2, 5}; + std::vector v2 = {4, 7, 8}; + ASSERT_EQ(58, rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2)); +} diff --git a/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp b/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp new file mode 100644 index 00000000000..cf0d69b6ad8 --- /dev/null +++ b/tasks/seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp @@ -0,0 +1,25 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace rezantseva_a_vector_dot_product_seq { +int vectorDotProduct(const std::vector& v1, const std::vector& v2); + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + int res{}; + std::vector> input_; +}; + +} // namespace rezantseva_a_vector_dot_product_seq \ No newline at end of file diff --git a/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp b/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp new file mode 100644 index 00000000000..31a79497b14 --- /dev/null +++ 
b/tasks/seq/rezantseva_a_vector_dot_product/perf_tests/main.cpp @@ -0,0 +1,106 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp" + +static int offset = 0; + +std::vector createRandomVector(int v_size) { + std::vector vec(v_size); + std::mt19937 gen; + gen.seed((unsigned)time(nullptr) + ++offset); + for (int i = 0; i < v_size; i++) vec[i] = gen() % 100; + return vec; +} + +TEST(rezantseva_a_vector_dot_product_seq, test_pipeline_run) { + const int count = 100000000; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} + +TEST(rezantseva_a_vector_dot_product_seq, test_task_run) { + const int count = 100000000; + // Create data + std::vector out(1, 0); + + std::vector v1 = createRandomVector(count); + std::vector v2 = createRandomVector(count); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + + taskDataSeq->inputs.emplace_back(reinterpret_cast(v1.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(v2.data())); + + taskDataSeq->inputs_count.emplace_back(v1.size()); + taskDataSeq->inputs_count.emplace_back(v2.size()); + + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + int answer = rezantseva_a_vector_dot_product_seq::vectorDotProduct(v1, v2); + ASSERT_EQ(answer, out[0]); +} \ No newline at end of file 
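Editor's note: the chrono timer lambda recurs in every perf test above with its template arguments stripped by formatting. The following minimal, self-contained sketch (assuming only the standard <chrono> API; `current_timer` mirrors the perfAttr->current_timer field used throughout this patch) shows the intended elapsed-seconds pattern:

#include <chrono>

int main() {
  const auto t0 = std::chrono::high_resolution_clock::now();
  // Same shape as perfAttr->current_timer in the tests above: elapsed
  // wall-clock time since t0, converted from nanoseconds to seconds.
  auto current_timer = [&] {
    auto now = std::chrono::high_resolution_clock::now();
    auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(now - t0).count();
    return static_cast<double>(ns) * 1e-9;
  };
  return current_timer() >= 0.0 ? 0 : 1;
}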
diff --git a/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp b/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp
new file mode 100644
index 00000000000..cc375d53f30
--- /dev/null
+++ b/tasks/seq/rezantseva_a_vector_dot_product/src/ops_seq.cpp
@@ -0,0 +1,48 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/rezantseva_a_vector_dot_product/include/ops_seq.hpp"
+
+bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->inputs.size() == taskData->inputs_count.size() && taskData->inputs.size() == 2) &&
+         (taskData->inputs_count[0] == taskData->inputs_count[1]) &&
+         (taskData->outputs.size() == taskData->outputs_count.size()) && taskData->outputs.size() == 1 &&
+         taskData->outputs_count[0] == 1;
+}
+
+bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::pre_processing() {
+  internal_order_test();
+  // Init value for input and output
+  input_ = std::vector<std::vector<int>>(taskData->inputs.size());
+  for (size_t i = 0; i < input_.size(); i++) {
+    auto* tmp_ptr = reinterpret_cast<int*>(taskData->inputs[i]);
+    input_[i] = std::vector<int>(taskData->inputs_count[i]);
+    for (size_t j = 0; j < taskData->inputs_count[i]; j++) {
+      input_[i][j] = tmp_ptr[j];
+    }
+  }
+  res = 0;
+  return true;
+}
+
+bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::run() {
+  internal_order_test();
+  for (size_t i = 0; i < input_[0].size(); i++) {
+    res += input_[0][i] * input_[1][i];
+  }
+
+  return true;
+}
+
+bool rezantseva_a_vector_dot_product_seq::TestTaskSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+
+int rezantseva_a_vector_dot_product_seq::vectorDotProduct(const std::vector<int>& v1, const std::vector<int>& v2) {
+  long long result = 0;
+  for (size_t i = 0; i < v1.size(); i++) result += static_cast<long long>(v1[i]) * v2[i];
+  return static_cast<int>(result);
+}
\ No newline at end of file
diff --git a/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp b/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp
new file mode 100644
index 00000000000..599cee6f272
--- /dev/null
+++ b/tasks/seq/shvedova_v_char_freq/func_tests/main.cpp
@@ -0,0 +1,156 @@
+#include <gtest/gtest.h>
+
+#include <string>
+#include <vector>
+
+#include "seq/shvedova_v_char_freq/include/ops_seq.hpp"
+
+TEST(shvedova_v_char_frequency_seq, test_char_frequency_a_in_abc) {
+  std::string input_str = "abcabc";
+  char target_char = 'a';
+  int expected_frequency = 2;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<char> in_char(1, target_char);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_str.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_char.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->inputs_count.emplace_back(in_char.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq);
+  ASSERT_EQ(charFrequencyTask.validation(), true);
+  charFrequencyTask.pre_processing();
+  charFrequencyTask.run();
+  charFrequencyTask.post_processing();
+  ASSERT_EQ(expected_frequency, out[0]);
+}
+
+TEST(shvedova_v_char_frequency_seq, test_char_frequency_b_in_abc) {
+  std::string input_str = "abcabc";
+  char target_char = 'b';
+  int expected_frequency = 2;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<char> in_char(1, target_char);
+  std::vector<int> out(1, 0);
+
std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_c_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'c'; + int expected_frequency = 2; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_x_in_abc) { + std::string input_str = "abcabc"; + char target_char = 'x'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_a_in_long_string) { + std::string input_str(1000000, 'a'); + char target_char = 'a'; + int expected_frequency = 1000000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), 
true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} + +TEST(shvedova_v_char_frequency_seq, test_char_frequency_in_empty_string) { + std::string input_str; + char target_char = 'a'; + int expected_frequency = 0; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + shvedova_v_char_frequency_seq::CharFrequencyTaskSequential charFrequencyTask(taskDataSeq); + ASSERT_EQ(charFrequencyTask.validation(), true); + charFrequencyTask.pre_processing(); + charFrequencyTask.run(); + charFrequencyTask.post_processing(); + ASSERT_EQ(expected_frequency, out[0]); +} \ No newline at end of file diff --git a/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp b/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp new file mode 100644 index 00000000000..1d51823bbd6 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/include/ops_seq.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace shvedova_v_char_frequency_seq { + +class CharFrequencyTaskSequential : public ppc::core::Task { + public: + explicit CharFrequencyTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::string input_str_; + char target_char_; + int frequency_ = 0; +}; + +} // namespace shvedova_v_char_frequency_seq diff --git a/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp b/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp new file mode 100644 index 00000000000..877e0c430a1 --- /dev/null +++ b/tasks/seq/shvedova_v_char_freq/perf_tests/main.cpp @@ -0,0 +1,81 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/shvedova_v_char_freq/include/ops_seq.hpp" + +TEST(shvedova_v_char_frequency_seq, test_pipeline_run) { + std::string input_str(100000, 'a'); + char target_char = 'a'; + int expected_frequency = 100000; + + std::vector in_str(1, input_str); + std::vector in_char(1, target_char); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_str.data())); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in_char.data())); + taskDataSeq->inputs_count.emplace_back(in_str.size()); + taskDataSeq->inputs_count.emplace_back(in_char.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto charFrequencyTask = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + + auto perfAnalyzer = 
std::make_shared<ppc::core::Perf>(charFrequencyTask);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(expected_frequency, out[0]);
+}
+
+TEST(shvedova_v_char_frequency_seq, test_task_run) {
+  std::string input_str(100000, 'a');
+  char target_char = 'a';
+  int expected_frequency = 100000;
+
+  std::vector<std::string> in_str(1, input_str);
+  std::vector<char> in_char(1, target_char);
+  std::vector<int> out(1, 0);
+
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_str.data()));
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in_char.data()));
+  taskDataSeq->inputs_count.emplace_back(in_str.size());
+  taskDataSeq->inputs_count.emplace_back(in_char.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  auto charFrequencyTask = std::make_shared<shvedova_v_char_frequency_seq::CharFrequencyTaskSequential>(taskDataSeq);
+
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(charFrequencyTask);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+
+  ASSERT_EQ(expected_frequency, out[0]);
+}
\ No newline at end of file
diff --git a/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp b/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp
new file mode 100644
index 00000000000..9b24750ee14
--- /dev/null
+++ b/tasks/seq/shvedova_v_char_freq/src/ops_seq.cpp
@@ -0,0 +1,32 @@
+#include "seq/shvedova_v_char_freq/include/ops_seq.hpp"
+
+#include <algorithm>
+#include <chrono>
+#include <string>
+
+using namespace std::chrono_literals;
+
+bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::pre_processing() {
+  internal_order_test();
+  input_str_ = *reinterpret_cast<std::string*>(taskData->inputs[0]);
+  target_char_ = *reinterpret_cast<char*>(taskData->inputs[1]);
+  frequency_ = 0;
+  return true;
+}
+
+bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::validation() {
+  internal_order_test();
+  return taskData->inputs_count[0] == 1 && taskData->inputs_count[1] == 1 && taskData->outputs_count[0] == 1;
+}
+
+bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::run() {
+  internal_order_test();
+  frequency_ = static_cast<int>(std::count(input_str_.begin(), input_str_.end(), target_char_));
+  return true;
+}
+
+bool shvedova_v_char_frequency_seq::CharFrequencyTaskSequential::post_processing() {
+  internal_order_test();
+  *reinterpret_cast<int*>(taskData->outputs[0]) = frequency_;
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp
new file mode 100644
index 00000000000..e05edf14e91
--- /dev/null
+++ b/tasks/seq/solovyev_d_vector_max/func_tests/main.cpp
@@ -0,0 +1,153 @@
+#include <gtest/gtest.h>
+
+#include <random>
+#include <vector>
+
+#include "seq/solovyev_d_vector_max/include/header.hpp"
+
+std::vector<int> getRandomVector(int sz) {
+  std::random_device dev;
+  std::mt19937 gen(dev());
+  std::vector<int> vec(sz);
+  for (int i = 0; i < sz; i++) {
+    vec[i] = static_cast<int>(gen() % 100);
+  }
+  return vec;
+}
+
+TEST(solovyev_d_vector_max_mpi, Test_Empty) {
+  // Create data
+  std::vector<int> in(0, 0);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData>
taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), false); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10) { + const int count = 10; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_100) { + const int count = 20; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_1000) { + const int count = 50; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_10000) { + const int count = 70; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + 
VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} + +TEST(solovyev_d_vector_max_mpi, Test_Max_100000) { + const int count = 100; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + solovyev_d_vector_max_mpi::VectorMaxSequential VectorMaxSequential(taskDataSeq); + ASSERT_EQ(VectorMaxSequential.validation(), true); + VectorMaxSequential.pre_processing(); + VectorMaxSequential.run(); + VectorMaxSequential.post_processing(); + ASSERT_EQ(1024, out[0]); +} diff --git a/tasks/seq/solovyev_d_vector_max/include/header.hpp b/tasks/seq/solovyev_d_vector_max/include/header.hpp new file mode 100644 index 00000000000..712e45ed1ac --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/include/header.hpp @@ -0,0 +1,25 @@ + +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace solovyev_d_vector_max_mpi { +int vectorMax(std::vector> v); +class VectorMaxSequential : public ppc::core::Task { + public: + explicit VectorMaxSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector data; + int result{}; + std::string ops; +}; + +} // namespace solovyev_d_vector_max_mpi \ No newline at end of file diff --git a/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp new file mode 100644 index 00000000000..8323ffa5f34 --- /dev/null +++ b/tasks/seq/solovyev_d_vector_max/perf_tests/main.cpp @@ -0,0 +1,92 @@ +#include + +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/solovyev_d_vector_max/include/header.hpp" + +std::vector getRandomVector(int sz) { + std::random_device dev; + std::mt19937 gen(dev()); + std::vector vec(sz); + for (int i = 0; i < sz; i++) { + vec[i] = gen() % 100; + } + return vec; +} + +TEST(solovyev_d_vector_max_mpi, test_pipeline_run) { + const int count = 12000000; + + // Create data + std::vector in = getRandomVector(count); + in[count / 2] = 1024; + std::vector out(1, 0); + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1024, 
out[0]);
+}
+
+TEST(solovyev_d_vector_max_mpi, test_task_run) {
+  const int count = 12000000;
+
+  // Create data
+  std::vector<int> in = getRandomVector(count);
+  in[count / 2] = 1024;
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto testTaskSequential = std::make_shared<solovyev_d_vector_max_mpi::VectorMaxSequential>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(testTaskSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(1024, out[0]);
+}
diff --git a/tasks/seq/solovyev_d_vector_max/src/source.cpp b/tasks/seq/solovyev_d_vector_max/src/source.cpp
new file mode 100644
index 00000000000..6ead459248c
--- /dev/null
+++ b/tasks/seq/solovyev_d_vector_max/src/source.cpp
@@ -0,0 +1,47 @@
+#include <limits>
+#include <thread>
+#include <vector>
+
+#include "seq/solovyev_d_vector_max/include/header.hpp"
+
+using namespace std::chrono_literals;
+
+int solovyev_d_vector_max_mpi::vectorMax(std::vector<int> v) {
+  int m = std::numeric_limits<int>::min();
+  for (size_t i = 0; i < v.size(); i++) {
+    if (v[i] > m) {
+      m = v[i];
+    }
+  }
+  return m;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::pre_processing() {
+  internal_order_test();
+
+  // Init data vector
+  int* input_ = reinterpret_cast<int*>(taskData->inputs[0]);
+  data = std::vector<int>(input_, input_ + taskData->inputs_count[0]);
+  // Init result value
+  result = 0;
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return (taskData->outputs_count[0] == 1 and taskData->inputs_count[0] != 0);
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::run() {
+  internal_order_test();
+
+  // Determine maximum value of data vector
+  result = vectorMax(data);
+  return true;
+}
+
+bool solovyev_d_vector_max_mpi::VectorMaxSequential::post_processing() {
+  internal_order_test();
+  reinterpret_cast<int*>(taskData->outputs[0])[0] = result;
+  return true;
+}
\ No newline at end of file
diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp
new file mode 100644
index 00000000000..c0c70b78826
--- /dev/null
+++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/func_tests/main.cpp
@@ -0,0 +1,121 @@
+// Copyright 2023 Nesterov Alexander
+#include <gtest/gtest.h>
+
+#include <vector>
+
+#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp"
+
+TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_3_characters) {
+  // Create data
+  std::vector<std::vector<char>> in = {{'a', 'b', 'c'}, {'a', 'b', 'd'}};
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  for (unsigned int i = 0; i < in.size(); i++)
taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_1_characters_res1) { + // Create data + std::vector> in = {{'f', 'p', 'p'}, {'a', 'p', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_2_characters_res1) { + // Create data + std::vector> in = {{'c', 'p', 'p'}, {'c', 'a', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_1_characters) { + // Create data + std::vector> in = {{'a', 'p', 'p'}, {'b', 'a', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_3_characters_res1) { + // Create data + std::vector> in = {{'b', 'p', 
'p'}, {'b', 'p', 'g'}}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential testTaskSequential(taskDataSeq); + ASSERT_EQ(testTaskSequential.validation(), true); + testTaskSequential.pre_processing(); + testTaskSequential.run(); + testTaskSequential.post_processing(); + ASSERT_EQ(1, out[0]); +} diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp new file mode 100644 index 00000000000..8782efb0a9a --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp @@ -0,0 +1,24 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sorokin_a_check_lexicographic_order_of_strings_seq { + +class TestTaskSequential : public ppc::core::Task { + public: + explicit TestTaskSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector> input_; + int res_{}; +}; + +} // namespace sorokin_a_check_lexicographic_order_of_strings_seq \ No newline at end of file diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp new file mode 100644 index 00000000000..0cd57c0110a --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/perf_tests/main.cpp @@ -0,0 +1,89 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp" + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_20000000_characters) { + // Create data + std::vector str1(20000000, 'a'); + std::vector str2(19999999, 'a'); + str2.push_back('b'); + std::vector> in = {str1, str2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = 
std::make_shared(testTaskSequential); + perfAnalyzer->pipeline_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(0, out[0]); +} + +TEST(sorokin_a_check_lexicographic_order_of_strings_seq, The_difference_is_in_20000000_characters_res1) { + // Create data + std::vector str1(20000000, 'b'); + std::vector str2(19999999, 'b'); + str2.push_back('a'); + std::vector> in = {str1, str2}; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + for (unsigned int i = 0; i < in.size(); i++) + taskDataSeq->inputs.emplace_back(reinterpret_cast(in[i].data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->inputs_count.emplace_back(in[0].size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto testTaskSequential = + std::make_shared(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf analyzer + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + ASSERT_EQ(1, out[0]); +} diff --git a/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp new file mode 100644 index 00000000000..7511ebfe6a5 --- /dev/null +++ b/tasks/seq/sorokin_a_check_lexicographic_order_of_strings/src/ops_seq.cpp @@ -0,0 +1,45 @@ +// Copyright 2024 Nesterov Alexander +#include "seq/sorokin_a_check_lexicographic_order_of_strings/include/ops_seq.hpp" + +#include + +using namespace std::chrono_literals; + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::pre_processing() { + internal_order_test(); + input_ = std::vector>(taskData->inputs_count[0], std::vector(taskData->inputs_count[1])); + + for (unsigned int i = 0; i < taskData->inputs_count[0]; i++) { + auto* tmp_ptr = reinterpret_cast(taskData->inputs[i]); + for (unsigned int j = 0; j < taskData->inputs_count[1]; j++) { + input_[i][j] = tmp_ptr[j]; + } + } + res_ = 0; + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::validation() { + internal_order_test(); + return taskData->inputs_count[0] == 2 && taskData->outputs_count[0] == 1; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::run() { + internal_order_test(); + for (size_t i = 0; i < std::min(input_[0].size(), input_[1].size()); ++i) { + if (static_cast(input_[0][i]) > static_cast(input_[1][i])) { + res_ = 1; + break; + } + if (static_cast(input_[0][i]) < static_cast(input_[1][i])) { + break; + } + } + return true; +} + +bool sorokin_a_check_lexicographic_order_of_strings_seq::TestTaskSequential::post_processing() { + internal_order_test(); + reinterpret_cast(taskData->outputs[0])[0] = res_; + return true; +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp b/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp new file mode 100644 index 
00000000000..922a3c59176 --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/func_tests/main.cpp @@ -0,0 +1,152 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include +#include +#include +#include +#include + +#include "seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +TEST(Sequential, Test_Sum_Large_Matrix) { + const int rows = 1000; + const int columns = 1000; + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_double(rows, columns); + std::vector reference_sum(1, 0); + + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix)); +} + +TEST(Sequential, Test_Sum_Negative_Values) { + const int rows = 10; + const int columns = 10; + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(rows, columns); + for (auto& elem : global_matrix) { + elem = -abs(elem); + } + std::vector reference_sum(1, 0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix)); +} + +TEST(Sequential, Test_Sum_Int) { + srand(static_cast(time(nullptr))); + + const int rows = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + const int columns = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(rows, columns); + std::vector reference_sum(1, 0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(global_matrix)); +} + +TEST(Sequential, Test_Sum_Double) { + srand(static_cast(time(nullptr))); + + const 
int rows = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + const int columns = sotskov_a_sum_element_matrix_seq::random_range(1, 100); + + std::vector global_matrix = sotskov_a_sum_element_matrix_seq::create_random_matrix_double(rows, columns); + std::vector reference_sum(1, 0.0); + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(const_cast(global_matrix.data()))); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(global_matrix)); +} + +TEST(Sequential, Test_Empty_Matrix) { + std::vector reference_sum(1, 0); + std::vector empty_matrix; + + reference_sum[0] = sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(empty_matrix); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(empty_matrix.data())); + taskDataSeq->inputs_count.emplace_back(empty_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(reference_sum.data())); + taskDataSeq->outputs_count.emplace_back(reference_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_TRUE(testTask.validation()); + testTask.pre_processing(); + testTask.run(); + testTask.post_processing(); + + ASSERT_EQ(reference_sum[0], 0); +} + +TEST(Sequential, Test_Zero_Columns_Rows) { + auto zero_columns = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(1, 0); + EXPECT_TRUE(zero_columns.empty()); + auto zero_rows = sotskov_a_sum_element_matrix_seq::create_random_matrix_int(0, 1); + EXPECT_TRUE(zero_rows.empty()); +} + +TEST(Sequential, Test_Wrong_Validation) { + std::vector global_matrix; + std::vector global_sum(2, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(global_matrix.data())); + taskDataSeq->inputs_count.emplace_back(global_matrix.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(global_sum.data())); + taskDataSeq->outputs_count.emplace_back(global_sum.size()); + + sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt testTask(taskDataSeq); + ASSERT_FALSE(testTask.validation()); +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp b/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp new file mode 100644 index 00000000000..de3b47342ee --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp @@ -0,0 +1,43 @@ +#pragma once + +#include +#include + +#include "core/task/include/task.hpp" + +namespace sotskov_a_sum_element_matrix_seq { + +std::vector create_random_matrix_int(int rows, int cols); +std::vector create_random_matrix_double(int rows, int cols); + +int sum_matrix_elements_int(const std::vector& matrix); +double sum_matrix_elements_double(const std::vector& matrix); +int random_range(int min, int max); + +class TestTaskSequentialInt : public ppc::core::Task { + public: + explicit TestTaskSequentialInt(std::shared_ptr task_data); + bool pre_processing() override; + bool validation() 
override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_data_; + int result_{0}; +}; + +class TestTaskSequentialDouble : public ppc::core::Task { + public: + explicit TestTaskSequentialDouble(std::shared_ptr task_data); + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_data_; + double result_{0.0}; +}; + +} // namespace sotskov_a_sum_element_matrix_seq diff --git a/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp b/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp new file mode 100644 index 00000000000..458abe1262f --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/perf_tests/main.cpp @@ -0,0 +1,74 @@ +#include + +#include +#include +#include + +#include "core/perf/include/perf.hpp" +#include "seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +TEST(sotskov_a_sum_element_matrix, test_pipeline_run) { + const int rows = 10000; + const int columns = 10000; + + std::vector in(rows * columns, 1); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} + +TEST(sotskov_a_sum_element_matrix, test_task_run) { + const int rows = 8000; + const int columns = 8000; + + std::vector in(rows * columns, 1); + std::vector out(1, 0); + + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + auto testTaskSequential = std::make_shared(taskDataSeq); + + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + auto perfResults = std::make_shared(); + auto perfAnalyzer = std::make_shared(testTaskSequential); + perfAnalyzer->task_run(perfAttr, perfResults); + ppc::core::Perf::print_perf_statistic(perfResults); + + ASSERT_EQ(std::accumulate(in.begin(), in.end(), 0), out[0]); +} diff --git a/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp b/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp new file mode 100644 index 00000000000..03aae4aab7a --- /dev/null +++ b/tasks/seq/sotskov_a_sum_element_matrix/src/ops_seq.cpp @@ -0,0 +1,113 @@ +#include 
"seq/sotskov_a_sum_element_matrix/include/ops_seq.hpp" + +#include +#include +#include +#include + +int sotskov_a_sum_element_matrix_seq::sum_matrix_elements_int(const std::vector& matrix) { + return std::accumulate(matrix.begin(), matrix.end(), 0); +} + +double sotskov_a_sum_element_matrix_seq::sum_matrix_elements_double(const std::vector& matrix) { + return std::accumulate(matrix.begin(), matrix.end(), 0.0); +} + +int sotskov_a_sum_element_matrix_seq::random_range(int min, int max) { + static std::random_device rd; + static std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(min, max); + return dis(gen); +} + +std::vector sotskov_a_sum_element_matrix_seq::create_random_matrix_int(int rows, int cols) { + if (rows <= 0 || cols <= 0) { + return {}; + } + + std::vector matrix(rows * cols); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(-100, 100); + + std::generate(matrix.begin(), matrix.end(), [&]() { return dis(gen); }); + return matrix; +} + +std::vector sotskov_a_sum_element_matrix_seq::create_random_matrix_double(int rows, int cols) { + if (rows <= 0 || cols <= 0) { + return {}; + } + + std::vector matrix(rows * cols); + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution<> dis(-100.0, 100.0); + + std::generate(matrix.begin(), matrix.end(), [&]() { return dis(gen); }); + return matrix; +} + +sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::TestTaskSequentialInt( + std::shared_ptr task_data) + : Task(std::move(task_data)) {} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::pre_processing() { + internal_order_test(); + result_ = 0; + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_data_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::run() { + internal_order_test(); + result_ = std::accumulate(input_data_.begin(), input_data_.end(), 0); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialInt::post_processing() { + internal_order_test(); + if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) { + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; + } + return false; +} + +sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::TestTaskSequentialDouble( + std::shared_ptr task_data) + : Task(std::move(task_data)) {} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::pre_processing() { + internal_order_test(); + result_ = 0.0; + auto* tmp_ptr = reinterpret_cast(taskData->inputs[0]); + input_data_.assign(tmp_ptr, tmp_ptr + taskData->inputs_count[0]); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::validation() { + internal_order_test(); + return taskData->outputs_count[0] == 1; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::run() { + internal_order_test(); + result_ = std::accumulate(input_data_.begin(), input_data_.end(), 0.0); + return true; +} + +bool sotskov_a_sum_element_matrix_seq::TestTaskSequentialDouble::post_processing() { + internal_order_test(); + if (!taskData->outputs.empty() && taskData->outputs[0] != nullptr) { + reinterpret_cast(taskData->outputs[0])[0] = result_; + return true; + } + return false; +} diff --git a/tasks/seq/titov_s_vector_sum/func_tests/main.cpp 
b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp new file mode 100644 index 00000000000..3c60cb5d6f5 --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/func_tests/main.cpp @@ -0,0 +1,135 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, Test_Int) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} + +TEST(titov_s_vector_sum_seq, Test_Double) { + // Create data + std::vector in(1, 10); + const int expected_sum = 10; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], expected_sum, 1e-6); +} + +TEST(titov_s_vector_sum_seq, Test_Float) { + // Create data + std::vector in(1, 1.f); + std::vector out(1, 0.f); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + EXPECT_NEAR(out[0], static_cast(in.size()), 1e-3f); +} + +TEST(titov_s_vector_sum_seq, Test_Int64_t) { + // Create data + std::vector in(75836, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Uint8_t) { + // Create data + std::vector in(255, 1); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + 
taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(static_cast(out[0]), in.size()); +} + +TEST(titov_s_vector_sum_seq, Test_Empty_Array) { + // Create data + std::vector in(1, 0); + const int expected_sum = 0; + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + titov_s_vector_sum_seq::VectorSumSequential vectorSumSequential(taskDataSeq); + ASSERT_TRUE(vectorSumSequential.validation()); + vectorSumSequential.pre_processing(); + vectorSumSequential.run(); + vectorSumSequential.post_processing(); + ASSERT_EQ(expected_sum, out[0]); +} diff --git a/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp new file mode 100644 index 00000000000..d29d94269bd --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/include/ops_seq.hpp @@ -0,0 +1,26 @@ +// Copyright 2023 Nesterov Alexander +#pragma once + +#include +#include +#include +#include + +#include "core/task/include/task.hpp" + +namespace titov_s_vector_sum_seq { +template +class VectorSumSequential : public ppc::core::Task { + public: + explicit VectorSumSequential(std::shared_ptr taskData_) : Task(std::move(taskData_)) {} + bool pre_processing() override; + bool validation() override; + bool run() override; + bool post_processing() override; + + private: + std::vector input_; + InOutType res; +}; + +} // namespace titov_s_vector_sum_seq diff --git a/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp new file mode 100644 index 00000000000..3b772d11c2b --- /dev/null +++ b/tasks/seq/titov_s_vector_sum/perf_tests/main.cpp @@ -0,0 +1,81 @@ +// Copyright 2023 Nesterov Alexander +#include + +#include + +#include "core/perf/include/perf.hpp" +#include "seq/titov_s_vector_sum/include/ops_seq.hpp" + +TEST(titov_s_vector_sum_seq, test_pipeline_run) { + const int count = 10000000; + + // Create data + std::vector in(count, 0); + std::vector out(1, 0); + + // Create TaskData + std::shared_ptr taskDataSeq = std::make_shared(); + taskDataSeq->inputs.emplace_back(reinterpret_cast(in.data())); + taskDataSeq->inputs_count.emplace_back(in.size()); + taskDataSeq->outputs.emplace_back(reinterpret_cast(out.data())); + taskDataSeq->outputs_count.emplace_back(out.size()); + + // Create Task + auto vectorSumSequential = std::make_shared>(taskDataSeq); + + // Create Perf attributes + auto perfAttr = std::make_shared(); + perfAttr->num_running = 10; + const auto t0 = std::chrono::high_resolution_clock::now(); + perfAttr->current_timer = [&] { + auto current_time_point = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(current_time_point - t0).count(); + return static_cast(duration) * 1e-9; + }; + + // Create and init perf results + auto perfResults = std::make_shared(); + + // Create Perf 
analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(vectorSumSequential);
+  perfAnalyzer->pipeline_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(0, out[0]);
+}
+
+TEST(titov_s_vector_sum_seq, test_task_run) {
+  const int count = 10000000;
+
+  // Create data
+  std::vector<int> in(count, 0);
+  std::vector<int> out(1, 0);
+
+  // Create TaskData
+  std::shared_ptr<ppc::core::TaskData> taskDataSeq = std::make_shared<ppc::core::TaskData>();
+  taskDataSeq->inputs.emplace_back(reinterpret_cast<uint8_t*>(in.data()));
+  taskDataSeq->inputs_count.emplace_back(in.size());
+  taskDataSeq->outputs.emplace_back(reinterpret_cast<uint8_t*>(out.data()));
+  taskDataSeq->outputs_count.emplace_back(out.size());
+
+  // Create Task
+  auto vectorSumSequential = std::make_shared<titov_s_vector_sum_seq::VectorSumSequential<int>>(taskDataSeq);
+
+  // Create Perf attributes
+  auto perfAttr = std::make_shared<ppc::core::PerfAttr>();
+  perfAttr->num_running = 10;
+  const auto t0 = std::chrono::high_resolution_clock::now();
+  perfAttr->current_timer = [&] {
+    auto current_time_point = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(current_time_point - t0).count();
+    return static_cast<double>(duration) * 1e-9;
+  };
+
+  // Create and init perf results
+  auto perfResults = std::make_shared<ppc::core::PerfResults>();
+
+  // Create Perf analyzer
+  auto perfAnalyzer = std::make_shared<ppc::core::Perf>(vectorSumSequential);
+  perfAnalyzer->task_run(perfAttr, perfResults);
+  ppc::core::Perf::print_perf_statistic(perfResults);
+  ASSERT_EQ(0, out[0]);
+}
diff --git a/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp
new file mode 100644
index 00000000000..98ca0cac0b2
--- /dev/null
+++ b/tasks/seq/titov_s_vector_sum/src/ops_seq.cpp
@@ -0,0 +1,45 @@
+// Copyright 2024 Nesterov Alexander
+#include "seq/titov_s_vector_sum/include/ops_seq.hpp"
+
+#include <numeric>
+
+using namespace std::chrono_literals;
+
+template <class InOutType>
+bool titov_s_vector_sum_seq::VectorSumSequential<InOutType>::pre_processing() {
+  internal_order_test();
+  input_ = std::vector<InOutType>(taskData->inputs_count[0]);
+  auto tmp_ptr = reinterpret_cast<InOutType*>(taskData->inputs[0]);
+  for (unsigned i = 0; i < taskData->inputs_count[0]; i++) {
+    input_[i] = tmp_ptr[i];
+  }
+  // Init value for output
+  res = 0;
+  return true;
+}
+
+template <class InOutType>
+bool titov_s_vector_sum_seq::VectorSumSequential<InOutType>::validation() {
+  internal_order_test();
+  // Check count elements of output
+  return taskData->inputs_count[0] > 0 && taskData->outputs_count[0] == 1;
+}
+
+template <class InOutType>
+bool titov_s_vector_sum_seq::VectorSumSequential<InOutType>::run() {
+  internal_order_test();
+  res = std::accumulate(input_.begin(), input_.end(), static_cast<InOutType>(0));
+  return true;
+}
+
+template <class InOutType>
+bool titov_s_vector_sum_seq::VectorSumSequential<InOutType>::post_processing() {
+  internal_order_test();
+  reinterpret_cast<InOutType*>(taskData->outputs[0])[0] = res;
+  return true;
+}
+template class titov_s_vector_sum_seq::VectorSumSequential<int>;
+template class titov_s_vector_sum_seq::VectorSumSequential<double>;
+template class titov_s_vector_sum_seq::VectorSumSequential<float>;
+template class titov_s_vector_sum_seq::VectorSumSequential<int64_t>;
+template class titov_s_vector_sum_seq::VectorSumSequential<uint8_t>;
diff --git a/tasks/stl/example/src/ops_stl.cpp b/tasks/stl/example/src/ops_stl.cpp
index 8ba9fbb6652..1943cdf7c14 100644
--- a/tasks/stl/example/src/ops_stl.cpp
+++ b/tasks/stl/example/src/ops_stl.cpp
@@ -48,7 +48,6 @@ bool nesterov_a_test_task_stl::TestSTLTaskSequential::run() {
   } else if (ops == "-") {
     res -= std::accumulate(input_.begin(), input_.end(), 0);
   }
-  std::this_thread::sleep_for(20ms);
   return true;
 }
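The TBB example below folds with std::multiplies, where the choice of initial value is the whole point: the identity is 1 for a product and 0 for a sum. A minimal standalone sketch (not part of the diff):

#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> v{2, 3, 4};
  // Sum folds from the additive identity 0; product folds from the
  // multiplicative identity 1 (an init of 0 would zero out the fold).
  assert(std::accumulate(v.begin(), v.end(), 0) == 9);
  assert(std::accumulate(v.begin(), v.end(), 1, std::multiplies<>()) == 24);
  return 0;
}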
diff --git a/tasks/tbb/example/src/ops_tbb.cpp b/tasks/tbb/example/src/ops_tbb.cpp
index edb5f2a4978..2abe556fa88 100644
--- a/tasks/tbb/example/src/ops_tbb.cpp
+++ b/tasks/tbb/example/src/ops_tbb.cpp
@@ -50,7 +50,6 @@ bool nesterov_a_test_task_tbb::TestTBBTaskSequential::run() {
   } else if (ops == "*") {
     res = std::accumulate(input_.begin(), input_.end(), 1, std::multiplies<>());
   }
-  std::this_thread::sleep_for(20ms);
   return true;
 }
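One pitfall worth spelling out for the templated vector sum above: std::accumulate does its arithmetic in the type of the initial value, so an int literal 0 silently truncates double and float inputs. That is why run() casts the init to InOutType. A minimal sketch of the failure mode (not part of the diff):

#include <cassert>
#include <numeric>
#include <vector>

int main() {
  std::vector<double> v{0.5, 0.5, 0.5, 0.5};
  // Init 0 is int, so every partial sum is truncated toward zero: result is 0.
  auto truncated = std::accumulate(v.begin(), v.end(), 0);
  // Init 0.0 keeps the accumulation in double: result is 2.0.
  auto correct = std::accumulate(v.begin(), v.end(), 0.0);
  assert(truncated == 0);
  assert(correct == 2.0);
  return 0;
}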