diff --git a/.github/workflows/long-tests.yml b/.github/workflows/long-tests.yml new file mode 100644 index 00000000..be864006 --- /dev/null +++ b/.github/workflows/long-tests.yml @@ -0,0 +1,30 @@ +name: Long tests + +on: + # run tests on push events + push: + # run tests on PR events + pull_request: + types: [opened, synchronize] + + # run tests manually on a given branch (default is master) + workflow_dispatch: + # Inputs the workflow accepts. + inputs: + branch: + # branch to test + description: 'branch to test' + # Default value if no value is explicitly provided + default: 'master' + required: false + +# job +jobs: + parmmg-debug: + uses: ./.github/workflows/main-job.yml + with: + cmake_build_type: RelWithAssert + add_cmake_cfg_args: + branch_name: ${{github.event.inputs.branch}} + code_coverage: true + secrets: inherit diff --git a/.github/workflows/main-job.yml b/.github/workflows/main-job.yml new file mode 100644 index 00000000..c28877b3 --- /dev/null +++ b/.github/workflows/main-job.yml @@ -0,0 +1,266 @@ +on: + workflow_call: + inputs: + cmake_build_type: + description: 'Value for CMAKE_BUILD_TYPE' + type: string + default: Release + required: true + + add_cmake_cfg_args: + description: 'Additional arguments for configuration step' + required: false + type: string + + branch_name: + description: 'Branch to checkout when tests are manually triggered' + required: false + type: string + + code_coverage: + description: 'Code coverage' + required: false + default: true + type: boolean + +# job +jobs: + ci: + if: "! contains(github.event.head_commit.message, '[skip ci]')" + runs-on: ${{ matrix.os }} + + # Launch a matrix of jobs + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04,macos-12] + pointmap: [on,off] + scotch: [on,off] + mpich-instead-openmpi: [on,off] + additional-IOs: [off] + + include: + # test hdf5 IOs only without scotch and without pointmap. + # initial idea was to test vtk IOs too but installation of vtk + # with parallel support takes too much time + # (it would be possible with a pre-installed docker image) + - os: ubuntu-20.04 + pointmap: off + scotch: off + mpich-instead-openmpi: off + additional-IOs: on + + - os: ubuntu-20.04 + pointmap: off + scotch: off + mpich-instead-openmpi: on + additional-IOs: on + + # Remark: hdf5 I/Os are not tested with mpich as the hdf5-mpi + # homebrew package is built with open-mpi dependency + - os: macos-12 + pointmap: off + scotch: off + mpich-instead-openmpi: off + additional-IOs: on + + steps: + - name: Set cmake_build_type and export coverage flags + run: | + if ${{ matrix.os == 'ubuntu-20.04' && inputs.code_coverage == true }}; then + # if code coverage is enabled, linux build is runned in Debug mode + + if [[ ${{ inputs.cmake_build_type }} != Debug ]]; then + echo "WARNING: build type is forced to debug mode on ubuntu to allow coverage." 
+ fi + + echo "BUILD_TYPE=Debug" >> "$GITHUB_ENV" + echo "C_FLG_PROF=-fprofile-arcs -ftest-coverage" >> "$GITHUB_ENV" + + else + echo "BUILD_TYPE=${{ inputs.cmake_build_type }}" >> "$GITHUB_ENV" + fi + + shell: bash + + - name: Print options and set environment variables + run: | + echo "${{ github.event.inputs.name }}: + Os: ${{ matrix.os }}, + Pointmap:${{ matrix.pointmap }}, + Scotch: ${{ matrix.scotch }}, + Mpich instead of openmpi: ${{ matrix.mpich-instead-openmpi }}, + Build: ${{ env.BUILD_TYPE }}, + additional-IOs: ${{matrix.additional-IOs}}" + + # gfortran compiler and scotch makefile depend on the OS + if [ "$RUNNER_OS" == "macOS" ]; then + echo "SCOTCH_MAKE=Make.inc/Makefile.inc.i686_mac_darwin10" >> "$GITHUB_ENV" + echo "FORT_FLG=\"-DCMAKE_Fortran_COMPILER=gfortran-14\"" >> "$GITHUB_ENV" + # Exclude test cases that fail on OSX due to surface model issue in Mmg. + echo "EXCLUDE_TESTS=\"DistribSurf-A319\"" >> "$GITHUB_ENV" + + elif [ "$RUNNER_OS" == "Linux" ]; then + echo "SCOTCH_MAKE=Make.inc/Makefile.inc.x86-64_pc_linux2" >> "$GITHUB_ENV" + echo "FORT_FLG=\"-DCMAKE_Fortran_COMPILER=gfortran-9\"" >> "$GITHUB_ENV" + echo "EXCLUDE_TESTS=\"DistribSurf-A319-adp-0-4|DistribSurf-A319-adp-1-4\"" >> "$GITHUB_ENV" + fi + echo "NJOBS=$NJOBS" >> "$GITHUB_ENV" + # Remark: variable values are still empty inside this context + + shell: bash + env: + NJOBS: "1" + + - name: Install MPICH + # Download mpich + if: matrix.mpich-instead-openmpi == 'on' + run: | + if [ "$RUNNER_OS" == "macOS" ]; then + brew install mpich + elif [ "$RUNNER_OS" == "Linux" ]; then + sudo sed -i 's/azure\.//' /etc/apt/sources.list + sudo apt-get update + sudo apt-get install -y libmpich-dev + fi + + - name: Install OpenMPI + if: matrix.mpich-instead-openmpi == 'off' + run: | + if [ "$RUNNER_OS" == "macOS" ]; then + brew install open-mpi + elif [ "$RUNNER_OS" == "Linux" ]; then + sudo sed -i 's/azure\.//' /etc/apt/sources.list + sudo apt-get update + sudo apt-get install -y libopenmpi-dev + fi + + - name: Install Scotch + # Download scotch only if used + if: matrix.scotch == 'on' + run: | + git clone https://gitlab.inria.fr/scotch/scotch.git + cd scotch + git checkout v6.1.3 + cd src + cp ${{ env.SCOTCH_MAKE }} Makefile.inc + make scotch -j ${{ env.NJOBS }} + make install scotch -j ${{ env.NJOBS }} + + #- name: Install VTK + # # Download vtk only if used + # if: matrix.additional-IOs == 'on' + # run: | + # if [ "$RUNNER_OS" == "Linux" ]; then + # # For opengl + # apt install -y libgl1-mesa-dev + # fi + # wget https://www.vtk.org/files/release/9.2/VTK-9.2.5.tar.gz + # tar -xvzf VTK-9.2.5.tar.gz + # cd VTK-9.2.5 + # cmake -Bbuild -S.
-DVTK_USE_MPI=ON -DVTK_GROUP_ENABLE_MPI=YES + # cmake --build build --target install --config Release -j 4 + + - name: Install hdf5 + # Download hdf5 only if used + if: matrix.additional-IOs == 'on' + run: | + if [ "$RUNNER_OS" == "macOS" ]; then + brew install hdf5-mpi + elif [ "$RUNNER_OS" == "Linux" ]; then + sudo apt-get install -y libhdf5-mpi-dev + fi + + + # checkout the provided branch name if workflow is manually run + - uses: actions/checkout@v4 + if: inputs.branch_name + with: + ref: ${{github.event.inputs.branch}} + path: ParMmg + + # checkout the event branch for automatic workflows + - uses: actions/checkout@v4 + if: inputs.branch_name == '' + with: + path: ParMmg + + - name: Test compilation with shared libs linkage + run: | + cmake -SParMmg -Bbuild_shared -DCI_DIR=~/testparmmg \ + -DCMAKE_C_FLAGS="${{ env.C_FLG_PROF }}" \ + ${{ env.FORT_FLG }} \ + -DBUILD_TESTING=ON \ + -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \ + -DUSE_POINTMAP=${{ matrix.pointmap }} \ + -DUSE_SCOTCH=${{ matrix.scotch }} \ + -DSCOTCH_DIR=scotch \ + -DBUILD_SHARED_LIBS=ON \ + ${{ inputs.add_cmake_cfg_args }} + cmake --build build_shared --config ${{ env.BUILD_TYPE }} -j ${{ env.NJOBS }} + shell: bash + + - name: Configure ParMmg with static libs (default behaviour) + run: | + cmake -SParMmg -Bbuild -DCI_DIR=~/testparmmg\ + -DCMAKE_C_FLAGS="${{ env.C_FLG_PROF }}" \ + ${{ env.FORT_FLG }} \ + -DBUILD_TESTING=ON \ + -DCMAKE_BUILD_TYPE=${{ env.BUILD_TYPE }} \ + -DUSE_POINTMAP=${{ matrix.pointmap }} \ + -DUSE_SCOTCH=${{ matrix.scotch }} \ + -DSCOTCH_DIR=scotch \ + ${{ inputs.add_cmake_cfg_args }} + shell: bash + + - name: Build ParMmg + run: | + cmake --build build --config ${{ env.BUILD_TYPE }} -j ${{ env.NJOBS }} + + - name: Install ParMmg + run: | + sudo cmake --build build --target install --config ${{ env.BUILD_TYPE }} -j ${{ env.NJOBS }} + + - name: Test ParMmg + run: | + cd build + ctest --timeout 7200 -VV -C ${{ env.BUILD_TYPE }} -E ${{ env.EXCLUDE_TESTS }} + + - name: Test non-native I/Os (requiring install of dependencies) + if: matrix.additional-IOs == 'on' + run: | + cd build + ctest -R "hdf5" -VV -C ${{ env.BUILD_TYPE }} -j ${{ env.NJOBS }} + + - name: Archive production artifacts + uses: actions/upload-artifact@v4 + with: + name: ParMmg-build-${{ matrix.os }}-${{ matrix.pointmap }}-${{ matrix.scotch }}-${{ matrix.mpich-instead-openmpi }}-${{ matrix.additional-IOs }} + path: | + build + + upload_coverage: + runs-on: ubuntu-latest + needs: ci + + steps: + - name: Checkout repository + # Codecov needs the source code to pair with coverage + uses: actions/checkout@v4 + with: + path: ParMmg + + - name: Download coverage artifact + uses: actions/download-artifact@v4 + with: + pattern: ParMmg-build-ubuntu-* + + - name: Upload coverage to Codecov + if: inputs.code_coverage == true + uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: true + root_dir: .
+ verbose: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.gitignore b/.gitignore index 157718f7..f24ab519 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ doc/ cscope.files cscope.out tags +.vscode/ diff --git a/CMakeLists.txt b/CMakeLists.txt index 1efe19d2..4b9bf3e5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0) +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) INCLUDE(CMakeDependentOption) INCLUDE(cmake/modules/macros.cmake) @@ -6,6 +6,10 @@ INCLUDE(CheckCSourceCompiles) PROJECT (parmmg) +LIST(APPEND CMAKE_MODULE_PATH + ${PROJECT_SOURCE_DIR}/cmake/modules + ${PROJECT_SOURCE_DIR}/cmake/testing ) + # Must use GNUInstallDirs to install libraries into correct # locations on all platforms. include(GNUInstallDirs) @@ -63,6 +67,8 @@ MARK_AS_ADVANCED(CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET CMAKE_OSX_SYSROOT) +include(add_build_types) + IF(CMAKE_COMPILER_IS_GNUCC) IF(APPLE) # Add flags to the compiler to work on old mac @@ -106,15 +112,32 @@ IF(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) SET(CMAKE_CONFIGURATION_TYPES ${CMAKE_BUILD_TYPE} ) ENDIF() -OPTION ( LIBPARMMG_STATIC "Compile static library" ON) -OPTION ( LIBPARMMG_SHARED "Compile dynamic library" OFF) - # Explicitly set the DNDEBUG flag in case the user or a parent project overrides # it. if (NOT CMAKE_BUILD_TYPE MATCHES Debug) add_definitions(-DNDEBUG) endif() +#------------------------------- static or shared libs +FUNCTION (INVERT_BOOL OUTVAR INVAR) + IF(${INVAR}) + SET(${OUTVAR} OFF PARENT_SCOPE) + ELSE() + SET(${OUTVAR} ON PARENT_SCOPE) + ENDIF() +ENDFUNCTION() + +OPTION(BUILD_SHARED_LIBS "Build shared libraries" OFF) +INVERT_BOOL("BUILD_STATIC_LIBS" ${BUILD_SHARED_LIBS}) +IF ( (${BUILD_STATIC_LIBS} EQUAL ON) AND NOT CMAKE_POSITION_INDEPENDENT_CODE) + SET(CMAKE_POSITION_INDEPENDENT_CODE ON) +ENDIF() + +SET ( LIBPARMMG_SHARED ${BUILD_SHARED_LIBS}) +IF( NOT DEFINED LIBPARMMG_STATIC) + SET ( LIBPARMMG_STATIC ${BUILD_STATIC_LIBS}) +ENDIF() + ############################################################################ ##### ##### MPI @@ -138,7 +161,15 @@ EXECUTE_PROCESS ( COMMAND grep "OMPI_MAJOR_VERSION" "${MPI_C_INCLUDE_PATH}/mpi. 
OUTPUT_VARIABLE CMD_OUTPUT ) IF ( ${CMD_ERROR} MATCHES 0 ) - MESSAGE(WARNING "Possible deadlocks with open-mpi (see https://github.com/open-mpi/ompi/issues/6568 )...") + SET ( OMPI_LIST ${CMD_OUTPUT} ) + separate_arguments(OMPI_LIST) + list(LENGTH OMPI_LIST ompi_list_len) + MATH(EXPR ompi_list_len "${ompi_list_len}-1") + list ( GET OMPI_LIST ${ompi_list_len} OMPI_MAJOR_VERSION ) + IF ( OMPI_MAJOR_VERSION VERSION_LESS 4 ) + MESSAGE(WARNING "Possible deadlocks with version <= 3 of open-mpi + (see https://github.com/open-mpi/ompi/issues/6568 )...") + ENDIF() ENDIF() SET( CMAKE_C_FLAGS "-DUSE_MPI ${CMAKE_C_FLAGS}" ) @@ -147,13 +178,11 @@ MESSAGE( STATUS "Compilation with mpi" ) SET( LIBRARIES ${MPI_C_LIBRARIES} ${LIBRARIES} ) SET( LIBRARIES ${MPI_CXX_LIBRARIES} ${LIBRARIES} ) -EXECUTE_PROCESS ( COMMAND ${MPIEXEC} --help mapping +EXECUTE_PROCESS ( COMMAND ${MPIEXEC} --oversubscribe sleep 1 RESULT_VARIABLE CMD_ERROR OUTPUT_VARIABLE CMD_OUTPUT ) -STRING(REGEX REPLACE "\"" " " CMD_OUT "${CMD_OUTPUT}") - -IF ( "${CMD_OUT}" MATCHES "oversubscribe" ) +IF ( "${CMD_ERROR}" MATCHES "0" ) SET ( MPI_ARGS "-oversubscribe" ) ENDIF() @@ -179,68 +208,10 @@ ENDIF ( ) ############################################################################ ##### -##### Scotch +##### Search for optional libraries: scotch, vtk, linearelasticity ##### ############################################################################ -# Find SCOTCH library? -SET(SCOTCH_DIR "" CACHE PATH "Installation directory for scotch") -LIST(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/modules) - -# add Scotch library? -OPTION ( USE_SCOTCH "Use SCOTCH TOOL for renumbering" ON ) - -IF ( USE_SCOTCH ) - - FIND_PACKAGE(SCOTCH) - - IF ( NOT SCOTCH_FOUND ) - MESSAGE ( WARNING "Scotch library not found:" - "Using scotch reduce the execution time of mmg3d " - "(see https://gforge.inria.fr/frs/?group_id=248 to download it)." - "If you have already installed Scotch and want to use it, " - "please set the CMake variable or environment variable SCOTCH_DIR " - "to your scotch directory.") - ENDIF ( ) - -ENDIF ( ) - - -############################################################################ -##### -##### VTK (to parse (p)vtp/(p)vtu files ) -##### -############################################################################ -OPTION ( USE_VTK "Use VTK I/O" ON ) - -IF ( USE_VTK ) - FIND_PACKAGE(VTK QUIET) - - IF ( NOT VTK_FOUND ) - MESSAGE ( WARNING "VTK library not found: vtk I/O will not be available.") - ENDIF ( ) -ENDIF ( ) - -############################################################################### -##### -##### Add dependent options -##### -############################################################################### - -IF( SCOTCH_FOUND ) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DUSE_SCOTCH") - MESSAGE(STATUS - "Compilation with scotch: ${SCOTCH_LIBRARIES}") - SET( LIBRARIES ${LIBRARIES} ${SCOTCH_LIBRARIES}) -ENDIF() - - -IF ( VTK_FOUND ) - ENABLE_LANGUAGE ( CXX ) - ADD_DEFINITIONS(-DUSE_VTK) - MESSAGE ( STATUS "Compilation with VTK: add vtp and vtu I/O." 
) - INCLUDE ( ${VTK_USE_FILE} ) - SET( LIBRARIES ${LIBRARIES} "-lstdc++" ${VTK_LIBRARIES} ) -ENDIF ( ) +INCLUDE(optional-dependencies) ############################################################################ ##### @@ -255,8 +226,10 @@ SET ( SCOTCH_CFG -DUSE_SCOTCH=${USE_SCOTCH} -DSCOTCH_DIR=${SCOTCH_DIR} -DSCOTCH_scotch_LIBRARY=${SCOTCH_scotch_LIBRARY} -DSCOTCH_scotcherrexit_LIBRARY=${SCOTCH_scotcherrexit_LIBRARY} ) -# forward VTK config -SET ( VTK_CFG -DUSE_VTK=${USE_VTK} -DVTK_DIR=${VTK_DIR} ) +IF ( VTK_FOUND ) + # forward VTK config + SET ( VTK_CFG -DUSE_VTK=${USE_VTK} -DVTK_DIR=${VTK_DIR} ) +ENDIF() # forward compiler SET ( COMPILER_CFG -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} @@ -273,65 +246,63 @@ OPTION ( DOWNLOAD_MMG "Download and build automatically Mmg." ON ) IF ( DOWNLOAD_MMG ) UNSET(MMG_DIR CACHE) - UNSET(MMG_BUILDDIR CACHE) # Use pointmap - OPTION ( USE_POINTMAP "Use map for point tracking" OFF ) + OPTION ( USE_POINTMAP "Use map for point tracking" ON ) EXTERNALPROJECT_ADD ( Mmg GIT_REPOSITORY https://github.com/MmgTools/mmg.git - GIT_TAG 889d408419b5c48833c249695987cf6ec699d399 - INSTALL_COMMAND echo "Mmg installation DISABLED" + GIT_TAG v5.8.0 + INSTALL_COMMAND ${CMAKE_MAKE_PROGRAM} install CMAKE_ARGS ${MMG_ARGS} -DUSE_ELAS=OFF ${COMPILER_CFG} ${FLAGS_CFG} ${SCOTCH_CFG} ${VTK_CFG} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - -DBUILD=MMG -DBUILD_SHARED_LIBS=${LIBPARMMG_SHARED} -DUSE_POINTMAP=${USE_POINTMAP}) + -DBUILD=MMG3D -DBUILD_SHARED_LIBS=${LIBPARMMG_SHARED} + -DUSE_VTK=${USE_VTK} -DPMMG_CALL=1 + -DMMG_INSTALL_PRIVATE_HEADERS=ON + -DUSE_POINTMAP=${USE_POINTMAP} -DCMAKE_INSTALL_PREFIX=../Mmg-install) EXTERNALPROJECT_GET_PROPERTY ( Mmg BINARY_DIR ) SET ( MMG_BINARY_DIR ${BINARY_DIR} ) EXTERNALPROJECT_GET_PROPERTY ( Mmg SOURCE_DIR ) SET ( MMG_SOURCE_DIR ${SOURCE_DIR} ) + SET (MMG_INSTALL_DIR ${SOURCE_DIR}/../Mmg-install) + SET (MMG_INCLUDE_DIRS ${MMG_INSTALL_DIR}/include ) + EXTERNALPROJECT_ADD_STEP ( Mmg reconfigure COMMAND ${CMAKE_COMMAND} -E remove ${MMG_BINARY_DIR}/CMakeCache.txt DEPENDEES update DEPENDERS configure ) - INCLUDE_DIRECTORIES(${MMG_BINARY_DIR}/include) - IF( LIBPARMMG_SHARED ) - SET(MMG3D_LIBRARY ${MMG_BINARY_DIR}/lib/libmmg3d${CMAKE_SHARED_LIBRARY_SUFFIX}) + SET(MMG3D_LIBRARY ${MMG_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libmmg3d${CMAKE_SHARED_LIBRARY_SUFFIX}) ELSE() # default behaviour is to link static libs - SET(MMG3D_LIBRARY ${MMG_BINARY_DIR}/lib/libmmg3d${CMAKE_STATIC_LIBRARY_SUFFIX}) + SET(MMG3D_LIBRARY ${MMG_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libmmg3d${CMAKE_STATIC_LIBRARY_SUFFIX}) ENDIF() MESSAGE(STATUS "Compilation with Mmg: ${MMG3D_LIBRARY}") SET( LIBRARIES ${MMG3D_LIBRARY} ${LIBRARIES}) - # Additionnal directories to access the Mmg sources - INCLUDE_DIRECTORIES(${MMG_BINARY_DIR}/src/common) - INCLUDE_DIRECTORIES(${MMG_SOURCE_DIR}/src/mmg3d) - INCLUDE_DIRECTORIES(${MMG_SOURCE_DIR}/src/common) - ELSE ( ) UNSET ( USE_POINTMAP CACHE ) - SET(MMG_DIR "" CACHE PATH "Installation directory for mmg") - SET(MMG_BUILDDIR "" CACHE PATH "Build directory for mmg") + SET(MMG_DIR "" CACHE PATH "Installation or build directory for mmg") - MESSAGE ( STATUS "Manual installation of Mmg: please, specify the MMG_DIR and MMG_BUILDDIR CMake variables" ) + MESSAGE ( STATUS "Manual installation of Mmg: please, build and install MMG" + " with MMG_INSTALL_PRIVATE_HEADERS option enabled and provide the path to" + " the install directory in the MMG_DIR variable of ParMmg." 
) FIND_PACKAGE(MMG) IF(NOT MMG_FOUND ) - MESSAGE ( FATAL_ERROR "ERROR: The installation directory for mmg is required:" - "(see https://github.com/MmgTools/mmg and download the branch develop)." + MESSAGE ( FATAL_ERROR "ERROR: The installation directory for mmg is required: " + "(see https://github.com/MmgTools/mmg and download the branch develop). " "If you have already installed Mmg and want to use it, " - "please set the CMake variable or environment variable MMG_DIR " - "to your mmg directory and the CMake variable or environment variable" - " MMG_BUILDDIR " - "to your mmg build directory.") + "please build Mmg with MMG_INSTALL_PRIVATE_HEADERS CMake " + "option enabled and set the MMG_DIR CMake variable or environment variable " + "to the mmg install directory.") ELSE() INCLUDE_DIRECTORIES(${MMG_INCLUDE_DIRS}) @@ -341,22 +312,13 @@ ELSE ( ) SET( LIBRARIES ${MMG_LIBRARIES} ${LIBRARIES}) - # Additionnal directories to access the Mmg sources - INCLUDE_DIRECTORIES(${MMG_BUILDDIR_INTERNAL}/src/common) - IF ( MMG_DIR ) - INCLUDE_DIRECTORIES(${MMG_DIR}/src/mmg2d) - INCLUDE_DIRECTORIES(${MMG_DIR}/src/mmg3d) - INCLUDE_DIRECTORIES(${MMG_DIR}/src/common) - ELSE ( ) - MESSAGE ( FATAL_ERROR "ERROR: The source directory for mmg is required:" - "(see https://github.com/MmgTools/mmg and download the branch develop)." - "If you have already installed Mmg and want to use it, " - "please set the CMake variable or environment variable MMG_DIR " - "to your mmg directory.") - ENDIF ( ) ENDIF ( ) ENDIF() +INCLUDE_DIRECTORIES(${MMG_INCLUDE_DIRS}) +INCLUDE_DIRECTORIES(${MMG_INCLUDE_DIRS}/mmg/mmg3d) +INCLUDE_DIRECTORIES(${MMG_INCLUDE_DIRS}/mmg/common) + ############################################################################ ##### ##### Metis / ParMetis @@ -367,9 +329,15 @@ OPTION ( DOWNLOAD_METIS "Download and build automatically Metis/ParMetis." 
ON ) IF ( DOWNLOAD_METIS ) UNSET ( METIS_DIR CACHE ) + # Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24: + if (CMAKE_VERSION VERSION_GREATER "3.23.4") + cmake_policy(SET CMP0135 NEW) + endif() + # Metis EXTERNALPROJECT_ADD ( Metis URL ${CMAKE_CURRENT_SOURCE_DIR}/dependencies/metis-5.1.0.tar.gz + URL_MD5 5465e67079419a69e0116de24fce58fe UPDATE_COMMAND "" CONFIGURE_COMMAND ${CMAKE_MAKE_PROGRAM} config prefix=../../../Metis-install BUILD_IN_SOURCE 1 @@ -566,6 +534,11 @@ INSTALL(FILES ${pmmg_headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/parmmg COM # Copy header files in project directory at every build step COPY_HEADERS_AND_CREATE_TARGET ( ${PMMG_SOURCE_DIR} ${PMMG_BINARY_DIR} ${PMMG_INCLUDE} ) +IF (LIBPARMMG_STATIC OR LIBPARMMG_SHARED) + SET(LIBPARMMG_INTERNAL ON ) +ELSE() + SET(LIBPARMMG_INTERNAL OFF ) +ENDIF() install(EXPORT ParMmgTargets FILE ParMmgTargets.cmake @@ -573,12 +546,6 @@ install(EXPORT ParMmgTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/parmmg ) -IF (LIBPARMMG_STATIC OR LIBPARMMG_SHARED) - SET(LIBPARMMG_INTERNAL ON ) -ELSE() - SET(LIBPARMMG_INTERNAL OFF ) -ENDIF() - ############################################################################### ##### ##### Compile PMMG executable @@ -588,11 +555,11 @@ ADD_AND_INSTALL_EXECUTABLE ( ${PROJECT_NAME} "${pmmg_library_files}" ${pmmg_main_file} ) IF ( DOWNLOAD_MMG ) - Add_Dependencies(parmmg Mmg) + Add_Dependencies(${PROJECT_NAME} Mmg) ENDIF ( ) IF ( DOWNLOAD_METIS ) - Add_Dependencies(parmmg Metis) + Add_Dependencies(${PROJECT_NAME} Metis) ENDIF ( ) ############################################################################ @@ -603,17 +570,17 @@ ENDIF ( ) IF (NOT WIN32 OR MINGW) - ADD_CUSTOM_TARGET(GenerateGitHash + ADD_CUSTOM_TARGET(GenerateGitHashParMmg COMMAND ./git_log_pmmg.sh ${PROJECT_SOURCE_DIR} ${PMMG_BINARY_DIR} WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/scripts/ COMMENT "Getting git commit hash" ) - ADD_DEPENDENCIES(${PROJECT_NAME} GenerateGitHash) + ADD_DEPENDENCIES(${PROJECT_NAME} GenerateGitHashParMmg) IF( LIBPARMMG_STATIC ) - ADD_DEPENDENCIES(lib${PROJECT_NAME}_a GenerateGitHash) + ADD_DEPENDENCIES(lib${PROJECT_NAME}_a GenerateGitHashParMmg) ENDIF () IF( LIBPARMMG_SHARED ) - ADD_DEPENDENCIES(lib${PROJECT_NAME}_so GenerateGitHash) + ADD_DEPENDENCIES(lib${PROJECT_NAME}_so GenerateGitHashParMmg) ENDIF () INCLUDE_DIRECTORIES(${PMMG_BINARY_DIR}) diff --git a/README.md b/README.md index eb37cdc0..cbe475fc 100644 --- a/README.md +++ b/README.md @@ -53,11 +53,13 @@ To get and build ParMmg, you will need: If you don't have internet access and/or want to use your own installation of Mmg (resp. Metis), you can disable the automatic download of Mmg setting the - `DOWNLOAD_MMG` (resp. `DOWNLOAD_METIS`) CMake variable to `OFF`. In this case, - you can help CMake to find Mmg (resp. Metis) by specifying the source - directory of Mmg in the `MMG_DIR` variable and the build directory of Mmg in - the `MMG_BUILDDIR` variable (resp. the installation directory of Metis in the - `METIS_DIR` variable). + `DOWNLOAD_MMG` (resp. `DOWNLOAD_METIS`) CMake variable to `OFF`. In this case: + + - Mmg has to be built with the private header installation enabled (turn `ON` the ` MMG_INSTALL_PRIVATE_HEADERS` CMake variable in Mmg at cmake configuration step); + - you can help CMake to find Mmg by specifying the installation + directory of Mmg in the `MMG_DIR` variable; + - you can help CMake to find Metis by specifying the installation directory of Metis in the + `METIS_DIR` variable. 
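For instance, a configuration that reuses a pre-installed Mmg and Metis could look like the sketch below (the install paths are purely illustrative and have to be adapted to your own setup; it assumes Mmg was built and installed with `MMG_INSTALL_PRIVATE_HEADERS` turned `ON`):

```Shell
# hypothetical install locations for Mmg and Metis
cmake path/to/ParMmg -DDOWNLOAD_MMG=OFF   -DMMG_DIR=path/to/mmg/install \
                     -DDOWNLOAD_METIS=OFF -DMETIS_DIR=path/to/metis/install
cmake --build . -j 4
```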
Example: ```Shell diff --git a/cmake/modules/FindElas.cmake b/cmake/modules/FindElas.cmake new file mode 100644 index 00000000..1a304346 --- /dev/null +++ b/cmake/modules/FindElas.cmake @@ -0,0 +1,57 @@ +## ============================================================================= +## This file is part of the mmg software package for the tetrahedral +## mesh modification. +##** Copyright (c) Bx INP/Inria/UBordeaux/UPMC, 2004- . +## +## mmg is free software: you can redistribute it and/or modify it +## under the terms of the GNU Lesser General Public License as published +## by the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## mmg is distributed in the hope that it will be useful, but WITHOUT +## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +## FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +## License for more details. +## +## You should have received a copy of the GNU Lesser General Public +## License and of the GNU General Public License along with mmg (in +## files COPYING.LESSER and COPYING). If not, see +## . Please read their terms carefully and +## use this copy of the mmg distribution only if you accept them. +## ============================================================================= + +IF ((NOT WIN32) AND (NOT WIN64)) + SET ( ELAS_INCLUDE_DIR ELAS_INCLUDE_DIR-NOTFOUND ) + SET ( ELAS_LIBRARY ELAS_LIBRARY-NOTFOUND ) +ENDIF() + +FIND_PATH(ELAS_INCLUDE_DIR + NAMES elastic.h + HINTS ${ELAS_INCLUDE_DIR} + $ENV{ELAS_INCLUDE_DIR} + $ENV{HOME}/include + ${ELAS_DIR}/include + $ENV{ELAS_DIR}/include + ${ELAS_DIR}/sources + $ENV{ELAS_DIR}/sources + PATH_SUFFIXES ELAS + DOC "Directory of ELAS Header") + +# Check for elas library (search in common locations in order to prevent +# modifications in ELAS installation) +FIND_LIBRARY(ELAS_LIBRARY + NAMES Elas Elas${ELAS_LIB_SUFFIX} + HINTS ${ELAS_LIBRARY} + $ENV{ELAS_LIBRARY} + $ENV{HOME}/lib + ${ELAS_DIR}/lib + $ENV{ELAS_DIR}/lib + DOC "The ELAS library" + ) + +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(ELAS DEFAULT_MSG + ELAS_INCLUDE_DIR ELAS_LIBRARY) +IF ((NOT WIN32) AND (NOT WIN64)) + MARK_AS_ADVANCED(ELAS_INCLUDE_DIR ELAS_LIBRARY) +ENDIF() diff --git a/cmake/modules/FindMMG.cmake b/cmake/modules/FindMMG.cmake index 222c53ec..b95c9a07 100644 --- a/cmake/modules/FindMMG.cmake +++ b/cmake/modules/FindMMG.cmake @@ -34,13 +34,12 @@ # MMG_LIBRARIES - mmg component libraries to be linked # # The user can give specific paths where to find the libraries adding cmake -# options at configure (ex: cmake path/to/project -DMMG_DIR=path/to/mmg): -# MMG_DIR - Where to find the base directory of mmg +# options at configure (ex: cmake path/to/project -DMMG_DIR=path/to/mmg/install): +# MMG_DIR - Where to find the install directory of mmg # MMG_INCDIR - Where to find the header files # MMG_LIBDIR - Where to find the library files -# MMG_BUILDDIR - Where to find the build directory of Mmg # The module can also look for the following environment variables if paths -# are not given as cmake variable: MMG_DIR, MMG_INCDIR, MMG_LIBDIR, MMG_BUILDDIR +# are not given as cmake variable: MMG_DIR, MMG_INCDIR, MMG_LIBDIR if (NOT MMG_FOUND) set(MMG_DIR "" CACHE PATH "Installation directory of MMG library") @@ -49,27 +48,6 @@ if (NOT MMG_FOUND) endif() endif() -# Looking for the Mmg build directory -# ----------------------------------- -set(ENV_MMG_BUILDDIR "$ENV{MMG_BUILDDIR}") - -if ( NOT MMG_BUILDDIR ) - 
FIND_PATH(MMG_BUILDDIR_INTERNAL - NAMES src/common/mmgcmakedefines.h - HINTS ${ENV_MMG_BUILDDIR} ${MMG_DIR} ${ENV_MMG_DIR} - PATH_SUFFIXES build Build BUILD builds Builds BUILDS - DOC "The mmg build directory" - ) -else () - set(MMG_BUILDDIR_INTERNAL "${MMG_BUILDDIR}") -endif() - -if ( NOT MMG_BUILDDIR AND MMG_BUILDDIR_INTERNAL ) - SET ( MMG_BUILDDIR "${MMG_BUILDDIR_INTERNAL}" ) -endif ( ) -mark_as_advanced(MMG_BUILDDIR_INTERNAL) - - # Looking for include # ------------------- @@ -81,22 +59,10 @@ set(ENV_MMG_INCDIR "$ENV{MMG_INCDIR}") if(ENV_MMG_INCDIR) list(APPEND _inc_env "${ENV_MMG_INCDIR}") -elseif(ENV_MMG_BUILDDIR) - list(APPEND _inc_env "${ENV_MMG_BUILDDIR}/include") - list(APPEND _inc_env "${ENV_MMG_BUILDDIR}/include/mmg") elseif(ENV_MMG_DIR) - if ( MMG_BUILDDIR ) - list(APPEND _inc_env "${MMG_BUILDDIR}/include") - list(APPEND _inc_env "${MMG_BUILDDIR}/include/mmg") - else ( ) - list(APPEND _inc_env "${ENV_MMG_DIR}") - list(APPEND _inc_env "${ENV_MMG_DIR}/include") - list(APPEND _inc_env "${ENV_MMG_DIR}/include/mmg") - if ( MMG_BUILDDIR_INTERNAL ) - list(APPEND _inc_env "${MMG_BUILDDIR_INTERNAL}/include") - list(APPEND _inc_env "${MMG_BUILDDIR_INTERNAL}/include/mmg") - endif() - endif() + list(APPEND _inc_env "${ENV_MMG_DIR}") + list(APPEND _inc_env "${ENV_MMG_DIR}/include") + list(APPEND _inc_env "${ENV_MMG_DIR}/include/mmg") else() if(WIN32) string(REPLACE ":" ";" _inc_env "$ENV{INCLUDE}") @@ -124,39 +90,24 @@ if(MMG_INCDIR) find_path(MMG_libmmgtypes.h_DIRS NAMES libmmgtypes.h HINTS ${MMG_INCDIR} - PATH_SUFFIXES "mmg2d" "mmgs" "mmg3d") -elseif(MMG_BUILDDIR) - set(MMG_libmmgtypes.h_DIRS "MMG_libmmgtypes.h_DIRS-NOTFOUND") - find_path(MMG_libmmgtypes.h_DIRS - NAMES libmmgtypes.h - HINTS ${MMG_BUILDDIR} - PATH_SUFFIXES "include" "include/mmg" "include/mmg/mmg2d" - "include/mmg/mmgs" "include/mmg/mmg3d") + PATH_SUFFIXES "mmg" "mmg/mmg2d" "mmg/mmgs" "mmg/mmg3d" "mmg/common" "mmg2d" + "mmgs" "mmg3d") else() if(MMG_DIR) set(MMG_libmmgtypes.h_DIRS "MMG_libmmgtypes.h_DIRS-NOTFOUND") - if ( MMG_BUILDDIR ) - find_path(MMG_libmmgtypes.h_DIRS - NAMES */libmmgtypes.h - HINTS ${MMG_BUILDDIR} - PATH_SUFFIXES "include" "include/mmg" "include/mmg/mmg2d" - "include/mmg/mmgs" "include/mmg/mmg3d") - else() - find_path(MMG_libmmgtypes.h_DIRS - NAMES libmmgtypes.h - HINTS ${MMG_DIR} ${MMG_BUILDDIR_INTERNAL} - PATH_SUFFIXES "include" "include/mmg" "include/mmg/mmg2d" - "include/mmg/mmgs" "include/mmg/mmg3d") - endif() - + find_path(MMG_libmmgtypes.h_DIRS + NAMES libmmgtypes.h + HINTS ${MMG_DIR}/include + PATH_SUFFIXES "mmg" "mmg/common") else() set(MMG_libmmgtypes.h_DIRS "MMG_libmmgtypes.h_DIRS-NOTFOUND") find_path(MMG_libmmgtypes.h_DIRS NAMES libmmgtypes.h - HINTS ${_inc_env}) + HINTS ${_inc_env} + PATH_SUFFIXES "mmg" "mmg/common") endif() endif() -STRING(REGEX REPLACE "(mmg/mmg2d)|(mmg/mmgs)|(mmg/mmg3d)" "" +STRING(REGEX REPLACE "(mmg/mmg2d)|(mmg/mmgs)|(mmg/mmg3d)|(mmg/common)" "" MMG_libmmgtypes.h_DIRS ${MMG_libmmgtypes.h_DIRS}) mark_as_advanced(MMG_libmmgtypes.h_DIRS) @@ -183,19 +134,9 @@ unset(_lib_env) set(ENV_MMG_LIBDIR "$ENV{MMG_LIBDIR}") if(ENV_MMG_LIBDIR) list(APPEND _lib_env "${ENV_MMG_LIBDIR}") -elseif(ENV_MMG_BUILDDIR) - list(APPEND _lib_env "${ENV_MMG_BUILDDIR}") - list(APPEND _lib_env "${ENV_MMG_BUILDDIR}/lib") elseif(ENV_MMG_DIR) - if ( MMG_BUILDDIR ) - list(APPEND _lib_env "${MMG_BUILDDIR}/lib") - else ( ) - list(APPEND _lib_env "${ENV_MMG_DIR}") - list(APPEND _lib_env "${ENV_MMG_DIR}/lib") - if ( MMG_BUILDDIR_INTERNAL ) - list(APPEND _lib_env "${MMG_BUILDDIR_INTERNAL}/lib") - endif() - 
endif() + list(APPEND _lib_env "${ENV_MMG_DIR}") + list(APPEND _lib_env "${ENV_MMG_DIR}/lib") else() if(WIN32) string(REPLACE ":" ";" _lib_env "$ENV{LIB}") @@ -223,17 +164,10 @@ else() if(MMG_DIR) set(MMG_mmg_LIBRARY "MMG_mmg_LIBRARY-NOTFOUND") - if ( MMG_BUILDDIR ) - find_library(MMG_mmg_LIBRARY - NAMES mmg - HINTS ${MMG_BUILDDIR} - PATH_SUFFIXES lib lib32 lib64) - else () - find_library(MMG_mmg_LIBRARY - NAMES mmg - HINTS ${MMG_DIR} ${MMG_BUILDDIR_INTERNAL} - PATH_SUFFIXES lib lib32 lib64) - endif() + find_library(MMG_mmg_LIBRARY + NAMES mmg + HINTS ${MMG_DIR} + PATH_SUFFIXES "lib" "lib32" "lib64") else() set(MMG_mmg_LIBRARY "MMG_mmg_LIBRARY-NOTFOUND") find_library(MMG_mmg_LIBRARY diff --git a/cmake/modules/add_build_types.cmake b/cmake/modules/add_build_types.cmake new file mode 100644 index 00000000..0186c40f --- /dev/null +++ b/cmake/modules/add_build_types.cmake @@ -0,0 +1,148 @@ +## ============================================================================= +## This file is part of the mmg software package for the tetrahedral +## mesh modification. +##** Copyright (c) Bx INP/Inria/UBordeaux/UPMC, 2004- . +## +## mmg is free software: you can redistribute it and/or modify it +## under the terms of the GNU Lesser General Public License as published +## by the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## mmg is distributed in the hope that it will be useful, but WITHOUT +## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +## FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +## License for more details. +## +## You should have received a copy of the GNU Lesser General Public +## License and of the GNU General Public License along with mmg (in +## files COPYING.LESSER and COPYING). If not, see +## . Please read their terms carefully and +## use this copy of the mmg distribution only if you accept them. +## ============================================================================= + +# Create compilation modes: +# - Maintener: adds compiler warnings to Debug mode +# - RelWithAssert: RelWithDebInfo without NDEBUG precompiler flag + +# Add Maintener mode +if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + set ( CLANG_FLAGS + "-O0 -g -Weverything -Wno-sign-conversion -Wno-char-subscripts -Wno-padded") + set(CMAKE_CXX_FLAGS_MAINTAINER "${CLANG_FLAGS}" + CACHE STRING + "Flags used by the CXX compiler during Maintainer builds." + FORCE) + set(CMAKE_C_FLAGS_MAINTAINER "${CLANG_FLAGS}" + CACHE STRING + "Flags used by the C compiler during Maintainer builds." + FORCE) + set(CMAKE_EXE_LINKER_FLAGS_MAINTAINER ${CMAKE_EXE_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used for linking binaries during Maintainer builds." + FORCE) + set(CMAKE_SHARED_LINKER_FLAGS_MAINTAINER ${CMAKE_SHARED_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used by the shared libraries linker during Maintainer builds." + FORCE) + set(CMAKE_STATIC_LINKER_FLAGS_MAINTAINER ${CMAKE_STATIC_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used by the static libraries linker during Maintainer builds." + FORCE) + +elseif (CMAKE_C_COMPILER_ID STREQUAL "GNU") + set(CMAKE_CXX_FLAGS_MAINTAINER "-O0 -g -Wall" CACHE STRING + "Flags used by the CXX compiler during Maintainer builds." + FORCE) + set(CMAKE_C_FLAGS_MAINTAINER "-O0 -g -Wall" CACHE STRING + "Flags used by the C compiler during Maintainer builds." 
+ FORCE) + + set ( LD_LINKER_FLAGS "-Wl,--warn-unresolved-symbols,--warn-once" ) + + set(CMAKE_EXE_LINKER_FLAGS_MAINTAINER + ${LD_LINKER_FLAGS} ${CMAKE_EXE_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used for linking binaries during Maintainer builds." + FORCE) + set(CMAKE_SHARED_LINKER_FLAGS_MAINTAINER + ${LD_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used by the shared libraries linker during Maintainer builds." + FORCE) + # Static lib linking uses ar and not ld: -Wl is not supported + set(CMAKE_STATIC_LINKER_FLAGS_MAINTAINER + ${CMAKE_STATIC_LINKER_FLAGS_DEBUG} + CACHE STRING + "Flags used by the static libraries linker during Maintainer builds." + FORCE) + +else () + # Not implemented: use Debug flags + set(CMAKE_CXX_FLAGS_MAINTAINER "${CMAKE_CXX_FLAGS_DEBUG}" + CACHE STRING + "Flags used by the CXX compiler during Maintainer builds." + FORCE) + set(CMAKE_C_FLAGS_MAINTAINER "${CMAKE_C_FLAGS_DEBUG}" + CACHE STRING + "Flags used by the C compiler during Maintainer builds." + FORCE) + set(CMAKE_EXE_LINKER_FLAGS_MAINTAINER "${CMAKE_EXE_LINKER_FLAGS_DEBUG}" + CACHE STRING + "Flags used for linking binaries during Maintainer builds." + FORCE) + set(CMAKE_SHARED_LINKER_FLAGS_MAINTAINER "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}" + CACHE STRING + "Flags used by the shared libraries linker during Maintainer builds." + FORCE) + set(CMAKE_STATIC_LINKER_FLAGS_MAINTAINER "${CMAKE_STATIC_LINKER_FLAGS_DEBUG}" + CACHE STRING + "Flags used by the static libraries linker during Maintainer builds." + FORCE) + +endif() + +mark_as_advanced( + CMAKE_CXX_FLAGS_MAINTAINER + CMAKE_C_FLAGS_MAINTAINER + CMAKE_EXE_LINKER_FLAGS_MAINTAINER + CMAKE_SHARED_LINKER_FLAGS_MAINTAINER + CMAKE_STATIC_LINKER_FLAGS_MAINTAINER + ) + +# Add RelWithAssert mode +STRING(REGEX REPLACE ".DNDEBUG" " " + RELWITHASSERT_C_FLAGS "${CMAKE_C_FLAGS_RELWITHDEBINFO}" ) + +STRING(REGEX REPLACE ".DNDEBUG" " " + RELWITHASSERT_CXX_FLAGS "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" ) + +set(CMAKE_CXX_FLAGS_RELWITHASSERT "${RELWITHASSERT_CXX_FLAGS}" + CACHE STRING + "Flags used by the CXX compiler during RelWithAssert builds." + FORCE) + +set(CMAKE_C_FLAGS_RELWITHASSERT "${RELWITHASSERT_C_FLAGS}" + CACHE STRING + "Flags used by the C compiler during RelWithAssert builds." + FORCE) + +set(CMAKE_EXE_LINKER_FLAGS_RELWITHASSERT "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}" + CACHE STRING + "Flags used for linking binaries during RelWithAssert builds." + FORCE) +set(CMAKE_SHARED_LINKER_FLAGS_RELWITHASSERT "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}" + CACHE STRING + "Flags used by the shared libraries linker during RelWithAssert builds." + FORCE) +set(CMAKE_STATIC_LINKER_FLAGS_RELWITHASSERT "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}" + CACHE STRING + "Flags used by the static libraries linker during RelWithAssert builds."
+ FORCE) + +mark_as_advanced( + CMAKE_CXX_FLAGS_RELWITHASSERT + CMAKE_C_FLAGS_RELWITHASSERT + CMAKE_EXE_LINKER_FLAGS_RELWITHASSERT + CMAKE_SHARED_LINKER_FLAGS_RELWITHASSERT + CMAKE_STATIC_LINKER_FLAGS_RELWITHASSERT + ) diff --git a/cmake/modules/macros.cmake b/cmake/modules/macros.cmake index 6e9bdff0..bfeb2507 100644 --- a/cmake/modules/macros.cmake +++ b/cmake/modules/macros.cmake @@ -108,7 +108,7 @@ MACRO ( COPY_HEADERS_AND_CREATE_TARGET COPY_HEADER ( ${binary_dir} git_log_pmmg.h ${include_dir} git_log_pmmg.h - GenerateGitHash copy_pmmggithash ) + GenerateGitHashParMmg copy_pmmggithash ) LIST ( APPEND tgt_list copy_pmmggithash) ENDIF () @@ -142,8 +142,10 @@ MACRO ( ADD_AND_INSTALL_LIBRARY $ ) ENDIF ( ) - SET_TARGET_PROPERTIES ( ${target_name} - PROPERTIES OUTPUT_NAME ${output_name} ) + SET_TARGET_PROPERTIES ( ${target_name} PROPERTIES + OUTPUT_NAME ${output_name} + VERSION ${CMAKE_RELEASE_VERSION_MAJOR}.${CMAKE_RELEASE_VERSION_MINOR}.${CMAKE_RELEASE_VERSION_PATCH} + SOVERSION ${CMAKE_RELEASE_VERSION_MAJOR} ) SET_PROPERTY(TARGET ${target_name} PROPERTY C_STANDARD 99) diff --git a/cmake/modules/optional-dependencies.cmake b/cmake/modules/optional-dependencies.cmake new file mode 100644 index 00000000..ce0b34e7 --- /dev/null +++ b/cmake/modules/optional-dependencies.cmake @@ -0,0 +1,204 @@ +## ============================================================================= +## This file is part of the mmg software package for the tetrahedral +## mesh modification. +## Copyright (c) Bx INP/Inria/UBordeaux/UPMC, 2004- . +## +## mmg is free software: you can redistribute it and/or modify it +## under the terms of the GNU Lesser General Public License as published +## by the Free Software Foundation, either version 3 of the License, or +## (at your option) any later version. +## +## mmg is distributed in the hope that it will be useful, but WITHOUT +## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +## FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +## License for more details. +## +## You should have received a copy of the GNU Lesser General Public +## License and of the GNU General Public License along with mmg (in +## files COPYING.LESSER and COPYING). If not, see +## . Please read their terms carefully and +## use this copy of the mmg distribution only if you accept them. +## ============================================================================= + +############################################################################ +##### +##### Scotch +##### +############################################################################ +# Find SCOTCH library? +SET(SCOTCH_DIR "" CACHE PATH "Installation directory for scotch") + +# add Scotch library? +SET ( USE_SCOTCH "" CACHE STRING "Use SCOTCH tool for renumbering (ON, OFF or )" ) +SET_PROPERTY(CACHE USE_SCOTCH PROPERTY STRINGS "ON" "OFF" "") + +IF ( NOT DEFINED USE_SCOTCH OR USE_SCOTCH STREQUAL "" OR USE_SCOTCH MATCHES " +" ) + # Variable is not provided by user + FIND_PACKAGE(SCOTCH QUIET) + +ELSE () + IF ( USE_SCOTCH ) + # User wants to use scotch + FIND_PACKAGE(SCOTCH) + IF ( NOT SCOTCH_FOUND ) + MESSAGE ( FATAL_ERROR "Scotch library not found:" + "Using scotch reduce the execution time of mmg3d " + "(see https://gforge.inria.fr/frs/?group_id=248 to download it)." 
+ "If you have already installed Scotch and want to use it, " + "please set the CMake variable or environment variable SCOTCH_DIR " + "to your scotch directory.") + ENDIF ( ) + ENDIF ( ) + +ENDIF ( ) + +If ( SCOTCH_FOUND AND NOT USE_SCOTCH MATCHES OFF) + add_definitions(-DUSE_SCOTCH) + + MESSAGE(STATUS + "Compilation with scotch: ${SCOTCH_LIBRARIES}") + SET( LIBRARIES ${SCOTCH_LIBRARIES} ${LIBRARIES}) +ENDIF() + + +############################################################################ +##### +##### LinearElasticity +##### +############################################################################ +# add LinearElasticity library? +SET(ELAS_DIR "" CACHE PATH "Installation directory for Elas") + +SET ( USE_ELAS "" CACHE STRING "Use the Elas library for lagrangian motion option (ON, OFF or )" ) +SET_PROPERTY(CACHE USE_ELAS PROPERTY STRINGS "ON" "OFF" "") + +IF ( NOT DEFINED USE_ELAS OR USE_ELAS STREQUAL "" OR USE_ELAS MATCHES " +" ) + INCLUDE(FindElas) + +ELSE() + IF ( USE_ELAS ) + # User wants to use elas + INCLUDE(FindElas) + IF ( NOT ELAS_FOUND ) + MESSAGE ( FATAL_ERROR "Elas is a library to solve the linear elasticity " + "problem (see https://github.com/ISCDtoolbox/LinearElasticity to" + " download it). " + "This library is needed to use the lagrangian motion option. " + "If you have already installed Elas and want to use it, " + "please set the CMake variable or environment variable ELAS_DIR " + "to your Elas directory.") + ENDIF ( ) + ENDIF ( ) + +ENDIF ( ) + +############################################################################ +##### +##### VTK (to parse (p)vtp/(p)vtu files ) +##### +############################################################################ +# add the VTK library ? +SET ( USE_VTK "" CACHE STRING "Use VTK I/O (ON, OFF or )" ) +SET_PROPERTY(CACHE USE_VTK PROPERTY STRINGS "ON" "OFF" "") + +IF ( NOT DEFINED USE_VTK OR USE_VTK STREQUAL "" OR USE_VTK MATCHES " +" OR USE_VTK ) + # Variable is not provided by the user or is set to ON + + # Handle vtk components name change between v8.2 and v9 + # Before v9 + FIND_PACKAGE(VTK QUIET) + IF ( VTK_FOUND ) + message (STATUS "ParMmg - VTK_VERSION: ${VTK_VERSION}") + IF (VTK_VERSION VERSION_LESS "9.0.0") + find_package(VTK COMPONENTS + vtkCommonCore + vtkCommonDataModel + vtkIOLegacy + vtkIOParallel + vtkIOParallelXML + vtkIOXML + vtkParallelCore + vtkParallelMPI + QUIET) + ELSE() + # After v9 + FIND_PACKAGE(VTK COMPONENTS + CommonCore + CommonDataModel + IOLegacy + IOParallel + IOParallelXML + IOXML + ParallelCore + ParallelMPI + QUIET) + ENDIF() + + ELSEIF ( USE_VTK ) + # USE_VTK is not empty so the user explicitly asked for VTK... + # but it is not found: raise an error + MESSAGE(FATAL_ERROR "ParMmg - VTK library not found.") + ENDIF() +ENDIF() + +IF ( VTK_FOUND AND NOT USE_VTK MATCHES OFF) + + add_definitions(-DUSE_VTK) + + MESSAGE ( STATUS "ParMmg - Compilation with VTK: add vtk, vtp and vtu I/O." ) + + IF( "${VTK_MAJOR_VERSION}.${VTK_MINOR_VERSION}" LESS 8.90 ) + INCLUDE ( ${VTK_USE_FILE} ) + ENDIF() + + SET( LIBRARIES ${VTK_LIBRARIES} ${LIBRARIES} ) +ENDIF ( ) + +############################################################################ +##### +##### HDF5 +##### +############################################################################ +# Find HDF5 library?
+SET ( USE_HDF5 "" CACHE STRING "Use HDF5 I/O (ON, OFF or )" ) +SET_PROPERTY(CACHE USE_HDF5 PROPERTY STRINGS "ON" "OFF" "") + +IF ( NOT DEFINED USE_HDF5 OR USE_HDF5 STREQUAL "" OR USE_HDF5 MATCHES " +" ) + # Variable is not provided by user + SET(HDF5_PREFER_PARALLEL TRUE) + + FIND_PACKAGE(HDF5 QUIET) + IF ( HDF5_FOUND AND NOT HDF5_IS_PARALLEL ) + UNSET ( HDF5_FOUND ) + ENDIF( ) + +ELSE ( ) + IF ( USE_HDF5 ) + # User wants to use HDF5 I/O + SET(HDF5_PREFER_PARALLEL TRUE) + + FIND_PACKAGE(HDF5) + + IF ( NOT HDF5_FOUND ) + MESSAGE ( FATAL_ERROR "HDF5 library not found." + " Please set the CMake variable USE_HDF5 to OFF to disable HDF5 I/Os.") + ENDIF ( ) + + IF ( NOT HDF5_IS_PARALLEL ) + MESSAGE ( FATAL_ERROR "HDF5 parallel library not found: " + "Please enable parallel support for HDF5 or set the CMake variable " + "USE_HDF5 to OFF to disable HDF5 I/Os.") + ENDIF( ) + + ENDIF ( ) +ENDIF ( ) + +IF ( HDF5_FOUND AND NOT USE_HDF5 MATCHES OFF ) + + INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIRS}) + + SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DUSE_HDF5") + MESSAGE ( STATUS "Compilation with HDF5: add HDF5 I/O." ) + SET( LIBRARIES ${LIBRARIES} ${HDF5_LIBRARIES} ) + +ENDIF ( ) diff --git a/cmake/testing/pmmg_tests.cmake b/cmake/testing/pmmg_tests.cmake index cf9c8dc8..1a6b051f 100644 --- a/cmake/testing/pmmg_tests.cmake +++ b/cmake/testing/pmmg_tests.cmake @@ -2,25 +2,31 @@ IF( BUILD_TESTING ) include( CTest ) set( CI_DIR ${CMAKE_BINARY_DIR}/testparmmg CACHE PATH "path to test meshes repository" ) - set( CI_DIR_RESULTS ${CI_DIR}/TEST_OUTPUTS ) + set( CI_DIR_RESULTS ${CMAKE_BINARY_DIR}/TEST_OUTPUTS ) file( MAKE_DIRECTORY ${CI_DIR_RESULTS} ) get_filename_component(PARENT_DIR ${CI_DIR} DIRECTORY) IF ( NOT ONLY_LIBRARY_TESTS ) - IF ( NOT EXISTS ${CI_DIR} ) + FIND_PACKAGE ( Git ) + + IF ( Git_FOUND ) + + IF ( NOT EXISTS ${CI_DIR} ) + EXECUTE_PROCESS( + COMMAND ${GIT_EXECUTABLE} clone https://gitlab.inria.fr/ParMmg/testparmmg.git --filter=blob:none + WORKING_DIRECTORY ${PARENT_DIR} + ) + ENDIF() EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} clone https://gitlab.inria.fr/ParMmg/testparmmg.git - WORKING_DIRECTORY ${PARENT_DIR} + COMMAND ${GIT_EXECUTABLE} -C ${CI_DIR} fetch + COMMAND ${GIT_EXECUTABLE} -C ${CI_DIR} checkout 5091f86924742 + TIMEOUT 20 + WORKING_DIRECTORY ${CI_DIR} + #COMMAND_ECHO STDOUT ) - ENDIF() - EXECUTE_PROCESS( - COMMAND ${GIT_EXECUTABLE} -C ${CI_DIR} fetch - COMMAND ${GIT_EXECUTABLE} -C ${CI_DIR} checkout 31a48498a537edc64149da013748b0b0aa498554 - WORKING_DIRECTORY ${CI_DIR} - #COMMAND_ECHO STDOUT - ) + ENDIF ( ) set ( mesh_size 16384 ) set ( myargs -niter 2 -metis-ratio 82 -v 5 ) @@ -31,7 +37,7 @@ IF( BUILD_TESTING ) foreach( NP 1 2 4 6 8 ) add_test( NAME ${MESH}-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ - ${CI_DIR}/Cube/${MESH}.mesh + ${CI_DIR}/Cube/${MESH}.meshb -out ${CI_DIR_RESULTS}/${MESH}-${NP}-out.mesh -m 11000 -mesh-size ${mesh_size} ${myargs}) endforeach() @@ -42,7 +48,7 @@ IF( BUILD_TESTING ) foreach( NP 1 2 4 6 8 ) add_test( NAME cube-unit-coarse-${MESH}-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ - ${CI_DIR}/Cube/cube-unit-coarse.mesh + ${CI_DIR}/Cube/cube-unit-coarse.meshb -sol ${CI_DIR}/Cube/cube-unit-coarse-${MESH}.sol -out ${CI_DIR_RESULTS}/${MESH}-${NP}-out.mesh -mesh-size ${mesh_size} ${myargs} ) @@ -72,7 +78,7 @@ IF( BUILD_TESTING ) foreach( NP 1 6 8 ) add_test( NAME Sphere-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ - ${CI_DIR}/Sphere/sphere.mesh + ${CI_DIR}/Sphere/sphere.meshb -out 
${CI_DIR_RESULTS}/sphere-${NP}-out.mesh -mesh-size ${mesh_size} ${myargs} ) endforeach() @@ -83,7 +89,7 @@ IF( BUILD_TESTING ) add_test( NAME Sphere-optim-${OPTION}-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ -${OPTION} - ${CI_DIR}/Sphere/sphere.mesh + ${CI_DIR}/Sphere/sphere.meshb -out ${CI_DIR_RESULTS}/sphere-${OPTION}-${NP}-out.mesh -mesh-size ${mesh_size} ${myargs} ) endforeach() @@ -143,7 +149,7 @@ IF( BUILD_TESTING ) add_test( NAME Sphere-optim-${test_name}-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ ${test_option} ${test_val} - ${CI_DIR}/Sphere/sphere.mesh + ${CI_DIR}/Sphere/sphere.meshb -out ${CI_DIR_RESULTS}/sphere-${test_name}-${NP}-out.mesh -m 11000 -mesh-size ${test_mesh_size} ${myargs} ) ENDFOREACH() @@ -164,6 +170,31 @@ IF( BUILD_TESTING ) -out ${CI_DIR_RESULTS}/opnbdy-island.o.mesh ) + ### test -m option + #### The 2 tests are intentionnaly failing (inside an assert in debug mode, with an error + #### message otherwise) due to lack of memory when computing the hash table + add_test ( NAME memory-pmmg_sphere-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + -mmg-v 5 -v 5 -m 15 + ${CI_DIR}/Sphere/sphere + -out ${CI_DIR_RESULTS}/memory-sphere.o.mesh + ) + set_property(TEST memory-pmmg_sphere-2 + PROPERTY + PASS_REGULAR_EXPRESSION "MAXIMUM MEMORY AUTHORIZED PER PROCESS \\(MB\\) 15" + ) + + add_test ( NAME memory-mmg_sphere-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + -mmg-v 5 -v 5 -m 15 + ${CI_DIR}/Sphere/sphere + -out ${CI_DIR_RESULTS}/memory-sphere.o.mesh + ) + set_property(TEST memory-mmg_sphere-2 + PROPERTY + PASS_REGULAR_EXPRESSION "MAXIMUM MEMORY AUTHORIZED \\(MB\\) 15" + ) + ############################################################################### ##### ##### Test centralized/distributed I/O (on multidomain and openbdy tests) @@ -207,6 +238,98 @@ IF( BUILD_TESTING ) ${myargs} ) + # Tests for distributed pvtu output with dots in filename. + # Replacement of dots by dashes. + IF ( (NOT VTK_FOUND) OR USE_VTK MATCHES OFF ) + set(OutputVtkErr "VTK library not found.") + ENDIF ( ) + + set(OutputVtkRenameFilename "3D-cube-PvtuOut-2-a-o.pvtu") + set(OutputVtkRenameWarning "## WARNING: Filename has been changed.") + + add_test( NAME PvtuOut-RenameOut-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -out ${CI_DIR_RESULTS}/3D-cube-PvtuOut-2.a.o.pvtu) + + set_property(TEST PvtuOut-RenameOut-2 + PROPERTY PASS_REGULAR_EXPRESSION + "${OutputVtkRenameFilename}.*${OutputVtkRenameWarning}; + ${OutputVtkRenameWarning}.*${OutputVtkRenameFilename}") + + # Test Medit and hdf5 distributed inputs, with npartin < npart or npartin == + # npart with mesh only or mesh+metric. 
+ ## Medit distributed with npart = 2 and npartin = 1, only mesh and hdf5 output using .h5 ext + add_test( NAME Medit-DisIn-MeshOnly-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/Parallel_IO/Medit/1p/cube-unit-coarse.mesh -v 5 + -out ${CI_DIR_RESULTS}/Medit-DisIn-MeshOnly-2.o.h5) + + ## Medit distributed with npart = 2 and npartin = 1, mesh+met and hdf5 output using .xdmf ext + add_test( NAME Medit-DisIn-MeshAndMet-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + -in ${CI_DIR}/Parallel_IO/Medit/1p/cube-unit-coarse-with-met -v 5 + -out ${CI_DIR_RESULTS}/Medit-DisIn-MeshAndMet-2.o.xdmf) + + ## Medit distributed with npart = 4 and npartin = 4, only mesh .h5 ext + add_test( NAME Medit-DisIn-MeshOnly-4 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + -in ${CI_DIR}/Parallel_IO/Medit/4p/cube-unit-coarse.mesh -v 5 + ${CI_DIR_RESULTS}/Medit-DisIn-MeshOnly-4.o.h5) + + ## Medit distributed with npart = 6 and npartin = 4, only mesh .xdmf ext + add_test( NAME Medit-DisIn-MeshOnly-6 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 6 $ + ${CI_DIR}/Parallel_IO/Medit/4p/cube-unit-coarse -v 5 + ${CI_DIR_RESULTS}/Medit-DisIn-MeshOnly-6.o.xdmf) + + ## hdf5 distributed with npart = 2 and npartin = 1, only mesh and h5 output + add_test( NAME hdf5-DisIn-MeshOnly-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/Parallel_IO/hdf5/1p/cube-unit-coarse.h5 -v 5 + -out ${CI_DIR_RESULTS}/hdf5-DisIn-MeshOnly-2.o.h5) + + ## hdf5 distributed with npart = 2 and npartin = 1, mesh+met and xdmf (h5) output + add_test( NAME hdf5-DisIn-MeshAndMet-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/Parallel_IO/hdf5/1p/cube-unit-coarse-with-met.h5 -v 5 + -out ${CI_DIR_RESULTS}/hdf5-DisIn-MeshAndMet-2.o.xdmf) + + ## hdf5 distributed with npart = 8 and npartin = 4, mesh+met and h5 output + add_test( NAME hdf5-DisIn-MeshAndMet-8 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 8 $ + -in ${CI_DIR}/Parallel_IO/hdf5/4p/cube-unit-coarse-with-met.h5 -v 5 + ${CI_DIR_RESULTS}/hdf5-DisIn-MeshAndMet-8.o.h5) + + ## hdf5 distributed with npart = 8 and npartin = 4, mesh only and medit centralized output + add_test( NAME hdf5-DisIn-MeshOnly-8 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 8 $ + -in ${CI_DIR}/Parallel_IO/hdf5/4p/cube-unit-coarse.h5 -v 5 -centralized-output + -out ${CI_DIR_RESULTS}/hdf5-DisIn-MeshOnly-8.o.mesh) + + ## hdf5 distributed with npart = 4 and npartin = 4, mesh+met and h5 output + add_test( NAME hdf5-DisIn-MeshAndMet-4 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + -in ${CI_DIR}/Parallel_IO/hdf5/4p/cube-unit-coarse-with-met.h5 -v 5 + ${CI_DIR_RESULTS}/hdf5-DisIn-MeshAndMet-8.o.h5) + + ## hdf5 distributed with npart = 4 and npartin = 4, mesh only and medit centralized output + add_test( NAME hdf5-DisIn-MeshOnly-4 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + -in ${CI_DIR}/Parallel_IO/hdf5/4p/cube-unit-coarse.h5 -v 5 -centralized-output + -out ${CI_DIR_RESULTS}/hdf5-DisIn-MeshOnly-8.o.mesh) + + + IF ( (NOT HDF5_FOUND) OR USE_HDF5 MATCHES OFF ) + SET(expr "HDF5 library not found") + SET_PROPERTY( + TEST Medit-DisIn-MeshOnly-2 Medit-DisIn-MeshAndMet-2 Medit-DisIn-MeshOnly-4 + Medit-DisIn-MeshOnly-6 hdf5-DisIn-MeshOnly-2 hdf5-DisIn-MeshAndMet-2 + hdf5-DisIn-MeshAndMet-8 hdf5-DisIn-MeshOnly-8 + hdf5-DisIn-MeshAndMet-4 hdf5-DisIn-MeshOnly-4 + PROPERTY PASS_REGULAR_EXPRESSION "${expr}") + ENDIF ( ) + 
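The distributed I/O tests above rely on CTest's `PASS_REGULAR_EXPRESSION` property: when the optional library is missing, the run is still reported as passing as soon as the expected error message appears in the output, whatever the exit status is. The standalone sketch below (hypothetical project and test names, not taken from the ParMmg test suite) shows the mechanism in isolation:

```cmake
cmake_minimum_required(VERSION 3.5)
project(pass_regex_demo NONE)
include(CTest)

# Hypothetical stand-in for a parmmg run that stops because HDF5 support
# was not compiled in: it only prints the expected error message.
add_test(NAME demo-missing-hdf5
         COMMAND ${CMAKE_COMMAND} -E echo "HDF5 library not found")

# With PASS_REGULAR_EXPRESSION set, the exit status is ignored: the test
# passes if (and only if) the output matches the regular expression.
set_property(TEST demo-missing-hdf5
             PROPERTY PASS_REGULAR_EXPRESSION "HDF5 library not found")
```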
############################################################################### ##### ##### Tests fields interpolation with or without metric @@ -214,22 +337,22 @@ IF( BUILD_TESTING ) ############################################################################### add_test( NAME InterpolationFields-withMet-4 COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ - ${CI_DIR}/Interpolation/coarse.mesh + ${CI_DIR}/Interpolation/coarse.meshb -out ${CI_DIR_RESULTS}/InterpolationFields-withMet-withFields-4-out.mesh -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol - -sol field3_iso-coarse.sol + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol -mesh-size 60000 ${myargs} ) add_test( NAME InterpolationFields-hsiz-4 COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ - ${CI_DIR}/Interpolation/coarse.mesh + ${CI_DIR}/Interpolation/coarse.meshb -out ${CI_DIR_RESULTS}/InterpolationFields-hsiz-withFields-4-out.mesh -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol -mesh-size 60000 -hsiz 0.2 ${myargs} ) add_test( NAME InterpolationFields-noMet-withFields-4 COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ - ${CI_DIR}/Interpolation/coarse.mesh + ${CI_DIR}/Interpolation/coarse.meshb -out ${CI_DIR_RESULTS}/InterpolationFields-noMet-withFields-4-out.mesh -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol -mesh-size 60000 ${myargs} ) @@ -240,6 +363,116 @@ IF( BUILD_TESTING ) -out ${CI_DIR_RESULTS}/InterpolationFields-refinement-4-out.mesh -field ${CI_DIR}/Interpolation/cube-unit-coarse-field.sol ${myargs} ) + ############################################################################### + ##### + ##### Tests pure-partitioning option + ##### + ############################################################################### + add_test( NAME PurePartitioning-CenIn-DisOut-withMetAndFields + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-metAndFields-4-out.mesh + -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol + -pure-partitioning + -distributed-output ) + + add_test( NAME PurePartitioning-CenIn-CenOut-withMetAndFields + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-CenOut-metAndFields-4-out.mesh + -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol + -pure-partitioning + -centralized-output ) + + add_test( NAME PurePartitioning-CenIn-CenOut + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-CenOut-4-out.mesh + -pure-partitioning + -centralized-output ) + + add_test( NAME PurePartitioning-CenIn-DisOut + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-4-out.mesh + -pure-partitioning + -distributed-output ) + + add_test( NAME PurePartitioning-CenIn-DisOut-withMet + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-met-4-out.mesh + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol + -pure-partitioning + -distributed-output ) + + add_test( NAME PurePartitioning-CenIn-CenOut-withMet + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + 
${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-CenOut-met-4-out.mesh + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol + -pure-partitioning + -centralized-output ) + + add_test( NAME PurePartitioning-CenIn-h5-withMetAndFields + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-h5-metAndFields-4-out.h5 + -field ${CI_DIR}/Interpolation/sol-fields-coarse.sol + -sol ${CI_DIR}/Interpolation/field3_iso-coarse.sol + -pure-partitioning ) + + add_test( NAME PurePartitioning-CenIn-DisOut-withMetAndLs-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls + -pure-partitioning + -distributed-output + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-metric.sol + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-withMetAndLs-2.o.mesh) + + add_test( NAME PurePartitioning-CenIn-h5-withMetAndLs-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls + -pure-partitioning + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-metric.sol + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-withMetAndLs-2.o.h5) + + add_test( NAME PurePartitioning-CenIn-DisOut-withLs-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls + -pure-partitioning + -distributed-output + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-withLs-2.o.mesh) + + add_test( NAME PurePartitioning-CenIn-h5-withLs-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls + -pure-partitioning + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/PurePartitioning-CenIn-DisOut-withLs-2.o.h5) + + + IF ( (NOT HDF5_FOUND) OR USE_HDF5 MATCHES OFF ) + SET(expr "HDF5 library not found") + SET_PROPERTY( + TEST + PurePartitioning-CenIn-h5-withMetAndFields + PurePartitioning-CenIn-h5-withMetAndLs-2 + PurePartitioning-CenIn-h5-withLs-2 + PROPERTY PASS_REGULAR_EXPRESSION "${expr}") + ENDIF ( ) + + + ############################################################################### ##### ##### Tests distributed surface adaptation @@ -270,7 +503,7 @@ IF( BUILD_TESTING ) ADD_TEST( NAME DistribSphere_NOM-gen-${API_mode}-${NP} COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ - ${CI_DIR}/Sphere_NOM/sphere_nom.mesh + ${CI_DIR}/Sphere_NOM/sphere_nom.meshb ${CI_DIR_RESULTS}/sphere_nom_${API_mode}-${NP}.mesh ${API_mode} ) ADD_TEST( NAME DistribSphere_NOM-adp-${API_mode}-${NP} @@ -303,8 +536,636 @@ IF( BUILD_TESTING ) ENDFOREACH() ENDFOREACH() - ENDIF() + # Test to verify the patch on update MG_REF tag. + # This test fail if the tag MG_REF is not updated by PMMG_updateTagRef_node in PMMG_update_analys. + # See ParMmg PR#103 + add_test( NAME update-ref-tag + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_toygeom/cube-distributed-faces-nomat-1edge.mesh -v 10 -hsiz 0.1 + -out ${CI_DIR_RESULTS}/update-ref-tag.o.mesh) + + # Test to check that when not using -opnbdy option, internal triangles are correctly removed. 
+ # See ParMmg PR#110 + add_test( NAME extrainternaltriangles + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 3 $ + ${CI_DIR}/Cube/internaltriangles-P3.mesh -v 10 + -out ${CI_DIR_RESULTS}/internaltriangles-P3.o.mesh) + + ############################################################################### + ##### + ##### Tests overlap + ##### + ############################################################################### + # Test if overlap is created + set(overlapCreation " Create Overlap") + + add_test( NAME overlap-create + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-create.o.mesh) + + set_property(TEST overlap-create + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCreation}" + ) + + # Test if overlap is deleted + set(overlapDelete " Delete Overlap") + + add_test( NAME overlap-delete + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-delete.o.mesh) + set_property(TEST overlap-delete + PROPERTY PASS_REGULAR_EXPRESSION "${overlapDelete}") + + # Tests if overlap is created correctly + set(overlapCheckP0P1 " part 0 sends 74 pts and 257 tetra to part 1") + set(overlapCheckP0P2 " part 0 sends 29 pts and 110 tetra to part 2") + set(overlapCheckP0P3 " part 0 sends 61 pts and 204 tetra to part 3") + set(overlapCheckP0P4 " part 0 sends 28 pts and 66 tetra to part 4") + set(overlapCheckP0 " part 0 has 433 pts and 1492 tetras after overlap creation") + + add_test( NAME overlap-check-P0P1 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0P1.o.mesh) + set_property(TEST overlap-check-P0P1 + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P1}") + + add_test( NAME overlap-check-P0P2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0P2.o.mesh) + set_property(TEST overlap-check-P0P2 + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P2}") + + add_test( NAME overlap-check-P0P3 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0P3.o.mesh) + set_property(TEST overlap-check-P0P3 + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P3}") + + add_test( NAME overlap-check-P0P4 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0P4.o.mesh) + set_property(TEST overlap-check-P0P4 + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0P4}") + + add_test( NAME overlap-check-P0 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + 
${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0.o.mesh) + set_property(TEST overlap-check-P0 + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0}") + + add_test( NAME overlap-check-P0-met + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-metric.sol + -out ${CI_DIR_RESULTS}/overlap-check-P0-met.o.mesh) + set_property(TEST overlap-check-P0-met + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckP0}") + + # Tests if overlap is deleted correctly + set(overlapCheckDelete " part 0 has 282 pts and 882 tetras after overlap deletion") + add_test( NAME overlap-check-delete + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-delete.o.mesh) + set_property(TEST overlap-check-delete + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckDelete}") + + add_test( NAME overlap-check-delete-met + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 5 $ + ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube.mesh -v 10 -nomove -noinsert -noswap -nobalance -niter 1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/5p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/overlap-check-delete-met.o.mesh) + set_property(TEST overlap-check-delete-met + PROPERTY PASS_REGULAR_EXPRESSION "${overlapCheckDelete}") + + + # Temporary while snapval implementation is in progress and enabled only + # when the PMMG_SNAPVAL environment variable is defined + set_tests_properties( + overlap-check-P0 + overlap-check-P0-met + overlap-check-P0P1 + overlap-check-P0P2 + overlap-check-P0P3 + overlap-check-P0P4 + overlap-check-delete + overlap-check-delete-met + overlap-create + overlap-delete + PROPERTIES ENVIRONMENT "PMMG_SNAPVAL=1" + ) + + ############################################################################### + ##### + ##### Test isovalue mode - ls discretization + ##### + ############################################################################### + #-------------------------------- + #--- CENTRALIZED INPUT (CenIn) + #-------------------------------- + # Tests of ls discretization for centralized mesh input + foreach( NP 1 2 4 8 ) + add_test( NAME ls-CenIn-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-${NP}.o.mesh) + endforeach() + + # Check that the ls file is correctly opened with or without the ls value given + set(lsOpenFile "3D-cube-ls.sol OPENED") + set(lsOpenFileDefault "3D-cube.sol NOT FOUND. 
USE DEFAULT METRIC.") + + # Test of opening ls file when ls val is given + foreach( NP 1 2) + add_test( NAME ls-arg-option-openlsfile-lsval-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -nomove -noinsert -noswap -nobalance + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-arg-option-openlsfile-lsval-${NP}.o.mesh) + set_property(TEST ls-arg-option-openlsfile-lsval-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${lsOpenFile}") + endforeach() + + # Test of opening ls file when ls val is not given + foreach( NP 1 2) + add_test( NAME ls-arg-option-openlsfile-nolsval-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -nomove -noinsert -noswap -nobalance + -ls + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-arg-option-openlsfile-nolsval-${NP}.o.mesh) + set_property(TEST ls-arg-option-openlsfile-nolsval-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${lsOpenFile}") + endforeach() + + # Test of opening ls file with a default name when ls val is given + # WRONG TEST:: Here we test metric file not LS file + foreach( NP 1 2) + add_test( NAME ls-arg-option-openlsfiledefault-lsval-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -nomove -noinsert -noswap -nobalance + -ls 0.0 + -out ${CI_DIR_RESULTS}/ls-arg-option-openlsfiledefault-lsval-${NP}.o.mesh) + set_property(TEST ls-arg-option-openlsfiledefault-lsval-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${lsOpenFileDefault}") + endforeach() + + # Test of opening ls file with a default name when ls val is not given + # WRONG TEST:: Here we test metric file not LS file + foreach( NP 1 2) + add_test( NAME ls-arg-option-openlsfiledefault-nolsval-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -nomove -noinsert -noswap -nobalance + -ls + -out ${CI_DIR_RESULTS}/ls-arg-option-openlsfiledefault-nolsval-${NP}.o.mesh) + set_property(TEST ls-arg-option-openlsfiledefault-nolsval-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${lsOpenFileDefault}") + endforeach() + + # Tests for ls + met for centralized mesh input + foreach( NP 1 2 4 8 ) + add_test( NAME ls-CenIn-met-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-metric.sol + -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-met-${NP}.o.mesh) + endforeach() + + # Tests for ls + fields for centralized mesh input + foreach( NP 1 2 4 8 ) + add_test( NAME ls-CenIn-fields-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -field ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-fields.sol + -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-fields-${NP}.o.mesh) + endforeach() + + # Tests for ls + met + fields for centralized mesh input + foreach( NP 1 2 4 8 ) + add_test( NAME ls-CenIn-met-fields-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-metric.sol + -field ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-fields.sol + -out 
${CI_DIR_RESULTS}/3D-cube-ls-CenIn-met-fields-${NP}.o.mesh) + endforeach() + + # Tests of distributed pvtu output when ls mode + foreach( NP 1 2 4 8 ) + add_test( NAME ls-CenIn-DisOut-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh + -ls 0.0 + -sol ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/3D-cube-ls-CenIn-DisOut-${NP}-out.pvtu) + + IF ( (NOT VTK_FOUND) OR USE_VTK MATCHES OFF ) + set_property(TEST ls-CenIn-DisOut-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${OutputVtkErr}") + ENDIF ( ) + + endforeach() + + #-------------------------------- + #--- DISTRIBUTED INPUT (DisIn) + #-------------------------------- + #*********************** + #** TOY GEOM LS tests + #*********************** + foreach( NP 2 5) + + foreach( MODE faces nodes) + + # Toy geom:: ls_val not given + no remesh + add_test( NAME ls-DisIn-toygeom-lsnotgiven-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -noswap -nomove -noinsert -nobalance + -ls + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-lsnotgiven-${MODE}-${NP}.o.mesh) + + # Toy geom:: ls_val=0.0 + no remesh + add_test( NAME ls-DisIn-toygeom-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -noswap -nomove -noinsert -nobalance + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-${MODE}-${NP}.o.mesh) + + # Toy geom:: ls_val=0.5 + no remesh + add_test( NAME ls-DisIn-toygeom-lsval-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -noswap -nomove -noinsert -nobalance + -ls 0.5 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-lsval-${MODE}-${NP}.o.mesh) + + # Toy geom:: ls_val=0.0 + remesh + add_test( NAME ls-DisIn-toygeom-remesh-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-remesh-${MODE}-${NP}.o.mesh) + + # Toy geom:: ls_val=0.0 + remesh hsiz + add_test( NAME ls-DisIn-toygeom-hsiz-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -hsiz 0.1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-hsiz-${MODE}-${NP}.o.mesh) + + # Toy geom:: ls_val=0.0 + remesh metric + add_test( NAME ls-DisIn-toygeom-metric-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -met ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-metric.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-metric-${MODE}-${NP}.o.mesh) + + SET(metric-open "cube-metric.0.sol OPENED") + SET_PROPERTY( + TEST ls-DisIn-toygeom-metric-${MODE}-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${metric-open}") + + # Toy geom:: ls_val=0.0 + remesh metric + # TO DEBUG raises lot of warnings + add_test( NAME 
ls-DisIn-toygeom-metric-ani-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -met ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-metric-ani.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-metric-${MODE}-${NP}.o.mesh) + + SET(metric-ani-open "cube-metric-ani.0.sol OPENED") + SET_PROPERTY( + TEST ls-DisIn-toygeom-metric-ani-${MODE}-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${metric-ani-open}") + + + # Toy geom:: ls_val=0.0 + no remesh + fields + add_test( NAME ls-DisIn-toygeom-fields-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -noswap -nomove -noinsert -nobalance + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -field ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-fields.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-fields-${MODE}-${NP}.o.mesh) + + SET(fields-open "cube-fields.0.sol OPENED") + SET_PROPERTY( + TEST ls-DisIn-toygeom-fields-${MODE}-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${fields-open}") + + # Toy geom:: ls_val=0.0 + remesh metric + fields + add_test( NAME ls-DisIn-toygeom-metric-fields-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-nomat-edges.mesh -v 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -met ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-metric.sol + -field ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-fields.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-metric-fields-${MODE}-${NP}.o.mesh) + + + endforeach() + + endforeach() + + foreach( MODE faces ) + + # Toy geom nosplit: 2 procs, ls_val=0.0 + remesh hsiz 0.1 + 4 iter + multimat nosplit + ## Remark : FAIL inside mmg3d scotch renum of iter 5 if niter = 5 + + SET( NP 2 ) + add_test( NAME ls-DisIn-toygeom-nosplit-${MODE}-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-distributed-${MODE}-mat-edges.mesh -v 5 + -f ${CI_DIR}/LevelSet/${NP}p_toygeom/nosplit.mmg3d + -hsiz 0.1 -niter 4 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_toygeom/cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-toygeom-nosplit-${MODE}-${NP}.o.mesh) + endforeach() + + #*********************** + #** COMPLEX GEOM LS tests + #*********************** + foreach( NP 2 5) + + # Complex geom:: ls_val=0.0 + remesh + add_test( NAME ls-DisIn-cubegeom-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-${NP}.o.mesh) + + # Complex geom:: ls_val=0.0 + remesh hsiz + add_test( NAME ls-DisIn-cubegeom-hsiz-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 -hsiz 0.1 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-hisiz-${NP}.o.mesh) + + # Complex geom:: ls_val=0.0 + remesh iso metric + add_test( NAME ls-DisIn-cubegeom-metric-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-metric.sol + 
-out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-metric-${NP}.o.mesh) + + # Complex geom:: ls_val=0.0 + remesh aniso metric + # Fail with "Assertion failed: (ps > 0. || ps2 > 0." error + # TO DEBUG + # + #add_test( NAME ls-DisIn-cubegeom-metric-ani-${NP} + # COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + # ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 + # -ls 0.0 + # -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + # -met ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-metric-ani.sol + # -out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-metric-${NP}.o.mesh) + + # Complex geom:: ls_val=0.0 + remesh + fields + add_test( NAME ls-DisIn-cubegeom-fields-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + -field ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-fields.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-fields-${NP}.o.mesh) + + # Complex geom:: ls_val=0.0 + remesh metric + fields + add_test( NAME ls-DisIn-cubegeom-metric-fields-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube.mesh -v 5 -niter 5 + -ls 0.0 + -sol ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-ls.sol + -met ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-metric.sol + -field ${CI_DIR}/LevelSet/${NP}p_cubegeom/3D-cube-fields.sol + -out ${CI_DIR_RESULTS}/ls-DisIn-cubegeom-metric-fields-${NP}.o.mesh) + + endforeach() + + + ############################################################################### + ##### + ##### Test with multi-material input file *mmg3d 'LSReferences' + ##### + ############################################################################### + #-------------------------------- + #--- CENTRALIZED INPUT (CenIn) + #-------------------------------- + #-- Test if the input file *mmg3d is read properly + SET(lsRefReadFile ".mmg3d OPENED") + foreach (NP 1 4) + add_test( NAME lsRef-Read-CenIn-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -v 5 -nomove -noinsert -noswap -niter 1 + -out ${CI_DIR_RESULTS}/lsRef-Read-CenIn-${NP}.o.mesh) + + set_property(TEST lsRef-Read-CenIn-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${lsRefReadFile}") + endforeach() + + #-- Test if tests with input file *mmg3d run correctly without errors WITHOUT LS + foreach (NP 1 2 4) + add_test( NAME lsRef-Run-CenIn-CenOut-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -v 5 + -out ${CI_DIR_RESULTS}/lsRef-Run-CenIn-CenOut-${NP}.o.mesh) + + add_test( NAME lsRef-Run-CenIn-DisOut-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/LevelSet/1p_cubegeom/3D-cube.mesh -v 5 -distributed-output + -out ${CI_DIR_RESULTS}/lsRef-Run-CenIn-DisOut-${NP}.o.mesh) + endforeach() + + #-- Test if tests with input file *mmg3d run correctly without errors WITH LS + # TODO once LS implemented + + #-------------------------------- + #--- DISTRIBUTED INPUT (DisIn) + #-------------------------------- + #-- Test if the input file *mmg3d is read properly + add_test( NAME lsRef-Read-DisIn-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_toygeom/cube-distributed-faces-mat-edges.mesh -v 5 -nomove -noinsert -noswap -niter 1 + -out ${CI_DIR_RESULTS}/lsRef-Read-DisIn-2.o.mesh) + + set_property(TEST lsRef-Read-DisIn-2 + PROPERTY PASS_REGULAR_EXPRESSION 
"${lsRefReadFile}") + + #-- Test if tests with input file *mmg3d run correctly without errors WHITOUT LS + add_test( NAME lsRef-Run-DisIn-DisOut-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_toygeom/cube-distributed-faces-mat-edges.mesh -v 5 + -out ${CI_DIR_RESULTS}/lsRef-Run-DisIn-DisOut-2.o.mesh) + + add_test( NAME lsRef-Run-DisIn-CenOut-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_toygeom/cube-distributed-faces-mat-edges.mesh -v 5 -centralized-output + -out ${CI_DIR_RESULTS}/lsRef-Run-DisIn-CenOut-2.o.mesh) + + #-- Test if tests with input file *mmg3d run correctly without errors WITH LS + # TODO once LS implemented + + ############################################################################### + ##### + ##### Test with fields input and output + ##### + ############################################################################### + #-------------------------------- + #--- DISTRIBUTED INPUT (DisIn) + #-------------------------------- + # Test to read distributed input fields in Medit format + # and to write distributed output fields in VTK format + add_test( NAME fields-DisIn-DisOutVTK-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_cubegeom/3D-cube.mesh -v 10 + -field ${CI_DIR}/LevelSet/2p_cubegeom/3D-cube-fields.sol + -out ${CI_DIR_RESULTS}/3D-cube-fields-DisIn-DisOutVTK-2-out.pvtu) + + set(InputDistributedFields "3D-cube-fields.0.sol OPENED") + set(OutputVtkFields "Writing mesh, metric and fields.") + + set_property(TEST fields-DisIn-DisOutVTK-2 + PROPERTY PASS_REGULAR_EXPRESSION + "${InputDistributedFields}.*${OutputVtkFields}; + ${OutputVtkFields}.*${InputDistributedFields}") + + # Test to write distributed output fields and metric in Medit format + add_test( NAME fields-DisIn-DisOutMesh-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/LevelSet/2p_cubegeom/3D-cube.mesh + -field ${CI_DIR}/LevelSet/2p_cubegeom/3D-cube-fields.sol + -out ${CI_DIR_RESULTS}/3D-cube-fields-DisIn-DisOutMesh-2.o.mesh) + + set(OutputFieldsName "3D-cube-fields.o.0.sol OPENED.") + set(OutputMetricName "3D-cube-fields-DisIn-DisOutMesh-2.o.0.sol OPENED.") + set_property(TEST fields-DisIn-DisOutMesh-2 + PROPERTY PASS_REGULAR_EXPRESSION + "${OutputFieldsName}.*${OutputMetricName};${OutputMetricName}.*${OutputFieldsName}") + + # Test saving of solution fields on 4 procs at hdf5 format + add_test( NAME hdf5-CenIn-DisOutHdf5-4 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 4 $ + ${CI_DIR}/Interpolation/coarse.meshb -v 5 + -out ${CI_DIR_RESULTS}/hdf5-CenIn-DisOutHdf5-4.o.h5) + + IF ( (NOT HDF5_FOUND) OR USE_HDF5 MATCHES OFF ) + SET(expr "HDF5 library not found") + SET_PROPERTY( + TEST hdf5-CenIn-DisOutHdf5-4 + PROPERTY PASS_REGULAR_EXPRESSION "${expr}") + ENDIF ( ) + + ############################################################################### + ##### + ##### Test with local parameters input file *mmg3d 'parameters' + ##### + ############################################################################### + #-------------------------------- + #--- CENTRALIZED INPUT (CenIn) + #-------------------------------- + #-- Test if the input file *mmg3d is read properly + SET(locParamReadFile ".mmg3d OPENED") + foreach (NP 1 4) + add_test( NAME locParam-Read-CenIn-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 -nomove -noinsert -noswap -niter 1 + -out 
${CI_DIR_RESULTS}/locParam-Read-CenIn-${NP}.o.mesh) + + set_property(TEST locParam-Read-CenIn-${NP} + PROPERTY PASS_REGULAR_EXPRESSION "${locParamReadFile}") + endforeach() + + #-- Test if tests with input file *mmg3d run correctly without errors + foreach (NP 1 2 4) + add_test( NAME locParam-Run-CenIn-CenOut-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 + -out ${CI_DIR_RESULTS}/locParam-Run-CenIn-CenOut-${NP}.o.mesh) + + add_test( NAME locParam-Run-CenIn-DisOut-${NP} + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 -distributed-output + -out ${CI_DIR_RESULTS}/locParam-Run-CenIn-DisOut-${NP}.o.mesh) + endforeach() + + + #-------------------------------- + #--- DISTRIBUTED INPUT (DisIn) + #-------------------------------- + #-- Test if the input file *mmg3d is read properly + add_test( NAME locParam-Read-DisIn-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 -nomove -noinsert -noswap -niter 1 + -out ${CI_DIR_RESULTS}/locParam-Read-DisIn-2.o.mesh) + + set_property(TEST locParam-Read-DisIn-2 + PROPERTY PASS_REGULAR_EXPRESSION "${locParamReadFile}") + + #-- Test if tests with input file *mmg3d run correctly without errors + add_test( NAME locParam-Run-DisIn-DisOut-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 + -out ${CI_DIR_RESULTS}/locParam-Run-DisIn-DisOut-2.o.mesh) + + add_test( NAME locParam-Run-DisIn-CenOut-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + ${CI_DIR}/HausdLoc_2Spheres/centralized/2spheres.mesh -v 5 -centralized-output + -out ${CI_DIR_RESULTS}/locParam-Run-DisIn-CenOut-2.o.mesh) + + ENDIF() ############################################################################### ##### @@ -634,10 +1495,10 @@ IF( BUILD_TESTING ) COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ ${input_mesh} -sol ${input_met} -out ${CI_DIR_RESULTS}/${test_name}-${NP}-out.mesh - -niter 3 -nobalance -v 10 -surf ) + -niter 3 -nobalance -v 10 ) ENDFOREACH() - SET ( input_mesh ${CI_DIR}/Tennis/tennis.mesh ) + SET ( input_mesh ${CI_DIR}/Tennis/tennis.meshb ) SET ( input_met ${CI_DIR}/Tennis/tennis.sol ) SET ( test_name TennisSurf_interp ) @@ -646,7 +1507,7 @@ IF( BUILD_TESTING ) COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} ${NP} $ ${input_mesh} -sol ${input_met} -out ${CI_DIR_RESULTS}/${test_name}-${NP}-out.mesh - -niter 3 -nobalance -v 10 -surf ) + -niter 3 -nobalance -v 10 ) ENDFOREACH() diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..b9547418 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,3 @@ +fixes: + - "ParMmg/ParMmg::" + diff --git a/libexamples/adaptation_example0/parallel_IO/automatic_IO/main.c b/libexamples/adaptation_example0/parallel_IO/automatic_IO/main.c index 7dc29232..d7e77532 100644 --- a/libexamples/adaptation_example0/parallel_IO/automatic_IO/main.c +++ b/libexamples/adaptation_example0/parallel_IO/automatic_IO/main.c @@ -28,7 +28,8 @@ /** Include the parmmg and libmmg3d library hader file */ #include "libparmmg.h" -#include "mmg3d.h" // for developpers only: to use MMG3D_bdryBuild +#include "libmmg3d.h" // for developpers only: to use MMG3D_bdryBuild +#include "libmmg3d_private.h" // for developpers only: to use MMG3D_bdryBuild #define MAX2(a,b) (((a) > (b)) ? 
(a) : (b)) #define MAX4(a,b,c,d) (((MAX2(a,b)) > (MAX2(c,d))) ? (MAX2(a,b)) : (MAX2(c,d))) diff --git a/libexamples/adaptation_example0/parallel_IO/external_IO/gen_distributedMesh.c b/libexamples/adaptation_example0/parallel_IO/external_IO/gen_distributedMesh.c index 7e061dc2..c9c0d57a 100644 --- a/libexamples/adaptation_example0/parallel_IO/external_IO/gen_distributedMesh.c +++ b/libexamples/adaptation_example0/parallel_IO/external_IO/gen_distributedMesh.c @@ -104,13 +104,13 @@ int main(int argc,char *argv[]) { /** with PMMG_loadMesh_centralized function */ if ( PMMG_loadMesh_centralized(parmesh,filename) != 1 ) { - MPI_Finalize(); + MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE); exit(EXIT_FAILURE); } /** with PMMG_loadMet_centralized function */ if( PMMG_loadMet_centralized(parmesh,filename) == -1 ) { - MPI_Finalize(); + MPI_Abort(MPI_COMM_WORLD,EXIT_FAILURE); exit(EXIT_FAILURE); } diff --git a/libexamples/adaptation_example1/main.c b/libexamples/adaptation_example1/main.c index a7ea2204..90c22963 100644 --- a/libexamples/adaptation_example1/main.c +++ b/libexamples/adaptation_example1/main.c @@ -114,60 +114,59 @@ int main(int argc,char *argv[]) { /* Load mesh and communicators */ if ( !PMMG_loadMesh_distributed(parmesh,filename) ) { fprintf ( stderr, "Error: Unable to load %s distributed mesh.\n",filename); - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 100; + goto end; } - /** ------------------------------ STEP II ---------------------------- */ /** remesh step */ /* Set the number of remeshing iterations */ niter = 3; if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_niter, niter ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 101; + goto end; }; /* Remesh the surface */ if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_nosurf, 0 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 102; + goto end; }; if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_angle, 45 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 103; + goto end; }; if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_numberOfLocalParam, 2 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 104; + goto end; }; if( !PMMG_Set_localParameter( parmesh, MMG5_Triangle, 4, 0.01, 0.5, 0.1 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 105; + goto end; }; if( !PMMG_Set_localParameter( parmesh, MMG5_Triangle, 5, 3.0, 5.0, 1.0 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 106; + goto end; }; if( !PMMG_Set_dparameter( parmesh, PMMG_DPARAM_hsiz, 1.0 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 107; + goto end; }; if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_verbose, 6 ) ) { - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 108; + goto end; }; @@ -177,22 +176,7 @@ int main(int argc,char *argv[]) { if ( ierlib == PMMG_STRONGFAILURE ) { fprintf(stdout,"BAD ENDING OF PARMMGLIB: UNABLE TO SAVE MESH\n"); - /* Free the PMMG5 structures */ - PMMG_Free_all(PMMG_ARG_start, - PMMG_ARG_ppParMesh,&parmesh, - PMMG_ARG_end); - - free(filename); - filename = NULL; - - free(fileout); - fileout = NULL; - - free(metout); - metout = NULL; - - - MPI_Finalize(); + goto end; } @@ -207,7 +191,8 @@ int main(int argc,char *argv[]) { sprintf(fileout,"%s_%d.mesh",fileout,rank); if( !(inm = fopen(fileout,"w")) ) { fprintf(stderr," ** UNABLE TO OPEN OUTPUT MESH FILE.\n"); - exit(EXIT_FAILURE); + ierlib = 200; + goto end; } fprintf(inm,"MeshVersionFormatted 2\n"); fprintf(inm,"\nDimension 3\n"); @@ -262,32 +247,32 @@ int main(int argc,char *argv[]) { int *ref = 
(int*)calloc(MAX4(nVertices,nTetrahedra,nTriangles,nEdges),sizeof(int)); if ( !ref ) { perror(" ## Memory problem: ref calloc"); - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 201; + goto end; } /* Table to know if a vertex is corner */ int *corner = (int*)calloc(nVertices,sizeof(int)); if ( !corner ) { perror(" ## Memory problem: corner calloc"); - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 202; + goto end; } /* Table to know if a vertex/tetra/tria/edge is required */ int *required = (int*)calloc(MAX4(nVertices,nTetrahedra,nTriangles,nEdges),sizeof(int)); if ( !required ) { perror(" ## Memory problem: required calloc"); - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 203; + goto end; } /* Table to know if an edge delimits a sharp angle */ int *ridge = (int*)calloc(nEdges ,sizeof(int)); if ( !ridge ) { perror(" ## Memory problem: ridge calloc"); - MPI_Finalize(); - exit(EXIT_FAILURE); + ierlib = 204; + goto end; } /** Vertex recovering */ @@ -570,7 +555,7 @@ int main(int argc,char *argv[]) { free(sol); /** ------------------------------ STEP VI -------------------------- */ - +end: /** 5) Free the PMMG5 structures */ PMMG_Free_all(PMMG_ARG_start, PMMG_ARG_ppParMesh,&parmesh, diff --git a/scripts/genheader.c b/scripts/genheader.c index ee60f748..73213c26 100644 --- a/scripts/genheader.c +++ b/scripts/genheader.c @@ -97,7 +97,7 @@ int main (int argc, char ** argv) fprintf(file,"#include \"%s/libparmmgtypesf.h\"\n\n",libparmmg_include); } else { - fprintf(file,"#include \"mmg/mmg3d/libmmgtypesf.h\"\n\n"); + fprintf(file,"#include \"mmg/common/libmmgtypesf.h\"\n\n"); } fclose(file); @@ -106,8 +106,8 @@ int main (int argc, char ** argv) strlen(libparmmg_h)+ strlen(header_f)+128)*sizeof(char)))) return EXIT_FAILURE; - sprintf(cmd, "perl %s -f %s >> %s;", - genfort, libparmmg_h, header_f); + snprintf(cmd,strlen(genfort)+strlen(libparmmg_h)+strlen(header_f)+15, "perl %s -f %s >> %s;", + genfort, libparmmg_h, header_f); fprintf(stdout, "%s\n", cmd); if (-1 == system(cmd)) return EXIT_FAILURE; diff --git a/src/API_functions_pmmg.c b/src/API_functions_pmmg.c index 94b6f862..96361c99 100644 --- a/src/API_functions_pmmg.c +++ b/src/API_functions_pmmg.c @@ -382,6 +382,77 @@ int PMMG_Set_outputSolsName(PMMG_pParMesh parmesh, const char* solout) { return ier; } +int PMMG_Set_outputLsName(PMMG_pParMesh parmesh, const char* lsout) { + MMG5_pMesh mesh; + MMG5_pSol ls; + int k,ier,pathlen,baselen; + char *basename,*path,*nopath; + + /* If \a lsout is not provided we want to use the basename of the input ls + * name and the path of the output mesh name */ + if ( parmesh->lsout ) { + PMMG_DEL_MEM(parmesh,parmesh->lsout,char,"lsout unalloc"); + } + + if ( (!lsout) || (!*lsout) ) { + + if ( (!parmesh->meshout) || (!*parmesh->meshout) ) { + fprintf(stderr, " ## Error: %s: please, provide an output mesh" + " name before calling this function without string.\n", + __func__); + return 0; + } + + /* Get input ls base name and remove .mesh extension */ + if ( (!parmesh->lsin) || (!*parmesh->lsin) ) { + fprintf(stderr, " ## Error: %s: please, provide an input ls" + " name before calling this function without string.\n", + __func__); + return 0; + } + + path = MMG5_Get_path(parmesh->meshout); + nopath = MMG5_Get_basename(parmesh->lsin); + basename = MMG5_Remove_ext ( nopath,".sol" ); + + pathlen = baselen = 0; + if ( path ) pathlen = strlen(path)+1; + if ( basename ) baselen = strlen(basename); + PMMG_MALLOC(parmesh,parmesh->lsout,pathlen+baselen+1,char,"lsout",return 0); + if ( pathlen ) { + 
strncpy(parmesh->lsout,path,pathlen-1); + parmesh->lsout[pathlen-1] = MMG5_PATHSEP; + } + if ( baselen ) { + strncpy(parmesh->lsout+pathlen,basename,baselen); + parmesh->lsout[pathlen+baselen] = '\0'; + } + + if ( parmesh->lsout ) { + /* Add .o.sol extension */ + PMMG_REALLOC(parmesh,parmesh->lsout,strlen(parmesh->lsout)+7, + strlen(parmesh->lsout)+1,char,"lsout",return 0); + strncat ( parmesh->lsout,".o.sol",7 ); + } + + MMG5_SAFE_FREE ( path ); + free ( nopath ); nopath = NULL; + MMG5_SAFE_FREE ( basename ); + } + else { + PMMG_MALLOC(parmesh,parmesh->lsout,strlen(lsout)+1,char,"lsout",return 0); + strcpy(parmesh->lsout,lsout); + } + + for ( k=0; kngrp; ++k ) { + mesh = parmesh->listgrp[k].mesh; + ls = parmesh->listgrp[k].ls; + ier = MMG3D_Set_outputSolName(mesh,ls,parmesh->lsout); + } + return ier; +} + + int PMMG_Set_outputMetName(PMMG_pParMesh parmesh, const char* metout) { MMG5_pMesh mesh; MMG5_pSol met; @@ -410,7 +481,7 @@ void PMMG_Init_parameters(PMMG_pParMesh parmesh,MPI_Comm comm) { parmesh->ddebug = PMMG_NUL; parmesh->iter = PMMG_UNSET; parmesh->niter = PMMG_NITER; - parmesh->info.fem = MMG5_FEM; + parmesh->info.setfem = MMG5_FEM; parmesh->info.repartitioning = PMMG_REDISTRIBUTION_mode; parmesh->info.ifc_layers = PMMG_MVIFCS_NLAYERS; parmesh->info.grps_ratio = PMMG_GRPS_RATIO; @@ -421,16 +492,26 @@ void PMMG_Init_parameters(PMMG_pParMesh parmesh,MPI_Comm comm) { parmesh->info.metis_ratio = PMMG_RATIO_MMG_METIS; parmesh->info.API_mode = PMMG_APIDISTRIB_faces; parmesh->info.globalNum = PMMG_NUL; + parmesh->info.globalVNumGot = PMMG_NUL; + parmesh->info.globalTNumGot = PMMG_NUL; parmesh->info.sethmin = PMMG_NUL; parmesh->info.sethmax = PMMG_NUL; parmesh->info.fmtout = PMMG_FMT_Unknown; - /* Init MPI data */ - parmesh->comm = comm; + parmesh->info.iso = MMG5_OFF; + parmesh->info.isosurf = MMG5_OFF; + parmesh->info.lag = MMG5_LAG; + + /** Init MPI data */ + parmesh->comm = comm; + /* Initialize the input communicator to computationnal communicator: this value + * will be overwritten if needed (for example for hdf5 I/O) */ + parmesh->info.read_comm = comm; MPI_Initialized(&flag); parmesh->size_shm = 1; if ( flag ) { + MPI_Comm_set_errhandler(parmesh->comm, MPI_ERRORS_RETURN); MPI_Comm_size( parmesh->comm, &parmesh->nprocs ); MPI_Comm_rank( parmesh->comm, &parmesh->myrank ); } @@ -438,8 +519,11 @@ void PMMG_Init_parameters(PMMG_pParMesh parmesh,MPI_Comm comm) { parmesh->nprocs = 1; parmesh->myrank = PMMG_NUL; } + /* Initialize the number of partitions used for inputs to the number of procs: + * this value will be overwritten if needed (for example for hdf5 I/O) */ + parmesh->info.npartin = parmesh->nprocs; - /* ParMmg verbosity */ + /** ParMmg verbosity */ if ( parmesh->myrank==parmesh->info.root ) { parmesh->info.imprim = PMMG_IMPRIM; } @@ -455,7 +539,10 @@ void PMMG_Init_parameters(PMMG_pParMesh parmesh,MPI_Comm comm) { mesh->info.imprim = MG_MIN ( parmesh->info.imprim,PMMG_MMG_IMPRIM ); } - /* Default memory */ + /** I/Os: set default entities to save */ + PMMG_Set_defaultIOEntities( parmesh ); + + /** Default memory */ PMMG_parmesh_SetMemGloMax( parmesh ); PMMG_parmesh_SetMemMax( parmesh ); @@ -611,6 +698,11 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam,int val) { parmesh->ddebug = val; break; + case PMMG_IPARAM_purePartitioning : + + parmesh->info.pure_partitioning = val; + break; + case PMMG_IPARAM_distributedOutput : if ( val == 1 ) { @@ -637,12 +729,21 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam,int val) { } break; case PMMG_IPARAM_iso : + 
parmesh->info.iso = val; for ( k=0; kngrp; ++k ) { mesh = parmesh->listgrp[k].mesh; if ( !MMG3D_Set_iparameter(mesh,NULL,MMG3D_IPARAM_iso,val) ) return 0; } break; + case PMMG_IPARAM_isosurf : + fprintf(stderr," ## Error: Splitting boundaries on isovalue not yet" + " implemented."); + return 0; + case PMMG_IPARAM_lag : + fprintf(stderr," ## Error: Lagrangian motion not yet implemented."); + return 0; + for ( k=0; kngrp; ++k ) { mesh = parmesh->listgrp[k].mesh; if ( !MMG3D_Set_iparameter(mesh,NULL,MMG3D_IPARAM_lag,val) ) return 0; @@ -650,7 +751,7 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam,int val) { break; case PMMG_IPARAM_nofem : - parmesh->info.fem = (val==1)? 0 : 1; + parmesh->info.setfem = (val==1)? 0 : 1; for ( k=0; kngrp; ++k ) { mesh = parmesh->listgrp[k].mesh; met = parmesh->listgrp[k].met; @@ -664,7 +765,7 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam,int val) { } if( val ) { fprintf(stderr," ## Warning: Surface adaptation not supported with opnbdy." - "\nSetting nosurf on.\n"); + "\nSetting nosurf option to on.\n"); for ( k=0; kngrp; ++k ) { mesh = parmesh->listgrp[k].mesh; if ( !MMG3D_Set_iparameter(mesh,NULL,MMG3D_IPARAM_nosurf,val) ) return 0; @@ -706,7 +807,7 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam,int val) { mesh = parmesh->listgrp[k].mesh; if( !val && mesh->info.opnbdy ) fprintf(stderr," ## Warning: Surface adaptation not supported with opnbdy." - "\nCannot set nosurf off.\n"); + "\nCannot set nosurf option to off.\n"); else if ( !MMG3D_Set_iparameter(mesh,NULL,MMG3D_IPARAM_nosurf,val) ) return 0; } break; @@ -1396,6 +1497,38 @@ int PMMG_Get_NodeCommunicator_nodes(PMMG_pParMesh parmesh, int** local_index) { return 1; } +int PMMG_Get_ithNodeCommunicator_nodes(PMMG_pParMesh parmesh, int ext_comm_index, int* local_index) { + PMMG_pGrp grp; + PMMG_pInt_comm int_node_comm; + PMMG_pExt_comm ext_node_comm; + MMG5_pMesh mesh; + int ip,i,idx; + + /* Meshes are merged in grp 0 */ + int_node_comm = parmesh->int_node_comm; + grp = &parmesh->listgrp[0]; + mesh = grp->mesh; + + + /** 1) Store node index in intvalues */ + PMMG_CALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues",return 0); + for( i = 0; i < grp->nitem_int_node_comm; i++ ){ + ip = grp->node2int_node_comm_index1[i]; + idx = grp->node2int_node_comm_index2[i]; + parmesh->int_node_comm->intvalues[idx] = ip; + } + + /** 2) For each external communicator, get node index from intvalues */ + ext_node_comm = &parmesh->ext_node_comm[ext_comm_index]; + for( i = 0; i < ext_node_comm->nitem; i++ ){ + idx = ext_node_comm->int_comm_index[i]; + local_index[i] = int_node_comm->intvalues[idx]; + } + + PMMG_DEL_MEM(parmesh,int_node_comm->intvalues,int,"intvalues"); + return 1; +} + int PMMG_Get_FaceCommunicator_faces(PMMG_pParMesh parmesh, int** local_index) { MMG5_Hash hash; PMMG_pGrp grp; @@ -1860,10 +1993,20 @@ int PMMG_Check_Get_FaceCommunicators(PMMG_pParMesh parmesh, * the triangle. * If of the triangle is simply a parallel face (but not a boundary), its owner * will be negative. + * + * \remark We may want to provide the MPI communicator as argument to allow to + * get global numbering from different communicators. 
For now it uses the + * communicator used during computations */ int PMMG_Get_triangleGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) { MMG5_pMesh mesh; MMG5_pTria ptr; + int ier; + + if( !parmesh->info.globalTNumGot ) { + ier = PMMG_Compute_trianglesGloNum( parmesh,parmesh->comm ); + parmesh->info.globalTNumGot = 1; + } if( !parmesh->info.globalNum ) { fprintf(stderr,"\n ## Error: %s: Triangle global numbering has not been computed.\n", @@ -1914,11 +2057,20 @@ int PMMG_Get_triangleGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) * each node. * If of the triangle is simply a parallel face (but not a boundary), its owner * will be negative. + * + * \remark We may want to provide the MPI communicator as argument to allow to + * get global numbering from different communicators. For now it uses the + * communicator used during computations. */ int PMMG_Get_trianglesGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) { MMG5_pMesh mesh; MMG5_pTria ptr; - int k; + int k,ier; + + if( !parmesh->info.globalTNumGot ) { + ier = PMMG_Compute_trianglesGloNum( parmesh,parmesh->comm ); + parmesh->info.globalTNumGot = 1; + } if( !parmesh->info.globalNum ) { fprintf(stderr,"\n ## Error: %s: Triangles global numbering has not been computed.\n", @@ -1949,10 +2101,19 @@ int PMMG_Get_trianglesGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) * Get global node numbering (starting from 1) and rank of the process owning * the node. * + * \remark We may want to provide the MPI communicator as argument to allow to + * get global numbering from different communicators. For now it uses the + * communicator used during computations */ int PMMG_Get_vertexGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) { MMG5_pMesh mesh; MMG5_pPoint ppt; + int ier; + + if( !parmesh->info.globalVNumGot ) { + ier = PMMG_Compute_verticesGloNum( parmesh,parmesh->comm ); + parmesh->info.globalVNumGot = 1; + } if( !parmesh->info.globalNum ) { fprintf(stderr,"\n ## Error: %s: Nodes global numbering has not been computed.\n", @@ -2002,11 +2163,19 @@ int PMMG_Get_vertexGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) { * Get global nodes numbering (starting from 1) and ranks of processes owning * each node. * + * \remark We may want to provide the MPI communicator as argument to allow to + * get global numbering from different communicators. 
For now it uses the + * communicator used during computations */ int PMMG_Get_verticesGloNum( PMMG_pParMesh parmesh, int *idx_glob, int *owner ) { MMG5_pMesh mesh; MMG5_pPoint ppt; - int ip; + int ip,ier; + + if( !parmesh->info.globalVNumGot ) { + ier = PMMG_Compute_verticesGloNum( parmesh,parmesh->comm ); + parmesh->info.globalVNumGot = 1; + } if( !parmesh->info.globalNum ) { fprintf(stderr,"\n ## Error: %s: Nodes global numbering has not been computed.\n", @@ -2042,8 +2211,8 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx PMMG_pInt_comm int_node_comm; PMMG_pExt_comm ext_node_comm; PMMG_pGrp grp; - MPI_Request request; - MPI_Status status; + MPI_Request *request; + MPI_Status *status; int *intvalues,*itosend,*itorecv,*iproc2comm; int color,nitem; int label,*nlabels,*displ,mydispl,unique; @@ -2058,13 +2227,31 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx PMMG_CALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues",return 0); intvalues = int_node_comm->intvalues; - /* Allocate label counts and offsets */ - PMMG_CALLOC(parmesh,nlabels,parmesh->nprocs,int,"nlabels",return 0); - PMMG_CALLOC(parmesh,displ,parmesh->nprocs+1,int,"displ",return 0); + /* register heap arrays */ + size_t iptr,nptr = 3; + void** ptr_int[3]; + ptr_int[0] = (void*)&iproc2comm; + ptr_int[1] = (void*)&nlabels; + ptr_int[2] = (void*)&displ; + /* nullify them to allow to always call free() on them */ + for( iptr = 0; iptr < nptr; iptr++ ) { + *ptr_int[iptr] = NULL; + } + request = NULL; + status = NULL; + itosend = itorecv = NULL; /* Array to reorder communicators */ PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm",return 0); + /* Allocate label counts and offsets */ + PMMG_CALLOC(parmesh,nlabels,parmesh->nprocs,int,"nlabels", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + PMMG_CALLOC(parmesh,displ,parmesh->nprocs+1,int,"displ", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) iproc2comm[iproc] = PMMG_UNSET; @@ -2170,13 +2357,37 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx /** * 4) Communicate global numbering to the ghost copies. 
*/ + + PMMG_MALLOC(parmesh,request,parmesh->nprocs,MPI_Request, + "mpi request array", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + for ( i=0; inprocs; ++i ) { + request[i] = MPI_REQUEST_NULL; + } + + PMMG_MALLOC(parmesh,status,parmesh->nprocs,MPI_Status, + "mpi status array", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; color = ext_node_comm->color_out; nitem = ext_node_comm->nitem; - PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend",return 0); - PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv",return 0); + PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); itosend = ext_node_comm->itosend; itorecv = ext_node_comm->itorecv; @@ -2191,11 +2402,19 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx itosend[i] = intvalues[idx]; } MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + parmesh->comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0 ); } if ( parmesh->myrank == dst ) { MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + parmesh->comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0 ); /* Store recv buffer in the internal communicator */ for( i = 0; i < nitem; i++ ) { idx = ext_node_comm->int_comm_index[i]; @@ -2206,7 +2425,6 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx } } - /** * 5) Store numbering results in the output array. 
*/ @@ -2221,6 +2439,11 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx } } + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); #ifndef NDEBUG /* Check global IDs */ @@ -2243,11 +2466,11 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx tag = parmesh->nprocs*src+dst; if( parmesh->myrank == src ) { MPI_CHECK( MPI_Isend(idx_glob[icomm],nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + parmesh->comm,&request[color]),return 0 ); } if ( parmesh->myrank == dst ) { MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + parmesh->comm,&status[0]),return 0 ); for( i=0; i < nitem; i++ ) { idx = ext_node_comm->int_comm_index[i]; assert( idx_glob[icomm][i] == intvalues[idx] ); @@ -2271,19 +2494,30 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx PMMG_DEL_MEM(parmesh,mylabels,int,"mylabels"); #endif + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); + return 0); + + // Commented the 11/02/22 by Algiane: useless I think /* Don't free buffers before they have been received */ - MPI_CHECK( MPI_Barrier(parmesh->comm),return 0 ); + /* MPI_CHECK( MPI_Barrier(parmesh->comm), */ + /* PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); */ + /* PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); */ + /* PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); */ + /* return 0 ); */ /* Free arrays */ - PMMG_DEL_MEM(parmesh,nlabels,int,"nlabels"); - PMMG_DEL_MEM(parmesh,displ,int,"displ"); - PMMG_DEL_MEM(parmesh,iproc2comm,int,"iproc2comm"); - for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; PMMG_DEL_MEM(parmesh,ext_node_comm->itosend,int,"itosend"); PMMG_DEL_MEM(parmesh,ext_node_comm->itorecv,int,"itorecv"); } + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_NodeCommunicator_owners"); PMMG_DEL_MEM(parmesh,int_node_comm->intvalues,int,"intvalues"); @@ -2302,8 +2536,8 @@ int PMMG_Get_NodeCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx */ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx_glob,int *nunique,int *ntot) { PMMG_pExt_comm ext_face_comm; - MPI_Request request; - MPI_Status status; + MPI_Request *request; + MPI_Status *status; int unique; int color,nitem,npairs_loc,*npairs,*displ_pair,*glob_pair_displ; int src,dst,tag,sendbuffer,recvbuffer,iproc,icomm,i; @@ -2311,9 +2545,23 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx /* Do this only if there is one group */ assert( parmesh->ngrp == 1 ); - PMMG_CALLOC(parmesh,npairs,parmesh->nprocs,int,"npair",return 0); - PMMG_CALLOC(parmesh,displ_pair,parmesh->nprocs+1,int,"displ_pair",return 0); + /* register heap arrays */ + size_t iptr,nptr = 3; + void** ptr_int[3]; + ptr_int[0] = (void*)&npairs; + ptr_int[1] = (void*)&displ_pair; + ptr_int[2] = (void*)&glob_pair_displ; + /* nullify them to allow to always call free() on them */ + for( iptr = 0; iptr 
< nptr; iptr++ ) { + *ptr_int[iptr] = NULL; + } + request = NULL; + status = NULL; + PMMG_CALLOC(parmesh,npairs,parmesh->nprocs,int,"npair",return 0); + PMMG_CALLOC(parmesh,displ_pair,parmesh->nprocs+1,int,"displ_pair", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); /** * 1) Compute face owners and count nb of new pair faces hosted on myrank. @@ -2347,7 +2595,10 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) displ_pair[iproc+1] = displ_pair[iproc]+npairs[iproc]; - PMMG_CALLOC(parmesh,glob_pair_displ,parmesh->next_face_comm+1,int,"glob_pair_displ",return 0); + PMMG_CALLOC(parmesh,glob_pair_displ,parmesh->next_face_comm+1,int,"glob_pair_displ", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); + for( icomm = 0; icomm < parmesh->next_face_comm; icomm++ ) glob_pair_displ[icomm] = displ_pair[parmesh->myrank]; @@ -2365,6 +2616,21 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx glob_pair_displ[icomm+1] = glob_pair_displ[icomm]+nitem;//+1; } + PMMG_MALLOC(parmesh,request,parmesh->nprocs,MPI_Request, + "mpi request array", + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); + for ( i=0; inprocs; ++i ) { + request[i] = MPI_REQUEST_NULL; + } + + PMMG_MALLOC(parmesh,status,parmesh->nprocs,MPI_Status, + "mpi status array", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); + /* Compute global pair faces enumeration */ for( icomm = 0; icomm < parmesh->next_face_comm; icomm++ ) { ext_face_comm = &parmesh->ext_face_comm[icomm]; @@ -2378,11 +2644,19 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx if( parmesh->myrank == src ) { sendbuffer = glob_pair_displ[icomm]; MPI_CHECK( MPI_Isend(&sendbuffer,1,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + parmesh->comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0 ); } if ( parmesh->myrank == dst ) { MPI_CHECK( MPI_Recv(&recvbuffer,1,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + parmesh->comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0 ); glob_pair_displ[icomm] = recvbuffer; } } @@ -2396,8 +2670,19 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx idx_glob[icomm][i] = glob_pair_displ[icomm]+i+1; /* index starts from 1 */ } + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); + + // Commented the 11/02/22 by Algiane: useless I think /* Don't free buffers before they have been received */ - MPI_CHECK( MPI_Barrier(parmesh->comm),return 0 ); + /* MPI_CHECK( MPI_Barrier(parmesh->comm), */ + /* PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); */ + /* PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); */ + /* 
PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); */ + /* return 0 ); */ /* Free arrays */ PMMG_DEL_MEM(parmesh,npairs,int,"npairs"); @@ -2411,25 +2696,47 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx ext_face_comm = &parmesh->ext_face_comm[icomm]; color = ext_face_comm->color_out; nitem = ext_face_comm->nitem; - PMMG_CALLOC(parmesh,ext_face_comm->itorecv,nitem,int,"itorecv",return 0); + PMMG_CALLOC(parmesh,ext_face_comm->itorecv,nitem,int,"itorecv", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); src = MG_MIN(parmesh->myrank,color); dst = MG_MAX(parmesh->myrank,color); tag = parmesh->nprocs*src+dst; if( parmesh->myrank == src ) { MPI_CHECK( MPI_Isend(idx_glob[icomm],nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + parmesh->comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0 ); } if ( parmesh->myrank == dst ) { MPI_CHECK( MPI_Recv(ext_face_comm->itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + parmesh->comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0 ); for( i = 0; i < nitem; i++ ) assert( idx_glob[icomm][i] == ext_face_comm->itorecv[i] ); } } + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + return 0); + /* Don't free buffers before they have been received */ - MPI_CHECK( MPI_Barrier(parmesh->comm),return 0 ); + /* MPI_CHECK( MPI_Barrier(parmesh->comm), */ + /* PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); */ + /* PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); */ + /* PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); */ + /* return 0 ); */ for( icomm = 0; icomm < parmesh->next_face_comm; icomm++ ) { ext_face_comm = &parmesh->ext_face_comm[icomm]; @@ -2437,9 +2744,126 @@ int PMMG_Get_FaceCommunicator_owners(PMMG_pParMesh parmesh,int **owner,int **idx } #endif + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"Get_FaceCommunicator_owners"); + + return 1; +} + +/** + * \param ptr pointer toward the file extension (dot included) + * \param fmt default file format. + * + * \return and index associated to the file format detected from the extension. + * + * Get the wanted file format from the mesh extension. If \a fmt is provided, it + * is used as default file format (\a ptr==NULL), otherwise, the default file + * format is the medit one. 
+ * + * \remark relies on the MMG5_Get_format function and adds the formats that are + * specific to ParMmg + */ +int PMMG_Get_format( char *ptr, int fmt ) { + /* Default is the format given as input */ + int defFmt = fmt; + + if ( (!ptr) || !(*ptr) ) return defFmt; + + /* Search if Format is known by Mmg */ + int tmp_fmt = MMG5_Get_format(ptr, MMG5_FMT_Unknown); + + if ( tmp_fmt == MMG5_FMT_Unknown ) { + /* If format is not known by Mmg, search in ParMmg formats */ + if ( !strncmp ( ptr,".h5",strlen(".h5") ) ) { + return PMMG_FMT_HDF5; + } + else if ( !strncmp ( ptr,".xdmf",strlen(".xdmf") ) ) { + return PMMG_FMT_HDF5; + } + } + return tmp_fmt; +} + + +int PMMG_Set_defaultIOEntities(PMMG_pParMesh parmesh) { + return PMMG_Set_defaultIOEntities_i(parmesh->info.io_entities); +} + +/** + * \param io_entities array specifying which entities to save + * \return 0 if failed, 1 otherwise. + * + * Set the default entities to save into an hdf5 file. + * + * \remark For internal use + */ +int PMMG_Set_defaultIOEntities_i(int io_entities[PMMG_IO_ENTITIES_size] ) { + + /* Default: save/load everything */ + for (int i = 0 ; i < PMMG_IO_ENTITIES_size ; i++) { + io_entities[i] = 1; + } + return 1; } +int PMMG_Set_IOEntities(PMMG_pParMesh parmesh, int target, int val) { + return PMMG_Set_IOEntities_i(parmesh->info.io_entities,target,val); +} + +/** + * \param io_entities array specifying which entities to save + * \param target type of entity for which we want to enable/disable saving. + * target value has to be one of the PMMG_IO_entities values. + * \param val enable saving if PMMG_ON is passed, disable it if PMMG_OFF is passed. + * \return 0 if failed, 1 otherwise. + * + * Enable or disable entities to save depending on the \a val value. + * + * \remark For internal use + */ +int PMMG_Set_IOEntities_i(int io_entities[PMMG_IO_ENTITIES_size], int target, int val) { + + if ( (val != PMMG_ON) && (val != PMMG_OFF) ) { + fprintf(stderr, " ## Error: %s: Unexpected value for val parameter" + " (%d)\n",__func__,val); + fprintf(stderr, " Please pass PMMG_ON or PMMG_OFF value.\n"); + return 0; + } + + switch(target) { + + case PMMG_IO_Required: + io_entities[PMMG_IO_RequiredVertex] = val; + io_entities[PMMG_IO_RequiredEdge] = val; + io_entities[PMMG_IO_RequiredTria] = val; + io_entities[PMMG_IO_RequiredQuad] = val; + io_entities[PMMG_IO_RequiredTetra] = val; + break; + + case PMMG_IO_Parallel: + io_entities[PMMG_IO_ParallelVertex] = val; + io_entities[PMMG_IO_ParallelEdge] = val; + io_entities[PMMG_IO_ParallelTria] = val; + io_entities[PMMG_IO_ParallelQuad] = val; + io_entities[PMMG_IO_ParallelTetra] = val; + break; + + default: + if ( target >= PMMG_IO_ENTITIES_size || target < 0 ) { + fprintf(stderr, " ## Error: %s: Unexpected value for target parameter" + " (%d)\n",__func__,target); + fprintf(stderr, " Value has to be one of the listed" + " PMMG_IO_entities.\n"); + return 0; + } + io_entities[target] = val; + } + return 1; +} + + int PMMG_Free_names(PMMG_pParMesh parmesh) { PMMG_DEL_MEM ( parmesh, parmesh->meshin,char,"meshin" ); @@ -2447,6 +2871,7 @@ int PMMG_Free_names(PMMG_pParMesh parmesh) PMMG_DEL_MEM ( parmesh, parmesh->metin,char,"metin" ); PMMG_DEL_MEM ( parmesh, parmesh->metout,char,"metout" ); PMMG_DEL_MEM ( parmesh, parmesh->lsin,char,"lsin" ); + PMMG_DEL_MEM ( parmesh, parmesh->lsout,char,"lsout" ); PMMG_DEL_MEM ( parmesh, parmesh->dispin,char,"dispin" ); PMMG_DEL_MEM ( parmesh, parmesh->fieldin,char,"fieldin" ); PMMG_DEL_MEM ( parmesh, parmesh->fieldout,char,"fieldout" ); diff --git
a/src/API_functionsf_pmmg.c b/src/API_functionsf_pmmg.c index e5d4f0f7..fe47ec71 100644 --- a/src/API_functionsf_pmmg.c +++ b/src/API_functionsf_pmmg.c @@ -824,6 +824,17 @@ FORTRAN_NAME(PMMG_SET_NUMBEROFNODECOMMUNICATORS, pmmg_set_numberofnodecommunicat return; } +/** + * See \ref PMMG_Get_numberOfNodeCommunicators function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_GET_NUMBEROFNODECOMMUNICATORS, pmmg_get_numberofnodecommunicators, + (PMMG_pParMesh *parmesh,int* next_comm, + int* retval), + (parmesh, next_comm, retval)) { + *retval = PMMG_Get_numberOfNodeCommunicators(*parmesh,next_comm); + return; +} + /** * See \ref PMMG_Set_numberOfFaceCommunicators function in \ref libparmmg.h file. */ @@ -912,6 +923,17 @@ FORTRAN_NAME(PMMG_GET_NODECOMMUNICATOR_NODES, pmmg_get_nodecommunicator_nodes, return; } +/** + * See \ref PMMG_Get_ithNodeCommunicator_nodes function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_GET_ITHNODECOMMUNICATOR_NODES, pmmg_get_ithnodecommunicator_nodes, + (PMMG_pParMesh *parmesh, int* ext_comm_index, int* local_index, + int* retval), + (parmesh, ext_comm_index, local_index, retval)) { + *retval = PMMG_Get_ithNodeCommunicator_nodes(*parmesh,*ext_comm_index,local_index); + return; +} + /** * See \ref PMMG_Get_FaceCommunicator_faces function in \ref libparmmg.h file. */ @@ -990,6 +1012,27 @@ FORTRAN_NAME(PMMG_GET_VERTEXGLONUM, pmmg_get_vertexglonum, return; } +/** + * See \ref PMMG_Set_defaultIOEntities function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_SET_DEFAULTIOENTITIES, pmmg_set_defaultioentities, + (PMMG_pParMesh *parmesh,int* retval), + (parmesh,retval)) { + *retval = PMMG_Set_defaultIOEntities(*parmesh); + return; +} + +/** + * See \ref PMMG_Set_IOEntities function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_SET_IOENTITIES, pmmg_set_ioentities, + (PMMG_pParMesh *parmesh,int* target,int* val,int* retval), + (parmesh,target,val,retval)) { + *retval = PMMG_Set_IOEntities(*parmesh,*target,*val); + return; +} + + /** * See \ref PMMG_Free_all function in \ref mmg3d/libmmg3d.h file. */ @@ -1019,6 +1062,16 @@ FORTRAN_NAME(PMMG_PARMMGLIB_DISTRIBUTED,pmmg_parmmglib_distributed, return; } +/** + * See \ref PMMG_parmmgls_distributed function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_PARMMGLS_DISTRIBUTED,pmmg_parmmgls_distributed, + (PMMG_pParMesh *parmesh,int* retval), + (parmesh,retval)) { + *retval = PMMG_parmmgls_distributed(*parmesh); + return; +} + /** * See \ref PMMG_parmmglib_centralized function in \ref libparmmg.h file. */ @@ -1029,6 +1082,16 @@ FORTRAN_NAME(PMMG_PARMMGLIB_CENTRALIZED,pmmg_parmmglib_centralized, return; } +/** + * See \ref PMMG_parmmgls_centralized function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_PARMMGLS_CENTRALIZED,pmmg_parmmgls_centralized, + (PMMG_pParMesh *parmesh,int* retval), + (parmesh,retval)) { + *retval = PMMG_parmmgls_centralized(*parmesh); + return; +} + /** * See \ref PMMG_loadMesh function in \ref libparmmg.h file. */ @@ -1160,6 +1223,25 @@ FORTRAN_NAME(PMMG_LOADSOL_CENTRALIZED,pmmg_loadsol_centralized, return; } +/** + * See \ref PMMG_loadSol_distributed function in \ref libparmmg.h file. 
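+ *
+ * (Like the other Fortran string wrappers in this file, the routine receives
+ * the string length explicitly, builds a NUL-terminated copy of the file name
+ * with MMG5_SAFE_MALLOC/strncpy, calls the C API, then frees the temporary
+ * buffer.)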
+ */ +FORTRAN_NAME(PMMG_LOADSOL_DISTRIBUTED,pmmg_loadsol_distributed, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen,retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_loadSol_distributed(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + /** * See \ref PMMG_loadAllSols_centralized function in \ref libparmmg.h file. */ @@ -1179,6 +1261,25 @@ FORTRAN_NAME(PMMG_LOADALLSOLS_CENTRALIZED,pmmg_loadallsols_centralized, return; } +/** + * See \ref PMMG_loadAllSols_distributed function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_LOADALLSOLS_DISTRIBUTED,pmmg_loadallsols_distributed, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen,retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_loadAllSols_distributed(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + /** * See \ref PMMG_saveMesh_centralized function in \ref libparmmg.h file. */ @@ -1255,6 +1356,44 @@ FORTRAN_NAME(PMMG_SAVEMET_DISTRIBUTED,pmmg_savemet_distributed, return; } +/** + * See \ref PMMG_saveLs_centralized function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_SAVELS_CENTRALIZED,pmmg_savels_centralized, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen,retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_saveLs_centralized(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + +/** + * See \ref PMMG_saveLs_distributed function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_SAVELS_DISTRIBUTED,pmmg_savels_distributed, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen,retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_saveLs_distributed(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + /** * See \ref PMMG_saveAllSols_centralized function in \ref libparmmg.h file. */ @@ -1274,6 +1413,64 @@ FORTRAN_NAME(PMMG_SAVEALLSOLS_CENTRALIZED,pmmg_saveallsols_centralized, return; } +/** + * See \ref PMMG_saveAllSols_distributed function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_SAVEALLSOLS_DISTRIBUTED,pmmg_saveallsols_distributed, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen,retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_saveAllSols_distributed(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + +/** + * See \ref PMMG_loadMesh_hdf5 function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_LOADMESH_HDF5,pmmg_loadmesh_hdf5, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen, retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_loadMesh_hdf5(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + +/** + * See \ref PMMG_saveMesh_hdf5 function in \ref libparmmg.h file. 
+ */ +FORTRAN_NAME(PMMG_SAVEMESH_HDF5,pmmg_savemesh_hdf5, + (PMMG_pParMesh *parmesh,char* filename, int *strlen,int* retval), + (parmesh,filename,strlen, retval)){ + char *tmp = NULL; + + MMG5_SAFE_MALLOC(tmp,(*strlen+1),char,); + strncpy(tmp,filename,*strlen); + tmp[*strlen] = '\0'; + + *retval = PMMG_saveMesh_hdf5(*parmesh,tmp); + + MMG5_SAFE_FREE(tmp); + + return; +} + + /** * See \ref PMMG_Free_names function in \ref libparmmg.h file. */ diff --git a/src/analys_pmmg.c b/src/analys_pmmg.c index a6d7a5cd..ac799608 100644 --- a/src/analys_pmmg.c +++ b/src/analys_pmmg.c @@ -33,6 +33,8 @@ */ #include "parmmg.h" +#include "libmmg3d.h" +#include "mmgexterns_private.h" /** * \param ppt pointer toward the point structure @@ -89,7 +91,7 @@ typedef struct { MMG5_pxTetra pxt; MMG5_pPoint ppt; double n[3]; - int16_t tag; + uint16_t tag; int ie,ifac,iloc,iadj; int ip,ip1,ip2; int updloc,updpar; @@ -108,8 +110,8 @@ typedef struct { * stored for each of the two edge orientations. */ static inline -int PMMG_hGetOri( MMG5_HGeom *hash,int ip0,int ip1,int *ref,int16_t *color ) { - int16_t tag; +int PMMG_hGetOri( MMG5_HGeom *hash,int ip0,int ip1,int *ref,uint16_t *color ) { + uint16_t tag; /* Get edge from hash table */ if( !MMG5_hGet( hash, @@ -135,8 +137,8 @@ int PMMG_hGetOri( MMG5_HGeom *hash,int ip0,int ip1,int *ref,int16_t *color ) { * each of the two edge orientations. */ static inline -int PMMG_hTagOri( MMG5_HGeom *hash,int ip0,int ip1,int ref,int16_t color ) { - int16_t tag; +int PMMG_hTagOri( MMG5_HGeom *hash,int ip0,int ip1,int ref,uint16_t color ) { + uint16_t tag; /* Set bitwise tag from color */ if( color ) { @@ -272,8 +274,8 @@ int PMMG_hashNorver_loop( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var,int16_t ski */ static inline int PMMG_hash_nearParEdges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { - int ia[2],ip[2],j; - int16_t tag; + int ia[2],ip[2],j; + uint16_t tag; /* Get points */ ia[0] = MMG5_iarf[var->ifac][MMG5_iprv2[var->iloc]]; @@ -308,11 +310,11 @@ int PMMG_hash_nearParEdges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { static inline int PMMG_hashNorver_edges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { MMG5_pPoint ppt[2]; - double *doublevalues; - int ia[2],ip[2],gip; - int *intvalues,idx,d,edg,j,pos; - int16_t tag; - int8_t found; + double *doublevalues; + int ia[2],ip[2],gip; + int *intvalues,idx,d,edg,j,pos; + uint16_t tag; + int8_t found; doublevalues = parmesh->int_node_comm->doublevalues; intvalues = parmesh->int_node_comm->intvalues; @@ -354,7 +356,11 @@ int PMMG_hashNorver_edges( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { pos++; } assert(found < 2); + +#ifndef NDEBUG if( pos == 2 ) assert(found); +#endif + if( !found ) { assert( pos < 2 ); intvalues[2*idx+pos] = gip; @@ -382,12 +388,13 @@ static inline int PMMG_hashNorver_switch( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { int idx; int ia[2],ip[2],j; - int16_t tag; + uint16_t tag; /* Only process ridge points */ if( !(var->ppt->tag & MG_GEO ) ) return 1; /* If non-manifold, only process exterior points */ +#warning this should also work on the border of a OPNBDY surface if( (var->ppt->tag & MG_NOM) && var->iadj ) return 1; /* Get internal communicator index */ @@ -407,7 +414,7 @@ int PMMG_hashNorver_switch( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { parmesh->int_node_comm->intvalues[2*idx] ){ if( !PMMG_hTagOri( var->hash, var->ip,ip[j], /* pair (ip,np+ip[j]) */ - (int)tag, /* still the same tag */ + tag, /* still the same tag */ 1 ) ) /* switch color on */ return 0; } @@ -439,7 +446,7 @@ int 
PMMG_hashNorver_switch( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { static inline int PMMG_hashNorver_sweep( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { int edg; - int16_t color_old,color_new; + uint16_t color_old,color_new; /* If non-manifold, only process exterior points */ if( (var->ppt->tag & MG_NOM) && var->iadj ) return 1; @@ -493,7 +500,7 @@ int PMMG_hashNorver_edge2paredge( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var, int idx ) { MMG5_pEdge pa; int *intvalues,edg,j,i[2],ip,ip1; - int16_t color_old,color_new; + uint16_t color_old,color_new; /* Get internal communicator */ intvalues = parmesh->int_edge_comm->intvalues; @@ -541,7 +548,7 @@ int PMMG_hashNorver_paredge2edge( PMMG_pParMesh parmesh,MMG5_HGeom *hash, MMG5_pMesh mesh = parmesh->listgrp[0].mesh; MMG5_pEdge pa; int *intvalues,edg,j,i[2],ip,ip1; - int16_t color_old,color_new; + uint16_t color_old,color_new; assert( parmesh->ngrp == 1 ); @@ -577,12 +584,20 @@ int PMMG_hashNorver_paredge2edge( PMMG_pParMesh parmesh,MMG5_HGeom *hash, /** * \param parmesh pointer toward the parmesh structure * \param var pointer toward the structure for local loop variables + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Local iterations of surface coloring to assign normal vectors on parallel * ridge points. */ -int PMMG_hashNorver_locIter( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ){ +int PMMG_hashNorver_locIter( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var,MPI_Comm comm ){ PMMG_pGrp grp = &parmesh->listgrp[0]; int i,idx; @@ -596,20 +611,23 @@ int PMMG_hashNorver_locIter( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ){ var->updloc = 0; /* Sweep loop upstream edge -> triangle -> downstream edge */ - if( !PMMG_hashNorver_loop( parmesh,var,MG_CRN,&PMMG_hashNorver_sweep ) ) - return 0; + if( !PMMG_hashNorver_loop( parmesh,var,MG_CRN,&PMMG_hashNorver_sweep ) ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } } /* Set color on parallel edges */ for( i = 0; i < grp->nitem_int_edge_comm; i++ ){ idx = grp->edge2int_edge_comm_index2[i]; - if( !PMMG_hashNorver_edge2paredge( parmesh,var,idx ) ) - return 0; + if( !PMMG_hashNorver_edge2paredge( parmesh,var,idx ) ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } } /* Check if any process has marked the need for a parallel update */ MPI_CHECK( MPI_Allreduce( MPI_IN_PLACE,&var->updpar,1,MPI_INT16_T,MPI_MAX, - parmesh->comm ),return 0 ); + comm ), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); return 1; } @@ -629,23 +647,29 @@ int PMMG_hashNorver_compExt( const void *a,const void *b ) { /** * \param parmesh pointer toward the parmesh structure * \param mesh pointer toward the parmesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. 
+ * * Communicate ridge extremities on parallel ridge points. */ -int PMMG_hashNorver_communication_ext( PMMG_pParMesh parmesh,MMG5_pMesh mesh ) { +int PMMG_hashNorver_communication_ext( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MPI_Comm comm ) { PMMG_pGrp grp = &parmesh->listgrp[0]; PMMG_pExt_comm ext_node_comm; double *rtosend,*rtorecv,*doublevalues; int *itosend,*itorecv,*intvalues; int k,nitem,color,i,idx,j,pos,d; - MPI_Comm comm; MPI_Status status; assert( parmesh->ngrp == 1 ); assert( grp->mesh == mesh ); - comm = parmesh->comm; intvalues = parmesh->int_node_comm->intvalues; doublevalues = parmesh->int_node_comm->doublevalues; @@ -680,11 +704,12 @@ int PMMG_hashNorver_communication_ext( PMMG_pParMesh parmesh,MMG5_pMesh mesh ) { MPI_CHECK( MPI_Sendrecv(itosend,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, itorecv,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + MPI_CHECK( MPI_Sendrecv(rtosend,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+3, rtorecv,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+3, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } /* Fill internal communicator */ @@ -733,19 +758,25 @@ int PMMG_hashNorver_communication_ext( PMMG_pParMesh parmesh,MMG5_pMesh mesh ) { /** * \param parmesh pointer toward the parmesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Communicate bitwise integer flags on parallel points, and reduce them on the * internal point communicator. */ -int PMMG_hashNorver_communication( PMMG_pParMesh parmesh ){ +int PMMG_hashNorver_communication( PMMG_pParMesh parmesh,MPI_Comm comm ){ PMMG_pExt_comm ext_edge_comm; int *itosend,*itorecv,*intvalues; int k,nitem,color,i,idx,j; - MPI_Comm comm; MPI_Status status; - comm = parmesh->comm; intvalues = parmesh->int_edge_comm->intvalues; /** Exchange values on the interfaces among procs */ @@ -769,7 +800,7 @@ int PMMG_hashNorver_communication( PMMG_pParMesh parmesh ){ MPI_CHECK( MPI_Sendrecv(itosend,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, itorecv,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } /* Fill internal communicator */ @@ -791,19 +822,25 @@ int PMMG_hashNorver_communication( PMMG_pParMesh parmesh ){ /** * \param parmesh pointer toward the parmesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Communicate contributions to normal and tangent vectors on triangles touching * parallel points, and add them to the point vectors. 
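+ *
+ * The exchange follows the pattern shared by all the analysis communications
+ * in this file (schematic sketch only: error handling and the companion
+ * integer exchange are omitted, and the pack/unpack loops are simplified):
+ * \code
+ * for ( k = 0; k < parmesh->next_node_comm; ++k ) {
+ *   ext_node_comm = &parmesh->ext_node_comm[k];
+ *   nitem   = ext_node_comm->nitem;
+ *   color   = ext_node_comm->color_out;
+ *   rtosend = ext_node_comm->rtosend;
+ *   rtorecv = ext_node_comm->rtorecv;
+ *   // pack: copy the local contributions from the internal communicator
+ *   for ( i = 0; i < nitem; ++i )
+ *     for ( j = 0; j < 6; ++j )
+ *       rtosend[6*i+j] = doublevalues[6*ext_node_comm->int_comm_index[i]+j];
+ *   MPI_Sendrecv(rtosend,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+2,
+ *                rtorecv,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+2,
+ *                comm,&status);
+ *   // unpack: accumulate the remote contributions
+ *   for ( i = 0; i < nitem; ++i )
+ *     for ( j = 0; j < 6; ++j )
+ *       doublevalues[6*ext_node_comm->int_comm_index[i]+j] += rtorecv[6*i+j];
+ * }
+ * \endcode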
*/ -int PMMG_hashNorver_communication_nor( PMMG_pParMesh parmesh ) { +int PMMG_hashNorver_communication_nor( PMMG_pParMesh parmesh,MPI_Comm comm ) { PMMG_pExt_comm ext_node_comm; double *rtosend,*rtorecv,*doublevalues; int *itosend,*itorecv,*intvalues,k,nitem,color,i,idx,j; - MPI_Comm comm; MPI_Status status; - comm = parmesh->comm; intvalues = parmesh->int_node_comm->intvalues; doublevalues = parmesh->int_node_comm->doublevalues; @@ -831,11 +868,11 @@ int PMMG_hashNorver_communication_nor( PMMG_pParMesh parmesh ) { MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_ANALYS_TAG+1, itorecv,nitem,MPI_INT,color,MPI_ANALYS_TAG+1, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); MPI_CHECK( MPI_Sendrecv(rtosend,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+2, rtorecv,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } /* Fill internal communicator */ @@ -895,25 +932,73 @@ int PMMG_hn_sumnor( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { /** * \param parmesh pointer toward the parmesh structure * \param var pointer toward the structure for local loop variables + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Compute normal and tangent vectors on parallel points, using a hash tables * for edges. once C1 portions of a surface touching a parallel ridge point have * been consistently colored. */ -int PMMG_hashNorver_normals( PMMG_pParMesh parmesh, PMMG_hn_loopvar *var ){ +int PMMG_hashNorver_normals( PMMG_pParMesh parmesh, PMMG_hn_loopvar *var,MPI_Comm comm ){ MMG5_pxPoint pxp; double *doublevalues,dd,l[2],*c[2]; int *intvalues,idx,d,j; +#warning Luca: fix opnbdy treatment intvalues = parmesh->int_node_comm->intvalues; doublevalues = parmesh->int_node_comm->doublevalues; + /* Check consistency between point tag and stored infos before erasing + * intvalues: if MG_EDG edges have been found on both side of a non singular + * MG_EDG point, the intvalues array should be filled with the global id of + * the 2 neigbouring points along the feature edge. + * It aims to solve the + * following issue with input edges along parallel interfaces: an input REF or + * GEO edge may be stored inside a boundary triangle at interface * between + * tetra with same reference. In this case, the LS split, * creates a points + * with REF or GEO tag to match the tag of the * triangle edge (in + * setfeatures) but the triangle and associated * edge tag are not stored in + * the xtetra, ended with a division by 0 * when computing the tangent at + * point. */ + for( var->ip = 1; var->ip <= var->mesh->np; var->ip++ ) { + var->ppt = &var->mesh->point[var->ip]; + + /* Loop on parallel, non-singular points (they have been flagged in + * PMMG_hashNorver_xp_init()) */ + if( var->ppt->flag && MG_EDG(var->ppt->tag) ) { + + idx = PMMG_point2int_comm_index_get( var->ppt ); + + if ( (!intvalues[2*idx]) || (!intvalues[2*idx+1]) ) { + /* We will miss infos to compute the tangent... 
Erase point tag */ + + if ( parmesh->ddebug ) { + printf(" ## Warning: %s:%d: rank %d: tag inconsistency: ppt %d tag %u" + " - edge extremities %d %d\n Point tag is removed.\n", + __func__,__LINE__,parmesh->myrank,var->ip, + var->ppt->tag,intvalues[2*idx],intvalues[2*idx+1]); + } + + var->ppt->tag &= ( (~MG_REF) & (~MG_GEO) ); + } + } + } + + memset(intvalues,0,parmesh->int_node_comm->nitem*sizeof(int)); /* Accumulate normal vector contributions */ - if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hn_sumnor ) ) - return 0; + if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hn_sumnor ) ) { + /* Because we will have a comm in hashNorver_communication_nor */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* Load communicator */ for( var->ip = 1; var->ip <= var->mesh->np; var->ip++ ) { @@ -927,6 +1012,7 @@ int PMMG_hashNorver_normals( PMMG_pParMesh parmesh, PMMG_hn_loopvar *var ){ pxp = &var->mesh->xpoint[var->ppt->xp]; /* Compute tangent (as in MMG3D_boulenm) */ +#warning Luca: why not like in MMG5_boulec? if( MG_EDG(var->ppt->tag) ) { c[0] = &doublevalues[6*idx]; @@ -984,8 +1070,9 @@ int PMMG_hashNorver_normals( PMMG_pParMesh parmesh, PMMG_hn_loopvar *var ){ } /* Parallel reduction on normal vectors */ - if( !PMMG_hashNorver_communication_nor( parmesh ) ) - return 0; + if( !PMMG_hashNorver_communication_nor( parmesh,comm ) ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* Unload communicator */ for( var->ip = 1; var->ip <= var->mesh->np; var->ip++ ) { @@ -1035,6 +1122,7 @@ int PMMG_hashNorver_normals( PMMG_pParMesh parmesh, PMMG_hn_loopvar *var ){ if( intvalues[idx] ) { pxp = &var->mesh->xpoint[var->ppt->xp]; +#warning skip opnbdy until ready, as wrong orientation can mess up normals if( var->ppt->tag & MG_OPNBDY ) continue; /* Loop on manifold or non-manifold exterior points */ @@ -1164,12 +1252,20 @@ int PMMG_hashNorver_xp_init( PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { /** * \param parmesh pointer toward the parmesh structure * \param hpar hash table parallel edges + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * * \return 0 if failure, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Set the owner rank of each parallel edge, and store it the \var base field * of the edge structure. 
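+ *
+ * After a successful call the owner rank of a parallel edge can thus be read
+ * back from the edge structure itself (usage sketch only; \a ia stands for any
+ * valid edge index):
+ * \code
+ * MMG5_pEdge pa    = &mesh->edge[ia];
+ * int        owner = pa->base;  // MPI rank that owns this parallel edge
+ * \endcode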
*/ -int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { +int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar,MPI_Comm comm ) { PMMG_pInt_comm int_edge_comm; PMMG_pExt_comm ext_edge_comm; MMG5_pMesh mesh; @@ -1178,11 +1274,9 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { MMG5_pEdge pa; int *intvalues,*itosend,*itorecv; int idx,k,nitem,color,edg,ia,ie,ifac,ip[2],i; - int16_t tag; - MPI_Comm comm; + uint16_t tag; MPI_Status status; - comm = parmesh->comm; assert( parmesh->ngrp == 1 ); mesh = parmesh->listgrp[0].mesh; @@ -1197,6 +1291,7 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { if( !MG_EOK(pt) || !pt->xt ) continue; pxt = &mesh->xtetra[pt->xt]; for( ifac = 0; ifac < 4; ifac++ ) { + if( !MG_GET(pxt->ori,ifac) ) continue; tag = pxt->ftag[ifac]; /* Skip non-boundary faces */ if( !(tag & MG_BDY) || ( (tag & MG_PARBDY) && !(tag & MG_PARBDYBDY) ) ) @@ -1234,7 +1329,7 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, itorecv,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } /* Fill internal communicator */ @@ -1259,14 +1354,103 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { return 1; } +/** + * \param parmesh pointer toward the parmesh structure + * \param hpar hash table parallel edges + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * + * \return 0 if failure, 1 if success. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. + * + * Check that every edge has one and only one owner. 
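+ *
+ * A possible debug-time use (sketch only; \a hpar and \a comm are the parallel
+ * edge hash table and the communicator already used by the analysis):
+ * \code
+ * #ifndef NDEBUG
+ *   if ( !PMMG_check_edge_owners(parmesh,&hpar,comm) )
+ *     fprintf(stderr,"  ## Error: parallel edge with inconsistent owner.\n");
+ * #endif
+ * \endcode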
+ */ +int PMMG_check_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar,MPI_Comm comm ) { + PMMG_pInt_comm int_edge_comm; + PMMG_pExt_comm ext_edge_comm; + MMG5_pMesh mesh; + MMG5_pEdge pa; + int *intvalues, *itosend, *itorecv; + int i, idx, k, nitem, color, ia; + MPI_Status status; + + assert( parmesh->ngrp == 1 ); + mesh = parmesh->listgrp[0].mesh; + + int_edge_comm = parmesh->int_edge_comm; + intvalues = int_edge_comm->intvalues; + + /** Store list of parallel edge owners */ + for (ia=1;ia<=mesh->na;ia++) { + pa = &mesh->edge[ia]; + intvalues[ia-1] = pa->base; + if (!(pa->tag & MG_PARBDYBDY)) continue; + if (pa->base == parmesh->nprocs) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + } + + /** Exchange values on the interfaces among procs */ + for ( k = 0; k < parmesh->next_edge_comm; ++k ) { + ext_edge_comm = &parmesh->ext_edge_comm[k]; + nitem = ext_edge_comm->nitem; + color = ext_edge_comm->color_out; + + itosend = ext_edge_comm->itosend; + itorecv = ext_edge_comm->itorecv; + + /* Fill buffers */ + for ( i=0; i<nitem; ++i ) { + idx = ext_edge_comm->int_comm_index[i]; + itosend[i] = intvalues[idx]; + } + + /* Communication */ + MPI_CHECK( + MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, + itorecv,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + } + + /* Check that all edges have the same owner over the whole mesh */ + for ( k = 0; k < parmesh->next_edge_comm; ++k ) { + ext_edge_comm = &parmesh->ext_edge_comm[k]; + + itorecv = ext_edge_comm->itorecv; + + for ( i=0; i<ext_edge_comm->nitem; ++i ) { + idx = ext_edge_comm->int_comm_index[i]; + if (!(intvalues[idx] == itorecv[i])) { + fprintf(stderr,"Parallel edge has two different owners.\n"); + return 0; + } + } + } + + return 1; +} + /** * \param parmesh pointer toward the parmesh structure * \param mesh pointer toward the mesh structure * \param hash pointer toward the hash table for edges touching a parallel point * \param hpar pointer toward the hash table for parallel edges * \param var pointer toward the structure for local loop variables + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all other calls, comm has to be the communicator to use for computations. + * * \return 0 if fail, 1 if success. * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * Compute normal and tangent vectors on parallel points, using hash tables * for edges. This is necessary as it is not convenient to travel the surface * ball of a parallel point (it could be fragmented among many partitions). @@ -1275,7 +1459,7 @@ int PMMG_set_edge_owners( PMMG_pParMesh parmesh,MMG5_HGeom *hpar ) { * normal vectors. */ int PMMG_hashNorver( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hash, - MMG5_HGeom *hpar,PMMG_hn_loopvar *var ){ + MMG5_HGeom *hpar,PMMG_hn_loopvar *var,MPI_Comm comm ){ PMMG_pGrp grp = &parmesh->listgrp[0]; PMMG_pInt_comm int_node_comm,int_edge_comm; MMG5_pTetra pt; @@ -1308,21 +1492,30 @@ int PMMG_hashNorver( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hash, } /* Create xpoints */ - if( !PMMG_hashNorver_xp_init( parmesh,var ) ) - return 0; + if( !PMMG_hashNorver_xp_init( parmesh,var ) ) { + /* Because we will have a comm in hashNorver_communication */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /** 1) Find local ridge extremities.
*/ - if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hashNorver_edges ) ) - return 0; + if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hashNorver_edges ) ) { + /* Because we will have a comm in hashNorver_communication */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /** 2) Parallel exchange of ridge extremities, and update color on second * extremity. */ - if( !PMMG_hashNorver_communication_ext( parmesh,mesh ) ) return 0; + if( !PMMG_hashNorver_communication_ext( parmesh,mesh,comm ) ) { + /* Because we will have a comm in hashNorver_communication */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* Switch edge color if its extremity is found */ - if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hashNorver_switch ) ) - return 0; + if( !PMMG_hashNorver_loop( parmesh, var, MG_CRN, &PMMG_hashNorver_switch ) ) { + /* Because we will have a comm in hashNorver_communication */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /** 3) Propagate surface colors: @@ -1341,7 +1534,10 @@ int PMMG_hashNorver( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hash, } /* 3.1) Local update iterations */ - if( !PMMG_hashNorver_locIter( parmesh,var ) ) return 0; + if( !PMMG_hashNorver_locIter( parmesh,var,comm ) ) { + /* Because we will have a comm in hashNorver_communication */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* 3.2) Parallel update iterations */ while( var->updpar ) { @@ -1350,132 +1546,37 @@ int PMMG_hashNorver( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hash, var->updpar = 0; /* 3.2.1) Parallel communication */ - if( !PMMG_hashNorver_communication( parmesh ) ) return 0; + if( !PMMG_hashNorver_communication( parmesh,comm ) ) { + /* Because we will have a comm in PMMG_hashNorver_locIter */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* 3.2.2) Get color from parallel edges */ for( i = 0; i < grp->nitem_int_edge_comm; i++ ){ idx = grp->edge2int_edge_comm_index2[i]; - if( !PMMG_hashNorver_paredge2edge( parmesh,hash,idx ) ) return 0; + if( !PMMG_hashNorver_paredge2edge( parmesh,hash,idx ) ) { + /* Because we will have a comm in PMMG_hashNorver_locIter */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } } /* 3.2.3) Local update iterations */ - if( !PMMG_hashNorver_locIter( parmesh,var ) ) return 0; + if( !PMMG_hashNorver_locIter( parmesh,var,comm ) ) { + /* Because we will have a comm in PMMG_hashNorver_normals */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } } /** 4) Compute normal vectors */ - if( !PMMG_hashNorver_normals( parmesh,var ) ) return 0; + if( !PMMG_hashNorver_normals( parmesh,var,comm ) ) return 0; return 1; } -/** - * \param parmesh pointer to the parmesh structure - * \param mesh pointer to the mesh structure - * - * \return 1 if success, 0 if failure. - * - * Compute continuous geometric support (normal and tangent vectors) on - * non-manifold MG_OLDPARBDY points. - * - * \remark Analogous to the MMG3D_nmgeom function, but it only travels on - * old parallel points. - * \remark Normal and tangent vectors on these points are overwritten. 
- * - */ -int PMMG_update_nmgeom(PMMG_pParMesh parmesh,MMG5_pMesh mesh){ - MMG5_pTetra pt; - MMG5_pPoint p0; - MMG5_pxPoint pxp; - int k,base; - int *adja; - double n[3],t[3]; - int ip; - int8_t i,j,ier; - - for( ip = 1; ip <= mesh->np; ip++ ) { - mesh->point[ip].flag = mesh->base; - } - - base = ++mesh->base; - for (k=1; k<=mesh->ne; k++) { - pt = &mesh->tetra[k]; - if( !MG_EOK(pt) ) continue; - adja = &mesh->adja[4*(k-1)+1]; - for (i=0; i<4; i++) { - if ( adja[i] ) continue; - for (j=0; j<3; j++) { - ip = MMG5_idir[i][j]; - p0 = &mesh->point[pt->v[ip]]; - if ( p0->flag == base ) continue; - else if ( !(p0->tag & MG_OLDPARBDY) ) continue; - else if ( !(p0->tag & MG_NOM) ) continue; - - p0->flag = base; - ier = MMG5_boulenm(mesh,k,ip,i,n,t); - - if ( ier < 0 ) - return 0; - else if ( !ier ) { - p0->tag |= MG_REQ; - p0->tag &= ~MG_NOSURF; - } - else { - if ( !p0->xp ) { - ++mesh->xp; - if(mesh->xp > mesh->xpmax){ - MMG5_TAB_RECALLOC(mesh,mesh->xpoint,mesh->xpmax,MMG5_GAP,MMG5_xPoint, - "larger xpoint table", - mesh->xp--; - fprintf(stderr," Exit program.\n");return 0;); - } - p0->xp = mesh->xp; - } - pxp = &mesh->xpoint[p0->xp]; - memcpy(pxp->n1,n,3*sizeof(double)); - memcpy(p0->n,t,3*sizeof(double)); - } - } - } - } - /* Deal with the non-manifold points that do not belong to a surface - * tetra (a tetra that has a face without adjacent)*/ - for (k=1; k<=mesh->ne; k++) { - pt = &mesh->tetra[k]; - if( !MG_EOK(pt) ) continue; - - for (i=0; i<4; i++) { - p0 = &mesh->point[pt->v[i]]; - if ( !(p0->tag & MG_OLDPARBDY) ) continue; - else if ( p0->tag & MG_PARBDY || p0->tag & MG_REQ || !(p0->tag & MG_NOM) || p0->xp ) continue; - ier = MMG5_boulenmInt(mesh,k,i,t); - if ( ier ) { - ++mesh->xp; - if(mesh->xp > mesh->xpmax){ - MMG5_TAB_RECALLOC(mesh,mesh->xpoint,mesh->xpmax,MMG5_GAP,MMG5_xPoint, - "larger xpoint table", - mesh->xp--; - fprintf(stderr," Exit program.\n");return 0;); - } - p0->xp = mesh->xp; - pxp = &mesh->xpoint[p0->xp]; - memcpy(p0->n,t,3*sizeof(double)); - } - else { - p0->tag |= MG_REQ; - p0->tag &= ~MG_NOSURF; - } - } - } - - /*for (k=1; k<=mesh->np; k++) { - p0 = &mesh->point[k]; - if ( !(p0->tag & MG_NOM) || p0->xp ) continue; - p0->tag |= MG_REQ; - p0->tag &= ~MG_NOSURF; - }*/ - - return 1; +static inline +uint16_t MMG5_skip_nonOldParBdy ( uint16_t tag ) { + return !(tag & MG_OLDPARBDY); } /** @@ -1488,73 +1589,9 @@ int PMMG_update_nmgeom(PMMG_pParMesh parmesh,MMG5_pMesh mesh){ */ static inline int PMMG_update_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { - MMG5_pTetra ptet; - MMG5_pPoint ppt; - MMG5_Hash hash; - int k,i; - int nc, nre, ng, nrp,ier; - - /* Second: seek the non-required non-manifold points and try to analyse - * whether they are corner or required. */ - - /* Hash table used by boulernm to store the special edges passing through - * a given point */ - if ( ! 
MMG5_hashNew(mesh,&hash,mesh->np,(int)(3.71*mesh->np)) ) return 0; - - nc = nre = 0; - ++mesh->base; - for (k=1; k<=mesh->ne; ++k) { - ptet = &mesh->tetra[k]; - if ( !MG_EOK(ptet) ) continue; - - for ( i=0; i<4; ++i ) { - ppt = &mesh->point[ptet->v[i]]; - - /* Skip non-previously-parallel points */ - if ( !(ppt->tag & MG_OLDPARBDY) ) continue; - if ( (!MG_VOK(ppt)) || (ppt->flag==mesh->base) ) continue; - ppt->flag = mesh->base; + return MMG5_setVertexNmTag(mesh,MMG5_skip_nonOldParBdy); - if ( (!MG_EDG(ppt->tag)) || MG_SIN(ppt->tag) ) continue; - - ier = MMG5_boulernm(mesh,&hash, k, i, &ng, &nrp); - if ( ier < 0 ) return 0; - else if ( !ier ) continue; - - if ( (ng+nrp) > 2 ) { - ppt->tag |= MG_CRN + MG_REQ; - ppt->tag &= ~MG_NOSURF; - nre++; - nc++; - } - else if ( (ng == 1) && (nrp == 1) ) { - ppt->tag |= MG_REQ; - ppt->tag &= ~MG_NOSURF; - nre++; - } - else if ( ng == 1 && !nrp ){ - ppt->tag |= MG_CRN + MG_REQ; - ppt->tag &= ~MG_NOSURF; - nre++; - nc++; - } - else if ( ng == 1 && !nrp ){ - ppt->tag |= MG_CRN + MG_REQ; - ppt->tag &= ~MG_NOSURF; - nre++; - nc++; - } - } - } - - /* Free the edge hash table */ - MMG5_DEL_MEM(mesh,hash.item); - - if ( mesh->info.ddebug || abs(mesh->info.imprim) > 3 ) - fprintf(stdout," %d corner and %d required vertices added\n",nc,nre); - - return 1; } /** @@ -1595,8 +1632,12 @@ int PMMG_update_analys(PMMG_pParMesh parmesh) { return 0; } + /* Update tag MG_REF of points if edge tag is MG_REF in xtetra */ + PMMG_updateTagRef_node(parmesh,mesh); + /* First: seek edges at the interface of two distinct domains and mark it as * required */ +#warning Luca: add a function like MMG5_setEdgeNmTag(mesh,hash) } @@ -1617,7 +1658,7 @@ int PMMG_loopr(PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { MMG5_pPoint ppt[2]; double *doublevalues; int *intvalues,ip[2],k,j,idx,ns0,edg,d; - int16_t tag; + uint16_t tag; int8_t isEdg; /* Get node communicator */ @@ -1625,7 +1666,7 @@ int PMMG_loopr(PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { doublevalues = parmesh->int_node_comm->doublevalues; /* Loop on near-parallel edges */ - for( k = 1; k <= var->hash->max; k++ ) { + for( k = 0; k <= var->hash->max; k++ ) { ph = &var->hash->geom[k]; if( !ph->a ) continue; @@ -1672,15 +1713,22 @@ int PMMG_loopr(PMMG_pParMesh parmesh,PMMG_hn_loopvar *var ) { * \param parmesh pointer toward the parmesh structure * \param mesh pointer toward the mesh structure * \param var pointer toward the structure for local loop variables + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * Check for singularities. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. + * * \remark Modeled after the MMG5_singul function. 
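+ *
+ * (In this parallel version, the number of ridge and reference edges seen at
+ * each parallel point is first accumulated over all partitions through the
+ * node communicator; the corner/required decision is then taken from these
+ * global counts, mirroring the serial criteria of MMG5_singul.)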
*/ -int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { +int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var,MPI_Comm comm) { PMMG_pGrp grp; PMMG_pInt_comm int_node_comm; PMMG_pExt_comm ext_node_comm; - MPI_Comm comm; MPI_Status status; MMG5_pPoint ppt; double ux,uy,uz,vx,vy,vz,dd; @@ -1690,7 +1738,6 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { int *intvalues,*itosend,*itorecv,*iproc2comm; double *doublevalues,*rtosend,*rtorecv; - comm = parmesh->comm; assert( parmesh->ngrp == 1 ); grp = &parmesh->listgrp[0]; int_node_comm = parmesh->int_node_comm; @@ -1718,7 +1765,9 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { /* Array to reorder communicators */ - PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm",return 0); + PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) + ); for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) iproc2comm[iproc] = PMMG_UNSET; @@ -1787,7 +1836,9 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { MPI_CHECK( MPI_Sendrecv(itosend,2*nitem,MPI_INT,color,MPI_ANALYS_TAG, itorecv,2*nitem,MPI_INT,color,MPI_ANALYS_TAG, - comm,&status),return 0 ); + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + ); } /* Get tags and reset buffers and communicator */ @@ -1822,8 +1873,9 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { /** Local singularity analysis */ - if( !PMMG_loopr( parmesh, var ) ) - return 0; + if( !PMMG_loopr( parmesh, var ) ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /** Exchange values on the interfaces among procs */ for ( k = 0; k < parmesh->next_node_comm; ++k ) { @@ -1850,12 +1902,16 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { MPI_CHECK( MPI_Sendrecv(itosend,2*nitem,MPI_INT,color,MPI_ANALYS_TAG, itorecv,2*nitem,MPI_INT,color,MPI_ANALYS_TAG, - comm,&status),return 0 ); + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + ); MPI_CHECK( MPI_Sendrecv(rtosend,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+1, rtorecv,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+1, - comm,&status),return 0 ); + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + ); } /** First pass: Sum nb. of singularities, Store received edge vectors in @@ -1981,10 +2037,17 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { * \param parmesh pointer to the parmesh structure * \param mesh pointer to the mesh structure * \param pHash pointer to the parallel edges hash table + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * \return 1 if success, 0 if failure. * - * Check dihedral angle to detect ridges on parallel edges. + * Check features along parallel edges: + * - check for non-manifold edges + * - check for reference edges (the edge shares 2 surfaces with different refs) + * - if needed, check dihedral angle to detect ridges on parallel edges. 
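+ *
+ * Schematically, once the triangle counts and normals have been gathered, a
+ * parallel edge with communicator index \a idx is classified as follows
+ * (simplified sketch of the passes below, using the variables of this
+ * function):
+ * \code
+ * nt1 = intvalues[2*idx];                    // nb of true boundary trias seen
+ * if ( nt1 == 1 || nt1 > 2 ) {               // open surface side or non-manifold
+ *   ptr->tag[i] |= MG_GEO + MG_NOM + MG_REF;
+ * }
+ * else {                                     // exactly two boundary triangles
+ *   if ( intvalues[2*idx+1] == PMMG_UNSET )  // the two surfaces have different refs
+ *     ptr->tag[i] |= MG_REF;
+ *   dhd = n1[0]*n2[0] + n1[1]*n2[1] + n1[2]*n2[2];
+ *   if ( dhd <= mesh->info.dhd )             // sharp dihedral angle: ridge
+ *     ptr->tag[i] |= MG_GEO;
+ * }
+ * \endcode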
* * The integer communicator is dimensioned to store the number of triangles seen * by a parallel edge on each partition, and a "flag" to check the references of @@ -1997,8 +2060,14 @@ int PMMG_singul(PMMG_pParMesh parmesh,MMG5_pMesh mesh,PMMG_hn_loopvar *var) { * Boundary triangles shared between two processes have been tagged as * MG_PARBDYBDY only on the process who has them with the right orientation * (by PMMG_parbdyTria), so they will be processed only once. + * + * \todo Do not add MG_GEO tag to MG_NOM edges and fix tag non-consistencies in this case. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. + * */ -int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { +int PMMG_setfeatures(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash,MPI_Comm comm ) { PMMG_pGrp grp; PMMG_pInt_comm int_edge_comm; PMMG_pExt_comm ext_edge_comm; @@ -2010,15 +2079,13 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { int k,ne,nr,nm,j; int i,i1,i2; int idx,edg,d; - int16_t tag; - MPI_Comm comm; + uint16_t tag; MPI_Status status; assert( parmesh->ngrp == 1 ); grp = &parmesh->listgrp[0]; assert( mesh == grp->mesh ); - comm = parmesh->comm; int_edge_comm = parmesh->int_edge_comm; /* Allocated edge intvalues to tag non-manifold and reference edges */ @@ -2030,7 +2097,7 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { memset(doublevalues,0x00,6*parmesh->int_edge_comm->nitem*sizeof(double)); - /** Loop on boundary triangles and store a MG_REQ tag in the edge internal + /** Step 1: Loop on boundary triangles and store a MG_REQ tag in the edge internal * communicator where the triangle touches a parallel edge. * (Loop on all triangles, as the tags on corresponding edges are not * required to match yet) */ @@ -2043,13 +2110,47 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { i1 = MMG5_inxt2[i]; i2 = MMG5_inxt2[i1]; + /* At this stage, the `tag` field store only the MG_PARBDY tag if pHash + * has been created by the `PMMG_hashPar_fromFaceComm` function. */ if ( !MMG5_hGet( pHash, ptr->v[i1], ptr->v[i2], &edg, &tag ) ) continue; idx = edg-1; - /* Store edge tag in the internal communicator */ + /* Mark required in the internal communicator (note that we need intvalue + * to be a signed integer array while Mmg tags are unsigned ints so we + * must be careful to not directly store the edge tag).*/ if( (ptr->tag[i] & MG_REQ) && !(ptr->tag[i] & MG_NOSURF) ) { intvalues[idx] |= MG_REQ; } + + /* Store other relevant edge tag that we want to synchronize through the + * parallel interfaces: + - MG_NOM, MG_OPNBDY and MG_GEO will be analyzed after (this analysis + should be consistent through the procs); + - MG_REQ and MG_NOSURF tags are already dealed too; + - OLDPARBDY, PARBDY and OVERLAP are related to ParMmg and + should be consistent. + + It left us with: + - the MG_REF tag that may be not consistent (if, on a + partition, the edge belongs to only PARBDY faces (non PARBDYBDY), + it is + not marked as REF, while it may be marked as ref if it belongs to a + true boundary and is provided as a user ref edge between triangles + with same references on another partition.) + - the MG_PARBDYBDY tag that may be inconsistent between trias due to + the call of PMMG_parbdyTria: for a physical boundary triangle + at partition interface. 
On the domain with lower ref, the + PARBDYPARBDY tag is removed from edges (to ensure the tria + orientation during analysis, the tria will be looked from one rank + only, the rank that owned the domain with higer ref). If the edge + also belongs to another boundary triangle, it will still have + the PARBDYBDY tag on this triangle. + We don't want to synchronize this specific tag as it is used for + the parallel analysis (for example in hashNorver_loop to loop + on well oriented true boundary faces). + */ + tag = ptr->tag[i] & MG_REF; + intvalues[idx] |= tag; } } @@ -2071,7 +2172,8 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, itorecv,nitem,MPI_INT,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); } /* Update edge tags in the internal communicator */ @@ -2079,7 +2181,6 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { ext_edge_comm = &parmesh->ext_edge_comm[k]; itorecv = ext_edge_comm->itorecv; - rtorecv = ext_edge_comm->rtorecv; for ( i=0; initem; ++i ) { idx = ext_edge_comm->int_comm_index[i]; @@ -2088,6 +2189,11 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { intvalues[idx] |= MG_REQ; intvalues[idx] &= ~MG_NOSURF; } + + if ( itorecv[i] & MG_REF ) { + /* Sync ref tags */ + intvalues[idx] |= MG_REF; + } } } @@ -2111,6 +2217,10 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { ptr->tag[i] |= MG_REQ; ptr->tag[i] &= ~MG_NOSURF; } + + if ( intvalues[idx] & MG_REF ) { + ptr->tag[i] |= MG_REF; + } } } @@ -2124,7 +2234,7 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { } - /** Loop on true boundary triangles and store the normal in the edge internal + /** Step 2: Loop on true boundary triangles and store the normal in the edge internal * communicator where the triangle touches a parallel edge. 
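+ * (Each parallel edge owns a slot of two values in the integer communicator
+ * and six doubles in the double communicator: intvalues[2*idx] counts the true
+ * boundary triangles seen along the edge, while doublevalues[6*idx+0..2] and
+ * doublevalues[6*idx+3..5] store the normals of the at most two such
+ * triangles.)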
*/ for( k = 1; k <= mesh->nt; k++ ) { ptr = &mesh->tria[k]; @@ -2141,14 +2251,19 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { /* Get parallel edge touched by a boundary face and store normal vectors */ for (i=0; i<3; i++) { - /* Skip non-manifold edges */ - if ( (ptr->tag[i] & MG_NOM) ) continue; i1 = MMG5_inxt2[i]; i2 = MMG5_inxt2[i1]; if ( !MMG5_hGet( pHash, ptr->v[i1], ptr->v[i2], &edg, &tag ) ) continue; idx = edg-1; + if ( (ptr->tag[i] & MG_NOM) ) { + /* We only need to store that the edge is non-manifold to share the + * information with the other MPI processes */ + intvalues[2*idx] = 3; + continue; + } + /* Count how many times the edge is seen locally */ intvalues[2*idx]++; /* Do not store anything else for non-manifold */ @@ -2171,7 +2286,7 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { } } - /** Exchange values on the interfaces among procs */ + /** Step 3: Exchange values on the interfaces among procs */ for ( k = 0; k < parmesh->next_edge_comm; ++k ) { ext_edge_comm = &parmesh->ext_edge_comm[k]; nitem = ext_edge_comm->nitem; @@ -2196,15 +2311,15 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { MPI_CHECK( MPI_Sendrecv(itosend,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, itorecv,2*nitem,MPI_INT,color,MPI_ANALYS_TAG+2, - comm,&status),return 0 ); + comm,&status), MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); MPI_CHECK( MPI_Sendrecv(rtosend,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+3, rtorecv,6*nitem,MPI_DOUBLE,color,MPI_ANALYS_TAG+3, - comm,&status),return 0 ); + comm,&status), MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } - /** First pass: Increment the number of seen triangles, check for reference + /** Step 4 - First pass: Increment the number of seen triangles, check for reference * edges and mark them with PMMG_UNSET, and store new triangles normals if * there is room for them. */ for ( k = 0; k < parmesh->next_edge_comm; ++k ) { @@ -2247,33 +2362,35 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { } } - /** Second pass: Check dihedral angle and mark geometric edge with - * 2*PMMG_UNSET (3x if it is already a reference edge) */ - for ( k = 0; k < parmesh->next_edge_comm; ++k ) { - ext_edge_comm = &parmesh->ext_edge_comm[k]; + if ( mesh->info.dhd > MMG5_ANGLIM ) { + /** Step 4 - Second pass: Check dihedral angle and mark geometric edge with + * 2*PMMG_UNSET (3x if it is already a reference edge) */ + for ( k = 0; k < parmesh->next_edge_comm; ++k ) { + ext_edge_comm = &parmesh->ext_edge_comm[k]; - for ( i=0; initem; ++i ) { - idx = ext_edge_comm->int_comm_index[i]; + for ( i=0; initem; ++i ) { + idx = ext_edge_comm->int_comm_index[i]; - nt1 = intvalues[2*idx]; + nt1 = intvalues[2*idx]; - if( nt1 == 2 ) { - for( d = 0; d < 3; d++ ) { - n1[d] = doublevalues[6*idx+d]; - n2[d] = doublevalues[6*idx+3+d]; - } - dhd = n1[0]*n2[0] + n1[1]*n2[1] + n1[2]*n2[2]; - if ( dhd <= mesh->info.dhd ) { - if( intvalues[2*idx+1] != PMMG_UNSET ) - intvalues[2*idx+1] = 2*PMMG_UNSET; - else - intvalues[2*idx+1] = 3*PMMG_UNSET; + if( nt1 == 2 ) { + for( d = 0; d < 3; d++ ) { + n1[d] = doublevalues[6*idx+d]; + n2[d] = doublevalues[6*idx+3+d]; + } + dhd = n1[0]*n2[0] + n1[1]*n2[1] + n1[2]*n2[2]; + if ( dhd <= mesh->info.dhd ) { + if( intvalues[2*idx+1] != PMMG_UNSET ) + intvalues[2*idx+1] = 2*PMMG_UNSET; + else + intvalues[2*idx+1] = 3*PMMG_UNSET; + } } } } } - /** Third pass: Loop on triangles to tag edges and points. + /** Step 4 - Third pass: Loop on triangles to tag edges and points. 
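+ * (Recall the encoding filled by the previous passes: intvalues[2*idx] holds
+ * the number of true boundary triangles seen along the parallel edge, while
+ * intvalues[2*idx+1] is PMMG_UNSET for a reference edge, 2*PMMG_UNSET for a
+ * ridge detected from the dihedral angle, and 3*PMMG_UNSET for both.)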
* Now we loop on all triangles because there could be parallel boundary * edges not touched by triangles on the local process, but we want to add * tags on them. */ @@ -2284,52 +2401,82 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { /* Get parallel edge touched by a MG_BDY face and store normal vectors */ for (i=0; i<3; i++) { - /* Skip non-manifold edges */ - if ( (ptr->tag[i] & MG_NOM) ) continue; i1 = MMG5_inxt2[i]; i2 = MMG5_inxt2[i1]; - if ( !MMG5_hGet( pHash, ptr->v[i1], ptr->v[i2], &edg, &tag ) ) continue; + + MMG5_int ip1 = ptr->v[i1]; + MMG5_int ip2 = ptr->v[i2]; + + if ( !MMG5_hGet( pHash, ip1, ip2, &edg, &tag ) ) continue; idx = edg-1; + + /* Skip non-manifold edges */ +#ifndef NDEBUG + if ( (ptr->tag[i] & MG_NOM) ) { + assert ( intvalues[2*idx] != 2 ); + } +#endif + if( intvalues[2*idx] == 1 ) { /* no adjacent */ - ptr->tag[i] |= MG_GEO + MG_NOM; - i1 = MMG5_inxt2[i]; - i2 = MMG5_inxt2[i1]; - mesh->point[ptr->v[i1]].tag |= MG_GEO + MG_NOM; - mesh->point[ptr->v[i2]].tag |= MG_GEO + MG_NOM; +#warning remove MG_GEO for consistency with Mmg ? + /* MG_REF info is not analyzed in parallel for non-manifold edges (only + serially by Mmy). As we need to ensure the tag consistency across + the processes and for sake of simplicity, we simply mark all the + MG_NOM edges as MG_REF */ + ptr->tag[i] |= MG_GEO + MG_NOM + MG_REF; + mesh->point[ip1].tag |= MG_GEO + MG_NOM + MG_REF; + mesh->point[ip2].tag |= MG_GEO + MG_NOM + MG_REF; nr++; } else { if( (intvalues[2*idx+1] == PMMG_UNSET) || (intvalues[2*idx+1] == 3*PMMG_UNSET) ) { /* reference edge */ ptr->tag[i] |= MG_REF; - i1 = MMG5_inxt2[i]; - i2 = MMG5_inxt2[i1]; - mesh->point[ptr->v[i1]].tag |= MG_REF; - mesh->point[ptr->v[i2]].tag |= MG_REF; + mesh->point[ip1].tag |= MG_REF; + mesh->point[ip2].tag |= MG_REF; ne++; } if( (intvalues[2*idx+1] == 2*PMMG_UNSET) || (intvalues[2*idx+1] == 3*PMMG_UNSET) ) { /* geometric edge */ ptr->tag[i] |= MG_GEO; - i1 = MMG5_inxt2[i]; - i2 = MMG5_inxt2[i1]; - mesh->point[ptr->v[i1]].tag |= MG_GEO; - mesh->point[ptr->v[i2]].tag |= MG_GEO; + mesh->point[ip1].tag |= MG_GEO; + mesh->point[ip2].tag |= MG_GEO; nr++; } if( intvalues[2*idx] > 2 ) { /* non-manifold edge */ - ptr->tag[i] |= MG_GEO + MG_NOM; - i1 = MMG5_inxt2[i]; - i2 = MMG5_inxt2[i1]; - mesh->point[ptr->v[i1]].tag |= MG_GEO + MG_NOM; - mesh->point[ptr->v[i2]].tag |= MG_GEO + MG_NOM; +#warning remove MG_GEO for consistency with Mmg ? + /* MG_REF info is not analyzed in parallel for non-manifold edges (only + serially by Mmy). As we need to ensure the tag consistency across + the processes and for sake of simplicity, we simply mark all the + MG_NOM edges as MG_REF */ + ptr->tag[i] |= MG_GEO + MG_NOM + MG_REF; + mesh->point[ip1].tag |= MG_GEO + MG_NOM + MG_REF; + mesh->point[ip2].tag |= MG_GEO + MG_NOM + MG_REF; nm++; } + + /* If a feature edge has been provided it is possible that the edge tag + * has not been transferred to the edge extremities (for example a + * MG_REF edge along boundary triangles of same references may be split + * by the level-set. In this case, the new point has tag 0 and its tag + * has not yet been updated). + */ + tag = mesh->point[ip1].tag; + mesh->point[ip1].tag |= ptr->tag[i]; + // Remove the MG_NOSURF tag if the vertex is really required. + if ( (tag & MG_REQ) && !(tag & MG_NOSURF) ) { + mesh->point[ip1].tag &= ~MG_NOSURF; + } + tag = mesh->point[ip2].tag; + mesh->point[ip2].tag |= ptr->tag[i]; + // Remove the MG_NOSURF tag if the vertex is really required. 
+ if ( (tag & MG_REQ) && !(tag & MG_NOSURF) ) { + mesh->point[ip2].tag &= ~MG_NOSURF; + } } } } - if ( abs(mesh->info.imprim) > 3 && nr > 0 ) fprintf(stdout," %d ridges, %d edges updated\n",nr,ne); @@ -2345,10 +2492,11 @@ int PMMG_setdhd(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *pHash ) { * * Check all boundary triangles. */ -int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { +int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_int *permtria) { + int ier; /**--- stage 1: data structures for surface */ - if ( abs(mesh->info.imprim) > 3 ) + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) fprintf(stdout,"\n ** SURFACE ANALYSIS\n"); /* create tetra adjacency */ @@ -2357,6 +2505,16 @@ int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { return 0; } + /* Update the xtetra data after the ls discretization */ + if ( mesh->info.iso && mesh->info.opnbdy ) { + ier = MMG3D_update_xtetra ( mesh ); + if ( !ier ) { + fprintf(stderr,"\n ## Problem when updating the xtetra data after ls discretization." + " Exit program.\n"); + return 0; + } + } + /* create prism adjacency */ if ( !MMG3D_hashPrism(mesh) ) { fprintf(stderr,"\n ## Prism hashing problem. Exit program.\n"); @@ -2369,7 +2527,7 @@ int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { } /* identify surface mesh */ - if ( !MMG5_chkBdryTria(mesh) ) { + if ( !PMMG_chkBdryTria(mesh,permtria) ) { fprintf(stderr,"\n ## Boundary problem. Exit program.\n"); return 0; } @@ -2569,133 +2727,248 @@ int PMMG_analys_comms_init( PMMG_pParMesh parmesh ) { /** * \param parmesh pointer toward the parmesh structure * \param mesh pointer toward the mesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * \remark Modeled after the MMG3D_analys function, it doesn't deallocate the * tria structure in order to be able to build communicators. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. */ -int PMMG_analys(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { +int PMMG_analys(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MPI_Comm comm) { MMG5_Hash hash; MMG5_HGeom hpar,hnear; PMMG_hn_loopvar var; + int ier = 1; + + /* Initialization to avoid memleaks when we try to deallocate memory */ + memset(&hpar,0x0,sizeof(MMG5_HGeom)); + memset(&hnear,0x0,sizeof(MMG5_HGeom)); /* Tag parallel triangles on material interfaces as boundary */ if( !PMMG_parbdyTria( parmesh ) ) { fprintf(stderr,"\n ## Unable to recognize parallel triangles on material interfaces. Exit program.\n"); - return 0; + ier = 0; } - /* Set surface triangles to required in nosurf mode or for parallel boundaries */ MMG3D_set_reqBoundaries(mesh); - /* create surface adjacency */ - if ( !MMG3D_hashTria(mesh,&hash) ) { + if ( ier && !MMG3D_hashTria(mesh,&hash) ) { MMG5_DEL_MEM(mesh,hash.item); fprintf(stderr,"\n ## Hashing problem (2). Exit program.\n"); - return 0; + ier = 0; } - /* build hash table for geometric edges */ - if ( !MMG5_hGeom(mesh) ) { + /* build hash table for geometric edges: gather tag infos from edges and + * triangles and store these infos in tria. Skip non PARBDYBDY // edges. */ + if ( ier && !MMG5_hGeom(mesh) ) { fprintf(stderr,"\n ## Hashing problem (0). 
Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); MMG5_DEL_MEM(mesh,mesh->htab.geom); - return 0; + ier = 0; } /**--- stage 2: surface analysis */ - if ( abs(mesh->info.imprim) > 5 || mesh->info.ddebug ) + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) fprintf(stdout," ** SETTING TOPOLOGY\n"); /* identify connexity */ - if ( !MMG5_setadj(mesh) ) { + if ( ier && !MMG5_setadj(mesh) ) { fprintf(stderr,"\n ## Topology problem. Exit program.\n"); + MMG5_DEL_MEM(mesh,mesh->htab.geom); MMG5_DEL_MEM(mesh,hash.item); - return 0; + ier = 0; } - /* Hash parallel edges */ - if( PMMG_hashPar_pmmg( parmesh,&hpar ) != PMMG_SUCCESS ) return 0; + /* Hash parallel edges from tetra and face communicator: store edges and the + MG_PARBDY tag (other edge tags are not stored).*/ + if( ier && (PMMG_hashPar_fromFaceComm( parmesh,&hpar ) != PMMG_SUCCESS) ) { + fprintf(stderr,"\n ## Impossible to compute the hash parallel edge." + " Exit program.\n"); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,hpar.geom); + ier = 0; + } /* Build edge communicator */ - if( !PMMG_build_edgeComm( parmesh,mesh,&hpar ) ) return 0; + if ( !ier ) { + /* Avoid deadlock in comms in build_edgeComm */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + if ( !PMMG_build_edgeComm( parmesh,mesh,&hpar,comm ) ) { + fprintf(stderr,"\n ## Impossible to build edge communicator." + " Exit program\n"); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,hpar.geom); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* Compute global node numbering and store it in ppt->tmp */ - if( !PMMG_Compute_verticesGloNum( parmesh ) ) return 0; + if( !PMMG_Compute_verticesGloNum( parmesh,comm ) ) { + fprintf(stderr,"\n ## Impossible to compute node global numbering." + " Exit program\n"); + + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,hpar.geom); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } /* Allocate communicator buffers */ - if( !PMMG_analys_comms_init( parmesh ) ) return 0; + if( !PMMG_analys_comms_init( parmesh ) ) { + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,hpar.geom); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; + } /* check for ridges: check dihedral angle using adjacent triangle normals */ - if ( mesh->info.dhd > MMG5_ANGLIM && !MMG5_setdhd(mesh) ) { + /* 1. Call \ref MMG5_setdhd to analyze edges at interfaces of 2 "true" + * boundary faces. Skip pure parallel faces */ + if ( ier && (mesh->info.dhd > MMG5_ANGLIM && !MMG5_setdhd(mesh)) ) { fprintf(stderr,"\n ## Geometry problem. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; + } + + /* 2. Call PMMG_setfeatures to analyze edges splitted by the parallel interface: + * 2.1. adds possibly missing MG_NOM tags (add MG_REF tags to MG_NOM edges); + * 2.2. computes dihedral angle and set MG_GEO (ridge) tag if needed; + * 2.3. adds MG_REF tag for edges separating surfaces with different refs; + * 2.4. transfer edges tags to edge vertices. 
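Step 2.4 mirrors, for parallel edges, the endpoint update already visible in the edge-tag hunk near the top of this file. A minimal sketch of that pattern as a factored helper; the name and factoring are illustrative only, and only the MMG5 point type and tag macros used in these hunks are assumed:

    /* Merge an edge tag into one of its endpoints without letting MG_NOSURF
     * leak onto a point the user really required (MG_REQ without MG_NOSURF).
     * Hypothetical helper, not ParMmg code. */
    static inline void inherit_edgetag_on_point( MMG5_pPoint ppt,
                                                 uint16_t edgetag ) {
      uint16_t old = ppt->tag;

      ppt->tag |= edgetag;

      if ( (old & MG_REQ) && !(old & MG_NOSURF) ) {
        ppt->tag &= ~MG_NOSURF;
      }
    }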
+ * + * Analysis uses the tria array so if it is not allocated, this part of the + * analysis will not be complete. + */ + + if ( !ier ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); } - if ( mesh->info.dhd > MMG5_ANGLIM && !PMMG_setdhd( parmesh,mesh,&hpar ) ) { + if ( !PMMG_setfeatures( parmesh,mesh,&hpar,comm ) ) { fprintf(stderr,"\n ## Geometry problem on parallel edges. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; } /* identify singularities on interior points */ - if ( !MMG5_singul(mesh) ) { + if ( ier && !MMG5_singul(mesh) ) { fprintf(stderr,"\n ## MMG5_singul problem. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; } - - if ( abs(mesh->info.imprim) > 3 || mesh->info.ddebug ) fprintf(stdout," ** DEFINING GEOMETRY\n"); /* define (and regularize) normals: create xpoints */ - if ( !MMG5_norver( mesh ) ) { + if ( ier && !MMG5_norver( mesh ) ) { fprintf(stderr,"\n ## Normal problem. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; } /* set bdry entities to tetra: create xtetra and set references */ - if ( !MMG5_bdrySet(mesh) ) { + if ( ier && !MMG5_bdrySet(mesh) ) { fprintf(stderr,"\n ## Boundary problem. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; + } + + /* Tag parallel faces on material interfaces as boundary (ie, add \ref + * MG_PARBDYBDY tag) and remove spurious boundary MG_PARBDYBDY tags coming + * from spurious internal triangles (between tetras of same references) along + * partition interfaces. */ + + if ( !ier ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); } - /* Tag parallel faces on material interfaces as boundary */ if( !PMMG_parbdySet( parmesh ) ) { fprintf(stderr,"\n ## Unable to recognize parallel faces on material interfaces. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); PMMG_analys_comms_free( parmesh ); - return 0; + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; } /* set non-manifold edges sharing non-intersecting multidomains as required */ if ( abs(mesh->info.imprim) > 5 || mesh->info.ddebug ) fprintf(stdout," ** UPDATING TOPOLOGY AT NON-MANIFOLD POINTS\n"); - if ( !MMG5_setNmTag(mesh,&hash) ) { + /* 1. set non-manifold edges sharing non-intersecting multidomains as required + 2. 
travel points lying along non-manifold edges and set tags depending on + the number of feature edges passing through the point */ + if ( ier && !MMG5_setNmTag(mesh,&hash) ) { fprintf(stderr,"\n ## Non-manifold topology problem. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); MMG5_DEL_MEM(mesh,mesh->xpoint); PMMG_analys_comms_free( parmesh ); - return 0; + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; } /* Hash table used to store edges touching a parallel point. * Assume that in the worst case each parallel faces has the three edges in * the table, plus two other internal edges. */ - if ( !MMG5_hNew(mesh,&hnear,3*parmesh->int_face_comm->nitem,5*parmesh->int_face_comm->nitem) ) - return 0; + if ( ier && !MMG5_hNew(mesh,&hnear,3*parmesh->int_face_comm->nitem,5*parmesh->int_face_comm->nitem) ) { + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_analys_comms_free( parmesh ); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + ier = 0; + } var.mesh = mesh; var.hash = &hnear; var.hpar = &hpar; @@ -2703,69 +2976,159 @@ int PMMG_analys(PMMG_pParMesh parmesh,MMG5_pMesh mesh) { /** 0) Loop on edges touching a parallel point and insert them in the * hash table. */ - if( !PMMG_hashNorver_loop( parmesh, &var, MG_CRN, &PMMG_hash_nearParEdges ) ) - return 0; - if( !PMMG_set_edge_owners( parmesh,&hpar ) ) return 0; + if ( !ier ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + if( !PMMG_hashNorver_loop( parmesh, &var, MG_CRN, &PMMG_hash_nearParEdges ) ) { + fprintf(stderr,"\n ## Unable to hash edges connected to parallel points. Exit program. \n"); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_analys_comms_free( parmesh ); + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + if( !PMMG_set_edge_owners( parmesh,&hpar,comm ) ) { + fprintf(stderr,"\n ## Unable to compute edge owners. Exit program. \n"); + MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_analys_comms_free( parmesh ); + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + +#ifndef NDEBUG + if (!PMMG_check_edge_owners(parmesh,&hpar,comm)) { + fprintf(stderr,"\n ## Parallel edge has no owner or too many owners. Exit program. \n"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } +#endif /* identify singularities on parallel points. * No need to call a *_setVertexNmTag function, as it already takes into * account non-manifold configurations. */ - if ( !PMMG_singul(parmesh,mesh,&var) ) { + if ( !PMMG_singul(parmesh,mesh,&var,comm) ) { fprintf(stderr,"\n ## PMMG_singul problem. 
Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->xpoint); PMMG_analys_comms_free( parmesh ); - return 0; + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); } - if( !PMMG_hashNorver( parmesh,mesh,&hnear,&hpar,&var ) ) { + if( !PMMG_hashNorver( parmesh,mesh,&hnear,&hpar,&var,comm ) ) { fprintf(stderr,"\n ## Normal problem on parallel points. Exit program.\n"); MMG5_DEL_MEM(mesh,hash.item); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->xpoint); PMMG_analys_comms_free( parmesh ); - return 0; + MMG5_DEL_MEM(mesh,mesh->xpoint); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); } /* Deallocate communicator buffers */ PMMG_analys_comms_free( parmesh ); /* check subdomains connected by a vertex and mark these vertex as corner and required */ +#warning Luca: check that parbdy are skipped MMG5_chkVertexConnectedDomains(mesh); - /* build hash table for geometric edges */ + /* build hash table for geometric edges: gather tag infos from edges and + * triangles and store these infos in tria. Skip non PARBDYBDY // edges. */ if ( !mesh->na && !MMG5_hGeom(mesh) ) { fprintf(stderr,"\n ## Hashing problem (0). Exit program.\n"); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->adjt); MMG5_DEL_MEM(mesh,mesh->xpoint); - MMG5_DEL_MEM(mesh,mesh->htab.geom); + + if ( mesh->nprism ) MMG5_DEL_MEM(mesh,mesh->adjapr); return 0; } - /* Update edges tags and references for xtetras */ + /* Update MG_REQ and MG_NOSURF edges tags as well as references for xtetras */ if ( !MMG5_bdryUpdate(mesh) ) { fprintf(stderr,"\n ## Boundary problem. 
Exit program.\n"); + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,mesh->adjt); MMG5_DEL_MEM(mesh,mesh->xpoint); + mesh->na = 0; + + if ( mesh->nprism ) MMG5_DEL_MEM(mesh,mesh->adjapr); return 0; } /* define geometry for non manifold points */ - if ( !MMG3D_nmgeom(mesh) ) return 0; + if ( !MMG3D_nmgeom(mesh) ) { + PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MMG5_DEL_MEM(mesh,hpar.geom); + MMG5_DEL_MEM(mesh,hnear.geom); + MMG5_DEL_MEM(mesh,mesh->htab.geom); + MMG5_DEL_MEM(mesh,mesh->adjt); + MMG5_DEL_MEM(mesh,mesh->xpoint); + mesh->na = 0; + + if ( mesh->nprism ) MMG5_DEL_MEM(mesh,mesh->adjapr); + return 0; + } #ifdef USE_POINTMAP /* Initialize source point with input index */ - int ip; + MMG5_int ip; for( ip = 1; ip <= mesh->np; ip++ ) mesh->point[ip].src = ip; #endif /* release memory */ PMMG_edge_comm_free( parmesh ); + PMMG_DEL_MEM(parmesh, parmesh->int_edge_comm,PMMG_Int_comm,"int edge comm"); + MMG5_DEL_MEM(mesh,hash.item); MMG5_DEL_MEM(mesh,hpar.geom); MMG5_DEL_MEM(mesh,hnear.geom); MMG5_DEL_MEM(mesh,mesh->htab.geom); MMG5_DEL_MEM(mesh,mesh->adjt); - MMG5_DEL_MEM(mesh,mesh->edge); mesh->na = 0; if ( mesh->nprism ) MMG5_DEL_MEM(mesh,mesh->adjapr); +#ifndef NDEBUG + MMG5_int i; + for ( i=0; ingrp; ++i ) { + + if ( !MMG5_chkmsh(parmesh->listgrp[i].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + return 1; } diff --git a/src/boulep_pmmg.c b/src/boulep_pmmg.c index 1922cc4d..7d896b0b 100644 --- a/src/boulep_pmmg.c +++ b/src/boulep_pmmg.c @@ -242,7 +242,7 @@ int PMMG_boulen(PMMG_pParMesh parmesh,MMG5_pMesh mesh,int start,int ip,int iface double dd,l0,l1; int base,nump,nr,nnm,k,piv,na,nb,adj,nvstart,fstart,aux,ip0,ip1; int *adja,color; - int16_t tag; + uint16_t tag; int8_t iopp,ipiv,indb,inda,i,isface; int8_t indedg[4][4] = { {-1,0,1,2}, {0,-1,3,4}, {1,3,-1,5}, {2,4,5,-1} }; diff --git a/src/chkcomm_pmmg.c b/src/chkcomm_pmmg.c index deba9da8..c0076918 100644 --- a/src/chkcomm_pmmg.c +++ b/src/chkcomm_pmmg.c @@ -229,14 +229,18 @@ int PMMG_check_intNodeComm( PMMG_pParMesh parmesh ) double dd,bb_min[3],bb_max[3],delta,dist[3],dist_norm; int ier; int ngrp = parmesh->ngrp; - int nitem = parmesh->int_node_comm->nitem; + int nitem; int commIdx2 = 0; int commIdx1 = 0; int commSizeLoc = 0; int commIdx,k,j; + if ( !parmesh->int_node_comm ) return 1; + if ( !parmesh->int_node_comm->nitem ) return 1; + nitem = parmesh->int_node_comm->nitem; + ier = 0; coor_list = NULL; @@ -422,6 +426,8 @@ int PMMG_check_intFaceComm( PMMG_pParMesh parmesh ) { int *intvalues; int k,i,j,l,iel,ifac,iploc,ip,idx,idx_ori,nitem,ier; + if ( !parmesh->int_face_comm ) return 1; + if ( !parmesh->int_face_comm->nitem ) return 1; ier = 0; @@ -595,6 +601,10 @@ int PMMG_check_intFaceComm( PMMG_pParMesh parmesh ) { /** * \param parmesh pointer to current parmesh stucture + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. 
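The new comm argument of the check routines is meant to let callers pick the right communicator for the collective calls. A call-site sketch, assuming read_comm is a variable holding the communicator the input mesh was provided on (it is not claimed to be a ParMmg field name) and that the preprocessing stage is detected by the caller:

    /* Illustrative wrapper, not ParMmg code: run the external edge
     * communicator check on the inputs' communicator before the first
     * balancing, on the computation communicator afterwards. */
    static int check_edge_comm_at( PMMG_pParMesh parmesh, int preprocessing,
                                   MPI_Comm read_comm ) {
      MPI_Comm comm = preprocessing ? read_comm : parmesh->comm;

      return PMMG_check_extEdgeComm( parmesh, comm );
    }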
* * \return 0 (on all procs) if fail, 1 otherwise * @@ -602,7 +612,7 @@ int PMMG_check_intFaceComm( PMMG_pParMesh parmesh ) { * the coordinates of the listed points. * */ -int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) +int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh, MPI_Comm comm ) { PMMG_pExt_comm ext_edge_comm; PMMG_pGrp grp; @@ -635,7 +645,7 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) PMMG_CALLOC(parmesh,parmesh->int_edge_comm->doublevalues,6*nitem,double, "edge communicator",ier = 0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) return 0; doublevalues = parmesh->int_edge_comm->doublevalues; @@ -661,20 +671,23 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) for ( j=0; j<3; ++j ) doublevalues[6*idx+j] = dd * (ppt0->c[j] - bb_min_all[j]); for ( j=0; j<3; ++j ) doublevalues[6*idx+3+j] = dd * (ppt1->c[j] - bb_min_all[j]); } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) return 0; /** Step 3: Send the values that need to be communicate to the suitable * processor */ PMMG_MALLOC(parmesh,request,2*parmesh->next_edge_comm,MPI_Request, "mpi request array",ier=0); + for ( j=0; j<2*parmesh->next_edge_comm; ++j ) { + request[j] = MPI_REQUEST_NULL; + } PMMG_MALLOC(parmesh,status,2*parmesh->next_edge_comm,MPI_Status, "mpi status array",ier=0); PMMG_CALLOC(parmesh,r2send_size,parmesh->next_edge_comm,int, "size of the r2send array",ier=0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; ireq= 0; @@ -697,18 +710,16 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) rtosend[6*i+j] = doublevalues[6*idx+j]; } - request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(&ext_edge_comm->nitem,1,MPI_INT,color, MPI_CHKCOMM_EDGE_TAG, - parmesh->comm,&request[ireq++]),ier=0 ); + comm,&request[ireq++]),ier=0 ); - request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(rtosend,6*ext_edge_comm->nitem,MPI_DOUBLE,color, MPI_CHKCOMM_EDGE_TAG+1, - parmesh->comm,&request[ireq++]),ier=0 ); - } + comm,&request[ireq++]),ier=0 ); + } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; /** Step 4: Recv the values from the senders and check: @@ -725,7 +736,7 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) color = ext_edge_comm->color_out; MPI_CHECK( MPI_Recv(&nitem_color_out,1,MPI_INT,color, - MPI_CHKCOMM_EDGE_TAG,parmesh->comm, + MPI_CHKCOMM_EDGE_TAG,comm, &status[0]), ier=0 ); /* Check the size of the communicators */ @@ -743,7 +754,7 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) } rtorecv = ext_edge_comm->rtorecv; MPI_CHECK( MPI_Recv(rtorecv,6*nitem_color_out,MPI_DOUBLE,color, - MPI_CHKCOMM_EDGE_TAG+1,parmesh->comm, + MPI_CHKCOMM_EDGE_TAG+1,comm, &status[0]), ier=0 ); /* Check the values of the edge in the communicator */ for ( i=0; initem; ++i ) { @@ -772,11 +783,11 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) } } } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 
); if ( !ieresult ) goto end; MPI_CHECK( MPI_Waitall(2*parmesh->next_edge_comm,request,status), ier=0 ); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); end: for ( k=0; knext_edge_comm; ++k ) { @@ -805,6 +816,10 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) /** * \param parmesh pointer to current parmesh stucture + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * \return 0 (on all procs) if fail, 1 otherwise * @@ -812,7 +827,7 @@ int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ) * the coordinates of the listed points. * */ -int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) +int PMMG_check_extNodeComm( PMMG_pParMesh parmesh, MPI_Comm comm ) { PMMG_pExt_comm ext_node_comm; PMMG_pGrp grp; @@ -830,8 +845,10 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) request = NULL; status = NULL; - MPI_CHECK ( MPI_Allreduce ( &parmesh->ngrp,&ngrp_all,1,MPI_INT,MPI_SUM,parmesh->comm), return 0); - + if (parmesh->iter == PMMG_UNSET) return 1; + + MPI_CHECK ( MPI_Allreduce ( &parmesh->ngrp,&ngrp_all,1,MPI_INT,MPI_SUM,comm), return 0); + /** Step 1: Find the internal communicator bounding box */ if ( ngrp_all == 1 ) { ier = 1; @@ -843,8 +860,8 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) else { ier = PMMG_find_intNodeCommBoundingBox(parmesh,bb_min,bb_max,&delta); - MPI_CHECK ( MPI_Allreduce ( &delta,&delta_all,1,MPI_DOUBLE,MPI_MAX,parmesh->comm), return 0); - MPI_CHECK ( MPI_Allreduce ( bb_min,bb_min_all,3,MPI_DOUBLE,MPI_MIN,parmesh->comm), return 0); + MPI_CHECK ( MPI_Allreduce ( &delta,&delta_all,1,MPI_DOUBLE,MPI_MAX,comm), return 0); + MPI_CHECK ( MPI_Allreduce ( bb_min,bb_min_all,3,MPI_DOUBLE,MPI_MIN,comm), return 0); if ( delta_all < MMG5_EPSD ) { if ( parmesh->myrank == parmesh->info.root ) @@ -859,7 +876,7 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) PMMG_CALLOC(parmesh,parmesh->int_node_comm->doublevalues,3*nitem,double, "node communicator",ier = 0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) return 0; doublevalues = parmesh->int_node_comm->doublevalues; @@ -885,13 +902,16 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) * processor */ PMMG_MALLOC(parmesh,request,2*parmesh->next_node_comm,MPI_Request, "mpi request array",ier=0); + for ( j=0; j<2*parmesh->next_node_comm; ++j ) { + request[j] = MPI_REQUEST_NULL; + } PMMG_MALLOC(parmesh,status,2*parmesh->next_node_comm,MPI_Status, "mpi status array",ier=0); PMMG_CALLOC(parmesh,r2send_size,parmesh->next_node_comm,int, "size of the r2send array",ier=0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; ireq= 0; @@ -914,18 +934,16 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) rtosend[3*i+j] = doublevalues[3*idx+j]; } - request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(&ext_node_comm->nitem,1,MPI_INT,color, MPI_CHKCOMM_NODE_TAG, - parmesh->comm,&request[ireq++]),ier=0 ); + comm,&request[ireq++]),ier=0 ); - 
request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(rtosend,3*ext_node_comm->nitem,MPI_DOUBLE,color, MPI_CHKCOMM_NODE_TAG+1, - parmesh->comm,&request[ireq++]),ier=0 ); + comm,&request[ireq++]),ier=0 ); } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; /** Step 4: Recv the values from the senders and check: @@ -942,7 +960,7 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) color = ext_node_comm->color_out; MPI_CHECK( MPI_Recv(&nitem_color_out,1,MPI_INT,color, - MPI_CHKCOMM_NODE_TAG,parmesh->comm, + MPI_CHKCOMM_NODE_TAG,comm, &status[0]), ier=0 ); /* Check the size of the communicators */ @@ -960,7 +978,7 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) } rtorecv = ext_node_comm->rtorecv; MPI_CHECK( MPI_Recv(rtorecv,3*nitem_color_out,MPI_DOUBLE,color, - MPI_CHKCOMM_NODE_TAG+1,parmesh->comm, + MPI_CHKCOMM_NODE_TAG+1,comm, &status[0]), ier=0 ); /* Check the values of the node in the communicator */ for ( i=0; initem; ++i ) { @@ -983,11 +1001,11 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) } } } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; MPI_CHECK( MPI_Waitall(2*parmesh->next_node_comm,request,status), ier=0 ); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); end: for ( k=0; knext_node_comm; ++k ) { @@ -1017,6 +1035,10 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) /** * \param parmesh pointer to current parmesh stucture + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * \return 0 if fail, 1 otherwise * @@ -1024,7 +1046,7 @@ int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ) * the coordinates of the face points. 
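The request-array changes in these check routines follow a standard MPI pattern worth spelling out: initialise every slot to MPI_REQUEST_NULL before posting anything, so that MPI_Waitall over the whole array stays valid even when some sends are skipped or an error path jumps straight to the wait. A standalone sketch (not ParMmg code):

    #include <mpi.h>
    #include <stdlib.h>

    /* Post at most n sends but always wait on n requests: unposted slots keep
     * the value MPI_REQUEST_NULL, which MPI_Waitall ignores by definition. */
    static int waitall_safe_pattern( MPI_Comm comm, int n, const int *do_send,
                                     const int *peers, int *payload ) {
      MPI_Request *req = malloc( n * sizeof(MPI_Request) );
      int i;

      if ( !req ) return 0;

      for ( i = 0; i < n; ++i ) req[i] = MPI_REQUEST_NULL;

      for ( i = 0; i < n; ++i ) {
        if ( !do_send[i] ) continue; /* this slot remains MPI_REQUEST_NULL */
        MPI_Isend( &payload[i], 1, MPI_INT, peers[i], 0, comm, &req[i] );
      }

      MPI_Waitall( n, req, MPI_STATUSES_IGNORE );
      free( req );
      return 1;
    }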
* */ -int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) +int PMMG_check_extFaceComm( PMMG_pParMesh parmesh, MPI_Comm comm ) { PMMG_pExt_comm ext_face_comm; PMMG_pGrp grp; @@ -1044,7 +1066,9 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) request = NULL; status = NULL; - MPI_CHECK ( MPI_Allreduce ( &parmesh->ngrp,&ngrp_all,1,MPI_INT,MPI_SUM,parmesh->comm), return 0); + if (parmesh->iter == PMMG_UNSET) return 1; + + MPI_CHECK ( MPI_Allreduce ( &parmesh->ngrp,&ngrp_all,1,MPI_INT,MPI_SUM,comm), return 0); /** Step 0: Find the internal communicator bounding box */ if ( ngrp_all == 1 ) { @@ -1057,8 +1081,8 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) else { ier = PMMG_find_intFaceCommBoundingBox(parmesh,bb_min,bb_max,&delta); - MPI_CHECK ( MPI_Allreduce ( &delta,&delta_all,1,MPI_DOUBLE,MPI_MAX,parmesh->comm), return 0); - MPI_CHECK ( MPI_Allreduce ( bb_min,bb_min_all,3,MPI_DOUBLE,MPI_MIN,parmesh->comm), return 0); + MPI_CHECK ( MPI_Allreduce ( &delta,&delta_all,1,MPI_DOUBLE,MPI_MAX,comm), return 0); + MPI_CHECK ( MPI_Allreduce ( bb_min,bb_min_all,3,MPI_DOUBLE,MPI_MIN,comm), return 0); if ( delta_all < MMG5_EPSD ) { if ( parmesh->myrank == parmesh->info.root ) @@ -1073,7 +1097,7 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) PMMG_CALLOC(parmesh,parmesh->int_face_comm->doublevalues,9*nitem,double, "face communicator",ier = 0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) return 0; doublevalues = parmesh->int_face_comm->doublevalues; @@ -1119,13 +1143,16 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) * processor */ PMMG_MALLOC(parmesh,request,2*parmesh->next_face_comm,MPI_Request, "mpi request array",ier=0); + for ( j=0; j<2*parmesh->next_face_comm; ++j ) { + request[j] = MPI_REQUEST_NULL; + } PMMG_MALLOC(parmesh,status,2*parmesh->next_face_comm,MPI_Status, "mpi status array",ier=0); PMMG_CALLOC(parmesh,r2send_size,parmesh->next_face_comm,int, "size of the r2send array",ier=0); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; ireq = 0; @@ -1149,17 +1176,15 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) rtosend[9*i+j] = doublevalues[9*idx+j]; } - request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(&ext_face_comm->nitem,1,MPI_INT,color, MPI_CHKCOMM_FACE_TAG, - parmesh->comm,&request[ireq++]),ier=0 ); + comm,&request[ireq++]),ier=0 ); - request[ireq] = MPI_REQUEST_NULL; MPI_CHECK( MPI_Isend(rtosend,9*ext_face_comm->nitem,MPI_DOUBLE,color, MPI_CHKCOMM_FACE_TAG+1, - parmesh->comm,&request[ireq++]),ier=0 ); + comm,&request[ireq++]),ier=0 ); } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; /** Step 3: Recv the values from the senders and check: @@ -1176,7 +1201,7 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) color = ext_face_comm->color_out; MPI_CHECK( MPI_Recv(&nitem_color_out,1,MPI_INT,color, - MPI_CHKCOMM_FACE_TAG,parmesh->comm, + MPI_CHKCOMM_FACE_TAG,comm, &status[0]), ier=0 ); /* Check the size of the communicators */ @@ -1195,7 +1220,7 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) rtorecv = ext_face_comm->rtorecv; MPI_CHECK( MPI_Recv(rtorecv,9*nitem_color_out,MPI_DOUBLE,color, - 
MPI_CHKCOMM_FACE_TAG+1,parmesh->comm, + MPI_CHKCOMM_FACE_TAG+1,comm, &status[0]), ier = 0 ); /* Check the values of the face in the communicator */ @@ -1243,11 +1268,11 @@ int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ) } } } - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); if ( !ieresult ) goto end; MPI_CHECK( MPI_Waitall(2*parmesh->next_face_comm,request,status), ier=0 ); - MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,parmesh->comm ),ieresult=0 ); + MPI_CHECK ( MPI_Allreduce( &ier,&ieresult,1,MPI_INT,MPI_MIN,comm ),ieresult=0 ); end: for ( k=0; knext_face_comm; ++k ) { diff --git a/src/communicators_pmmg.c b/src/communicators_pmmg.c index afa10065..25fa9fa2 100644 --- a/src/communicators_pmmg.c +++ b/src/communicators_pmmg.c @@ -42,8 +42,9 @@ */ void PMMG_parmesh_int_comm_free( PMMG_pParMesh parmesh,PMMG_pInt_comm comm ) { - if ( comm == NULL ) + if ( comm == NULL ) { return; + } if ( NULL != comm->intvalues ) { assert ( comm->nitem != 0 && "incorrect parameters in internal communicator" ); @@ -88,11 +89,11 @@ void PMMG_parmesh_ext_comm_free( PMMG_pParMesh parmesh,PMMG_pExt_comm listcomm, } if ( NULL != comm->rtosend ) { assert ( comm->nitem != 0 && "incorrect parameters in external communicator" ); - PMMG_DEL_MEM(parmesh,comm->rtosend,int,"ext comm rtosend array"); + PMMG_DEL_MEM(parmesh,comm->rtosend,double,"ext comm rtosend array"); } if ( NULL != comm->rtorecv ) { assert ( comm->nitem != 0 && "incorrect parameters in external communicator" ); - PMMG_DEL_MEM(parmesh,comm->rtorecv,int,"ext comm rtorecv array"); + PMMG_DEL_MEM(parmesh,comm->rtorecv,double,"ext comm rtorecv array"); } } } @@ -137,13 +138,16 @@ void PMMG_edge_comm_free( PMMG_pParMesh parmesh ) PMMG_DEL_MEM(parmesh, parmesh->ext_edge_comm,PMMG_Ext_comm,"ext edge comm"); parmesh->next_edge_comm = 0; - parmesh->int_edge_comm->nitem = 0; + + if ( parmesh->int_edge_comm ) { + parmesh->int_edge_comm->nitem = 0; + } } /** * \param parmesh pointer toward a parmesh structure * - * Deallocate the nodal communicatorsof the parmesh + * Deallocate the nodal communicators of the parmesh * */ void PMMG_node_comm_free( PMMG_pParMesh parmesh ) @@ -163,7 +167,9 @@ void PMMG_node_comm_free( PMMG_pParMesh parmesh ) PMMG_DEL_MEM(parmesh, parmesh->ext_node_comm,PMMG_Ext_comm,"ext node comm"); parmesh->next_node_comm = 0; - parmesh->int_node_comm->nitem = 0; + if ( parmesh->int_node_comm ) { + parmesh->int_node_comm->nitem = 0; + } } /** @@ -256,7 +262,7 @@ int PMMG_fillExtEdgeComm_fromFace( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HG PMMG_pExt_comm ext_edge_comm,MMG5_pTetra pt,int ifac,int iloc,int j,int color,int *item ) { MMG5_pEdge pa; int edg; - int16_t tag; + uint16_t tag; int8_t i1,i2; /* Take the edge opposite to vertex iloc+j on face ifac */ @@ -276,6 +282,10 @@ int PMMG_fillExtEdgeComm_fromFace( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HG /** * \param parmesh pointer toward a parmesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * \return 1 if success, 0 if fail. 
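The communicator-completion routine below iterates the proc-list exchange until no rank learns anything new. A schematic version of that fixed-point loop, with the Isend/Probe/Recv body abstracted as a callback assumed to return 1 when the local lists were extended:

    #include <mpi.h>
    #include <stdint.h>

    /* Repeat the exchange until every rank reports "no change", using a
     * logical-OR reduction as the global stopping test (same MPI_INT8_T /
     * MPI_LOR combination as in the hunks below). */
    static void complete_until_stable( MPI_Comm comm,
                                       int8_t (*exchange_once)(MPI_Comm) ) {
      int8_t loc_update, glob_update;

      do {
        loc_update = exchange_once( comm );

        MPI_Allreduce( &loc_update, &glob_update, 1, MPI_INT8_T, MPI_LOR, comm );
      } while ( glob_update );
    }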
* @@ -283,7 +293,7 @@ int PMMG_fillExtEdgeComm_fromFace( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HG * faces to detect all the processors to which each edge belongs. * */ -int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { +int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh, MPI_Comm comm ) { PMMG_pExt_comm ext_edge_comm,*comm_ptr; PMMG_pInt_comm int_edge_comm; PMMG_cellLnkdList **proclists,list; @@ -385,7 +395,7 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { * recieve the proc list of all the edges to/from the other processors. At the * end of this loop, each edge has the entire list of the proc to which it * belongs */ - alloc_size = parmesh->next_edge_comm; + alloc_size = parmesh->nprocs; PMMG_MALLOC(parmesh,request, alloc_size,MPI_Request,"mpi request array",goto end); PMMG_MALLOC(parmesh,status, alloc_size,MPI_Status,"mpi status array",goto end); PMMG_CALLOC(parmesh,i2send_size,alloc_size,int,"size of the i2send array",goto end); @@ -397,9 +407,12 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { glob_update = loc_update = 0; /** Send the list of procs to which belong each point of the communicator */ - for ( k=0; knext_edge_comm; ++k ) { + for ( k=0; knext_edge_comm; ++k ) { ext_edge_comm = &parmesh->ext_edge_comm[k]; /* Computation of the number of data to send to the other procs (we want @@ -430,7 +443,7 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { assert ( pos==nitem2comm ); MPI_CHECK( MPI_Isend(itosend,nitem2comm,MPI_INT,color, - MPI_COMMUNICATORS_EDGE_TAG,parmesh->comm, + MPI_COMMUNICATORS_EDGE_TAG,comm, &request[color]),goto end ); } @@ -442,7 +455,7 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { color = ext_edge_comm->color_out; - MPI_CHECK( MPI_Probe(color,MPI_COMMUNICATORS_EDGE_TAG,parmesh->comm, + MPI_CHECK( MPI_Probe(color,MPI_COMMUNICATORS_EDGE_TAG,comm, &status[0] ),goto end); MPI_CHECK( MPI_Get_count(&status[0],MPI_INT,&nitem2comm),goto end); @@ -456,7 +469,7 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { itorecv = ext_edge_comm->itorecv; MPI_CHECK( MPI_Recv(itorecv,nitem2comm,MPI_INT,color, - MPI_COMMUNICATORS_EDGE_TAG,parmesh->comm, + MPI_COMMUNICATORS_EDGE_TAG,comm, &status[0]), goto end ); pos = 0; @@ -478,9 +491,9 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { } } - MPI_CHECK( MPI_Waitall(parmesh->next_edge_comm,request,status), goto end ); + MPI_CHECK( MPI_Waitall(alloc_size,request,status), goto end ); MPI_CHECK( MPI_Allreduce(&loc_update,&glob_update,1,MPI_INT8_T,MPI_LOR, - parmesh->comm),goto end); + comm),goto end); } while ( glob_update ); @@ -631,11 +644,18 @@ int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh ) { /** * \param parmesh pointer to parmesh structure * \param mesh pointer to the mesh structure - * \param hpar hash table of parallel edges + * \param hpar hash table of parallel edges. Read only array. + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * Build edge communicator. 
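The receive side of the completion exchange above handles messages whose length is only known at runtime, via probe, count query, allocation, then receive. A minimal standalone sketch of that idiom (not ParMmg code):

    #include <mpi.h>
    #include <stdlib.h>

    /* Receive a variable-length int message from "src" without knowing its
     * size in advance. */
    static int recv_variable_length( MPI_Comm comm, int src, int tag,
                                     int **buf, int *count ) {
      MPI_Status st;

      if ( MPI_Probe( src, tag, comm, &st ) != MPI_SUCCESS ) return 0;
      if ( MPI_Get_count( &st, MPI_INT, count ) != MPI_SUCCESS ) return 0;

      *buf = malloc( (*count) * sizeof(int) );
      if ( !*buf ) return 0;

      return ( MPI_Recv( *buf, *count, MPI_INT, src, tag, comm,
                         MPI_STATUS_IGNORE ) == MPI_SUCCESS );
    }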
+ * + * \todo clean parallel error handling (without MPI_abort call and without deadlocks) + * */ -int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar ) { +int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar,MPI_Comm comm) { PMMG_pGrp grp; PMMG_pInt_comm int_face_comm,int_edge_comm; PMMG_pExt_comm ext_face_comm,ext_edge_comm; @@ -644,7 +664,7 @@ int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar MMG5_hgeom *ph; int *nitems_ext_comm,color,k,i,idx,ie,ifac,iloc,j,item; int edg; - int16_t tag; + uint16_t tag; int8_t ia,i1,i2; assert( parmesh->ngrp == 1 ); @@ -746,11 +766,13 @@ int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar } /** Complete the external edge communicator */ - if( !PMMG_build_completeExtEdgeComm( parmesh ) ) return 0; - + if( !PMMG_build_completeExtEdgeComm( parmesh,comm ) ) return 0; /* Reorder edge nodes */ - if( !PMMG_color_commNodes( parmesh ) ) return 0; + if( !PMMG_color_commNodes( parmesh,comm ) ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + MMG5_pPoint ppt0,ppt1; int swp; for( k = 1; k <= mesh->na; k++ ) { @@ -766,11 +788,12 @@ int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar } /** Check the external edge communicator */ - assert( PMMG_check_extEdgeComm( parmesh ) ); - + assert( PMMG_check_extEdgeComm( parmesh,comm ) ); /* Free */ - PMMG_DEL_MEM(parmesh,int_face_comm->intvalues,int,"int_face_comm"); + if ( int_face_comm ) { + PMMG_DEL_MEM(parmesh,int_face_comm->intvalues,int,"int_face_comm"); + } PMMG_DEL_MEM(parmesh,nitems_ext_comm,int,"nitem_int_face_comm"); return 1; @@ -849,29 +872,12 @@ void PMMG_tria2elmFace_coords( PMMG_pParMesh parmesh ) { /* Process tria stored in index1 */ for( i=0; initem_int_face_comm; i++ ) { kt = grp->face2int_face_comm_index1[i]; - ptt = &mesh->tria[kt]; + ptt = &mesh->tria[kt]; ie = ptt->cc/4; ifac = ptt->cc%4; /* Get triangle node with highest coordinates */ - iploc = 0; - ppt = &mesh->point[ptt->v[0]]; - cmax[0] = ppt->c[0]; - cmax[1] = ppt->c[1]; - cmax[2] = ppt->c[2]; - for( iloc=1; iloc<3; iloc++ ) { - ppt = &mesh->point[ptt->v[iloc]]; - for( idim=0; idim<3; idim++ ) { - if( ppt->c[idim] - cmax[idim] < -MMG5_EPSOK*20 ) break; - if( ppt->c[idim] - cmax[idim] > MMG5_EPSOK*20 ) { - cmax[0] = ppt->c[0]; - cmax[1] = ppt->c[1]; - cmax[2] = ppt->c[2]; - iploc = iloc; - break; - } - } - } + iploc = PMMG_tria_highestcoord(mesh,ptt->v); /* Store ie-ifac-iploc in index1 */ grp->face2int_face_comm_index1[i] = 12*ie+3*ifac+iploc; @@ -883,6 +889,42 @@ void PMMG_tria2elmFace_coords( PMMG_pParMesh parmesh ) { } } +/** + * \param mesh pointer toward the mesh structure + * \param ptt_v indices of a triangle vertices + * \return iploc (0, 1 or 2) local node index + * + * Get triangle node with highest coordinates + * + */ +int PMMG_tria_highestcoord( MMG5_pMesh mesh, MMG5_int *ptt_v) { + MMG5_pPoint ppt; + int idim,iloc,iploc; + double cmax[3]; + + /* Get triangle node with highest coordinates */ + iploc = 0; + ppt = &mesh->point[ptt_v[0]]; + cmax[0] = ppt->c[0]; + cmax[1] = ppt->c[1]; + cmax[2] = ppt->c[2]; + for( iloc=1; iloc<3; iloc++ ) { + ppt = &mesh->point[ptt_v[iloc]]; + for( idim=0; idim<3; idim++ ) { + if( ppt->c[idim] - cmax[idim] < -MMG5_EPSOK*20 ) break; + if( ppt->c[idim] - cmax[idim] > MMG5_EPSOK*20 ) { + cmax[0] = ppt->c[0]; + cmax[1] = ppt->c[1]; + cmax[2] = ppt->c[2]; + iploc = iloc; + break; + } + } + } + + return iploc; +} + /** * \param parmesh pointer toward a parmesh structure * 
\return 0 if fail, 1 if success @@ -931,7 +973,7 @@ int PMMG_build_nodeCommIndex( PMMG_pParMesh parmesh ) { nitem_int_node_comm++; } } - + /* Allocate group communicators */ int_node_comm = parmesh->int_node_comm; int_node_comm->nitem = nitem_int_node_comm; @@ -972,7 +1014,7 @@ int PMMG_build_nodeCommIndex( PMMG_pParMesh parmesh ) { * stored in the external face communicator. * */ -int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh ) { +int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh, MMG5_int* permtria ) { PMMG_pGrp grp; PMMG_pInt_comm int_face_comm; PMMG_pExt_comm ext_face_comm; @@ -988,7 +1030,7 @@ int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh ) { ext_face_comm = &parmesh->ext_face_comm[iext_comm]; nitem_int_face_comm += ext_face_comm->nitem; } - + /* Allocate group communicators */ int_face_comm = parmesh->int_face_comm; int_face_comm->nitem = nitem_int_face_comm; @@ -1002,7 +1044,12 @@ int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh ) { for( iext_comm = 0; iext_comm < parmesh->next_face_comm; iext_comm++ ) { ext_face_comm = &parmesh->ext_face_comm[iext_comm]; for( iext = 0; iext < ext_face_comm->nitem; iext++ ) { - grp->face2int_face_comm_index1[iint] = ext_face_comm->int_comm_index[iext]; + if (permtria) { + grp->face2int_face_comm_index1[iint] = permtria[ext_face_comm->int_comm_index[iext]]; + } + else { + grp->face2int_face_comm_index1[iint] = ext_face_comm->int_comm_index[iext]; + } grp->face2int_face_comm_index2[iint] = iint; ext_face_comm->int_comm_index[iext] = iint++; } @@ -1012,7 +1059,20 @@ int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh ) { return 1; } -int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh ) { +/** + * \param parmesh pointer toward the parmesh structure + * + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. + * + * \return 0 if fail, 1 if succeed + * + * Construction of the face communicators from the node communicators + * + */ +int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh,MPI_Comm comm ) { PMMG_pExt_comm ext_node_comm; PMMG_pGrp grp; MMG5_pMesh mesh; @@ -1022,14 +1082,11 @@ int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh ) { int *fNodes_loc,*fNodes_par,*fColors; int nb_fNodes_loc,*nb_fNodes_par,*displs,*counter,*iproc2comm; int kt,ia,ib,ic,i,icomm,iproc,iloc,iglob,myrank,next_face_comm,ier; - MPI_Comm comm; - comm = parmesh->comm; myrank = parmesh->myrank; grp = &parmesh->listgrp[0]; mesh = grp->mesh; - /** 1) Store global node ids in point flags */ /* Reset point flags */ for( i=1; i<=mesh->np; i++ ) @@ -1168,7 +1225,7 @@ int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh ) { } /** 6) Set communicators indexing, convert tria index into iel face index */ - ier = PMMG_build_faceCommIndex( parmesh ); + ier = PMMG_build_faceCommIndex( parmesh, NULL ); PMMG_tria2elmFace_flags( parmesh ); @@ -1194,16 +1251,20 @@ int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh ) { /** * \param parmesh pointer toward a parmesh structure + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. 
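Since PMMG_build_faceCommIndex now takes an optional triangle permutation, a call-site sketch may help. Here permtria is assumed to hold the reordering produced when the boundary triangles are repacked (cf. the PMMG_chkBdryTria call earlier in this patch); passing NULL keeps the previous behaviour:

    /* a) boundary triangles were just reordered: remap the indices stored in
     *    the external face communicators through the permutation. */
    if ( !PMMG_build_faceCommIndex( parmesh, permtria ) ) return 0;

    /* b) no reordering exists (as in PMMG_build_faceCommFromNodes): */
    if ( !PMMG_build_faceCommIndex( parmesh, NULL ) ) return 0;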
* * \return 1 if success, 0 if fail. * * Build the node communicators (externals and internals) from the faces ones. * */ -int PMMG_build_nodeCommFromFaces( PMMG_pParMesh parmesh ) { +int PMMG_build_nodeCommFromFaces( PMMG_pParMesh parmesh, MPI_Comm comm ) { int ier, ier_glob; - assert ( PMMG_check_extFaceComm ( parmesh ) ); + assert ( PMMG_check_extFaceComm ( parmesh,comm ) ); assert ( PMMG_check_intFaceComm ( parmesh ) ); /** Build the internal node communicator from the faces ones */ @@ -1230,12 +1291,12 @@ int PMMG_build_nodeCommFromFaces( PMMG_pParMesh parmesh ) { /* Check that all steps have successed until here (because the next function * involves MPI comms) */ - MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, parmesh->comm); + MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, comm); if ( !ier_glob ) return 0; /** Fill the external node communicator */ - ier = PMMG_build_completeExtNodeComm(parmesh); - MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, parmesh->comm); + ier = PMMG_build_completeExtNodeComm(parmesh,comm); + MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, comm); if ( !ier ) { fprintf(stderr,"\n ## Error: %s: unable to complete the external node" " communicators.\n",__func__); @@ -1459,6 +1520,7 @@ int PMMG_build_intNodeComm( PMMG_pParMesh parmesh ) { /** Step 1: give a unique position in the internal communicator for each mesh * point but don't care about the unicity of the position for a point shared * by multiple groups */ + assert ( !parmesh->int_node_comm->nitem ); nitem_node = 0; @@ -1733,29 +1795,25 @@ int PMMG_build_intNodeComm( PMMG_pParMesh parmesh ) { coor_list[i].idx = i; } - /* Sort coor_list depending on its coordinates */ - qsort(coor_list,nitem_node,sizeof(PMMG_coorCell),PMMG_compare_coorCell); - - /* Travel the list and remove the identic nodes */ - idx = 0; + /* Travel the list and remove the identic nodes (use naive algorithm after + * issues using point sorting). */ if ( nitem_node ) { - new_pos[coor_list[0].idx] = 0; - for ( i=1; iext_node_comm,parmesh->nprocs, parmesh->next_node_comm,PMMG_Ext_comm, "list of external communicators",goto end); + next_comm = parmesh->next_node_comm; parmesh->next_node_comm = parmesh->nprocs; @@ -1925,7 +1988,7 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { * recieve the proc list of all the nodes to/from the other processors. 
At the * end of this loop, each node has the entire list of the proc to which it * belongs */ - alloc_size = parmesh->next_node_comm; + alloc_size = parmesh->nprocs; PMMG_MALLOC(parmesh,request, alloc_size,MPI_Request,"mpi request array",goto end); PMMG_MALLOC(parmesh,status, alloc_size,MPI_Status,"mpi status array",goto end); PMMG_CALLOC(parmesh,i2send_size,alloc_size,int,"size of the i2send array",goto end); @@ -1937,9 +2000,12 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { glob_update = loc_update = 0; /** Send the list of procs to which belong each point of the communicator */ - for ( k=0; knext_node_comm; ++k ) { + for ( k=0; knext_node_comm; ++k ) { ext_node_comm = &parmesh->ext_node_comm[k]; /* Computation of the number of data to send to the other procs (we want @@ -1970,7 +2036,7 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { assert ( pos==nitem2comm ); MPI_CHECK( MPI_Isend(itosend,nitem2comm,MPI_INT,color, - MPI_COMMUNICATORS_NODE_TAG,parmesh->comm, + MPI_COMMUNICATORS_NODE_TAG,comm, &request[color]),goto end ); } @@ -1982,7 +2048,7 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { color = ext_node_comm->color_out; - MPI_CHECK( MPI_Probe(color,MPI_COMMUNICATORS_NODE_TAG,parmesh->comm, + MPI_CHECK( MPI_Probe(color,MPI_COMMUNICATORS_NODE_TAG,comm, &status[0] ),goto end); MPI_CHECK( MPI_Get_count(&status[0],MPI_INT,&nitem2comm),goto end); @@ -1996,7 +2062,7 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { itorecv = ext_node_comm->itorecv; MPI_CHECK( MPI_Recv(itorecv,nitem2comm,MPI_INT,color, - MPI_COMMUNICATORS_NODE_TAG,parmesh->comm, + MPI_COMMUNICATORS_NODE_TAG,comm, &status[0]), goto end ); pos = 0; @@ -2018,9 +2084,9 @@ int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ) { } } - MPI_CHECK( MPI_Waitall(parmesh->next_node_comm,request,status), goto end ); + MPI_CHECK( MPI_Waitall(alloc_size,request,status), goto end ); MPI_CHECK( MPI_Allreduce(&loc_update,&glob_update,1,MPI_INT8_T,MPI_LOR, - parmesh->comm),goto end); + comm),goto end); } while ( glob_update ); diff --git a/src/coorcell_pmmg.c b/src/coorcell_pmmg.c index e1e0e625..3b7d333a 100644 --- a/src/coorcell_pmmg.c +++ b/src/coorcell_pmmg.c @@ -36,8 +36,8 @@ * \param a pointer toward a PMMG_coorCell structure. * \param b pointer toward a PMMG_coorCell structure. * - * \return 1 if a is greater than b, -1 if b is greater than 1, 0 if they are - * equals. + * \return 1 if a is different from b, 0 if they are + * equals to within a given tolerance. 
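Together with the removal of the qsort-based deduplication above, PMMG_compare_coorCell is now an equality test rather than an ordering. The sketch below is not the replacement code (which is abridged here); it only illustrates how such a comparator (0 = equal within tolerance, 1 = different) supports a quadratic duplicate scan, while it can no longer drive qsort, which requires a full -1/0/1 ordering:

    /* Return the index of the first earlier cell equal (within tolerance) to
     * list[i], or i itself if none matches. Illustrative only. */
    static int first_equal_cell( PMMG_coorCell *list, int i ) {
      int j;

      for ( j = 0; j < i; ++j ) {
        if ( !PMMG_compare_coorCell( &list[i], &list[j] ) ) return j;
      }
      return i;
    }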
* * Compare 2 coor cells (can be used inside the qsort C fnuction), first on * their x-coordinates, second ond their y-coordinates then on their @@ -62,9 +62,8 @@ int PMMG_compare_coorCell (const void * a, const void * b) { for ( k=0; k<3; ++k ) { dist[k] = cell1->c[k]-cell2->c[k]; - if ( dist[k] > MMG5_EPSOK*tol ) return 1; + if ( fabs(dist[k]) > MMG5_EPSOK*tol ) return 1; - if ( dist[k] < -MMG5_EPSOK*tol ) return -1; } assert ( dist[0]*dist[0]+dist[1]*dist[1]+dist[2]*dist[2]myrank, grpId ); + snprintf( name, strlen(basename)+9+4*sizeof(int),"%s-P%02d-%02d.mesh", + basename, parmesh->myrank, grpId ); fid = fopen(name,"w"); fprintf(fid,"MeshVersionFormatted 2\n"); @@ -541,7 +542,8 @@ int PMMG_grp_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename ) { field= grp->field; assert( ( strlen( basename ) < 2048 - 14 ) && "filename too big" ); - sprintf( name, "%s-P%02d-%02d.mesh", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+9+4*sizeof(int),"%s-P%02d-%02d.mesh", + basename, parmesh->myrank, grpId ); /* Rebuild boundary */ if ( !mesh->adja ) { @@ -558,14 +560,16 @@ int PMMG_grp_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename ) { /* Save metrics */ if ( met->m ) { - sprintf( name, "%s-P%02d-%02d.sol", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+8+4*sizeof(int),"%s-P%02d-%02d.sol", + basename, parmesh->myrank, grpId ); MMG3D_saveSol( mesh, met, name ); } /* Save field */ if ( mesh->nsols ) { assert ( field ); - sprintf( name, "%s-fields-P%02d-%02d.sol", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+8+7+4*sizeof(int),"%s-fields-P%02d-%02d.sol", + basename, parmesh->myrank, grpId ); MMG3D_saveAllSols( mesh, &field, name ); } @@ -590,8 +594,8 @@ int PMMG_grp_mark_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename mesh = grp->mesh; assert( ( strlen( basename ) < 2048 - 14 ) && "filename too big" ); - sprintf( name, "%s-P%02d-%02d.mesh", basename, parmesh->myrank, grpId ); - + snprintf( name, strlen(basename)+9+4*sizeof(int),"%s-P%02d-%02d.mesh", + basename, parmesh->myrank, grpId ); ier = MMG3D_hashTetra( mesh, 0 ); MMG3D_bdryBuild( mesh ); //note: no error checking @@ -601,7 +605,8 @@ int PMMG_grp_mark_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename MMG3D_saveMesh( mesh, name ); - sprintf( name, "%s-P%02d-%02d.sol", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+8+4*sizeof(int),"%s-P%02d-%02d.sol", + basename, parmesh->myrank, grpId ); PMMG_saveMark( mesh, name ); @@ -626,7 +631,8 @@ int PMMG_grp_quality_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basena mesh = grp->mesh; assert( ( strlen( basename ) < 2048 - 14 ) && "filename too big" ); - sprintf( name, "%s-P%02d-%02d.mesh", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+9+4*sizeof(int),"%s-P%02d-%02d.mesh", + basename, parmesh->myrank, grpId ); ier = MMG3D_hashTetra( mesh, 0 ); MMG3D_bdryBuild( mesh ); //note: no error checking @@ -636,7 +642,8 @@ int PMMG_grp_quality_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basena MMG3D_saveMesh( mesh, name ); - sprintf( name, "%s-P%02d-%02d.sol", basename, parmesh->myrank, grpId ); + snprintf( name, strlen(basename)+8+4*sizeof(int),"%s-P%02d-%02d.sol", + basename, parmesh->myrank, grpId ); PMMG_saveQual( mesh, name ); @@ -729,10 +736,10 @@ void PMMG_print_ext_comm( PMMG_pParMesh parmesh, PMMG_pInt_comm int_comm, */ void PMMG_dump_malloc_allocator_info( char *msg, int id ) { - char name[ 16 ]; + char name[ 
MMG5_FILENAME_LEN_MAX ]; FILE *fp; - sprintf(name,"mem_info-%02d.txt", id ); + snprintf(name,14+2*sizeof(int),"mem_info-%02d.txt", id ); fp = PMMG_my_fopen( name, "a" ); #ifdef __linux__ diff --git a/src/debug_pmmg.h b/src/debug_pmmg.h index 1e58df33..d61d8775 100644 --- a/src/debug_pmmg.h +++ b/src/debug_pmmg.h @@ -32,7 +32,7 @@ void PMMG_grplst_meshes_to_txt( char *name, PMMG_pGrp grp, int ngrp ); void PMMG_tetras_of_mesh_to_txt( char *name, MMG5_pMesh mesh, int num ); void PMMG_find_tetras_referencing_null_points_to_txt( char *name, PMMG_pGrp grp, int nmsh ); void PMMG_listgrp_meshes_adja_of_tetras_to_txt( char *name, PMMG_pGrp grp, int ngrp ); -int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,int16_t tag,char *basename ); +int PMMG_grp_to_saveEdges( PMMG_pParMesh parmesh,int grpId,uint16_t tag,char *basename ); int PMMG_grp_to_saveMesh( PMMG_pParMesh parmesh, int grpId, char *basename ); int PMMG_listgrp_to_saveMesh( PMMG_pParMesh parmesh, char *basename ); int PMMG_listgrp_quality_to_saveMesh( PMMG_pParMesh parmesh, char *basename ); diff --git a/src/distributegrps_pmmg.c b/src/distributegrps_pmmg.c index 610a8043..a9f7c14e 100644 --- a/src/distributegrps_pmmg.c +++ b/src/distributegrps_pmmg.c @@ -931,8 +931,8 @@ int PMMG_merge_grps2send(PMMG_pParMesh parmesh,idx_t **part) { if ( !PMMG_pack_faceCommunicators(parmesh) ) ier = -1; - assert ( PMMG_check_extFaceComm(parmesh) ); - assert ( PMMG_check_extNodeComm(parmesh) ); + assert ( PMMG_check_extFaceComm(parmesh,parmesh->comm) ); + assert ( PMMG_check_extNodeComm(parmesh,parmesh->comm) ); /* Update tag on points, tetra */ if ( !PMMG_updateTag(parmesh) ) return -1; @@ -1215,10 +1215,11 @@ int PMMG_transfer_grps_fromMetoJ(PMMG_pParMesh parmesh,const int recv, ier0 = 1; PMMG_MALLOC ( parmesh,*trequest,nprocs,MPI_Request,"request_tab", ier0 = 0; ier = MG_MIN(ier,ier0); ); - if ( ier0 ) + if ( ier0 ) { for ( k=0; klistgrp ); - PMMG_CALLOC ( parmesh,parmesh->listgrp,grpscount,PMMG_Grp,"listgrp", - ier0 = 0;ier = 0 ); + if ( !parmesh->listgrp ) + PMMG_CALLOC ( parmesh,parmesh->listgrp,grpscount,PMMG_Grp,"listgrp", + ier0 = 0;ier = 0 ); + else + PMMG_RECALLOC( parmesh,parmesh->listgrp,grpscount,1,PMMG_Grp,"listgrp", + ier0 = 0;ier = 0 ); } if ( ier0 ) @@ -1856,6 +1860,7 @@ int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int called_from_dis nprocs = parmesh->nprocs; comm = parmesh->comm; max_ngrp = 0; + ier = 1; send_grps = NULL; recv_grps = NULL; @@ -1872,7 +1877,7 @@ int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int called_from_dis interaction_map = NULL; interactions = NULL; - /** Step 1: Merge all the groups that must be sended to a given proc into 1 + /** Step 1: Merge all the groups that must be sent to a given proc into 1 * group */ ier = PMMG_merge_grps2send(parmesh,&part); @@ -1961,7 +1966,7 @@ int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int called_from_dis } /** Step 6: Node communicators reconstruction from the face ones */ - if ( !PMMG_build_nodeCommFromFaces(parmesh) ) { + if ( !PMMG_build_nodeCommFromFaces(parmesh,parmesh->comm) ) { fprintf(stderr,"\n ## Unable to build the new node communicators from" " the face ones.\n"); ier = -1; @@ -2027,6 +2032,11 @@ int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int called_from_dis /** * \param parmesh pointer toward the mesh structure. 
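As an alternative to hand-counting the format characters in the snprintf sizes above, the required length can be obtained from snprintf itself. A sketch, assuming the destination buffer has MMG5_FILENAME_LEN_MAX bytes as elsewhere in this file (the helper name is illustrative):

    #include <stdio.h>

    /* Build "<basename>-P<rank>-<grp>.mesh", refusing names that would not
     * fit instead of silently truncating. */
    static int build_debug_name( char name[MMG5_FILENAME_LEN_MAX],
                                 const char *basename, int rank, int grpId ) {
      int needed = snprintf( NULL, 0, "%s-P%02d-%02d.mesh",
                             basename, rank, grpId );

      if ( needed < 0 || needed >= MMG5_FILENAME_LEN_MAX ) return 0;

      snprintf( name, MMG5_FILENAME_LEN_MAX, "%s-P%02d-%02d.mesh",
                basename, rank, grpId );
      return 1;
    }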
+ * \param repartitionning_mode strategy to use for repartitionning (we want to balance + * the graph if the function is called at preprocessing stage due to inputs from + * a different number of parititions than the number used during the run (before + * libparmmg1 call) or at the end of the iterations to return a balanced mesh + * but we want to perform interfaces migration during internal iterations ) * * \return -1 if we fail and can not save the meshes, 0 if we fail but can save * the meshes, 1 otherwise @@ -2035,24 +2045,18 @@ int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int called_from_dis * processors and send and recieve the groups from the other processors. * */ -int PMMG_distribute_grps( PMMG_pParMesh parmesh ) { +int PMMG_distribute_grps( PMMG_pParMesh parmesh, int repartitioning_mode ) { idx_t *part; int ngrp,ier; MPI_Allreduce( &parmesh->ngrp, &ngrp, 1, MPI_INT, MPI_MIN, parmesh->comm); -// if ( !ngrp ) { -// fprintf(stderr,"Error:%s:%d: Empty partition. Not yet implemented\n", -// __func__,__LINE__); -// return 0; -// } - /** Get the new partition of groups (1 group = 1 metis node) */ part = NULL; PMMG_CALLOC(parmesh,part,parmesh->ngrp,idx_t,"allocate parmetis buffer", return 0); - if( parmesh->info.repartitioning == PMMG_REDISTRIBUTION_ifc_displacement ) { + if( repartitioning_mode == PMMG_REDISTRIBUTION_ifc_displacement ) { ier = PMMG_part_getProcs( parmesh, part ); diff --git a/src/distributemesh_pmmg.c b/src/distributemesh_pmmg.c index 9c51e851..ce3ef8df 100644 --- a/src/distributemesh_pmmg.c +++ b/src/distributemesh_pmmg.c @@ -1194,8 +1194,8 @@ int PMMG_distribute_mesh( PMMG_pParMesh parmesh ) /* Check the communicators */ assert ( PMMG_check_intNodeComm(parmesh) && "Wrong internal node comm" ); assert ( PMMG_check_intFaceComm(parmesh) && "Wrong internal face comm" ); - assert ( PMMG_check_extNodeComm(parmesh) && "Wrong external node comm" ); - assert ( PMMG_check_extFaceComm(parmesh) && "Wrong external face comm" ); + assert ( PMMG_check_extNodeComm(parmesh,parmesh->comm) && "Wrong external node comm" ); + assert ( PMMG_check_extFaceComm(parmesh,parmesh->comm) && "Wrong external face comm" ); /* The part array is deallocated when groups to be sent are merged (do not * do it here) */ diff --git a/src/free_pmmg.c b/src/free_pmmg.c index fabeb938..eb97185e 100644 --- a/src/free_pmmg.c +++ b/src/free_pmmg.c @@ -158,3 +158,19 @@ int PMMG_clean_emptyMesh( PMMG_pParMesh parmesh, PMMG_pGrp listgrp, int ngrp ) { return 1; } + +/** + * \param parmesh pointer toward a parmesh structure. + * \param ptr list of pointers toward integers. + * \param nptr list size + * \param mess deallocation message + * + * Free a list of integer arrays. + * + */ +void PMMG_destroy_int( PMMG_pParMesh parmesh, void **ptr[], size_t nptr,char *mess ) { + size_t i; + for( i = 0; i < nptr; i++ ) { + PMMG_DEL_MEM(parmesh,*ptr[i],int,mess); + } +} diff --git a/src/grpsplit_pmmg.c b/src/grpsplit_pmmg.c index 8d00f95f..fad23421 100644 --- a/src/grpsplit_pmmg.c +++ b/src/grpsplit_pmmg.c @@ -33,6 +33,7 @@ */ #include "parmmg.h" #include "metis_pmmg.h" +#include "mmgexterns_private.h" /** * \param nelem number of elements in the initial group @@ -800,6 +801,7 @@ static int PMMG_splitGrps_updateFaceCommOld( PMMG_pParMesh parmesh, * \param ngrp nb. of new groups * \param grpIdOld index of the group that is splitted in the old list of groups * \param grpId index of the group that we create in the list of groups + * \param hash table storing tags of boundary edges of original mesh. 
* \param ne number of elements in the new group mesh * \param np pointer toward number of points in the new group mesh * \param f2ifc_max maximum number of elements in the face2int_face_comm arrays @@ -817,7 +819,8 @@ static int PMMG_splitGrps_updateFaceCommOld( PMMG_pParMesh parmesh, * */ static int -PMMG_splitGrps_fillGroup( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int ngrp,int grpIdOld,int grpId,int ne, +PMMG_splitGrps_fillGroup( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int ngrp,int grpIdOld,int grpId, + MMG5_HGeom hash,int ne, int *np,int *f2ifc_max,int *n2inc_max,idx_t *part, int* posInIntFaceComm,int* iplocFaceComm ) { PMMG_pGrp const grp = &listgrp[grpId]; @@ -1041,7 +1044,7 @@ PMMG_splitGrps_fillGroup( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int ngrp,int g pos ) ) return 0; for ( j=0; j<3; ++j ) { - /* Update the face and face vertices tags */ + /** Update the face and face vertices tags */ PMMG_tag_par_edge(pxt,MMG5_iarf[fac][j]); ppt = &mesh->point[tetraCur->v[MMG5_idir[fac][j]]]; PMMG_tag_par_node(ppt); @@ -1088,6 +1091,41 @@ PMMG_splitGrps_fillGroup( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int ngrp,int g if( !PMMG_splitGrps_updateNodeCommNew( parmesh,grp,mesh,meshOld,tetraCur,pt, tet,grpId,n2inc_max,part ) ) return 0; + + + /* Xtetra have been added by the previous loop, take advantage of the + * current one to update possible inconsistencies in edge tags (if a + * boundary face has been added along an edge that was previously boundary + * but not belonging to a boundary face, some of the edge tags may be + * missing). */ + if ( !tetraCur->xt ) continue; + + pxt = &mesh->xtetra[tetraCur->xt]; + for ( j=0; j<6; j++ ) { + + /* Tag infos have to be consistent for all edges marked as boundary */ + if ( !(pxt->tag[j] & MG_BDY) ) continue; + + int ip0 = pt->v[MMG5_iare[j][0]]; + int ip1 = pt->v[MMG5_iare[j][1]]; + + uint16_t tag; + int ref; + + /* get the tag stored in the hash table (old mesh) and set it the xtetra + * edge (new mesh): hGet may return 0 as edges of the old mesh are not + * hashed if they were not belonging to a boundary face (but due to the + * new partitionning, it is possible that they are now belonging to a bdy + * face). */ + MMG5_hGet( &hash, ip0, ip1, &ref, &tag ); + pxt->tag[j] |= tag; + + /* Remove spurious NOSURF tag for user required edges */ + if ( (tag & MG_REQ) && !(tag & MG_NOSURF) ) { + pxt->tag[j] &= ~MG_NOSURF; + } + } + } return 1; @@ -1254,6 +1292,7 @@ int PMMG_update_oldGrps( PMMG_pParMesh parmesh ) { * \param ngrp number of groups to split the mesh into. * \param countPerGrp number of tetras in each new groups. * \param part array of the mesh element partitioning. + * \param hash table storing tags of boundary edges of original mesh. * * \return -1 : no possibility to save the mesh * 0 : failed but the mesh is correct @@ -1265,7 +1304,7 @@ int PMMG_update_oldGrps( PMMG_pParMesh parmesh ) { * */ int PMMG_split_eachGrp( PMMG_pParMesh parmesh,int grpIdOld,PMMG_pGrp grpsNew, - idx_t ngrp,int *countPerGrp,idx_t *part ) { + idx_t ngrp,int *countPerGrp,idx_t *part,MMG5_HGeom hash ) { PMMG_pGrp grpOld,grpCur; MMG5_pMesh meshOld,meshCur; /** size of allocated node2int_node_comm_idx. 
when comm is ready trim to @@ -1352,7 +1391,7 @@ int PMMG_split_eachGrp( PMMG_pParMesh parmesh,int grpIdOld,PMMG_pGrp grpsNew, grpCur = &grpsNew[grpId]; meshCur = grpCur->mesh; - if ( !PMMG_splitGrps_fillGroup(parmesh,grpsNew,ngrp,grpIdOld,grpId, + if ( !PMMG_splitGrps_fillGroup(parmesh,grpsNew,ngrp,grpIdOld,grpId,hash, countPerGrp[grpId],&poiPerGrp[grpId], &f2ifc_max[grpId], &n2inc_max[grpId],part,posInIntFaceComm, @@ -1461,17 +1500,47 @@ int PMMG_split_eachGrp( PMMG_pParMesh parmesh,int grpIdOld,PMMG_pGrp grpsNew, * \warning tetra must be packed. * */ -int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int fitMesh ) { +int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int fitMesh) { PMMG_pGrp grpOld; PMMG_pGrp grpsNew = NULL; MMG5_pMesh meshOld; int *countPerGrp = NULL; int ret_val = 1; + if (!part) return 1; + /* Get mesh to split */ grpOld = &parmesh->listgrp[grpIdOld]; meshOld = parmesh->listgrp[grpIdOld].mesh; + + /* Create hash table to store edge tags (to keep tag consistency along + * boundary edges that doesn't belong to a boundary face in meshOld (and + * doesn't has valid tags) but that will belongs to a PARBDY face after group + * splitting). Such kind of inconsistencies may be detected by calling the + * MMG3D_chkmesh function. */ + MMG5_HGeom hash; + if ( !MMG5_hNew(meshOld, &hash, 6*meshOld->xt, 8*meshOld->xt) ) return 0; + + int k,j,i; + for ( k=1; k<=meshOld->ne; k++ ) { + MMG5_pTetra pt = &meshOld->tetra[k]; + if ( !pt->xt ) continue; + + MMG5_pxTetra pxt = &meshOld->xtetra[pt->xt]; + for ( j=0; j<4; j++ ) { + /* We recover edge tag infos from boundary faces */ + if ( !(pxt->ftag[j] & MG_BDY) ) continue; + + for ( i=0; i<3; ++i ) { + int ia = MMG5_iarf[j][i]; + int ip0 = pt->v[MMG5_iare[ia][0]]; + int ip1 = pt->v[MMG5_iare[ia][1]]; + if( !MMG5_hEdge( meshOld, &hash, ip0, ip1, 0, pxt->tag[ia] ) ) return 0; + } + } + } + /* count_per_grp: count new tetra per group, and store new ID in the old * tetra flag */ PMMG_CALLOC(parmesh,countPerGrp,ngrp,int,"counter buffer ",return 0); @@ -1483,7 +1552,9 @@ int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int /** Perform group splitting */ - ret_val = PMMG_split_eachGrp( parmesh,grpIdOld,grpsNew,ngrp,countPerGrp,part ); + ret_val = PMMG_split_eachGrp( parmesh,grpIdOld,grpsNew,ngrp,countPerGrp,part,hash ); + PMMG_DEL_MEM( meshOld, hash.geom, MMG5_hgeom, "Edge hash table" ); + if( ret_val != 1) goto fail_counters; PMMG_listgrp_free(parmesh, &parmesh->listgrp, parmesh->ngrp); @@ -1505,7 +1576,6 @@ int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int PMMG_DEL_MEM(parmesh,countPerGrp,int,"counter buffer "); #ifndef NDEBUG - int i; for( i = 0; i < parmesh->ngrp; i++ ) PMMG_MEM_CHECK(parmesh,parmesh->listgrp[i].mesh,return 0); #endif @@ -1517,17 +1587,19 @@ int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int /** * \param parmesh pointer toward the parmesh * \param ier error value to return + * \param comm MPI communicator to use + * * \return \a ier * * Check communicator consistency. 
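 *
 * \remark Minimal usage sketch (illustrative only): the function is meant to be
 * called with the value the caller intends to return, so the checks can be
 * chained into a return statement; in NDEBUG builds the asserts compile out and
 * the call simply returns \a ier.
 * \code
 * if ( !parmesh->ngrp ) {
 *   // nothing to split on this rank: only validate the communicators
 *   return PMMG_check_allComm( parmesh, ret_val, parmesh->comm );
 * }
 * \endcode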
* */ static inline -int PMMG_check_allComm(PMMG_pParMesh parmesh,int ier) { +int PMMG_check_allComm(PMMG_pParMesh parmesh,const int ier,MPI_Comm comm) { assert ( PMMG_check_intNodeComm(parmesh) && "Wrong internal node comm" ); assert ( PMMG_check_intFaceComm(parmesh) && "Wrong internal face comm" ); - assert ( PMMG_check_extNodeComm(parmesh) && "Wrong external node comm" ); - assert ( PMMG_check_extFaceComm(parmesh) && "Wrong external face comm" ); + assert ( PMMG_check_extNodeComm(parmesh,comm) && "Wrong external node comm" ); + assert ( PMMG_check_extFaceComm(parmesh,comm) && "Wrong external face comm" ); return ier; } @@ -1561,57 +1633,65 @@ int PMMG_splitPart_grps( PMMG_pParMesh parmesh,int target,int fitMesh,int redist int noldgrps_all[parmesh->nprocs]; int npmax,nemax,xpmax,xtmax; - if ( !parmesh->ngrp ) { - /* Check the communicators and return failure */ - return PMMG_check_allComm(parmesh,ret_val); - } - /* We are splitting group 0 */ grpIdOld = 0; - grpOld = &parmesh->listgrp[grpIdOld]; - meshOld = parmesh->listgrp[grpIdOld].mesh; - assert ( (parmesh->ngrp == 1) && " split_grps can not split m groups to n"); + if ( !parmesh->ngrp ) { + /* Check the communicators */ + PMMG_check_allComm(parmesh,ret_val,parmesh->comm); + grpOld = NULL; + meshOld = NULL; + } else { + assert ( (parmesh->ngrp == 1) && " split_grps can not split m groups to n"); + grpOld = &parmesh->listgrp[grpIdOld]; + meshOld = parmesh->listgrp[grpIdOld].mesh; + } if ( !meshOld ) { - /* Check the communicators and return failure */ - return PMMG_check_allComm(parmesh,ret_val); + /* Check the communicators */ + PMMG_check_allComm(parmesh,ret_val,parmesh->comm); } /* Count how many groups to split into */ - if( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && - (target == PMMG_GRPSPL_DISTR_TARGET) ) { - /* Set to a value higher than 1 just to continue until the true - * computation (which is after a jump on ngrp==1) */ - ngrp = 2; - } else { + if ( parmesh->ngrp ) { - ngrp = PMMG_howManyGroups( meshOld->ne,abs(parmesh->info.target_mesh_size) ); - if ( parmesh->info.target_mesh_size < 0 ) { - /* default value : do not authorize large number of groups */ - ngrp = MG_MIN ( PMMG_REMESHER_NGRPS_MAX, ngrp ); - } + if( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && + (target == PMMG_GRPSPL_DISTR_TARGET) ) { + /* Set to a value higher than 1 just to continue until the true + * computation (which is after a jump on ngrp==1) */ +#warning: fix this conditional jump + ngrp = 2; + } else { - if ( target == PMMG_GRPSPL_DISTR_TARGET ) { - /* Compute the number of metis nodes from the number of groups */ - ngrp = MG_MIN( ngrp*abs(parmesh->info.metis_ratio), meshOld->ne/PMMG_REDISTR_NELEM_MIN+1 ); - if ( parmesh->info.metis_ratio < 0 ) { + ngrp = PMMG_howManyGroups( meshOld->ne,abs(parmesh->info.target_mesh_size) ); + if ( parmesh->info.target_mesh_size < 0 ) { /* default value : do not authorize large number of groups */ - if ( ngrp > PMMG_REDISTR_NGRPS_MAX ) { - printf(" ## Warning: %s: too much metis nodes needed...\n" - " Partitions may remains freezed. Try to use more processors.\n", - __func__); - ngrp = PMMG_REDISTR_NGRPS_MAX; - } + ngrp = MG_MIN ( PMMG_REMESHER_NGRPS_MAX, ngrp ); } - if ( ngrp > meshOld->ne ) { - /* Correction if it leads to more groups than elements */ - printf(" ## Warning: %s: too much metis nodes needed...\n" - " Partitions may remains freezed. 
Try to reduce the number of processors.\n", - __func__); - ngrp = MG_MIN ( meshOld->ne, ngrp ); + + if ( target == PMMG_GRPSPL_DISTR_TARGET ) { + /* Compute the number of metis nodes from the number of groups */ + ngrp = MG_MIN( ngrp*abs(parmesh->info.metis_ratio), meshOld->ne/PMMG_REDISTR_NELEM_MIN+1 ); + if ( parmesh->info.metis_ratio < 0 ) { + /* default value : do not authorize large number of groups */ + if ( ngrp > PMMG_REDISTR_NGRPS_MAX ) { + printf(" ## Warning: %s: too much metis nodes needed...\n" + " Partitions may remains freezed. Try to use more processors.\n", + __func__); + ngrp = PMMG_REDISTR_NGRPS_MAX; + } + } + if ( ngrp > meshOld->ne ) { + /* Correction if it leads to more groups than elements */ + printf(" ## Warning: %s: %d: too much metis nodes needed...\n" + " Partitions may remains freezed. Try to reduce the number of processors.\n", + __func__, parmesh->myrank); + ngrp = MG_MIN ( meshOld->ne, ngrp ); + } } } + + if (!meshOld->ne) ngrp = 1; } /* Share old number of groups with all procs: must be done here to ensure that @@ -1628,84 +1708,87 @@ int PMMG_splitPart_grps( PMMG_pParMesh parmesh,int target,int fitMesh,int redist MPI_CHECK( MPI_Gather(spltinfo,2,MPI_INT,spltinfo_all,2,MPI_INT,0,parmesh->comm), PMMG_CLEAN_AND_RETURN(parmesh,PMMG_LOWFAILURE) ); } - if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { - int i; - for( i=0; inprocs; i++ ) { - fprintf(stdout," rank %d splitting %d elts into %d grps\n", - i,spltinfo_all[2*i+1],spltinfo_all[2*i]); - } - } - /* Does the group need to be further subdivided to subgroups or not? */ - if ( ngrp == 1 ) { - if ( parmesh->ddebug ) { - fprintf( stdout, - "[%d-%d]: %d group is enough, no need to create sub groups.\n", - parmesh->myrank+1, parmesh->nprocs, ngrp ); - } - return PMMG_check_allComm(parmesh,ret_val); - } else { - if ( parmesh->ddebug ) - fprintf( stdout, - "[%d-%d]: %d groups required, splitting into sub groups...\n", - parmesh->myrank+1, parmesh->nprocs, ngrp ); - } - - /* Crude check whether there is enough free memory to allocate the new group */ - if ( parmesh->memCur+2*meshOld->memCur>parmesh->memGloMax ) { - npmax = meshOld->npmax; - nemax = meshOld->nemax; - xpmax = meshOld->xpmax; - xtmax = meshOld->xtmax; - meshOld->npmax = meshOld->np; - meshOld->nemax = meshOld->ne; - meshOld->xpmax = meshOld->xp; - meshOld->xtmax = meshOld->xt; - if ( (!PMMG_setMeshSize_realloc( meshOld, npmax, xpmax, nemax, xtmax )) || - parmesh->memCur+2*meshOld->memCur>parmesh->memGloMax ) { - fprintf( stderr, "Not enough memory to create listgrp struct\n" ); - return 0; + if ( parmesh->ngrp ) { + if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { + int i; + for( i=0; inprocs; i++ ) { + fprintf(stdout," rank %d splitting %d elts into %d grps\n", + i,spltinfo_all[2*i+1],spltinfo_all[2*i]); + } } - } - - /* use metis to partition the mesh into the computed number of groups needed - part array contains the groupID computed by metis for each tetra */ - PMMG_CALLOC(parmesh,part,meshOld->ne,idx_t,"metis buffer ", return 0); - meshOld_ne = meshOld->ne; - /* Get interfaces layers or call metis. Use interface displacement if: - * - This is the required method, and - * - you are before group distribution or there are not too many groups. 
- */ - if( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && - ((target == PMMG_GRPSPL_DISTR_TARGET) || - ((target == PMMG_GRPSPL_MMG_TARGET) && - (ngrp <= parmesh->info.grps_ratio*parmesh->nold_grp))) ) { - - ngrp = PMMG_part_getInterfaces( parmesh, part, noldgrps_all, target ); + /* Does the group need to be further subdivided to subgroups or not? */ if ( ngrp == 1 ) { - if ( parmesh->ddebug ) + if ( parmesh->ddebug ) { fprintf( stdout, "[%d-%d]: %d group is enough, no need to create sub groups.\n", parmesh->myrank+1, parmesh->nprocs, ngrp ); - goto fail_part; + } + return PMMG_check_allComm(parmesh,ret_val,parmesh->comm); + } else { + if ( parmesh->ddebug ) + fprintf( stdout, + "[%d-%d]: %d groups required, splitting into sub groups...\n", + parmesh->myrank+1, parmesh->nprocs, ngrp ); } - } - else { - if ( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && - (parmesh->info.imprim > PMMG_VERB_ITWAVES) ) - fprintf(stdout,"\n calling Metis on proc%d\n\n",parmesh->myrank); - if ( !PMMG_part_meshElts2metis(parmesh, part, ngrp) ) { - ret_val = 0; - goto fail_part; + + /* Crude check whether there is enough free memory to allocate the new group */ + if ( parmesh->memCur+2*meshOld->memCur>parmesh->memGloMax ) { + npmax = meshOld->npmax; + nemax = meshOld->nemax; + xpmax = meshOld->xpmax; + xtmax = meshOld->xtmax; + meshOld->npmax = meshOld->np; + meshOld->nemax = meshOld->ne; + meshOld->xpmax = meshOld->xp; + meshOld->xtmax = meshOld->xt; + if ( (!PMMG_setMeshSize_realloc( meshOld, npmax, xpmax, nemax, xtmax )) || + parmesh->memCur+2*meshOld->memCur>parmesh->memGloMax ) { + fprintf( stderr, "Not enough memory to create listgrp struct\n" ); + return 0; + } } - /* If this is the first split of the input mesh, and interface displacement - * will be performed, check that the groups are contiguous. */ - if( parmesh->info.repartitioning == PMMG_REDISTRIBUTION_ifc_displacement ) - if( !PMMG_fix_contiguity_split( parmesh,ngrp,part ) ) return 0; - } + /* use metis to partition the mesh into the computed number of groups needed + part array contains the groupID computed by metis for each tetra */ + + PMMG_CALLOC(parmesh,part,meshOld->ne,idx_t,"metis buffer ", return 0); + meshOld_ne = meshOld->ne; + + /* Get interfaces layers or call metis. Use interface displacement if: + * - This is the required method, and + * - you are before group distribution or there are not too many groups. + */ + if( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && + ((target == PMMG_GRPSPL_DISTR_TARGET) || + ((target == PMMG_GRPSPL_MMG_TARGET) && + (ngrp <= parmesh->info.grps_ratio*parmesh->nold_grp))) ) { + + ngrp = PMMG_part_getInterfaces( parmesh, part, noldgrps_all, target ); + if ( ngrp == 1 ) { + if ( parmesh->ddebug ) + fprintf( stdout, + "[%d-%d]: %d group is enough, no need to create sub groups.\n", + parmesh->myrank+1, parmesh->nprocs, ngrp ); + goto fail_part; + } + } + else { + if ( (redistrMode == PMMG_REDISTRIBUTION_ifc_displacement) && + (parmesh->info.imprim > PMMG_VERB_ITWAVES) ) + fprintf(stdout,"\n calling Metis on proc%d\n\n",parmesh->myrank); + if ( !PMMG_part_meshElts2metis(parmesh, part, ngrp) ) { + ret_val = 0; + goto fail_part; + } + /* If this is the first split of the input mesh, and interface displacement + * will be performed, check that the groups are contiguous. 
*/ + if( parmesh->info.repartitioning == PMMG_REDISTRIBUTION_ifc_displacement ) + if( !PMMG_fix_contiguity_split( parmesh,ngrp,part ) ) return 0; + } + } /* Split the mesh */ ret_val = PMMG_split_grps( parmesh,grpIdOld,ngrp,part,fitMesh ); @@ -1716,8 +1799,8 @@ int PMMG_splitPart_grps( PMMG_pParMesh parmesh,int target,int fitMesh,int redist /* Check the communicators */ assert ( PMMG_check_intNodeComm(parmesh) && "Wrong internal node comm" ); assert ( PMMG_check_intFaceComm(parmesh) && "Wrong internal face comm" ); - assert ( PMMG_check_extNodeComm(parmesh) && "Wrong external node comm" ); - assert ( PMMG_check_extFaceComm(parmesh) && "Wrong external face comm" ); + assert ( PMMG_check_extNodeComm(parmesh,parmesh->comm) && "Wrong external node comm" ); + assert ( PMMG_check_extFaceComm(parmesh,parmesh->comm) && "Wrong external face comm" ); return ret_val; } @@ -1727,13 +1810,14 @@ int PMMG_splitPart_grps( PMMG_pParMesh parmesh,int target,int fitMesh,int redist * \param target software for which we split the groups * (\a PMMG_GRPSPL_DISTR_TARGET or \a PMMG_GRPSPL_MMG_TARGET) * \param fitMesh alloc the meshes at their exact sizes + * \param repartitioning_mode strategy to use for repartitioning * * \return 0 if fail, 1 if success, -1 if the mesh is not correct * * Redistribute the n groups of listgrps into \a target_mesh_size groups. * */ -int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { +int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh,int repartitioning_mode) { int *vtxdist,*priorityMap; int ier,ier1; #ifndef NDEBUG @@ -1744,9 +1828,9 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { char stim[32]; assert ( PMMG_check_intFaceComm ( parmesh ) ); - assert ( PMMG_check_extFaceComm ( parmesh ) ); + assert ( PMMG_check_extFaceComm ( parmesh,parmesh->comm ) ); assert ( PMMG_check_intNodeComm ( parmesh ) ); - assert ( PMMG_check_extNodeComm ( parmesh ) ); + assert ( PMMG_check_extNodeComm ( parmesh,parmesh->comm ) ); if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { tminit(ctim,3); @@ -1755,7 +1839,7 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { } /* Store the nb of tetra per group bbefore merging */ - if( (parmesh->info.repartitioning == PMMG_REDISTRIBUTION_ifc_displacement) && + if( (repartitioning_mode == PMMG_REDISTRIBUTION_ifc_displacement) && (target == PMMG_GRPSPL_DISTR_TARGET) ) { if( !PMMG_init_ifcDirection( parmesh, &vtxdist, &priorityMap ) ) return 0; } @@ -1766,6 +1850,15 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { fprintf(stderr,"\n ## Merge groups problem.\n"); } +#ifndef NDEBUG + for (int k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. 
Invalid mesh.\n"); + return 0; + } + } +#endif + if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { chrono(OFF,&(ctim[tim])); printim(ctim[tim].gdif,stim); @@ -1800,8 +1893,8 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { if ( parmesh->ddebug ) { - PMMG_qualhisto( parmesh, PMMG_INQUA, 0 ); - PMMG_prilen( parmesh, 0, 0 ); + PMMG_qualhisto( parmesh, PMMG_INQUA, 0, parmesh->comm ); + PMMG_prilen( parmesh, 0, 0, parmesh->comm ); } @@ -1810,7 +1903,7 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { chrono(ON,&(ctim[tim])); } - if( parmesh->info.repartitioning == PMMG_REDISTRIBUTION_ifc_displacement ) { + if( repartitioning_mode == PMMG_REDISTRIBUTION_ifc_displacement ) { /* Rebuild tetra adjacency (mesh graph construction is skipped) */ MMG5_pMesh mesh = parmesh->listgrp[0].mesh; if ( !mesh->adja ) { @@ -1829,8 +1922,26 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { } /** Split the group into the suitable number of groups */ +#ifndef NDEBUG + for (int k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + if ( ier ) - ier = PMMG_splitPart_grps(parmesh,target,fitMesh,parmesh->info.repartitioning); + ier = PMMG_splitPart_grps(parmesh,target,fitMesh,repartitioning_mode); + +#ifndef NDEBUG + for (int k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { chrono(OFF,&(ctim[tim])); @@ -1842,9 +1953,9 @@ int PMMG_split_n2mGrps(PMMG_pParMesh parmesh,int target,int fitMesh) { fprintf(stderr,"\n ## Split group problem.\n"); assert ( PMMG_check_intFaceComm ( parmesh ) ); - assert ( PMMG_check_extFaceComm ( parmesh ) ); + assert ( PMMG_check_extFaceComm ( parmesh,parmesh->comm ) ); assert ( PMMG_check_intNodeComm ( parmesh ) ); - assert ( PMMG_check_extNodeComm ( parmesh ) ); + assert ( PMMG_check_extNodeComm ( parmesh,parmesh->comm ) ); return ier; } diff --git a/src/hash_pmmg.c b/src/hash_pmmg.c index a70ca724..9adf63b6 100644 --- a/src/hash_pmmg.c +++ b/src/hash_pmmg.c @@ -28,11 +28,11 @@ * \author Algiane Froehly (Inria/UBordeaux) * \author Nikolas Pattakos (Inria) * \author Luca Cirrottola (Inria) + * \author Laetitia Mottet (UBordeaux) * \version * \copyright * */ -#include "mmg3d.h" #include "parmmg.h" int PMMG_hashOldPar_pmmg( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_Hash *hash ) { @@ -144,19 +144,19 @@ int PMMG_hashOldPar_pmmg( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_Hash *hash * Hash the parallel edges. Only use face communicators to this purpose. * */ -int PMMG_hashPar_pmmg( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ) { +int PMMG_hashPar_fromFaceComm( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ) { PMMG_pGrp grp = &parmesh->listgrp[0]; MMG5_pMesh mesh = grp->mesh; MMG5_pTetra pt; MMG5_pxTetra pxt; PMMG_pInt_comm int_face_comm; - int k,na; - int i,ie,ifac,j,ia,i1,i2; + MMG5_int na,ie; + int i,ifac,j,ia,i1,i2; assert( parmesh->ngrp == 1 ); /** Allocation of hash table to store parallel edges */ - na = (int)(mesh->np*0.2); // Euler-Poincare + na = (MMG5_int)(mesh->np*0.2); // Euler-Poincare if ( 1 != MMG5_hNew( mesh, pHash, na, 3 * na ) ) return PMMG_FAILURE; @@ -181,14 +181,18 @@ int PMMG_hashPar_pmmg( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ) { /** * \param mesh pointer toward a MMG5 mesh structure. * \param pHash pointer to the edge hash table. 
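 *
 * \remark Illustrative query of the resulting table (the vertex indices \a ip0
 * and \a ip1 are placeholders): MMG5_hGet retrieves the reference and tag
 * stored for the edge, and the stored tags carry the MG_PARBDY bit added by
 * this function.
 * \code
 * int      ref;
 * uint16_t tag;
 * if ( MMG5_hGet( pHash, ip0, ip1, &ref, &tag ) ) {
 *   assert( tag & MG_PARBDY );
 * }
 * \endcode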
+ * * \return PMMG_FAILURE * PMMG_SUCCESS * - * Hash the edges. Use the assumption that all paralle edges are seen by a - * MG_PARBDY face on an xtetra. + * Hash the edges belonging to parallel faces and store their tags with the + * additionnal MG_PARBDY tag. + * + * \remark Use the assumption that all paralle edges are + * seen by a MG_PARBDY face on an xtetra. * */ -int PMMG_hashPar( MMG5_pMesh mesh,MMG5_HGeom *pHash ) { +int PMMG_hashParTag_fromXtet( MMG5_pMesh mesh,MMG5_HGeom *pHash ) { MMG5_pTetra pt; MMG5_pxTetra pxt; int k,na; @@ -239,14 +243,14 @@ int PMMG_bdryUpdate( MMG5_pMesh mesh ) MMG5_pxTetra pxt; MMG5_HGeom hash; int k,edg; - int16_t tag; + uint16_t tag; int8_t i,i1,i2; assert ( !mesh->htab.geom ); /* Hash the MG_PARBDY edges */ - if( PMMG_hashPar(mesh,&hash) != PMMG_SUCCESS ) return PMMG_FAILURE; + if( PMMG_hashParTag_fromXtet(mesh,&hash) != PMMG_SUCCESS ) return PMMG_FAILURE; /** Update xtetra edge tag if needed */ for (k=1; k<=mesh->ne; ++k) { @@ -274,3 +278,137 @@ int PMMG_bdryUpdate( MMG5_pMesh mesh ) return PMMG_SUCCESS; } + +/** + * \param mesh pointer to the mesh structure. + * \return 1 if success, 0 otherwise. + * + * - Remove double triangles from tria array. + * + * - Remove triangles that do not belong to a boundary (non opnbdy mode) from + * tria array. + * + * - Check the matching between actual and given number of faces in the mesh: + * Count the number of faces in mesh and compare this number to the number of + * given triangles. + * + * - If the founded number exceed the given one, add the missing + * boundary triangles (call to MMG5_bdryTria). Do nothing otherwise. + * + * - Fill the adjacency relationship between prisms and tetra (fill adjapr with + * a negative value to mark this special faces). + * + * - Set to required the triangles at interface betwen prisms and tet. + * + */ +int PMMG_chkBdryTria(MMG5_pMesh mesh, MMG5_int* permtria) { + MMG5_int ntmesh,ntpres; + int ier; + MMG5_Hash hashElt; + + /** Step 1: scan the mesh and count the boundaries */ + ier = MMG5_chkBdryTria_countBoundaries(mesh,&ntmesh,&ntpres); + + /** Step 2: detect the extra boundaries (that will be ignored) provided by the + * user */ + if ( mesh->nt ) { + ier = MMG5_chkBdryTria_hashBoundaries(mesh,ntmesh,&hashElt); + // Travel through the tria, flag those that are not in the hash tab or + // that are stored more that once. + ier = MMG5_chkBdryTria_flagExtraTriangles(mesh,&ntpres,&hashElt); + // Delete flagged triangles + ier = MMG5_chkBdryTria_deleteExtraTriangles(mesh, permtria); + } + ntmesh +=ntpres; + + /** Step 3: add the missing boundary triangles or, if the mesh contains + * prisms, set to required the triangles at interface betwen prisms and tet */ + ier = MMG5_chkBdryTria_addMissingTriangles(mesh,ntmesh,ntpres); + + return 1; +} + +/** + * \param mesh pointer toward the mesh structure. + * \param hash pointer toward the hash table of edges. + * \param a index of the first extremity of the edge. + * \param b index of the second extremity of the edge. + * + * \param s If ls mode: 1 for a parallel edge that belongs + * to at least one element whose reference has to be splitted (either because we + * are not in multi-mat mode or because the reference is split in multi-mat + * mode). To avoid useless checks, some non parallel edges may be marked. + * If the edge belongs only to non-split references, s has to be 0. + * + * \return PMMG_SUCCESS if success, PMMG_FAILURE if fail (edge is not found). 
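 *
 * \remark Minimal usage sketch (illustrative; \a ia and \a ib stand for the two
 * endpoints of an edge that was previously stored in \a hash, for instance with
 * MMG5_hashEdge):
 * \code
 * if ( PMMG_hashUpdate_s( &hash, ia, ib, 1 ) != PMMG_SUCCESS ) {
 *   // the edge (ia,ib) is not stored in the hash table
 * }
 * \endcode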
+ * + * Update the value of the s field stored along the edge \f$[a;b]\f$ + * + */ +int PMMG_hashUpdate_s(MMG5_Hash *hash, MMG5_int a,MMG5_int b,MMG5_int s) { + MMG5_hedge *ph; + MMG5_int key; + MMG5_int ia,ib; + + ia = MG_MIN(a,b); + ib = MG_MAX(a,b); + key = (MMG5_KA*(int64_t)ia + MMG5_KB*(int64_t)ib) % hash->siz; + ph = &hash->item[key]; + + while ( ph->a ) { + if ( ph->a == ia && ph->b == ib ) { + ph->s = s; + return PMMG_SUCCESS; + } + + if ( !ph->nxt ) return PMMG_FAILURE; + + ph = &hash->item[ph->nxt]; + + } + + return PMMG_FAILURE; +} + +/** + * \param hash pointer toward the hash table of edges. + * \param a index of the first extremity of the edge. + * \param b index of the second extremity of the edge. + * \param k index of new point along the edge [a,b]. + * \param s If ls mode in ParMmg: index of new point in internal edge communicator; + * otherwise, the value stored in variable s. + * \return PMMG_SUCCESS if success, PMMG_FAILURE if fail (edge is not found). + * + * Find the index of the new point stored along the edge \f$[a;b]\f$ (similar to MMG5_hashGet in mmg). + * If ls mode in ParMmg: find the index of the new point in internal edge communicator; + * otherwise, find the value stored in variable s. + * + */ +MMG5_int PMMG_hashGet_all(MMG5_Hash *hash,MMG5_int a,MMG5_int b,MMG5_int *k,MMG5_int *s) { + MMG5_hedge *ph; + MMG5_int key; + MMG5_int ia,ib; + + if ( !hash->item ) return 0; + + ia = MG_MIN(a,b); + ib = MG_MAX(a,b); + key = (MMG5_KA*(int64_t)ia + MMG5_KB*(int64_t)ib) % hash->siz; + ph = &hash->item[key]; + + if ( !ph->a ) return PMMG_FAILURE; + if ( ph->a == ia && ph->b == ib ) { + *k = ph->k; + *s = ph->s; + return PMMG_SUCCESS; + } + while ( ph->nxt ) { + ph = &hash->item[ph->nxt]; + if ( ph->a == ia && ph->b == ib ) { + *k = ph->k; + *s = ph->s; + return PMMG_SUCCESS; + } + } + return PMMG_FAILURE; +} diff --git a/src/hdf_pmmg.h b/src/hdf_pmmg.h new file mode 100644 index 00000000..40c06bf7 --- /dev/null +++ b/src/hdf_pmmg.h @@ -0,0 +1,52 @@ +/* ============================================================================= +** This file is part of the parmmg software package for parallel tetrahedral +** mesh modification. +** Copyright (c) Bx INP/Inria/UBordeaux, 2017- +** +** parmmg is free software: you can redistribute it and/or modify it +** under the terms of the GNU Lesser General Public License as published +** by the Free Software Foundation, either version 3 of the License, or +** (at your option) any later version. +** +** parmmg is distributed in the hope that it will be useful, but WITHOUT +** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +** FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +** License for more details. +** +** You should have received a copy of the GNU Lesser General Public +** License and of the GNU General Public License along with parmmg (in +** files COPYING.LESSER and COPYING). If not, see +** . Please read their terms carefully and +** use this copy of the parmmg distribution only if you accept them. +** ============================================================================= +*/ + +#ifdef USE_HDF5 + +#ifndef HDF_PMMG_H +#define HDF_PMMG_H +/** + * \file hdf_pmmg.h + * \brief HDF5 tools that are used in different part of parMmg + * \author Gabriel Suau (Bx INP/Inria) + * \version 5 + * \copyright GNU Lesser General Public License. 
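 *
 * \remark The HDF_CHECK macro defined below runs an HDF5 call and executes a
 * caller-supplied failure action when the call returns a negative value. A
 * minimal, illustrative use (assuming an open file identifier \a file_id):
 * \code
 * HDF_CHECK( H5Fclose(file_id), return 0 );
 * \endcode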
+ * + */ +#include "hdf5.h" + +#define HDF_CHECK(func_call,on_failure) do { \ + int hdf_ret_val; \ + \ + hdf_ret_val = func_call; \ + \ + if ( hdf_ret_val < 0 ) { \ + fprintf(stderr," ## Error: %s:%d: HDF5 error\n.", \ + __func__,__LINE__ ); \ + on_failure; \ + } \ + } while(0) + +#endif + +#endif diff --git a/src/inout_pmmg.c b/src/inout_pmmg.c index ebcb1a34..777d1c84 100644 --- a/src/inout_pmmg.c +++ b/src/inout_pmmg.c @@ -41,9 +41,10 @@ * \return the number of digits of n. * */ -static inline int PMMG_count_digits(int n) { + if ( n==0 ) return 1; + int count = 0; while (n != 0) { n /= 10; @@ -211,6 +212,10 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { assert( parmesh->ngrp == 1 ); mesh = parmesh->listgrp[0].mesh; + /** Open mesh file */ + ier = MMG3D_openMesh(mesh->info.imprim,filename,&inm,&bin,"rb","rb"); + if ( !ier ) return 0; + /* A non-// tria may be marked as // in Medit serial I/O (if its 3 edges are * //): as we can infer // triangles from communicators, reset useless (and * maybe erroneous) tags */ @@ -224,9 +229,6 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { } } - /** Open mesh file */ - ier = MMG3D_openMesh(mesh->info.imprim,filename,&inm,&bin,"rb","rb"); - /** Read communicators */ pos = 0; ncomm = 0; @@ -241,8 +243,10 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { fgets(strskip,MMG5_FILESTR_LGTH,inm); continue; } - - if(!strncmp(chaine,"ParallelTriangleCommunicators",strlen("ParallelTriangleCommunicators"))) { + if (!strncmp(chaine,"NumberOfPartitions",strlen("NumberOfPartitions"))) { + MMG_FSCANF(inm,"%d",&parmesh->info.npartin); + continue; + } else if(!strncmp(chaine,"ParallelTriangleCommunicators",strlen("ParallelTriangleCommunicators"))) { MMG_FSCANF(inm,"%d",&ncomm); pos = ftell(inm); API_mode = PMMG_APIDISTRIB_faces; @@ -250,7 +254,10 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { } else if(!strncmp(chaine,"ParallelVertexCommunicators",strlen("ParallelVertexCommunicators"))) { MMG_FSCANF(inm,"%d",&ncomm); pos = ftell(inm); - API_mode = PMMG_APIDISTRIB_nodes; + if (API_mode == PMMG_UNSET) { + /** if both parallel nodes and parallel faces are provided, use faces to build communicators */ + API_mode = PMMG_APIDISTRIB_nodes; + } break; } } @@ -267,7 +274,14 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { while(fread(&binch,MMG5_SW,1,inm)!=0 && endcount != 2 ) { if(iswp) binch=MMG5_swapbin(binch); if(binch==54) break; - if(!ncomm && binch==70) { // ParallelTriangleCommunicators + if( binch==74 ) { // NumberOfCommunicators + MMG_FREAD(&bpos,MMG5_SW,1,inm); //NulPos + if(iswp) bpos=MMG5_swapbin(bpos); + MMG_FREAD(&parmesh->info.npartin,MMG5_SW,1,inm); + if(iswp) parmesh->info.npartin=MMG5_swapbin(parmesh->info.npartin); + pos = ftell(inm); + } + else if(!ncomm && binch==70) { // ParallelTriangleCommunicators MMG_FREAD(&bpos,MMG5_SW,1,inm); //NulPos if(iswp) bpos=MMG5_swapbin(bpos); MMG_FREAD(&ncomm,MMG5_SW,1,inm); @@ -281,7 +295,9 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { MMG_FREAD(&ncomm,MMG5_SW,1,inm); if(iswp) ncomm=MMG5_swapbin(ncomm); pos = ftell(inm); - API_mode = PMMG_APIDISTRIB_nodes; + if (API_mode == PMMG_UNSET) { + API_mode = PMMG_APIDISTRIB_nodes; + } rewind(inm); fseek(inm,bpos,SEEK_SET); continue; @@ -298,22 +314,27 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { } /* Set API mode */ - if( API_mode == PMMG_UNSET ) { - fprintf(stderr,"### 
Error: No parallel communicators provided on rank %d!\n",parmesh->myrank); + if ( API_mode == PMMG_UNSET ) { + fprintf(stderr,"## Error: No parallel communicators provided on rank %d!\n",parmesh->myrank); + fclose(inm); return 0; } else if( !PMMG_Set_iparameter( parmesh, PMMG_IPARAM_APImode, API_mode ) ) { + fclose(inm); return 0; } /* memory allocation */ - PMMG_CALLOC(parmesh,nitem_comm,ncomm,int,"nitem_comm",return 0); - PMMG_CALLOC(parmesh,color,ncomm,int,"color",return 0); - PMMG_CALLOC(parmesh,idx_loc,ncomm,int*,"idx_loc pointer",return 0); - PMMG_CALLOC(parmesh,idx_glo,ncomm,int*,"idx_glo pointer",return 0); + PMMG_CALLOC(parmesh,nitem_comm,ncomm,int,"nitem_comm",fclose(inm);return 0); + PMMG_CALLOC(parmesh,color,ncomm,int,"color",fclose(inm);return 0); + PMMG_CALLOC(parmesh,idx_loc,ncomm,int*,"idx_loc pointer",fclose(inm);return 0); + PMMG_CALLOC(parmesh,idx_glo,ncomm,int*,"idx_glo pointer",fclose(inm);return 0); /* Load the communicator */ if( !PMMG_loadCommunicator( parmesh,inm,bin,iswp,pos,ncomm,nitem_comm,color, - idx_loc,idx_glo ) ) return 0; + idx_loc,idx_glo ) ) { + fclose(inm); + return 0; + } /* Set triangles or nodes interfaces depending on API mode */ switch( API_mode ) { @@ -359,6 +380,11 @@ int PMMG_loadCommunicators( PMMG_pParMesh parmesh,const char *filename ) { break; } + /* Close the file */ + if( filename ) { + fclose(inm); + } + /* Release memory and return */ PMMG_DEL_MEM(parmesh,nitem_comm,int,"nitem_comm"); PMMG_DEL_MEM(parmesh,color,int,"color"); @@ -401,7 +427,7 @@ void PMMG_insert_rankIndex(PMMG_pParMesh parmesh,char **endname,const char *init strcpy(*endname,initname); - ptr = strstr(*endname,".meshb"); + ptr = strstr(*endname,binext); fmt = 0; /* noext */ if( ptr ) { @@ -409,13 +435,15 @@ void PMMG_insert_rankIndex(PMMG_pParMesh parmesh,char **endname,const char *init fmt = 1; /* binary */ } else { - ptr = strstr(*endname,".mesh"); + ptr = strstr(*endname,ASCIIext); if( ptr ) { *ptr = '\0'; fmt = 2; /* ASCII */ } } - sprintf(*endname, "%s.%d", *endname, parmesh->myrank ); + int len = strlen(*endname); + int rank_len = PMMG_count_digits(parmesh->myrank); + snprintf((*endname)+len,2+rank_len*sizeof(int), ".%d",parmesh->myrank ); if ( fmt==1 ) { strcat ( *endname, binext ); } @@ -443,7 +471,7 @@ int PMMG_loadMesh_distributed(PMMG_pParMesh parmesh,const char *filename) { char* data = NULL; if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -467,16 +495,82 @@ int PMMG_loadMesh_distributed(PMMG_pParMesh parmesh,const char *filename) { ier = MMG3D_loadMesh(mesh,data); + /* Check the presence of a partition on root rank (to allow the future + * broadcast of npartin from root rank) */ + if ( parmesh->info.root == parmesh->myrank ) { + if ( !ier ) { + fprintf(stderr,"\n ## Error: %s: the specified root rank expects to find a" + " mesh file containing a partition.\n",__func__); + } + } + + /* Count the number of partitions used to write the input mesh */ + int count_npartin; + int has_file = (ier > 0) ? 
1 : 0 ; + MPI_Allreduce(&has_file,&count_npartin,1,MPI_INT,MPI_SUM,parmesh->comm); + + /* Check mesh parser errors */ + int ier_glob; + MPI_Allreduce(&ier,&ier_glob,1,MPI_INT,MPI_MIN,parmesh->comm); + if ( ier_glob < 0) { + /* Mesh has been opened with success but either one reader at least has + * failed or root rank has no mesh file */ + MMG5_SAFE_FREE(data); + return 0; + } + /* Restore the mmg verbosity to its initial value */ mesh->info.imprim = parmesh->info.mmg_imprim; - if ( ier < 1 ) { - MMG5_SAFE_FREE(data); - return ier; + if ( ier ) { + /* Load parallel communicators */ + ier = PMMG_loadCommunicators( parmesh,data ); } + else { + /* Very ugly : if the rank is above the number of partitions of the input mesh, + allocate the internal communicators. + Also set ngrp to 0 for loadBalancing to work properly. */ + parmesh->ngrp = 0; - /* Load parallel communicators */ - ier = PMMG_loadCommunicators( parmesh,data ); + parmesh->info.API_mode = PMMG_APIDISTRIB_faces; + PMMG_CALLOC(parmesh, parmesh->int_node_comm, 1, PMMG_Int_comm, "int_node_comm", return 0); + PMMG_CALLOC(parmesh, parmesh->int_face_comm, 1, PMMG_Int_comm, "int_face_comm", return 0); + + ier = 1; + } + + /* If we load mesh from an higer number of processes, some processes haven't + * readed npartin (which can be lower or greater than the number of procs). + * Broadcast this value from rank root: it means that we assume that the root + * process has an input file. */ + MPI_CHECK(MPI_Bcast(&parmesh->info.npartin,1,MPI_INT,parmesh->info.root,parmesh->comm), + MMG5_SAFE_FREE(data);ier=0); + + /* If mesh contains info about the number of partitions saved, check that it + * matches the number of loaded files */ + if ( parmesh->info.npartin != count_npartin ) { + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr,"\n ## Error: %s: Unable to load mesh, %d partition(s) " + "expected, %d file(s) found.\n",__func__,parmesh->info.npartin, + count_npartin); + } + return 0; + } + + /* Reading more partitions than there are procs available is not supported yet */ + if ( parmesh->info.npartin > parmesh->nprocs ) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Can't read %d partitions with %d procs yet.\n", + __func__, parmesh->info.npartin, parmesh->nprocs); + } + return 0; + } + + /* Set the new communicator containing the procs reading the mesh */ + int mpi_color = (parmesh->myrank < parmesh->info.npartin) ? 
1 : 0; + + MPI_CHECK( MPI_Comm_split(parmesh->comm,mpi_color,parmesh->myrank,&parmesh->info.read_comm), + MMG5_SAFE_FREE(data);ier=0); MMG5_SAFE_FREE(data); @@ -495,7 +589,7 @@ int PMMG_loadMesh_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -536,7 +630,7 @@ int PMMG_loadMet_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -573,8 +667,12 @@ int PMMG_loadMet_distributed(PMMG_pParMesh parmesh,const char *filename) { int ier; char *data = NULL; - if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + if ( parmesh->myrank >= parmesh->info.npartin ) { + assert ( !parmesh->ngrp ); + return 1; + } + else if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -624,7 +722,7 @@ int PMMG_loadLs_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -666,7 +764,7 @@ int PMMG_loadDisp_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -708,7 +806,7 @@ int PMMG_loadSol_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -745,6 +843,79 @@ int PMMG_loadSol_centralized(PMMG_pParMesh parmesh,const char *filename) { return ier; } +int PMMG_loadSol_distributed(PMMG_pParMesh parmesh,const char *filename) { + MMG5_pMesh mesh; + MMG5_pSol sol; + int ier; + char *data = NULL; + + if ( parmesh->myrank >= parmesh->info.npartin ) { + assert ( !parmesh->ngrp ); + return 1; + } + else if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", + __func__); + return 0; + } + + mesh = parmesh->listgrp[0].mesh; + + /* For each mode: pointer over the solution structure to load */ + if ( mesh->info.lag >= 0 ) { + sol = parmesh->listgrp[0].disp; + } + else if ( mesh->info.iso ) { + sol = parmesh->listgrp[0].ls; + } + else { + sol = parmesh->listgrp[0].met; + } + + /* Add rank index to mesh name */ + if ( filename ) { + PMMG_insert_rankIndex(parmesh,&data,filename,".sol", ".sol"); + } + else if ( mesh->info.lag >= 0 && parmesh->dispin ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->dispin,".sol", ".sol"); + } + else if ( mesh->info.iso && parmesh->lsin ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->lsin,".sol", ".sol"); + } + else if ( parmesh->metin ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->metin,".sol", ".sol"); + } + else if ( mesh->info.lag >= 0 && sol->namein ) { + 
PMMG_insert_rankIndex(parmesh,&data,sol->namein,".sol", ".sol"); + } + else if ( mesh->info.iso && sol->namein ) { + PMMG_insert_rankIndex(parmesh,&data,sol->namein,".sol", ".sol"); + } + else if ( sol->namein ) { + PMMG_insert_rankIndex(parmesh,&data,sol->namein,".sol", ".sol"); + } + + /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ + assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); + mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); + + ier = MMG3D_loadSol(mesh,sol,data); + + /* Restore the mmg verbosity to its initial value */ + mesh->info.imprim = parmesh->info.mmg_imprim; + + return ier; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of the file to load the solutions from. + * + * \return 0 if fail, 1 otherwise + * + * Load a set of centralized solutions. + * + */ int PMMG_loadAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { MMG5_pMesh mesh; MMG5_pSol *sol; @@ -756,7 +927,7 @@ int PMMG_loadAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -785,23 +956,54 @@ int PMMG_loadAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { } -/** - * \param parmesh pointer toward the parmesh structure. - * \param filename name of the file to load the mesh from. - * - * \return 0 if fail, 1 otherwise - * - * Save a distributed mesh with parallel communicators in Medit format (only one - * group per process is allowed). - * - */ +int PMMG_loadAllSols_distributed(PMMG_pParMesh parmesh,const char *filename) { + + MMG5_pMesh mesh; + MMG5_pSol *sol; + int ier; + char *data = NULL; + + if ( parmesh->myrank >= parmesh->info.npartin ) { + assert ( !parmesh->ngrp ); + return 1; + } + else if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", + __func__); + return 0; + } + mesh = parmesh->listgrp[0].mesh; + sol = &parmesh->listgrp[0].field; + + /* Add rank index to mesh name */ + if ( filename ) { + PMMG_insert_rankIndex(parmesh,&data,filename,".sol", ".sol"); + } + else if ( parmesh->fieldin ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->fieldin,".sol", ".sol"); + } + + /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ + assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); + mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); + + ier = MMG3D_loadAllSols(mesh,sol,data); + + /* Restore the mmg verbosity to its initial value */ + mesh->info.imprim = parmesh->info.mmg_imprim; + + MMG5_SAFE_FREE(data); + + return ier; +} + int PMMG_saveMesh_distributed(PMMG_pParMesh parmesh,const char *filename) { MMG5_pMesh mesh; int ier; char *data = NULL; if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -853,7 +1055,7 @@ int PMMG_saveMesh_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -886,7 +1088,7 @@ int PMMG_saveMet_centralized(PMMG_pParMesh 
parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -917,7 +1119,7 @@ int PMMG_saveMet_distributed(PMMG_pParMesh parmesh,const char *filename) { char *data = NULL; if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -956,6 +1158,57 @@ int PMMG_saveMet_distributed(PMMG_pParMesh parmesh,const char *filename) { return ier; } +int PMMG_saveLs_centralized(PMMG_pParMesh parmesh,const char *filename) { + MMG5_pMesh mesh; + MMG5_pSol *sol; + int ier; + + if ( parmesh->myrank!=parmesh->info.root ) { + return 1; + } + + if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", + __func__); + return 0; + } + mesh = parmesh->listgrp[0].mesh; + sol = &parmesh->listgrp[0].ls; + + /* Except in cases where we want to partition and input mesh + ls without + * inserting the ls and remeshing, the level-set will be deallocated */ + if ( (!*sol) || (!(*sol)->m) ) { + if ( parmesh->ddebug ) { + fprintf(stdout," %s: The Level-set is not allocated. Nothing to save.\n", + __func__); + } + return 1; + } + + /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ + assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); + mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); + + /* Dirty hack: temporary replace the number of solution fields inside the mesh to allow the use of the saveAllSols function */ + int nfields = mesh->nsols; + mesh->nsols = 1; + + if ( filename && *filename ) { + ier = MMG3D_saveAllSols(mesh,sol,filename); + } + else { + ier = MMG3D_saveAllSols(mesh,sol,parmesh->lsout); + } + + /* Restore the mmg verbosity to its initial value */ + mesh->info.imprim = parmesh->info.mmg_imprim; + + /* Restore the number of solution fields */ + mesh->nsols = nfields; + + return ier; +} + int PMMG_saveAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { MMG5_pMesh mesh; MMG5_pSol sol; @@ -966,7 +1219,7 @@ int PMMG_saveAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { } if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", __func__); return 0; } @@ -989,3 +1242,3844 @@ int PMMG_saveAllSols_centralized(PMMG_pParMesh parmesh,const char *filename) { return ier; } + +int PMMG_saveLs_distributed(PMMG_pParMesh parmesh,const char *filename) { + MMG5_pMesh mesh; + MMG5_pSol sol; + int ier; + char *data = NULL; + + if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", + __func__); + return 0; + } + mesh = parmesh->listgrp[0].mesh; + sol = parmesh->listgrp[0].ls; + + /* Except in cases where we want to partition and input mesh + ls without + * inserting the ls and remeshing, the level-set will be deallocated */ + int noLs; + + // noLsOnRank has to be 1 if we don't have the ls on the rank, 0 if ls is allocated + int noLsOnRank = ((!sol) || (!sol->m)) ? 
1 : 0; + + MPI_Allreduce( &noLsOnRank, &noLs, 1, MPI_INT, MPI_MIN, parmesh->comm ); + + if ( noLs ) { + if ( parmesh->ddebug && parmesh->myrank==parmesh->info.root ) { + fprintf(stdout," %s: The Level-set is not allocated. Nothing to save.\n", + __func__); + } + return 1; + } + + if ( noLsOnRank ) { + fprintf(stderr," ## Error: %s: Distributed saving with empty level-set on one rank not implemented.\n", + __func__); + return 0; + } + + /* Add rank index to mesh name */ + if ( filename ) { + PMMG_insert_rankIndex(parmesh,&data,filename,".sol", ".sol"); + } + else if ( parmesh->lsout ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->lsout,".sol", ".sol"); + } + + /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ + assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); + mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); + + /* Dirty hack: temporary replace the number of solution fields inside the mesh to allow the use of the saveAllSols function */ + int nfields = mesh->nsols; + mesh->nsols = 1; + + ier = MMG3D_saveAllSols(mesh,&sol,data); + + /* Restore the mmg verbosity to its initial value */ + mesh->info.imprim = parmesh->info.mmg_imprim; + + /* Restore the number of solution fields */ + mesh->nsols = nfields; + + return ier; +} + +int PMMG_saveAllSols_distributed(PMMG_pParMesh parmesh,const char *filename) { + MMG5_pMesh mesh; + MMG5_pSol *sol; + int ier; + char *data = NULL; + + if ( parmesh->ngrp != 1 ) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.\n", + __func__); + return 0; + } + mesh = parmesh->listgrp[0].mesh; + sol = &parmesh->listgrp[0].field; + + /* Add rank index to mesh name */ + if ( filename ) { + PMMG_insert_rankIndex(parmesh,&data,filename,".sol", ".sol"); + } + else if ( parmesh->fieldout ) { + PMMG_insert_rankIndex(parmesh,&data,parmesh->fieldout,".sol", ".sol"); + } + + /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ + assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); + mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); + + ier = MMG3D_saveAllSols(mesh,sol,data); + + + /* Restore the mmg verbosity to its initial value */ + mesh->info.imprim = parmesh->info.mmg_imprim; + + return ier; +} + +#ifdef USE_HDF5 +/** + * \param parmesh pointer toward the parmesh structure. + * \param nentities array of size PMMG_IO_ENTITIES_size * nprocs that will contain the number of entities of every proc. + * \param nentitiesl array of size PMMG_IO_ENTITIES_size that will contain the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size that will contain the global number of entities. + * + * \return 1, there is no reason for this function to fail. + * + * Count the local and global number of entities in the parallel mesh (only one + * group per process is allowed). 
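 *
 * \remark Sketch of a typical call sequence (illustrative; \a io_entities is
 * the caller-provided array of PMMG_IO_ENTITIES_size flags selecting the entity
 * types to save, and \a nentitiesg must be zero-initialized since the global
 * counts are accumulated into it):
 * \code
 * hsize_t *nentities = NULL;
 * hsize_t  nentitiesl[PMMG_IO_ENTITIES_size] = {0};
 * hsize_t  nentitiesg[PMMG_IO_ENTITIES_size] = {0};
 * PMMG_CALLOC( parmesh, nentities, PMMG_IO_ENTITIES_size*parmesh->nprocs,
 *              hsize_t, "nentities", return 0 );
 * PMMG_countEntities( parmesh, nentities, nentitiesl, nentitiesg, io_entities );
 * \endcode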
+ * + */ +static int PMMG_countEntities(PMMG_pParMesh parmesh, hsize_t *nentities, hsize_t *nentitiesl, hsize_t* nentitiesg, int *io_entities) { + /* MMG variables */ + PMMG_pGrp grp; + MMG5_pMesh mesh; + MMG5_pPoint ppt; + MMG5_pEdge pa; + MMG5_pTria pt; + MMG5_pQuad pq; + MMG5_pTetra pe; + MMG5_pPrism pp; + /* Local number of entities */ + hsize_t ne, np, nt, na, nquad, nprism; /* Tetra, points, triangles, edges, quads, prisms */ + hsize_t nc, npreq, nppar; /* Corners, required and parallel vertices */ + hsize_t nr, nedreq, nedpar; /* Ridges, required and parallel edges */ + hsize_t ntreq, ntpar; /* Required and parallel triangles */ + hsize_t nqreq, nqpar; /* Required and parallel quads */ + hsize_t nereq, nepar; /* Required and parallel tetra */ + hsize_t nnor, ntan; /* Normals and Tangents */ + /* MPI variables */ + int rank, root, nprocs; + + /* Only one group is allowed */ + assert( parmesh->ngrp == 1 ); + + /* Set MPI variables */ + nprocs = parmesh->nprocs; + rank = parmesh->myrank; + root = parmesh->info.root; + + /* Set mesh size to 0 */ + np = na = nt = nquad = ne = nprism = 0; + nc = npreq = nppar = 0; + nr = nedreq = nedpar = 0; + ntreq = ntpar = 0; + nqreq = nqpar = 0; + nereq = nepar = 0; + nnor = ntan = 0; + + /* Set ParMmg variables */ + grp = &parmesh->listgrp[0]; + mesh = grp->mesh; + ppt = NULL; + pa = NULL; + pt = NULL; + pq = NULL; + pe = NULL; + pp = NULL; + + /* Check arguments */ + assert( nentities && "\n ## Error: %s: nentities array not allocated.\n" ); + assert( nentitiesl && "\n ## Error: %s: nentitiesl array not allocated.\n" ); + assert( nentitiesg && "\n ## Error: %s: nentitiesg array not allocated.\n" ); + + /* Count local entities */ + + /* Vertices, normals and tangents */ + for (int k = 1 ; k <= mesh->np ; k++) { + ppt = &mesh->point[k]; + if (MG_VOK(ppt)) { + ppt->tmp = ++np; + ppt->flag = 0; + if (ppt->tag & MG_CRN) nc++; + if (ppt->tag & MG_REQ) npreq++; + if (ppt->tag & MG_PARBDY) nppar++; + if (MG_SIN(ppt->tag)) continue; + if ((ppt->tag & MG_BDY) && (!(ppt->tag & MG_GEO) || (ppt->tag & MG_NOM))) nnor++; + if (MG_EDG(ppt->tag) || (ppt->tag & MG_NOM)) ntan++; + } + } + + /* Edges */ + if (mesh->na) { + for (int k = 1 ; k <= mesh->na ; k++) { + pa = &mesh->edge[k]; + na++; + if (pa->tag & MG_GEO) nr++; + if (pa->tag & MG_REQ) nedreq++; + if (pa->tag & MG_PARBDY) nedpar++; + } + } + + /* Triangles */ + if (mesh->nt) { + for (int k = 1 ; k <= mesh->nt ; k++) { + pt = &mesh->tria[k]; + nt++; + if (pt->tag[0] & MG_REQ && pt->tag[1] & MG_REQ && pt->tag[2] & MG_REQ) ntreq++; + if (pt->tag[0] & MG_PARBDY && pt->tag[1] & MG_PARBDY && pt->tag[2] & MG_PARBDY) ntpar++; + } + } + + /* Quadrilaterals */ + if (mesh->nquad) { + for (int k = 1 ; k <= mesh->nquad ; k++) { + pq = &mesh->quadra[k]; + nquad++; + if (pq->tag[0] & MG_REQ && pq->tag[1] & MG_REQ && + pq->tag[2] & MG_REQ && pq->tag[3] & MG_REQ) { + nqreq++; + } + if (pq->tag[0] & MG_PARBDY && pq->tag[1] & MG_PARBDY && + pq->tag[2] & MG_PARBDY && pq->tag[3] & MG_PARBDY) { + nqpar++; + } + } + } + + /* Tetrahedra */ + if (mesh->ne) { + for (int k = 1 ; k <= mesh->ne ; k++) { + pe = &mesh->tetra[k]; + if (!MG_EOK(pe)) { + continue; + } + ne++; + if (pe->tag & MG_REQ) nereq++; + if (pe->tag & MG_PARBDY) nepar++; + } + } + + /* Prisms */ + if (mesh->nprism) { + for (int k = 1 ; k <= mesh->nprism ; k++) { + pp = &mesh->prism[k]; + if (!MG_EOK(pp)){ + continue; + } + nprism++; + } + } + + /* Gather the number of entities in the nentities array */ + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Vertex] = np; + 
nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Edge] = na; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Tria] = nt; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Quad] = nquad; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Tetra] = ne; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Prism] = nprism; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Corner] = nc; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_RequiredVertex] = npreq; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_ParallelVertex] = nppar; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Ridge] = nr; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_RequiredEdge] = nedreq; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_ParallelEdge] = nedpar; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_RequiredTria] = ntreq; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_ParallelTria] = ntpar; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_RequiredQuad] = nqreq; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_ParallelQuad] = nqpar; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_RequiredTetra] = nereq; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_ParallelTetra] = nepar; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Normal] = nnor; + nentities[PMMG_IO_ENTITIES_size * rank + PMMG_IO_Tangent] = ntan; + + /* Set the number of all entities that are not saved to zero */ + for (int typent = 0 ; typent < PMMG_IO_ENTITIES_size ; typent++) { + if (!io_entities[typent]) { + nentities[PMMG_IO_ENTITIES_size * rank + typent] = 0; + } + } + + /* Count local entities */ + for (int typent = 0 ; typent < PMMG_IO_ENTITIES_size ; typent++) { + nentitiesl[typent] = nentities[PMMG_IO_ENTITIES_size * rank + typent]; + } + + MPI_Allgather(&nentities[PMMG_IO_ENTITIES_size * rank], PMMG_IO_ENTITIES_size, MPI_UNSIGNED_LONG_LONG, + nentities , PMMG_IO_ENTITIES_size, MPI_UNSIGNED_LONG_LONG, parmesh->comm); + + /* Count global entities */ + for (int k = 0 ; k < nprocs ; k++) { + for (int typent = 0 ; typent < PMMG_IO_ENTITIES_size ; typent++) { + nentitiesg[typent] += nentities[PMMG_IO_ENTITIES_size * k + typent]; + } + } + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param nentities array of size PMMG_IO_ENTITIES_size * nprocs that contains the number of entities of every proc. + * \param offset array of size 2 * PMMG_IO_ENTITIES_size that will contain the offsets for each type of entity. + * + * \return 1 + * + * Compute the offset for parallel writing/reading in an HDF5 file. + * + */ +static inline int PMMG_computeHDFoffset(PMMG_pParMesh parmesh, hsize_t *nentities, hsize_t *offset) { + for (int k = 0 ; k < parmesh->myrank ; k++) { + for (int typent = 0 ; typent < PMMG_IO_ENTITIES_size ; typent++) { + offset[2 * typent] += nentities[PMMG_IO_ENTITIES_size * k + typent]; + } + } + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param file_id identifier of the HDF5 file in which to write the mesh + * + * \return 1, there is no reason for this function to fail. + * + * Save the version and the dimension of \a parmesh aswell as the number of + * partitions and the API mode into the opened HDF5 file \a file_id. 
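 *
 * \remark The attributes are created collectively on every rank while only the
 * root rank writes their values, so \a file_id is expected to come from a
 * collective open. An illustrative call sequence (the file name and property
 * list handling are assumptions, not taken from this file):
 * \code
 * hid_t fapl_id = H5Pcreate( H5P_FILE_ACCESS );
 * H5Pset_fapl_mpio( fapl_id, parmesh->comm, MPI_INFO_NULL );
 * hid_t file_id = H5Fcreate( "out.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id );
 * PMMG_saveHeader_hdf5( parmesh, file_id );
 * H5Pclose( fapl_id );
 * \endcode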
+ * + */ +static int PMMG_saveHeader_hdf5(PMMG_pParMesh parmesh, hid_t file_id) { + MMG5_pMesh mesh; + hid_t dspace_id; + hid_t attr_id; + int rank, root; + + mesh = parmesh->listgrp[0].mesh; + rank = parmesh->myrank; + root = parmesh->info.root; + + dspace_id = H5Screate(H5S_SCALAR); + + attr_id = H5Acreate(file_id, "MeshVersionFormatted", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT); + if (rank == root) + H5Awrite(attr_id, H5T_NATIVE_INT, &mesh->ver); + H5Aclose(attr_id); + + attr_id = H5Acreate(file_id, "Dimension", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT); + if (rank == root) + H5Awrite(attr_id, H5T_NATIVE_INT, &mesh->dim); + H5Aclose(attr_id); + + attr_id = H5Acreate(file_id, "NumberOfPartitions", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT); + if (rank == root) + H5Awrite(attr_id, H5T_NATIVE_INT, &parmesh->nprocs); + H5Aclose(attr_id); + + attr_id = H5Acreate(file_id, "API_mode", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT); + if (rank == root) + H5Awrite(attr_id, H5T_NATIVE_INT, &parmesh->info.API_mode); + H5Aclose(attr_id); + + H5Sclose(dspace_id); + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_entities_id identifier of the HDF5 group in which to write the mesh entities. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of size PMMG_IO_ENTITIES_size containing the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size containing the global number of entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel writing. + * \param save_entities array of size PMMG_IO_ENTITIES_size to tell which entities are to be saved. + * + * \return 0 if fail, 1 otherwise + * + * Save the mesh entities in the \a grp_entities_id group of an HDF5 file (only + * one group per process is allowed). 
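Every dataset written by the function declared below follows one collective pattern: a memory dataspace sized to the local count, a file dataspace sized to the global count, a hyperslab selected at this rank's row offset, then H5Dcreate/H5Dwrite using the caller's creation (dcpl_id) and transfer (dxpl_id) property lists. A condensed sketch of that pattern, with hypothetical sizes nloc, nglob, ncol and row_off:

#include <hdf5.h>

/* Write the local (nloc x ncol) block of a global (nglob x ncol) dataset,
 * starting at row row_off. dcpl_id/dxpl_id come from the caller. */
static void write_block(hid_t grp_id, const char *name, const int *buf,
                        hsize_t nloc, hsize_t nglob, hsize_t ncol,
                        hsize_t row_off, hid_t dcpl_id, hid_t dxpl_id) {
  hsize_t dim_mem[2]  = {nloc,  ncol};
  hsize_t dim_file[2] = {nglob, ncol};
  hsize_t start[2]    = {row_off, 0};

  hid_t mspace = H5Screate_simple(2, dim_mem,  NULL);
  hid_t fspace = H5Screate_simple(2, dim_file, NULL);
  H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, dim_mem, NULL);

  hid_t dset = H5Dcreate(grp_id, name, H5T_NATIVE_INT, fspace,
                         H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
  H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl_id, buf);

  H5Dclose(dset);
  H5Sclose(mspace);
  H5Sclose(fspace);
}

In the code below the same two-element start selection is obtained by passing &offset[2 * PMMG_IO_<Entity>], whose second element is always 0.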
+ * + */ +static int PMMG_saveMeshEntities_hdf5(PMMG_pParMesh parmesh, hid_t grp_entities_id, hid_t dcpl_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *nentitiesg, hsize_t *offset, int *save_entities) { + /* MMG variables */ + PMMG_pGrp grp; + MMG5_pMesh mesh; + MMG5_pPoint ppt; + MMG5_pEdge pa; + MMG5_pTria pt; + MMG5_pQuad pq; + MMG5_pTetra pe; + MMG5_pPrism pp; + MMG5_pxPoint pxp; + + /* Local mesh size */ + hsize_t ne, np, nt, na, nquad, nprism; /* Tetra, points, triangles, edges, quads, prisms */ + hsize_t nc, npreq, nppar; /* Corners, required and parallel vertices */ + hsize_t nr, nedreq, nedpar; /* Ridges, required and parallel edges */ + hsize_t ntreq, ntpar; /* Required and parallel triangles */ + hsize_t nqreq, nqpar; /* Required and parallel quads */ + hsize_t nereq, nepar; /* Required and parallel tetra */ + hsize_t nnor, ntan; /* Normals and Tangents */ + /* Global mesh size */ + hsize_t neg, npg, ntg, nag, nquadg, nprismg; /* Tetra, points, triangles, edges, quads, prisms */ + hsize_t ncg, npreqg, npparg; /* Corners, required and parallel vertices */ + hsize_t nrg, nedreqg, nedparg; /* Ridges, required and parallel edges */ + hsize_t ntreqg, ntparg; /* Required and parallel triangles */ + hsize_t nqreqg, nqparg; /* Required and parallel quads */ + hsize_t nereqg, neparg; /* Required and parallel tetra */ + hsize_t nnorg, ntang; /* Normals and Tangents */ + + /* Mesh buffer arrays */ + /* 6 buffers is the minimum amount for what we have to do */ + double *ppoint; /* Point coordinates */ + int *pent; /* Other entities : edges, trias, quads, tetra, prisms. */ + int *pcr; /* Corners and ridges */ + int *preq, *ppar; /* Required and parallel entities */ + int *pref; /* References */ + + /* Normals and tangents */ + /* We could reuse the previous buffers, but the names would be confusing */ + int *pnorat, *ptanat; /* Normals and Tangents at vertices */ + double *pnor, *ptan; /* Normals and Tangents */ + + /* Counters for the corners/ridges, the required and parallel entities, the normals and the tangents */ + int crcount, reqcount, parcount, ncount, tcount; + + /* MPI variables */ + hsize_t rank, root; + hsize_t nprocs; + + /* HDF5 variables */ + hid_t dspace_mem_id, dspace_file_id; + hid_t dset_id; + + /*------------------------- INIT -------------------------*/ + + assert ( parmesh->ngrp == 1 ); + + /* Set all buffers to NULL */ + ppoint = NULL; + pent = NULL; + pcr = NULL; + preq = NULL; ppar = NULL; + pref = NULL; + pnor = NULL; ptan = NULL; + pnorat = NULL; ptanat = NULL; + + /* Set MPI variables */ + nprocs = parmesh->nprocs; + rank = parmesh->myrank; + root = parmesh->info.root; + + /* Set ParMmg variables */ + grp = &parmesh->listgrp[0]; + mesh = grp->mesh; + ppt = NULL; + pa = NULL; + pt = NULL; + pq = NULL; + pe = NULL; + pp = NULL; + + /* Get the number of entities */ + np = nentitiesl[PMMG_IO_Vertex]; + na = nentitiesl[PMMG_IO_Edge]; + nt = nentitiesl[PMMG_IO_Tria]; + nquad = nentitiesl[PMMG_IO_Quad]; + ne = nentitiesl[PMMG_IO_Tetra]; + nprism = nentitiesl[PMMG_IO_Prism]; + nc = nentitiesl[PMMG_IO_Corner]; + npreq = nentitiesl[PMMG_IO_RequiredVertex]; + nppar = nentitiesl[PMMG_IO_ParallelVertex]; + nr = nentitiesl[PMMG_IO_Ridge]; + nedreq = nentitiesl[PMMG_IO_RequiredEdge]; + nedpar = nentitiesl[PMMG_IO_ParallelEdge]; + ntreq = nentitiesl[PMMG_IO_RequiredTria]; + ntpar = nentitiesl[PMMG_IO_ParallelTria]; + nqreq = nentitiesl[PMMG_IO_RequiredQuad]; + nqpar = nentitiesl[PMMG_IO_ParallelQuad]; + nereq = nentitiesl[PMMG_IO_RequiredTetra]; + nepar = 
nentitiesl[PMMG_IO_ParallelTetra]; + nnor = nentitiesl[PMMG_IO_Normal]; + ntan = nentitiesl[PMMG_IO_Tangent]; + + npg = nentitiesg[PMMG_IO_Vertex]; + nag = nentitiesg[PMMG_IO_Edge]; + ntg = nentitiesg[PMMG_IO_Tria]; + nquadg = nentitiesg[PMMG_IO_Quad]; + neg = nentitiesg[PMMG_IO_Tetra]; + nprismg = nentitiesg[PMMG_IO_Prism]; + ncg = nentitiesg[PMMG_IO_Corner]; + npreqg = nentitiesg[PMMG_IO_RequiredVertex]; + npparg = nentitiesg[PMMG_IO_ParallelVertex]; + nrg = nentitiesg[PMMG_IO_Ridge]; + nedreqg = nentitiesg[PMMG_IO_RequiredEdge]; + nedparg = nentitiesg[PMMG_IO_ParallelEdge]; + ntreqg = nentitiesg[PMMG_IO_RequiredTria]; + ntparg = nentitiesg[PMMG_IO_ParallelTria]; + nqreqg = nentitiesg[PMMG_IO_RequiredQuad]; + nqparg = nentitiesg[PMMG_IO_ParallelQuad]; + nereqg = nentitiesg[PMMG_IO_RequiredTetra]; + neparg = nentitiesg[PMMG_IO_ParallelTetra]; + nnorg = nentitiesg[PMMG_IO_Normal]; + ntang = nentitiesg[PMMG_IO_Tangent]; + + /* Arrays for bidimensional dataspaces */ + hsize_t hnp[2] = {np, 3}; + hsize_t hna[2] = {na, 2}; + hsize_t hnt[2] = {nt, 3}; + hsize_t hnquad[2] = {nquad, 4}; + hsize_t hne[2] = {ne, 4}; + hsize_t hnprism[2] = {nprism, 2}; + hsize_t hnnor[2] = {nnor, 3}; + hsize_t hntan[2] = {ntan, 3}; + hsize_t hnpg[2] = {npg, 3}; + hsize_t hnag[2] = {nag, 2}; + hsize_t hntg[2] = {ntg, 3}; + hsize_t hnquadg[2] = {nquadg, 4}; + hsize_t hneg[2] = {neg, 4}; + hsize_t hnprismg[2] = {nprismg, 2}; + hsize_t hnnorg[2] = {nnorg, 3}; + hsize_t hntang[2] = {ntang, 3}; + + /* Vertices, Normals and Tangents */ + if (save_entities[PMMG_IO_Vertex] && npg) { + + PMMG_MALLOC(parmesh, ppoint, 3 * np, double, "ppoint", goto free_and_return ); + PMMG_MALLOC(parmesh, pref, np, int, "pref", goto free_and_return ); + + if (save_entities[PMMG_IO_Corner]) PMMG_MALLOC(parmesh, pcr, nc, int, "pcr", + goto free_and_return ); + if (save_entities[PMMG_IO_RequiredVertex]) PMMG_MALLOC(parmesh, preq, npreq, int, "preq", + goto free_and_return ); + if (save_entities[PMMG_IO_ParallelVertex]) PMMG_MALLOC(parmesh, ppar, nppar, int, "ppar", + goto free_and_return ); + + if (save_entities[PMMG_IO_Normal]) { + PMMG_MALLOC(parmesh, pnor, 3 * nnor, double, "pnor", goto free_and_return ); + PMMG_MALLOC(parmesh, pnorat, nnor, int, "pnorat", goto free_and_return ); + } + + if (save_entities[PMMG_IO_Tangent]) { + PMMG_MALLOC(parmesh, ptan, 3 * ntan, double, "ptan", goto free_and_return ); + PMMG_MALLOC(parmesh, ptanat, ntan, int, "ptanat", goto free_and_return ); + } + + crcount = reqcount = parcount = ncount = tcount = 0; + + for (int i = 0 ; i < mesh->np ; i++) { + ppt = &mesh->point[i + 1]; + if (!MG_VOK(ppt)) continue; + for (int j = 0 ; j < 3 ; j++) { + ppoint[3 * (ppt->tmp - 1) + j] = ppt->c[j]; + } + if (save_entities[PMMG_IO_Corner] && (ppt->tag & MG_CRN)) { + pcr[crcount++] = ppt->tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + if (save_entities[PMMG_IO_RequiredVertex] && (ppt->tag & MG_REQ)) { + preq[reqcount++] = ppt->tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + if (save_entities[PMMG_IO_ParallelVertex] && (ppt->tag & MG_PARBDY)) { + ppar[parcount++] = ppt->tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + if (!MG_SIN(ppt->tag)) { + /* Normals */ + if (save_entities[PMMG_IO_Normal]) { + if ((ppt->tag & MG_BDY) && (!(ppt->tag & MG_GEO) || (ppt->tag & MG_NOM))) { + pxp = &mesh->xpoint[ppt->xp]; + for (int j = 0 ; j < 3 ; j++) { + pnor[3 * ncount + j] = pxp->n1[j]; + } + pnorat[ncount++] = ppt->tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + } + /* Tangents */ + if (save_entities[PMMG_IO_Tangent]) { + if (MG_EDG(ppt->tag) || (ppt->tag & 
MG_NOM)) { + for (int j = 0 ; j < 3 ; j++) { + ptan[3 * tcount + j] = ppt->n[j]; + } + ptanat[tcount++] = ppt->tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + } + } + pref[ppt->tmp - 1] = abs(ppt->ref); + } + + dspace_mem_id = H5Screate_simple(2, hnp, NULL); + dspace_file_id = H5Screate_simple(2, hnpg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Vertex], NULL, hnp, NULL); + dset_id = H5Dcreate(grp_entities_id, "Vertices", H5T_NATIVE_DOUBLE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, ppoint); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppoint, double, "ppoint"); + + dspace_mem_id = H5Screate_simple(1, hnp, NULL); + dspace_file_id = H5Screate_simple(1, hnpg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Vertex], NULL, hnp, NULL); + dset_id = H5Dcreate(grp_entities_id, "VerticesRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (save_entities[PMMG_IO_Corner] && ncg) { + dspace_mem_id = H5Screate_simple(1, &nc, NULL); + dspace_file_id = H5Screate_simple(1, &ncg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Corner], NULL, &nc, NULL); + dset_id = H5Dcreate(grp_entities_id, "Corners", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pcr); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + } + + if (save_entities[PMMG_IO_RequiredVertex] && npreqg) { + dspace_mem_id = H5Screate_simple(1, &npreq, NULL); + dspace_file_id = H5Screate_simple(1, &npreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredVertex], NULL, &npreq, NULL); + dset_id = H5Dcreate(grp_entities_id, "RequiredVertices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + } + + if (save_entities[PMMG_IO_ParallelVertex] && npparg) { + dspace_mem_id = H5Screate_simple(1, &nppar, NULL); + dspace_file_id = H5Screate_simple(1, &npparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelVertex], NULL, &nppar, NULL); + dset_id = H5Dcreate(grp_entities_id, "ParallelVertices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + } + + if (save_entities[PMMG_IO_Normal] && nnorg) { + dspace_mem_id = H5Screate_simple(2, hnnor, NULL); + dspace_file_id = H5Screate_simple(2, hnnorg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Normal], NULL, hnnor, NULL); + dset_id = H5Dcreate(grp_entities_id, "Normals", H5T_NATIVE_DOUBLE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, pnor); + 
H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pnor, double, "pnor"); + + dspace_mem_id = H5Screate_simple(1, hnnor, NULL); + dspace_file_id = H5Screate_simple(1, hnnorg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Normal], NULL, hnnor, NULL); + dset_id = H5Dcreate(grp_entities_id, "NormalsAtVertices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pnorat); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pnorat, int, "pnorat"); + } + + if (save_entities[PMMG_IO_Tangent] && ntang) { + dspace_mem_id = H5Screate_simple(2, hntan, NULL); + dspace_file_id = H5Screate_simple(2, hntang, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tangent], NULL, hntan, NULL); + dset_id = H5Dcreate(grp_entities_id, "Tangents", H5T_NATIVE_DOUBLE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, ptan); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ptan, double, "ptan"); + + dspace_mem_id = H5Screate_simple(1, hntan, NULL); + dspace_file_id = H5Screate_simple(1, hntang, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tangent], NULL, hntan, NULL); + dset_id = H5Dcreate(grp_entities_id, "TangentsAtVertices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ptanat); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ptanat, int, "ptanat"); + } + + } + + /* Edges */ + if (save_entities[PMMG_IO_Edge] && nag) { + + PMMG_MALLOC(parmesh, pent, 2 * na, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, na, int, "pref", goto free_and_return); + if (save_entities[PMMG_IO_Ridge]) PMMG_MALLOC(parmesh, pcr , nr , int, "pcr" , goto free_and_return); + if (save_entities[PMMG_IO_RequiredEdge]) PMMG_MALLOC(parmesh, preq, nedreq, int, "preq", goto free_and_return); + if (save_entities[PMMG_IO_ParallelEdge]) PMMG_MALLOC(parmesh, ppar, nedpar, int, "ppar", goto free_and_return); + + crcount = reqcount = parcount = 0; + + if (na) { + na = 0; + for (int i = 0 ; i < mesh->na ; i++) { + pa = &mesh->edge[i + 1]; + pent[2 * i] = mesh->point[pa->a].tmp + offset[2 * PMMG_IO_Vertex] - 1; + pent[2 * i + 1] = mesh->point[pa->b].tmp + offset[2 * PMMG_IO_Vertex] - 1; + pref[i] = pa->ref; + if (save_entities[PMMG_IO_Ridge] && (pa->tag & MG_GEO)) pcr[crcount++] = na + offset[2 * PMMG_IO_Edge]; + if (save_entities[PMMG_IO_RequiredEdge] && (pa->tag & MG_REQ)) preq[reqcount++] = na + offset[2 * PMMG_IO_Edge]; + if (save_entities[PMMG_IO_ParallelEdge] && (pa->tag & MG_PARBDY)) ppar[parcount++] = na + offset[2 * PMMG_IO_Edge]; + na++; + } + } + + dspace_mem_id = H5Screate_simple(2, hna, NULL); + dspace_file_id = H5Screate_simple(2, hnag, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Edge], NULL, hna, NULL); + dset_id = H5Dcreate(grp_entities_id, "Edges", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + + 
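The connectivity written above never stores Mmg's raw point numbering: every valid point was renumbered in PMMG_countEntities (ppt->tmp, 1-based and contiguous on each rank), and the entity loops turn that into a global, 0-based row of the Vertices dataset by adding the rank's vertex offset and subtracting one. A tiny sketch of that mapping (the helper is illustrative, not part of ParMmg's API):

#include <hdf5.h>

/* 1-based per-rank compact index (ppt->tmp) -> 0-based global row of the
 * "Vertices" dataset; vert_off is this rank's offset[2 * PMMG_IO_Vertex]. */
static hsize_t global_vertex_row(int tmp_1based, hsize_t vert_off) {
  return (hsize_t)tmp_1based + vert_off - 1;
}

On a rank whose vertices start at row 10, the point with ppt->tmp == 1 is therefore referenced as row 10 by every connectivity dataset.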
dspace_mem_id = H5Screate_simple(1, hna, NULL); + dspace_file_id = H5Screate_simple(1, hnag, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Edge], NULL, hna, NULL); + dset_id = H5Dcreate(grp_entities_id, "EdgesRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (save_entities[PMMG_IO_Ridge] && nrg) { + dspace_mem_id = H5Screate_simple(1, &nr, NULL); + dspace_file_id = H5Screate_simple(1, &nrg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Ridge], NULL, &nr, NULL); + dset_id = H5Dcreate(grp_entities_id, "Ridges", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pcr); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + } + + if (save_entities[PMMG_IO_RequiredEdge] && nedreqg) { + dspace_mem_id = H5Screate_simple(1, &nedreq, NULL); + dspace_file_id = H5Screate_simple(1, &nedreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredEdge], NULL, &nedreq, NULL); + dset_id = H5Dcreate(grp_entities_id, "RequiredEdges", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + } + + if (save_entities[PMMG_IO_ParallelEdge] && nedparg) { + dspace_mem_id = H5Screate_simple(1, &nedpar, NULL); + dspace_file_id = H5Screate_simple(1, &nedparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelEdge], NULL, &nedpar, NULL); + dset_id = H5Dcreate(grp_entities_id, "ParallelEdges", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + } + } + + /* Triangles */ + if (save_entities[PMMG_IO_Tria] && ntg) { + + PMMG_MALLOC(parmesh, pent, 3 * nt, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nt, int, "pref", goto free_and_return); + if (save_entities[PMMG_IO_RequiredTria]) PMMG_MALLOC(parmesh, preq, ntreq, int, "preq", goto free_and_return); + if (save_entities[PMMG_IO_ParallelTria]) PMMG_MALLOC(parmesh, ppar, ntpar, int, "ppar", goto free_and_return); + + reqcount = parcount = 0; + + if (nt) { + nt = 0; + for (int i = 0 ; i < mesh->nt ; i++) { + pt = &mesh->tria[i + 1]; + for (int j = 0 ; j < 3 ; j++) { + pent[3 * i + j] = mesh->point[pt->v[j]].tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + pref[i] = pt->ref; + if (save_entities[PMMG_IO_RequiredTria]) { + if (pt->tag[0] & MG_REQ && pt->tag[1] & MG_REQ && pt->tag[2] & MG_REQ) { + preq[reqcount++] = nt + offset[2 * PMMG_IO_Tria]; + } + } + if (save_entities[PMMG_IO_ParallelTria]) { + if (pt->tag[0] & MG_PARBDY && pt->tag[1] & MG_PARBDY && pt->tag[2] & MG_PARBDY) { + ppar[parcount++] = nt + offset[2 * PMMG_IO_Tria]; + } + } + nt++; + } + } + + dspace_mem_id = H5Screate_simple(2, hnt, NULL); + dspace_file_id = H5Screate_simple(2, hntg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, 
&offset[2 * PMMG_IO_Tria], NULL, hnt, NULL); + dset_id = H5Dcreate(grp_entities_id, "Triangles", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + + dspace_mem_id = H5Screate_simple(1, hnt, NULL); + dspace_file_id = H5Screate_simple(1, hntg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tria], NULL, hnt, NULL); + dset_id = H5Dcreate(grp_entities_id, "TrianglesRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (save_entities[PMMG_IO_RequiredTria] && ntreqg) { + dspace_mem_id = H5Screate_simple(1, &ntreq, NULL); + dspace_file_id = H5Screate_simple(1, &ntreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredTria], NULL, &ntreq, NULL); + dset_id = H5Dcreate(grp_entities_id, "RequiredTriangles", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + } + + if (save_entities[PMMG_IO_ParallelTria] && ntparg) { + dspace_mem_id = H5Screate_simple(1, &ntpar, NULL); + dspace_file_id = H5Screate_simple(1, &ntparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelTria], NULL, &ntpar, NULL); + dset_id = H5Dcreate(grp_entities_id, "ParallelTriangles", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + } + } + + + /* Quadrilaterals */ + if (save_entities[PMMG_IO_Quad] && nquadg) { + + PMMG_MALLOC(parmesh, pent, 4 * nquad, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nquad, int, "pref", goto free_and_return); + if (save_entities[PMMG_IO_RequiredQuad]) PMMG_MALLOC(parmesh, preq, nqreq, int, "preq", goto free_and_return); + if (save_entities[PMMG_IO_ParallelQuad]) PMMG_MALLOC(parmesh, ppar, nqpar, int, "ppar", goto free_and_return); + + reqcount = parcount = 0; + + if (nquad){ + nquad = 0; + for (int i = 0 ; i < mesh->nquad ; i++) { + pq = &mesh->quadra[i + 1]; + for (int j = 0 ; j < 4 ; j++) { + pent[4 * i + j] = mesh->point[pq->v[j]].tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + pref[i] = pq->ref; + if (save_entities[PMMG_IO_RequiredQuad]) { + if (pq->tag[0] & MG_REQ && pq->tag[1] & MG_REQ && + pq->tag[2] & MG_REQ && pq->tag[3] & MG_REQ) { + preq[reqcount++] = nquad + offset[2 * PMMG_IO_Quad]; + } + } + if (save_entities[PMMG_IO_RequiredQuad]) { + if (pq->tag[0] & MG_PARBDY && pq->tag[1] & MG_PARBDY && + pq->tag[2] & MG_PARBDY && pq->tag[3] & MG_PARBDY) { + ppar[parcount++] = nquad + offset[2 * PMMG_IO_Quad]; + } + } + nquad++; + } + } + + dspace_mem_id = H5Screate_simple(2, hnquad, NULL); + dspace_file_id = H5Screate_simple(2, hnquadg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Quad], NULL, hnquad, NULL); + dset_id = H5Dcreate(grp_entities_id, 
"Quadrilaterals", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + + dspace_mem_id = H5Screate_simple(1, hnquad, NULL); + dspace_file_id = H5Screate_simple(1, hnquadg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Quad], NULL, hnquad, NULL); + dset_id = H5Dcreate(grp_entities_id, "QuadrilateralsRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (save_entities[PMMG_IO_RequiredQuad] && nqreqg) { + dspace_mem_id = H5Screate_simple(1, &nqreq, NULL); + dspace_file_id = H5Screate_simple(1, &nqreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredQuad], NULL, &nqreq, NULL); + dset_id = H5Dcreate(grp_entities_id, "RequiredQuadrilaterals", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + } + + if (save_entities[PMMG_IO_ParallelQuad] && nqparg) { + dspace_mem_id = H5Screate_simple(1, &nqpar, NULL); + dspace_file_id = H5Screate_simple(1, &nqparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelQuad], NULL, &nqpar, NULL); + dset_id = H5Dcreate(grp_entities_id, "ParallelQuadrilaterals", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + } + } + + /* Tetrahedra */ + if (save_entities[PMMG_IO_Tetra] && neg) { + + PMMG_MALLOC(parmesh, pent, 4 * ne, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, ne, int, "pref", goto free_and_return); + if (save_entities[PMMG_IO_RequiredTetra]) PMMG_MALLOC(parmesh, preq, nereq, int, "preq", goto free_and_return); + if (save_entities[PMMG_IO_ParallelTetra]) PMMG_MALLOC(parmesh, ppar, nepar, int, "ppar", goto free_and_return); + + reqcount = parcount = 0; + + if (ne) { + ne = 0; + for (int i = 0 ; i < mesh->ne ; i++) { + pe = &mesh->tetra[i + 1]; + if (!MG_EOK(pe)) continue; + for (int j = 0 ; j < 4 ; j++) { + pent[4 * ne + j] = mesh->point[pe->v[j]].tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + pref[i] = pe->ref; + if (save_entities[PMMG_IO_RequiredTetra] && (pe->tag & MG_REQ)) preq[reqcount++] = ne + offset[2 * PMMG_IO_Tetra]; + if (save_entities[PMMG_IO_ParallelTetra] && (pe->tag & MG_PARBDY)) ppar[parcount++] = ne + offset[2 * PMMG_IO_Tetra]; + ne++; + } + } + + dspace_mem_id = H5Screate_simple(2, hne, NULL); + dspace_file_id = H5Screate_simple(2, hneg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tetra], NULL, hne, NULL); + dset_id = H5Dcreate(grp_entities_id, "Tetrahedra", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + 
PMMG_DEL_MEM(parmesh, pent, int, "pent"); + + dspace_mem_id = H5Screate_simple(1, hne, NULL); + dspace_file_id = H5Screate_simple(1, hneg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tetra], NULL, hne, NULL); + dset_id = H5Dcreate(grp_entities_id, "TetrahedraRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (save_entities[PMMG_IO_RequiredTetra] && nereqg) { + dspace_mem_id = H5Screate_simple(1, &nereq, NULL); + dspace_file_id = H5Screate_simple(1, &nereqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredTetra], NULL, &nereq, NULL); + dset_id = H5Dcreate(grp_entities_id, "RequiredTetrahedra", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + } + + if (save_entities[PMMG_IO_ParallelTetra] && neparg) { + dspace_mem_id = H5Screate_simple(1, &nepar, NULL); + dspace_file_id = H5Screate_simple(1, &neparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelTetra], NULL, &nepar, NULL); + dset_id = H5Dcreate(grp_entities_id, "ParallelTetrahedra", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + } + } + + /* Prisms */ + if (save_entities[PMMG_IO_Prism] && nprismg) { + PMMG_MALLOC(parmesh, pent, 6 * nprism, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nprism, int, "pref", goto free_and_return); + + if (nprism){ + for (int i = 0 ; i < mesh->nprism ; i++) { + pp = &mesh->prism[i + 1]; + for (int j = 0 ; j < 6 ; j++) { + pent[6 * i + j] = mesh->point[pp->v[j]].tmp + offset[2 * PMMG_IO_Vertex] - 1; + } + pref[i] = pp->ref; + } + } + + dspace_mem_id = H5Screate_simple(2, hnprism, NULL); + dspace_file_id = H5Screate_simple(2, hnprismg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Prism], NULL, hnprism, NULL); + dset_id = H5Dcreate(grp_entities_id, "Prisms", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + + dspace_mem_id = H5Screate_simple(1, hnprism, NULL); + dspace_file_id = H5Screate_simple(1, hnprismg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Prism], NULL, hnprism, NULL); + dset_id = H5Dcreate(grp_entities_id, "PrismsRef", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + } + + /* Print the number of mesh entities */ + if ( parmesh->info.imprim > PMMG_VERB_STEPS ) { + fprintf(stdout," NUMBER OF VERTICES %lld CORNERS %lld" + " REQUIRED %lld\n",npg,ncg,npreqg); + fprintf(stdout," NUMBER OF 
TETRAHEDRA %lld REQUIRED %lld\n", + neg,nereqg); + if ( nprismg ) + fprintf(stdout," NUMBER OF PRISMS %lld\n",nprismg); + + if ( nag ) + fprintf(stdout," NUMBER OF EDGES %lld RIDGES %lld" + " REQUIRED %lld\n",nag,nrg,nedreqg); + if ( ntg ) + fprintf(stdout," NUMBER OF TRIANGLES %lld REQUIRED %lld\n", + ntg, ntreqg); + if ( nquadg ) + fprintf(stdout," NUMBER OF QUADRILATERALS %lld REQUIRED %lld\n", + nquadg,nqreqg); + + if ( npparg || nedparg || ntparg || neparg || nqparg ) { + fprintf(stdout," NUMBER OF PARALLEL ENTITIES: \n"); + if ( npparg ) + fprintf(stdout," VERTICES %lld \n",npparg); + if ( nedparg ) + fprintf(stdout," EDGES %lld \n",nedparg); + if ( ntparg ) + fprintf(stdout," TRIANGLES %lld \n",ntparg); + if ( nqparg ) + fprintf(stdout," QUADRILATERALS %lld \n",nqparg); + if ( neparg ) + fprintf(stdout," TETRAHEDRA %lld \n",neparg); + } + } + + return 1; + + free_and_return: + PMMG_DEL_MEM(parmesh, ppoint, double, "ppoint"); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + PMMG_DEL_MEM(parmesh, pnor, double, "pnor"); + PMMG_DEL_MEM(parmesh, pnorat, int, "pnorat"); + PMMG_DEL_MEM(parmesh, ptan, double, "ptan"); + PMMG_DEL_MEM(parmesh, ptanat, int, "ptanat"); + + return 0; + +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_part_id identifier of the HDF5 group in which to write the mesh partitioning. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentities array of size nprocs * PMMG_IO_ENTITIES_size containing the number of entities on every proc. + * + * \return 0 if fail, 1 otherwise + * + * Save the mesh partitioning in the \a grp_part_id group of an HDF5 file (only + * one group per process is allowed). 
+ * + */ +static int PMMG_savePartitioning_hdf5(PMMG_pParMesh parmesh, hid_t grp_part_id, hid_t dcpl_id, hid_t dxpl_id, hsize_t *nentities) { + PMMG_pExt_comm comms; + hsize_t *ncomms, *nitem, *nitem_proc; + hsize_t ncommg, comm_offset, nitemg, item_offset, rank_offset; + hsize_t icomm; + int *colors; + int **idx_loc, **idx_glob, *loc_buf, *glob_buf; + int ier; + int rank, nprocs; + hid_t dspace_mem_id, dspace_file_id; + hid_t dset_id; + + /* Set pointers to NULL */ + ncomms = nitem = nitem_proc = NULL; + colors = NULL; + idx_loc = idx_glob = NULL; + loc_buf = glob_buf = NULL; + + /* Init variables */ + rank = parmesh->myrank; + nprocs = parmesh->nprocs; + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + comms = parmesh->ext_face_comm; + else + comms = parmesh->ext_node_comm; + + ncommg = nitemg = comm_offset = item_offset = rank_offset = 0; + + /* Write the number of entities per proc */ + hsize_t hn[2] = {nprocs, PMMG_IO_ENTITIES_size}; + dspace_file_id = H5Screate_simple(2, hn, NULL); + dset_id = H5Dcreate(grp_part_id, "NumberOfEntities", H5T_NATIVE_HSIZE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + if (rank == parmesh->info.root) + H5Dwrite(dset_id, H5T_NATIVE_HSIZE, H5S_ALL, H5S_ALL, H5P_DEFAULT, nentities); + H5Dclose(dset_id); + H5Sclose(dspace_file_id); + + /* Don't save the communicators outside the adaptation loop */ + if (parmesh->iter == PMMG_UNSET) return 1; + + /* Dont try to save communicators if there isn't any */ + if (nprocs == 1) return 1; + + /* Count the number of communicators */ + PMMG_MALLOC(parmesh, ncomms, nprocs, hsize_t, "ncomms", goto free_and_return); + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + ncomms[rank] = parmesh->next_face_comm; + else + ncomms[rank] = parmesh->next_node_comm; + + MPI_CHECK( MPI_Allgather(&ncomms[rank], 1, MPI_LONG_LONG, ncomms, 1, MPI_LONG_LONG, parmesh->comm), + goto free_and_return ); + + for (int i = 0 ; i < nprocs ; i++) { + ncommg += ncomms[i]; + } + + /* Create the buffers */ + PMMG_MALLOC(parmesh, colors, ncomms[rank], int, "colors", goto free_and_return); + PMMG_MALLOC(parmesh, nitem, ncomms[rank], hsize_t, "nitem", goto free_and_return); + PMMG_CALLOC(parmesh, nitem_proc, nprocs, hsize_t, "nitem_proc", goto free_and_return); + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + colors[icomm] = comms[icomm].color_out; + nitem[icomm] = comms[icomm].nitem; + nitem_proc[rank] += nitem[icomm]; + } + + MPI_CHECK( MPI_Allgather(&nitem_proc[rank], 1, MPI_LONG_LONG, nitem_proc, 1, MPI_LONG_LONG, parmesh->comm), + goto free_and_return ); + + /* Count the total number of items */ + for (int i = 0 ; i < nprocs ; i++) { + nitemg += nitem_proc[i]; + } + + /* Count the offsets for parallel writing */ + for (int i = 0 ; i < rank ; i++) { + comm_offset += ncomms[i]; + rank_offset += nitem_proc[i]; + } + + /* Write the things */ + /* Number of communicators */ + hsize_t hnprocs = nprocs; + dspace_file_id = H5Screate_simple(1, &hnprocs, NULL); + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dcreate(grp_part_id, "NumberOfFaceCommunicators", H5T_NATIVE_HSIZE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + else + dset_id = H5Dcreate(grp_part_id, "NumberOfNodeCommunicators", H5T_NATIVE_HSIZE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + if (rank == parmesh->info.root) + H5Dwrite(dset_id, H5T_NATIVE_HSIZE, H5S_ALL, H5S_ALL, H5P_DEFAULT, ncomms); + H5Dclose(dset_id); + H5Sclose(dspace_file_id); + + /* For each communicator, write the outward proc color and the number of faces */ + 
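HDF5 datasets are rectangular, so the ragged per-communicator index arrays gathered further below (idx_loc[icomm] and idx_glob[icomm]) are concatenated into one flat buffer per rank before being written; each rank's slice of the global dataset then starts after the items of all lower ranks (rank_offset). A minimal sketch of that flattening step, with hypothetical types and sizes:

#include <string.h>

/* Concatenate ncomm ragged arrays (idx[i] holds nitem[i] ints) into buf,
 * which must be large enough for the sum of all nitem[i].
 * Returns the total number of items copied. */
static size_t flatten_comms(int **idx, const size_t *nitem, size_t ncomm, int *buf) {
  size_t pos = 0;
  for (size_t i = 0; i < ncomm; i++) {
    memcpy(buf + pos, idx[i], nitem[i] * sizeof(int));
    pos += nitem[i];
  }
  return pos;
}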
dspace_mem_id = H5Screate_simple(1, &ncomms[rank], NULL); + dspace_file_id = H5Screate_simple(1, &ncommg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &comm_offset, NULL, &ncomms[rank], NULL); + + dset_id = H5Dcreate(grp_part_id, "ColorsOut", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, colors); + H5Dclose(dset_id); + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dcreate(grp_part_id, "NumberOfCommunicatorFaces", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + else + dset_id = H5Dcreate(grp_part_id, "NumberOfCommunicatorNodes", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + + H5Dwrite(dset_id, H5T_NATIVE_HSIZE, dspace_mem_id, dspace_file_id, dxpl_id, nitem); + H5Dclose(dset_id); + + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + /* Get the communicator items */ + PMMG_CALLOC(parmesh, idx_loc, ncomms[rank], int*, "idx_loc", goto free_and_return); + PMMG_CALLOC(parmesh, idx_glob, ncomms[rank], int*, "idx_glob", goto free_and_return); + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_CALLOC(parmesh, idx_loc[icomm], nitem[icomm], int, "idx_loc[icomm]", goto free_and_return); + PMMG_CALLOC(parmesh, idx_glob[icomm], nitem[icomm], int, "idx_glob[icomm]", goto free_and_return); + } + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) { + ier = PMMG_Get_FaceCommunicator_faces(parmesh, idx_loc); + + if ( !ier ) { + fprintf(stderr,"\n ## Error: %s: unable to compute face communicators on rank %d.\n", + __func__, rank); + return ier; + } + ier = PMMG_Get_FaceCommunicator_owners(parmesh, NULL, idx_glob, NULL, NULL); + } + else { + ier = PMMG_Get_NodeCommunicator_nodes(parmesh, idx_loc); + + if ( !ier ) { + fprintf(stderr,"\n ## Error: %s: unable to compute node communicators on rank %d.\n", + __func__, rank); + return ier; + } + ier = PMMG_Get_NodeCommunicator_owners(parmesh, NULL, idx_glob, NULL, NULL); + } + + /* Make a unique buffer for each proc */ + PMMG_CALLOC(parmesh, loc_buf, nitem_proc[rank], int, "loc_buf", goto free_and_return); + PMMG_CALLOC(parmesh, glob_buf, nitem_proc[rank], int, "glob_buf", goto free_and_return); + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + for (int k = 0 ; k < nitem[icomm] ; k++) { + loc_buf[item_offset + k] = idx_loc[icomm][k]; + glob_buf[item_offset + k] = idx_glob[icomm][k]; + } + item_offset += nitem[icomm]; + } + + /* Free the memory of idx_loc and idx_glob */ + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_DEL_MEM(parmesh, idx_loc[icomm], int, "idx_loc[icomms]"); + PMMG_DEL_MEM(parmesh, idx_glob[icomm], int, "idx_glob[icomm]"); + } + PMMG_DEL_MEM(parmesh, idx_loc, int*, "idx_loc"); + PMMG_DEL_MEM(parmesh, idx_glob, int*, "idx_glob"); + + /* Write the local indices */ + dspace_file_id = H5Screate_simple(1, &nitemg, NULL); + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dcreate(grp_part_id, "LocalFaceIndices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + else + dset_id = H5Dcreate(grp_part_id, "LocalNodeIndices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + dspace_mem_id = H5Screate_simple(1, &nitem_proc[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &rank_offset, NULL, &nitem_proc[rank], NULL); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, loc_buf); + H5Sclose(dspace_mem_id); + H5Dclose(dset_id); + + /* Write the 
global indices */ + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dcreate(grp_part_id, "GlobalFaceIndices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + else + dset_id = H5Dcreate(grp_part_id, "GlobalNodeIndices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + dspace_mem_id = H5Screate_simple(1, &nitem_proc[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &rank_offset, NULL, &nitem_proc[rank], NULL); + H5Dwrite(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, glob_buf); + H5Sclose(dspace_mem_id); + H5Dclose(dset_id); + + H5Sclose(dspace_file_id); + + /* Free the memory */ + PMMG_DEL_MEM(parmesh, loc_buf, int, "loc_buf"); + PMMG_DEL_MEM(parmesh, glob_buf, int, "glob_buf"); + PMMG_DEL_MEM(parmesh, ncomms, hsize_t, "ncomms"); + PMMG_DEL_MEM(parmesh, colors, int, "colors"); + PMMG_DEL_MEM(parmesh, nitem, int, "nitem"); + PMMG_DEL_MEM(parmesh, nitem_proc, hsize_t, "nitem_proc"); + + return 1; + + free_and_return: + PMMG_DEL_MEM(parmesh, loc_buf, int, "loc_buf"); + PMMG_DEL_MEM(parmesh, glob_buf, int, "glob_buf"); + PMMG_DEL_MEM(parmesh, ncomms, hsize_t, "ncomms"); + PMMG_DEL_MEM(parmesh, colors, int, "colors"); + PMMG_DEL_MEM(parmesh, nitem, int, "nitem"); + PMMG_DEL_MEM(parmesh, nitem_proc, hsize_t, "nitem_proc"); + + if ( idx_loc && idx_glob ) { + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_DEL_MEM(parmesh, idx_loc[icomm], int, "idx_loc[icomms]"); + PMMG_DEL_MEM(parmesh, idx_glob[icomm], int, "idx_glob[icomm]"); + } + PMMG_DEL_MEM(parmesh, idx_loc, int*, "idx_loc"); + PMMG_DEL_MEM(parmesh, idx_glob, int*, "idx_glob"); + } + + return 0; + +} + +/** + * \param mesh pointer toward the mesh associated to the solution. + * \param sol pointer toward the solution structure to save. + * \param dset_id identifier of the HDF5 group in which to write the solution. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param np local number of vertices. + * \param npg global number of vertices. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel writing. + * + * \return 0 if fail, 1 otherwise + * + * Save at hdf5 format a given solution structure defined at vertices. 
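The writer defined next packs only the valid (MG_VOK) points into a dense np x sol->size buffer. MMG solution arrays are 1-based, so component j of point k lives at sol->m[1 + k * size + j]; a one-function sketch of that indexing (illustrative helper, not part of the MMG API):

/* Component j (0 <= j < size) of mesh point k (0 <= k < mesh->np) in a
 * 1-based MMG solution array. */
static double sol_entry(const double *m, int size, int k, int j) {
  return m[1 + (long)k * size + j];
}

A scalar metric therefore occupies one column of the SolAtVertices dataset, an anisotropic (tensor) metric six.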
+ * + */ +static +int PMMG_saveSolAtVertices_hdf5(MMG5_pMesh mesh,MMG5_pSol sol,hid_t grp_sol_id, + hid_t dcpl_id,hid_t dxpl_id,int np,int npg, + hsize_t *offset) { + int mcount; + MMG5_pPoint ppt; + double *sol_buf; + hsize_t sol_offset[2] = {0, 0}; + hid_t dspace_file_id, dspace_mem_id; + hid_t dset_id; + + /* Arrays for bidimensional dataspaces */ + hsize_t hns[2] = {np, sol->size}; + hsize_t hnsg[2] = {npg, sol->size}; + + /* Offset for parallel writing */ + sol_offset[0] = offset[2 * PMMG_IO_Vertex]; + + /* Fill the solution buffer */ + PMMG_MALLOC(mesh, sol_buf, np * sol->size, double, "sol_buf", return 0); + mcount = 0; + for (int k = 0 ; k < mesh->np ; k++) { + ppt = &mesh->point[k + 1]; + if (!MG_VOK(ppt)) continue; + for (int j = 0 ; j < sol->size ; j++) { + sol_buf[mcount++] = sol->m[1 + k * sol->size + j]; + } + } + + /* Write the buffer */ + dspace_mem_id = H5Screate_simple(2, hns, NULL); + dspace_file_id = H5Screate_simple(2, hnsg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, sol_offset, NULL, hns, NULL); + dset_id = H5Dcreate(grp_sol_id, "SolAtVertices", H5T_NATIVE_DOUBLE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, sol_buf); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + /* Free the memory */ + PMMG_DEL_MEM(mesh, sol_buf, double, "sol_buf"); + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which to write the metric. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of size PMMG_IO_ENTITIES_size containing the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size containing the global number of entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel writing. + * + * \return 0 if fail, 1 otherwise + * + * Save the metric in the \a grp_sols_id group of an HDF5 file (only one group per process is allowed). + * + */ +static int PMMG_saveMetric_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, hid_t dcpl_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *nentitiesg, hsize_t *offset) { + int np, npg, isMet,ier; + MMG5_pMesh mesh; + MMG5_pSol met; + hid_t grp_sol_id; + + assert ( parmesh->ngrp == 1 ); + + mesh = parmesh->listgrp[0].mesh; + met = parmesh->listgrp[0].met; + np = nentitiesl[PMMG_IO_Vertex]; + npg = nentitiesg[PMMG_IO_Vertex]; + + /* Check the metric */ + isMet = (met && met->m); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &isMet, 1, MPI_INT, MPI_MAX, parmesh->comm), return 0); + + if (!isMet) { + return 1; + } + if (met->size != 1 && met->size != 6 && !(met->size == 3 && mesh->info.lag != -1)) { + fprintf(stderr, "\n ## Error: %s: Wrong metric size/type.\n", __func__); + return 0; + } + if (mesh->np != met->np) { + fprintf(stderr, "\n ## Error: %s: The metric vertices do not match with the mesh vertices. 
\n", __func__); + return 0; + } + + HDF_CHECK( grp_sol_id = H5Gcreate(grp_sols_id, "Metric", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Solutions/Metric group.\n",__func__); + return 0 ); + + ier = PMMG_saveSolAtVertices_hdf5(mesh,met,grp_sol_id,dcpl_id,dxpl_id,np,npg,offset); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the metric.\n",__func__); + } + H5Gclose(grp_sol_id); + return 0; + } + + H5Gclose(grp_sol_id); + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which to write the metric. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of size PMMG_IO_ENTITIES_size containing the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size containing the global number of entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel writing. + * + * \return 0 if fail, 1 otherwise + * + * Save the level-set in the \a grp_sols_id group of an HDF5 file (only one group per process is allowed). + * + */ +static int PMMG_saveLs_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, hid_t dcpl_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *nentitiesg, hsize_t *offset) { + int np, npg, isSol,ier; + MMG5_pMesh mesh; + MMG5_pSol ls; + hid_t grp_sol_id; + + assert ( parmesh->ngrp == 1 ); + + mesh = parmesh->listgrp[0].mesh; + ls = parmesh->listgrp[0].ls; + np = nentitiesl[PMMG_IO_Vertex]; + npg = nentitiesg[PMMG_IO_Vertex]; + + /* Check the metric */ + isSol = (ls && ls->m); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &isSol, 1, MPI_INT, MPI_MAX, parmesh->comm), return 0); + + if (!isSol) { + return 1; + } + if (ls->size != 1) { + fprintf(stderr, "\n ## Error: %s: Wrong level-set size/type.\n", __func__); + return 0; + } + if (mesh->np != ls->np) { + fprintf(stderr, "\n ## Error: %s: The level-set vertices do not match with the mesh vertices. \n", __func__); + return 0; + } + + HDF_CHECK( grp_sol_id = H5Gcreate(grp_sols_id, "Ls", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Solutions/Ls group.\n",__func__); + H5Gclose(grp_sols_id); + return 0 ); + + ier = PMMG_saveSolAtVertices_hdf5(mesh,ls,grp_sol_id,dcpl_id,dxpl_id,np,npg,offset); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the level-set.\n",__func__); + } + H5Gclose(grp_sol_id); + return 0; + } + + H5Gclose(grp_sol_id); + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which to write the solutions. + * \param dcpl_id identifier of the dataset creation property list (no fill value). + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of size PMMG_IO_ENTITIES_size containing the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size containing the global number of entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel writing. + * + * \return 0 if fail, 1 otherwise + * + * Save the solutions in the \a grp_sols_id group of an HDF5 file (only one group per process is allowed). 
+ * + */ +static int PMMG_saveAllSols_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, hid_t dcpl_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *nentitiesg, hsize_t *offset) { + MMG5_pMesh mesh; + MMG5_pSol sols; + MMG5_pPoint ppt; + MMG5_pTetra pt; + int nsols, ndigits, np, npg, ne, neg, size, count, vcount, tcount, iwar = 1; + char *solname, *tmp; + hsize_t *sol_offset; + double *sol_buf; + hid_t dspace_mem_id, dspace_file_id; + hid_t dset_id, attr_id,grp_sol_id; + + /* Set ParMmg variables */ + mesh = parmesh->listgrp[0].mesh; + sols = parmesh->listgrp[0].field; + nsols = mesh->nsols; + + if ( !nsols || !sols ) { + /* Nothing to save */ + return 1; + } + + /* Get the local and global number of vertices/tetra */ + np = nentitiesl[PMMG_IO_Vertex]; + ne = nentitiesl[PMMG_IO_Tetra]; + npg = nentitiesg[PMMG_IO_Vertex]; + neg = nentitiesg[PMMG_IO_Tetra]; + + /* Arrays for bidimensional dataspaces */ + hsize_t hns[2] = {0, 0}; + hsize_t hnsg[2] = {0, 0}; + + /* Count digits for the name of the datasets */ + ndigits = PMMG_count_digits(nsols); + + /* Initialize the counts */ + vcount = tcount = 0; + + HDF_CHECK( grp_sol_id = H5Gcreate(grp_sols_id, "Fields", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Solutions/Fields group.\n",__func__); + return 0 ); + + for (int i = 0 ; i < nsols ; i++) { + + if ( !(sols+i) || !(sols[i].m) ) { + iwar = 0; + } + else { + iwar = 1; + } + + MPI_Allreduce(MPI_IN_PLACE, &iwar, 1, MPI_INT, MPI_MAX, parmesh->comm); + if ( !iwar ) { + fprintf(stderr, "\n ## Warning: %s: Skipping empty solution %d.\n", __func__, i); + continue; + } + + size = sols[i].size; + count = 0; + + if (sols[i].entities == MMG5_Noentity || sols[i].entities == MMG5_Vertex) { + PMMG_MALLOC(parmesh, sol_buf, size * np, double, "sol_buf", goto free_buf); + for (int k = 0 ; k < mesh->np ; k++) { + ppt = &mesh->point[k + 1]; + if ( !MG_VOK(ppt) ) continue; + for (int j = 0 ; j < size ; j++) { + sol_buf[count++] = sols[i].m[1 + k * size + j]; + } + } + hns[0] = np; hns[1] = size; + hnsg[0] = npg; hnsg[1] = size; + PMMG_CALLOC(parmesh, sol_offset, np * size, hsize_t, "sol_offset", goto free_buf); + sol_offset[0] = offset[2 * PMMG_IO_Vertex]; + + PMMG_CALLOC(parmesh, solname, strlen("SolAtVertices") + ndigits + 1, char, "solname", goto free_buf); + PMMG_CALLOC(parmesh, tmp, ndigits + 1, char, "tmp", goto free_buf); + strcpy(solname, "SolAtVertices"); + int len = PMMG_count_digits(vcount); + snprintf(tmp,len*sizeof(int), "%d", vcount); + strcat(solname, tmp); + vcount++; + } + + else if (sols[i].entities == MMG5_Tetrahedron) { + PMMG_MALLOC(parmesh, sol_buf, size * ne, double, "sol_buf", goto free_buf); + for (int k = 0 ; k < mesh->ne ; k++) { + pt = &mesh->tetra[k + 1]; + if ( !MG_EOK(pt) ) continue; + for (int j = 0 ; j < size ; j++) { + sol_buf[count++] = sols[i].m[1 + k * size + j]; + } + } + hns[0] = ne; hns[1] = size; + hnsg[0] = neg; hnsg[1] = size; + PMMG_CALLOC(parmesh, sol_offset, ne * size, hsize_t, "sol_offset", goto free_buf); + sol_offset[0] = offset[2 * PMMG_IO_Tetra]; + + PMMG_CALLOC(parmesh, solname, strlen("SolAtTetrahedra") + ndigits + 1, char, "solname", goto free_buf); + PMMG_CALLOC(parmesh, tmp, ndigits + 1, char, "tmp", goto free_buf); + strcpy(solname, "SolAtTetrahedra"); + int len = PMMG_count_digits(vcount); + snprintf(tmp,len*sizeof(int), "%d", vcount); + strcat(solname, tmp); + tcount++; + } + + else { + printf("\n ## Warning: %s: unexpected entity type for solution %d: %s." 
+ "\n Ignored.\n", + __func__, i, MMG5_Get_entitiesName(sols[i].entities)); + continue; + } + + dspace_mem_id = H5Screate_simple(2, hns, NULL); + dspace_file_id = H5Screate_simple(2, hnsg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, sol_offset, NULL, hns, NULL); + dset_id = H5Dcreate(grp_sol_id, solname, H5T_NATIVE_DOUBLE, dspace_file_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT); + H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, sol_buf); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + free_buf: + PMMG_DEL_MEM(parmesh, sol_offset, hsize_t, "sol_offset"); + PMMG_DEL_MEM(parmesh, sol_buf, double, "sol_buf"); + PMMG_DEL_MEM(parmesh, solname, char, "solname"); + PMMG_DEL_MEM(parmesh, tmp, char, "tmp"); + } + + /* Save the actual number of solutions as group attributes */ + dspace_file_id = H5Screate(H5S_SCALAR); + + attr_id = H5Acreate(grp_sol_id, "NSolsAtVertices", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, H5P_DEFAULT); + H5Awrite(attr_id, H5T_NATIVE_INT, &vcount); + H5Aclose(attr_id); + + attr_id = H5Acreate(grp_sol_id, "NSolsAtTetrahedra", H5T_NATIVE_INT, dspace_file_id, H5P_DEFAULT, H5P_DEFAULT); + H5Awrite(attr_id, H5T_NATIVE_INT, &tcount); + H5Aclose(attr_id); + + H5Sclose(dspace_file_id); + + H5Gclose(grp_sol_id); + + + return 1; +} + + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of the HDF5 file (with .h5 extension) containing the heavy data. + * \param xdmfname name of the XDMF file (with .xdmf extension) that will contain the light data. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size containing the global number of entities. + * + * \return 0 if fail, 1 otherwise + * + * Create the XDMF file \a xdmfname and write light data describing the mesh that was saved in + * the HDF5 file \a filename (only one group per process is allowed). 
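The XDMF writer below emits light data only: each DataItem refers back into the HDF5 file through a "file.h5:/path/to/dataset" reference, so the heavy arrays are never duplicated. The exact tags and attributes ParMmg prints are not reproduced here; the fprintf sketch that follows merely illustrates the kind of topology and geometry entries such a file contains, reusing the global counts neg and npg and the dataset paths visible in the code:

#include <stdio.h>

/* Illustrative XDMF skeleton for a tetrahedral mesh stored in an HDF5 file
 * (generic XDMF markup, not ParMmg's verbatim output). */
static void write_xdmf_sketch(FILE *f, const char *h5name,
                              unsigned long long neg, unsigned long long npg) {
  fprintf(f, "<Xdmf Version=\"3.0\">\n <Domain>\n  <Grid Name=\"Mesh\">\n");
  fprintf(f, "   <Topology TopologyType=\"Tetrahedron\" NumberOfElements=\"%llu\">\n", neg);
  fprintf(f, "    <DataItem Format=\"HDF\" DataType=\"Int\" Dimensions=\"%llu 4\">\n", neg);
  fprintf(f, "     %s:/Mesh/MeshEntities/Tetrahedra\n    </DataItem>\n   </Topology>\n", h5name);
  fprintf(f, "   <Geometry GeometryType=\"XYZ\">\n");
  fprintf(f, "    <DataItem Format=\"HDF\" Dimensions=\"%llu 3\">\n", npg);
  fprintf(f, "     %s:/Mesh/MeshEntities/Vertices\n    </DataItem>\n   </Geometry>\n", h5name);
  fprintf(f, "  </Grid>\n </Domain>\n</Xdmf>\n");
}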
+ * + */ +static int PMMG_writeXDMF(PMMG_pParMesh parmesh, const char *filename, const char *xdmfname, hsize_t *nentitiesg) { + hsize_t neg, npg; + PMMG_pGrp grp; + MMG5_pSol met, ls, sols; + int nsols, entities; + FILE *xdmf_file = NULL; + + assert ( parmesh->ngrp == 1 ); + + npg = nentitiesg[PMMG_IO_Vertex]; + neg = nentitiesg[PMMG_IO_Tetra]; + grp = &parmesh->listgrp[0]; + met = grp->met; + ls = grp->ls; + sols = grp->field; + nsols = grp->mesh->nsols; + + if (parmesh->myrank == parmesh->info.root) { + + xdmf_file = fopen(xdmfname, "w"); + + if ( !xdmf_file ) return 0; + + if (parmesh->info.imprim > PMMG_VERB_VERSION) { + fprintf(stdout, "\n %%%% %s OPENED \n", xdmfname); + } + + /* XDMF header */ + fprintf(xdmf_file, "\n"); + fprintf(xdmf_file, "\n"); + fprintf(xdmf_file, "\n"); + fprintf(xdmf_file, " \n"); + + /* Tetrahedra */ + fprintf(xdmf_file, " \n", neg); + fprintf(xdmf_file, " \n", neg); + fprintf(xdmf_file, " %s:/Mesh/MeshEntities/Tetrahedra\n", filename); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + + /* Vertices */ + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n", npg); + fprintf(xdmf_file, " %s:/Mesh/MeshEntities/Vertices\n", filename); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + + /* Metric */ + if (met && met->m) { + if (met->size == 6) + fprintf(xdmf_file, " \n"); + else if (met->size == 1) + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n", npg, met->size); + fprintf(xdmf_file, " %s:/Solutions/Metric/SolAtVertices\n", filename); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + } + + /* Level-set */ + if (ls && ls->m) { + assert ( ls->size==1 && "Unexpected size for level-set"); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n", npg, ls->size); + fprintf(xdmf_file, " %s:/Solutions/Ls/SolAtVertices\n", filename); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + } + + /* Solutions */ + for (int i = 0 ; i < nsols ; i++) { + + /* Ignore invalid solutions */ + if ( !(sols+i) || !(sols[i].m) ) continue; + + entities = sols[i].entities; + + if (entities != MMG5_Noentity && entities != MMG5_Vertex && entities != MMG5_Tetrahedron) continue; + if (sols[i].type == MMG5_Scalar) { + fprintf(xdmf_file, " \n", i); + } + else if (sols[i].type == MMG5_Vector) { + fprintf(xdmf_file, " \n", i); + } + else if (sols[i].type == MMG5_Tensor) { + fprintf(xdmf_file, " \n", i); + } + fprintf(xdmf_file, " \n", npg, sols[i].size); + fprintf(xdmf_file, " %s:/Solutions/Fields/SolAtVertices%d\n", filename, i); + } + else if (sols[i].entities == MMG5_Tetrahedron) { + fprintf(xdmf_file, " Dimensions=\"%lld %d\">\n", neg, sols[i].size); + fprintf(xdmf_file, " %s:/Solutions/Fields/SolAtTetrahedra%d\n", filename, i); + } + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + } + + /* End */ + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, " \n"); + fprintf(xdmf_file, "\n"); + + fclose(xdmf_file); + + } + + return 1; +} +#endif + +int PMMG_saveMesh_hdf5(PMMG_pParMesh parmesh, const char *filename) { + return PMMG_saveMesh_hdf5_i(parmesh,parmesh->info.io_entities,filename); +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param save_entities array of 0s and 1s of size \ref PMMG_IO_ENTITIES_size to tell + * which entities to save and which not to. This array must be setted using the + * \ref PMMG_Set_defaultIOEntities and \ref PMMG_Set_IOEntities + * \param filename name of the HDF5 and XDMF files. + * \return 0 if failed, 1 otherwise. 
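The property lists threaded through all the helpers above are created in this top-level routine, outside the excerpt shown here. The doxygen comments pin down what they carry: a file access list for MPI-IO, a transfer list for collective writes, and a dataset creation list with no fill value. A plausible setup under those assumptions, using standard parallel-HDF5 calls (not necessarily the exact calls ParMmg uses):

#include <hdf5.h>
#include <mpi.h>

/* Sketch: property lists matching the comments above (requires an HDF5
 * build with parallel support). */
static void make_plists(MPI_Comm comm, MPI_Info info,
                        hid_t *fapl_id, hid_t *dxpl_id, hid_t *dcpl_id) {
  *fapl_id = H5Pcreate(H5P_FILE_ACCESS);
  H5Pset_fapl_mpio(*fapl_id, comm, info);           /* MPI-IO file driver */

  *dxpl_id = H5Pcreate(H5P_DATASET_XFER);
  H5Pset_dxpl_mpio(*dxpl_id, H5FD_MPIO_COLLECTIVE); /* collective transfers */

  *dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
  H5Pset_fill_time(*dcpl_id, H5D_FILL_TIME_NEVER);  /* no fill value */
}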
+ * + * Write the mesh data, the metric, and all the solutions in an HDF5 file, aswell as + * an XDMF file for visualisation. This function is to be used for distributed meshes. + * + * \remark For internal use. + */ +int PMMG_saveMesh_hdf5_i(PMMG_pParMesh parmesh, int *save_entities, const char *filename) { + +#ifndef USE_HDF5 + + fprintf(stderr," ** HDF5 library not found. Unavailable file format.\n"); + return -1; + +#else + + int ier = 1; + int nullf = 0; /* Flag to remember if save_entities was NULL */ + hsize_t *nentities, *nentitiesl, *nentitiesg; /* Number of entities (on each proc/on the current proc/global) */ + hsize_t *offset; /* Offset for the parallel writing with HDF5 */ + hid_t file_id, grp_mesh_id, grp_part_id, grp_entities_id, grp_sols_id; /* HDF5 objects */ + hid_t fapl_id, dxpl_id, dcpl_id; /* HDF5 property lists */ + MPI_Info info = MPI_INFO_NULL; + mytime ctim[TIMEMAX]; + int8_t tim; + char stim[32]; + + /** Check arguments */ + if (parmesh->ngrp != 1) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in your parmesh.\n", + __func__); + ier = 0; + } + if (!filename || !*filename) { + fprintf(stderr," ## Error: %s: no file name provided.\n", + __func__); + ier = 0; + } + + /* Check filename extension (can be empty, .h5 or .xdmf) and store base name + * to save .h5 and .xdmf files */ + char *basename; + MMG5_SAFE_CALLOC(basename,strlen(filename)+1,char,ier=0); + strncpy(basename,filename,strlen(filename)); + + char *ptr = MMG5_Get_filenameExt( basename ); + assert ( ptr && "non-null filename should provide non-null pointer toward end of basename"); + + /* Check asked extension */ + if ( *ptr != '\0' ) { + if ( strcmp(ptr,".h5") && strcmp(ptr,".xdmf") ) { + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr, "\n ## Error: %s: Unexpected filename extension:" + " you must provide .h5 of .xdmf extension.\n", + __func__); + } + ier = 0; + } + } + + /* Create .xdmf and .h5 file names */ + char *xdmf_name; + // longest possible string + MMG5_SAFE_CALLOC(xdmf_name,strlen(basename)+6,char,ier=0); + + char *h5_name; + // longest possible string + MMG5_SAFE_CALLOC(h5_name,strlen(basename)+4,char,ier=0); + + *ptr='\0'; + strncpy(xdmf_name,basename,strlen(filename)); + strcat(xdmf_name,".xdmf"); + strncpy(h5_name,basename,strlen(filename)); + strcat(h5_name,".h5"); + + /* Check the save_entities argument */ + if (save_entities == NULL) { + nullf = 1; + PMMG_MALLOC(parmesh, save_entities, PMMG_IO_ENTITIES_size, int, "save_entities", return 0); + PMMG_Set_defaultIOEntities_i(save_entities); + } + if (!save_entities[PMMG_IO_Vertex] || !save_entities[PMMG_IO_Tetra]) { + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr, "\n ## Error: %s: save_entities: you must at least save the vertices and the tetra.\n", + __func__); + } + ier = 0; + } + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm ), return 0); + if ( !ier ) { + return 0; + } + + tminit(ctim, TIMEMAX); + chrono(ON, &ctim[0]); + + /* Set all pointers to NULL */ + nentities = nentitiesl = nentitiesg = offset = NULL; + + /** Count the number of entities on each proc and globally */ + tim = 1; + chrono(ON, &ctim[tim]); + + PMMG_CALLOC(parmesh, nentities, PMMG_IO_ENTITIES_size * parmesh->nprocs, hsize_t, "nentities", + goto free_and_return); + PMMG_CALLOC(parmesh, nentitiesg, PMMG_IO_ENTITIES_size, hsize_t, "nentitiesg", + goto free_and_return ); + PMMG_CALLOC(parmesh, nentitiesl, PMMG_IO_ENTITIES_size, hsize_t, "nentitiesl", + goto free_and_return ); + + ier = 
PMMG_countEntities(parmesh, nentities, nentitiesl, nentitiesg, save_entities); + + /* Check that the global mesh is not empty */ + if ( !nentitiesg[PMMG_IO_Vertex] ) { + if (parmesh->myrank == parmesh->info.root) + fprintf(stderr, "\n ## Error: %s: can't save an empty mesh.\n", __func__); + return 0; + } + if ( !nentitiesg[PMMG_IO_Tetra] ) { + if (parmesh->myrank == parmesh->info.root) + fprintf(stderr, "\n ## Warning: %s: there is no tetra in your mesh.\n", __func__); + } + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Count entities. %s\n",stim); + } + + /** Compute the offset for parallel writing */ + tim = 2; + chrono(ON, &ctim[tim]); + + PMMG_CALLOC(parmesh, offset, 2 * PMMG_IO_ENTITIES_size, hsize_t, "offset", + goto free_and_return ); + + ier = PMMG_computeHDFoffset(parmesh, nentities, offset); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Compute HDF5 write offsets. %s\n",stim); + } + + /*------------------------- HDF5 IOs START HERE -------------------------*/ + + /* Shut HDF5 error stack */ + HDF_CHECK( H5Eset_auto(H5E_DEFAULT, NULL, NULL), + goto free_and_return ); + + /* TODO ? Pass MPI hints via the info struct */ + MPI_Info_create(&info); + + /* Create the property lists */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(fapl_id, parmesh->comm, info); + H5Pset_coll_metadata_write(fapl_id, 1); + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); + dcpl_id = H5Pcreate(H5P_DATASET_CREATE); + H5Pset_fill_time(dcpl_id, H5D_FILL_TIME_NEVER); + + /* Create the file */ + HDF_CHECK( file_id = H5Fcreate(h5_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id), + fprintf(stderr,"\n ## Error: %s: Rank %d could not create the hdf5 file %s.\n", + __func__, parmesh->myrank, h5_name); + goto free_and_return ); + + if (parmesh->info.imprim > PMMG_VERB_VERSION) { + fprintf(stdout, "\n %%%% %s OPENED \n", h5_name); + } + + /* Save the attributes (Version and Dimension, and number of entities per proc) */ + ier = PMMG_saveHeader_hdf5(parmesh, file_id); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the mesh attributes.\n",__func__); + } + goto free_and_return; + } + + /* Open the mesh group */ + HDF_CHECK( grp_mesh_id = H5Gcreate(file_id, "Mesh", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Mesh group.\n",__func__); + goto free_and_return ); + + /** Write the partitionning information */ + tim = 3; + chrono(ON, &ctim[tim]); + + HDF_CHECK( grp_part_id = H5Gcreate(grp_mesh_id, "Partitioning", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Mesh/Partitioning group.\n",__func__); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + ier = PMMG_savePartitioning_hdf5(parmesh, grp_part_id, dcpl_id, dxpl_id, nentities); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + H5Gclose(grp_part_id); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the partitioning information.\n",__func__); + } + H5Gclose(grp_part_id); + H5Gclose(grp_mesh_id); + goto free_and_return; + } + + 
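+
+  /* Every dataset written by the save functions called below follows the
+   * same collective HDF5 pattern (sketch only: "cnt"/"cntg" stand for the
+   * local and global entity counts, "off" for the offset computed by
+   * PMMG_computeHDFoffset, and "Name"/"buf"/"type"/"grp_id" are placeholders):
+   *
+   *   dspace_mem_id  = H5Screate_simple(2, cnt , NULL);
+   *   dspace_file_id = H5Screate_simple(2, cntg, NULL);
+   *   H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, off, NULL, cnt, NULL);
+   *   dset_id = H5Dcreate(grp_id, "Name", type, dspace_file_id,
+   *                       H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+   *   H5Dwrite(dset_id, type, dspace_mem_id, dspace_file_id, dxpl_id, buf);
+   *   H5Dclose(dset_id); H5Sclose(dspace_mem_id); H5Sclose(dspace_file_id);
+   */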
H5Gclose(grp_part_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Save partitioning info. %s\n",stim); + } + + /* Now that we have computed the offsets and written the number of entities, + each proc only needs to know its local number of entities and the + global number of entities */ + PMMG_DEL_MEM(parmesh, nentities, hsize_t, "nentities"); + + /** Write the mesh entities */ + tim = 4; + chrono(ON, &ctim[tim]); + + HDF_CHECK( grp_entities_id = H5Gcreate(grp_mesh_id, "MeshEntities", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Mesh/MeshEntities group.\n",__func__); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + ier = PMMG_saveMeshEntities_hdf5(parmesh, grp_entities_id, dcpl_id, dxpl_id, nentitiesl, nentitiesg, offset, save_entities); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + H5Gclose(grp_entities_id); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the mesh entities.\n",__func__); + } + H5Gclose(grp_entities_id); + H5Gclose(grp_mesh_id); + goto free_and_return; + } + + H5Gclose(grp_entities_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Save mesh entities. %s\n",stim); + } + + /* Close the mesh group */ + H5Gclose(grp_mesh_id); + + /* Deallocate the save_entities array if it was allocated in this function */ + if (nullf) PMMG_DEL_MEM(parmesh, save_entities, int, "save_entities"); + + /** Write the metric and the solutions */ + tim = 5; + chrono(ON, &ctim[tim]); + + HDF_CHECK( grp_sols_id = H5Gcreate(file_id, "Solutions", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not create the /Solutions group.\n",__func__); + goto free_and_return ); + + ier = PMMG_saveMetric_hdf5(parmesh, grp_sols_id, dcpl_id, dxpl_id, nentitiesl, nentitiesg, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the metric.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + ier = PMMG_saveLs_hdf5(parmesh, grp_sols_id, dcpl_id, dxpl_id, nentitiesl, nentitiesg, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the level-set.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + ier = PMMG_saveAllSols_hdf5(parmesh, grp_sols_id, dcpl_id, dxpl_id, nentitiesl, nentitiesg, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the solutions.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + H5Gclose(grp_sols_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Save metric and solutions. 
%s\n",stim); + } + + /* Release all HDF5 IDs */ + H5Fclose(file_id); + H5Pclose(fapl_id); + H5Pclose(dxpl_id); + H5Pclose(dcpl_id); + + /* We no longer need the offset nor the local nuumber of entities */ + PMMG_DEL_MEM(parmesh, offset, hsize_t, "offset"); + PMMG_DEL_MEM(parmesh, nentitiesl, hsize_t, "nentitiesl"); + + /** Write light data in XDMF file */ + ier = PMMG_writeXDMF(parmesh, h5_name, xdmf_name, nentitiesg); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + goto free_and_return ); + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not write the xdmf file %s.\n", + __func__, xdmf_name); + } + goto free_and_return; + } + + /* We no longer need the global number of entities */ + PMMG_DEL_MEM(parmesh, nentitiesg, hsize_t, "nentitiesg"); + + chrono(OFF, &ctim[0]); + printim(ctim[0].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout,"\n SAVE MESH AT HDF5 FORMAT: ELAPSED TIME %s\n",stim); + } + return 1; + + free_and_return: + PMMG_DEL_MEM(parmesh, nentities, hsize_t, "nentities"); + PMMG_DEL_MEM(parmesh, nentitiesg, hsize_t, "nentitiesg"); + PMMG_DEL_MEM(parmesh, nentitiesl, hsize_t, "nentitiesl"); + PMMG_DEL_MEM(parmesh, offset, hsize_t, "offset"); + H5Fclose(file_id); + H5Pclose(fapl_id); + H5Pclose(dxpl_id); + H5Pclose(dcpl_id); + return 0; + +#endif +} + +#ifdef USE_HDF5 +/** + * \param parmesh pointer toward the parmesh structure. + * \param file_id identifier of the HDF5 file. + * + * \return 0 if fail, 1 otherwise + * + * Load the version and the dimension of \a parmesh aswell as the number of + * partitions and the API mode from the opened HDF5 file \a file_id. + * + */ +static int PMMG_loadHeader_hdf5(PMMG_pParMesh parmesh, hid_t file_id) { + MMG5_pMesh mesh; + hid_t attr_id; + + assert(parmesh->ngrp == 1); + + mesh = parmesh->listgrp[0].mesh; + + attr_id = H5Aopen(file_id, "MeshVersionFormatted", H5P_DEFAULT); + H5Aread(attr_id, H5T_NATIVE_INT, &mesh->ver); + H5Aclose(attr_id); + + attr_id = H5Aopen(file_id, "Dimension", H5P_DEFAULT); + H5Aread(attr_id, H5T_NATIVE_INT, &mesh->dim); + H5Aclose(attr_id); + + attr_id = H5Aopen(file_id, "NumberOfPartitions", H5P_DEFAULT); + H5Aread(attr_id, H5T_NATIVE_INT, &parmesh->info.npartin); + H5Aclose(attr_id); + + attr_id = H5Aopen(file_id, "API_mode", H5P_DEFAULT); + H5Aread(attr_id, H5T_NATIVE_INT, &parmesh->info.API_mode); + H5Aclose(attr_id); + + if (mesh->dim != 3) { + if (parmesh->myrank == parmesh->info.root) + fprintf(stderr,"\n ## Error: %s: Wrong mesh dimension: %d (expected 3)!\n", __func__, mesh->dim); + return 0; + } + + if (parmesh->info.API_mode == PMMG_UNSET) { + if (parmesh->myrank == parmesh->info.root) + fprintf(stderr,"\n ## Error: %s: No APIDISTRIB mode provided!\n", __func__); + return 0; + } + + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_part_id identifier of the HDF5 group from which to read the mesh partitioning. + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentities array of size nprocs * PMMG_IO_ENTITIES_size that will contain the number of entities on each proc. + * \param nentitiesl array of size PMMG_IO_ENTITIES_size that will contain the local number of entities. + * \param nentitiesg array of size PMMG_IO_ENTITIES_size that will contain the global number of entities. 
+ * + * \return 0 if fail, 1 otherwise + * + * Load the mesh partitioning and the communicators from the \a grp_part_id group + * of the HDF5 file (only one group per process is allowed). + * Three situations can occur: + * 1/ nprocs = npartitions - Each proc just loads its corresponding partition + * from the HDF5 file. + * 2/ nprocs > npartitions - The excess procs do not load anything, and load balancing + * is performed before the remeshing loop. + * 3/ nprocs < npartitions - Some partitions are merged into the same proc, and load + * balancing is performed before the remeshing loop. + * + * \warning Only the cases 1/ and 2/ are actually implemented yet. + * + */ +static int PMMG_loadPartitioning_hdf5(PMMG_pParMesh parmesh, hid_t grp_part_id, hid_t dxpl_id, + hsize_t *nentities, hsize_t *nentitiesl, hsize_t *nentitiesg) { + hsize_t *nentities_read; + hsize_t *ncomms, *nitem, *nitem_part; + hsize_t ncommg, comm_offset, nitemg, item_offset, rank_offset; + hsize_t icomm; + int *colors; + int **idx_loc, **idx_glob, *loc_buf, *glob_buf; + int npartitions, nprocs, rank; + hid_t dspace_file_id, dspace_mem_id; + hid_t dset_id; + + assert ( parmesh->ngrp == 1 ); + + /* Set pointers to NULL */ + ncomms = nitem = nitem_part = NULL; + colors = NULL; + idx_loc = idx_glob = NULL; + loc_buf = glob_buf = NULL; + + /* Init */ + nprocs = parmesh->nprocs; + rank = parmesh->myrank; + npartitions = parmesh->info.npartin; + + ncommg = nitemg = comm_offset = item_offset = rank_offset = 0; + + if (nprocs < npartitions) { + fprintf(stderr, "\n ## Error : cannot read %d partitions with %d procs. \n", + npartitions, nprocs); + return 0; + } + + /* Read the number of entities per partition */ + PMMG_CALLOC(parmesh, nentities_read, npartitions * PMMG_IO_ENTITIES_size, hsize_t, "nentities_read", goto free_and_return); + dset_id = H5Dopen(grp_part_id, "NumberOfEntities", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_HSIZE, H5S_ALL, H5S_ALL, dxpl_id, nentities_read); + H5Dclose(dset_id); + for (int i = 0 ; i < npartitions ; i++) { + for (int j = 0 ; j < PMMG_IO_ENTITIES_size ; j++) { + nentities[PMMG_IO_ENTITIES_size * i + j] = nentities_read[PMMG_IO_ENTITIES_size * i + j]; + } + } + PMMG_DEL_MEM(parmesh, nentities_read, hsize_t, "nentities_read"); + + /* Set at least 1 communicator for each proc (even the ones that wont read the mesh) */ + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + PMMG_Set_numberOfFaceCommunicators(parmesh, 1); + else + PMMG_Set_numberOfNodeCommunicators(parmesh, 1); + + if (rank < npartitions) { + /* Get the local number of entities */ + for (int i = 0 ; i < PMMG_IO_ENTITIES_size ; i++) + nentitiesl[i] = nentities[PMMG_IO_ENTITIES_size * rank + i]; + } + + /* Get the global number of entities */ + for (int i = 0 ; i < npartitions ; i++) + for (int j = 0 ; j < PMMG_IO_ENTITIES_size ; j++) + nentitiesg[j] += nentities[PMMG_IO_ENTITIES_size * i + j]; + + /* Read the number of comms */ + PMMG_CALLOC(parmesh, ncomms, nprocs, hsize_t, "ncomms", goto free_and_return); + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dopen(grp_part_id, "NumberOfFaceCommunicators", H5P_DEFAULT); + else + dset_id = H5Dopen(grp_part_id, "NumberOfNodeCommunicators", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_HSIZE, H5S_ALL, H5S_ALL, H5P_DEFAULT, ncomms); + H5Dclose(dset_id); + + /* Compute the total number of comms */ + for (int i = 0 ; i < npartitions ; i++) { + ncommg += ncomms[i]; + } + for (int i = 0 ; i < rank ; i++) { + comm_offset += ncomms[i]; + } + + /* Read the colors and the number of 
items */ + PMMG_MALLOC(parmesh, colors, ncomms[rank], int, "colors", goto free_and_return); + PMMG_MALLOC(parmesh, nitem, ncomms[rank], hsize_t, "nitem", goto free_and_return); + + dset_id = H5Dopen(grp_part_id, "ColorsOut", H5P_DEFAULT); + dspace_file_id = H5Dget_space(dset_id); + dspace_mem_id = H5Screate_simple(1, &ncomms[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &comm_offset, NULL, &ncomms[rank], NULL); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, colors); + H5Dclose(dset_id); + H5Sclose(dspace_file_id); + H5Sclose(dspace_mem_id); + + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dopen(grp_part_id, "NumberOfCommunicatorFaces", H5P_DEFAULT); + else + dset_id = H5Dopen(grp_part_id, "NumberOfCommunicatorNodes", H5P_DEFAULT); + dspace_file_id = H5Dget_space(dset_id); + dspace_mem_id = H5Screate_simple(1, &ncomms[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &comm_offset, NULL, &ncomms[rank], NULL); + H5Dread(dset_id, H5T_NATIVE_HSIZE, dspace_mem_id, dspace_file_id, dxpl_id, nitem); + H5Dclose(dset_id); + H5Sclose(dspace_file_id); + H5Sclose(dspace_mem_id); + + /* Compute the total number of items and the item offset for the parallel reading */ + + PMMG_CALLOC(parmesh, nitem_part, nprocs, hsize_t, "nitem_part", goto free_and_return); + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + nitem_part[rank] += nitem[icomm]; + } + + MPI_Allgather(&nitem_part[rank], 1, MPI_LONG_LONG, nitem_part, 1, MPI_LONG_LONG, parmesh->comm); + + for (int i = 0 ; i < npartitions ; i++) { + nitemg += nitem_part[i]; + } + + for (int i = 0 ; i < rank ; i++) { + rank_offset += nitem_part[i]; + } + + /* Read the communicator items in one buffer */ + + PMMG_MALLOC(parmesh, loc_buf, nitem_part[rank], int, "loc_buf", goto free_and_return); + PMMG_MALLOC(parmesh, glob_buf, nitem_part[rank], int, "glob_buf", goto free_and_return); + + dspace_file_id = H5Screate_simple(1, &nitemg, NULL); + + /* Local indices */ + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dopen(grp_part_id, "LocalFaceIndices", H5P_DEFAULT); + else + dset_id = H5Dopen(grp_part_id, "LocalNodeIndices", H5P_DEFAULT); + dspace_mem_id = H5Screate_simple(1, &nitem_part[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &rank_offset, NULL, &nitem_part[rank], NULL); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, loc_buf); + H5Sclose(dspace_mem_id); + H5Dclose(dset_id); + + /* Global indices */ + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + dset_id = H5Dopen(grp_part_id, "GlobalFaceIndices", H5P_DEFAULT); + else + dset_id = H5Dopen(grp_part_id, "GlobalNodeIndices", H5P_DEFAULT); + dspace_mem_id = H5Screate_simple(1, &nitem_part[rank], NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &rank_offset, NULL, &nitem_part[rank], NULL); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, glob_buf); + H5Sclose(dspace_mem_id); + H5Dclose(dset_id); + H5Sclose(dspace_file_id); + + H5Sclose(dspace_file_id); + + PMMG_DEL_MEM(parmesh, nitem_part, hsize_t, "nitem_part"); + + /* Set the communicator items */ + PMMG_CALLOC(parmesh, idx_loc, ncomms[rank], int*, "idx_loc", goto free_and_return); + PMMG_CALLOC(parmesh, idx_glob, ncomms[rank], int*, "idx_glob", goto free_and_return); + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_CALLOC(parmesh, idx_loc[icomm], nitem[icomm], int, "idx_loc[icomm]", goto free_and_return); + PMMG_CALLOC(parmesh, idx_glob[icomm], 
nitem[icomm], int, "idx_glob[icomm]", goto free_and_return); + } + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + for (int k = 0 ; k < nitem[icomm] ; k++) { + idx_loc[icomm][k] = loc_buf[item_offset + k]; + idx_glob[icomm][k] = glob_buf[item_offset + k]; + } + item_offset += nitem[icomm]; + } + + /* Free the buffers */ + PMMG_DEL_MEM(parmesh, loc_buf, int, "loc_buf"); + PMMG_DEL_MEM(parmesh, glob_buf, int, "glob_buf"); + + /* Set the communicators */ + if (rank < parmesh->info.npartin) { + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) { + + if ( !PMMG_Set_numberOfFaceCommunicators(parmesh, ncomms[rank]) ) { + fprintf(stderr,"\n ## Error: %s: unable to set number of face communicators on rank %d.\n", + __func__, rank); + goto free_and_return; + } + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + + if ( !PMMG_Set_ithFaceCommunicatorSize(parmesh, icomm, colors[icomm], nitem[icomm]) ) { + fprintf(stderr,"\n ## Error: %s: unable to set %lld th face communicator size on rank %d.\n", + __func__, icomm, rank); + goto free_and_return; + } + + if ( !PMMG_Set_ithFaceCommunicator_faces(parmesh, icomm, idx_loc[icomm], idx_glob[icomm], 1) ) { + fprintf(stderr,"\n ## Error: %s: unable to set %lld th face communicator faces on rank %d.\n", + __func__, icomm, rank); + goto free_and_return; + } + + } + } + else { + + if ( !PMMG_Set_numberOfNodeCommunicators(parmesh, ncomms[rank]) ) { + fprintf(stderr,"\n ## Error: %s: unable to set number of node communicators on rank %d.\n", + __func__, rank); + goto free_and_return; + } + + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + + if ( !PMMG_Set_ithNodeCommunicatorSize(parmesh, icomm, colors[icomm], nitem[icomm]) ) { + fprintf(stderr,"\n ## Error: %s: unable to set %lld th node communicator size on rank %d.\n", + __func__, icomm, rank); + goto free_and_return; + } + + if ( !PMMG_Set_ithNodeCommunicator_nodes(parmesh, icomm, idx_loc[icomm], idx_glob[icomm], 1) ) { + fprintf(stderr,"\n ## Error: %s: unable to set %lld th node communicator faces on rank %d.\n", + __func__, icomm, rank); + goto free_and_return; + } + + } + } + } + + /* Free all memory */ + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_DEL_MEM(parmesh, idx_loc[icomm], int, "idx_loc[icomm]"); + PMMG_DEL_MEM(parmesh, idx_glob[icomm], int, "idx_glob[icomm]"); + } + PMMG_DEL_MEM(parmesh, idx_loc, int*, "idx_loc"); + PMMG_DEL_MEM(parmesh, idx_glob, int*, "idx_glob"); + + PMMG_DEL_MEM(parmesh, ncomms, hsize_t, "ncomms"); + PMMG_DEL_MEM(parmesh, colors, int, "colors"); + PMMG_DEL_MEM(parmesh, nitem, int, "nitem"); + + return 1; + + free_and_return: + PMMG_DEL_MEM(parmesh, loc_buf, int, "loc_buf"); + PMMG_DEL_MEM(parmesh, glob_buf, int, "glob_buf"); + PMMG_DEL_MEM(parmesh, ncomms, hsize_t, "ncomms"); + PMMG_DEL_MEM(parmesh, colors, int, "colors"); + PMMG_DEL_MEM(parmesh, nitem, int, "nitem"); + + if ( idx_loc && idx_glob ) { + for (icomm = 0 ; icomm < ncomms[rank] ; icomm++) { + PMMG_DEL_MEM(parmesh, idx_loc[icomm], int, "idx_loc[icomms]"); + PMMG_DEL_MEM(parmesh, idx_glob[icomm], int, "idx_glob[icomm]"); + } + PMMG_DEL_MEM(parmesh, idx_loc, int*, "idx_loc"); + PMMG_DEL_MEM(parmesh, idx_glob, int*, "idx_glob"); + } + + return 0; + +} + +static int PMMG_loadMeshEntities_hdf5(PMMG_pParMesh parmesh, hid_t grp_entities_id, hid_t dxpl_id, hsize_t *nentitiesl, hsize_t *nentitiesg, hsize_t *offset, int *load_entities) { + /* MMG variables */ + PMMG_pGrp grp; + MMG5_pMesh mesh; + MMG5_pPoint ppt; + MMG5_pEdge pa; + MMG5_pTria pt; + MMG5_pQuad pq; + MMG5_pTetra pe; + MMG5_pPrism 
pp; + + /* Local mesh size */ + hsize_t ne, np, nt, na, nquad, nprism; /* Tetra, points, triangles, edges, quads, prisms */ + hsize_t nc, npreq, nppar; /* Corners, required and parallel vertices */ + hsize_t nr, nedreq, nedpar; /* Ridges, required and parallel edges */ + hsize_t ntreq, ntpar; /* Required and parallel triangles */ + hsize_t nqreq, nqpar; /* Required and parallel quads */ + hsize_t nereq, nepar; /* Required and parallel tetra */ + hsize_t nnor, ntan; /* Normals and Tangents */ + /* Global mesh size */ + hsize_t neg, npg, ntg, nag, nquadg, nprismg; /* Tetra, points, triangles, edges, quads, prisms */ + hsize_t ncg, npreqg, npparg; /* Corners, required and parallel vertices */ + hsize_t nrg, nedreqg, nedparg; /* Ridges, required and parallel edges */ + hsize_t ntreqg, ntparg; /* Required and parallel triangles */ + hsize_t nqreqg, nqparg; /* Required and parallel quads */ + hsize_t nereqg, neparg; /* Required and parallel tetra */ + hsize_t nnorg, ntang; /* Normals and Tangents */ + + /* Mesh buffer arrays */ + /* 6 buffers is the minimum amount for what we have to do */ + double *ppoint; /* Point coordinates */ + int *pent; /* Other entities : edges, trias, quads, tetra, prisms. */ + int *pcr; /* Corners and ridges */ + int *preq, *ppar; /* Required and parallel entities */ + int *pref; /* References */ + + /* Normals and tangents */ + /* We could reuse the previous buffers, but the names would be confusing */ + int *pnorat, *ptanat; /* Normals and Tangents at vertices */ + double *pnor, *ptan; /* Normals and Tangents */ + + /* HDF5 variables */ + hid_t dspace_mem_id, dspace_file_id; + hid_t dset_id; + + /*------------------------- INIT -------------------------*/ + + /* Set all buffers to NULL */ + ppoint = NULL; + pent = NULL; + pcr = NULL; + preq = NULL; ppar = NULL; + pref = NULL; + pnor = NULL; ptan = NULL; + pnorat = NULL; ptanat = NULL; + + /* Set ParMmg variables */ + grp = &parmesh->listgrp[0]; + mesh = grp->mesh; + ppt = NULL; + pa = NULL; + pt = NULL; + pq = NULL; + pe = NULL; + pp = NULL; + + if (parmesh->myrank < parmesh->info.npartin) { + + /* Get the number of entities */ + np = nentitiesl[PMMG_IO_Vertex]; + na = nentitiesl[PMMG_IO_Edge]; + nt = nentitiesl[PMMG_IO_Tria]; + nquad = nentitiesl[PMMG_IO_Quad]; + ne = nentitiesl[PMMG_IO_Tetra]; + nprism = nentitiesl[PMMG_IO_Prism]; + nc = nentitiesl[PMMG_IO_Corner]; + npreq = nentitiesl[PMMG_IO_RequiredVertex]; + nppar = nentitiesl[PMMG_IO_ParallelVertex]; + nr = nentitiesl[PMMG_IO_Ridge]; + nedreq = nentitiesl[PMMG_IO_RequiredEdge]; + nedpar = nentitiesl[PMMG_IO_ParallelEdge]; + ntreq = nentitiesl[PMMG_IO_RequiredTria]; + ntpar = nentitiesl[PMMG_IO_ParallelTria]; + nqreq = nentitiesl[PMMG_IO_RequiredQuad]; + nqpar = nentitiesl[PMMG_IO_ParallelQuad]; + nereq = nentitiesl[PMMG_IO_RequiredTetra]; + nepar = nentitiesl[PMMG_IO_ParallelTetra]; + nnor = nentitiesl[PMMG_IO_Normal]; + ntan = nentitiesl[PMMG_IO_Tangent]; + + npg = nentitiesg[PMMG_IO_Vertex]; + nag = nentitiesg[PMMG_IO_Edge]; + ntg = nentitiesg[PMMG_IO_Tria]; + nquadg = nentitiesg[PMMG_IO_Quad]; + neg = nentitiesg[PMMG_IO_Tetra]; + nprismg = nentitiesg[PMMG_IO_Prism]; + ncg = nentitiesg[PMMG_IO_Corner]; + npreqg = nentitiesg[PMMG_IO_RequiredVertex]; + npparg = nentitiesg[PMMG_IO_ParallelVertex]; + nrg = nentitiesg[PMMG_IO_Ridge]; + nedreqg = nentitiesg[PMMG_IO_RequiredEdge]; + nedparg = nentitiesg[PMMG_IO_ParallelEdge]; + ntreqg = nentitiesg[PMMG_IO_RequiredTria]; + ntparg = nentitiesg[PMMG_IO_ParallelTria]; + nqreqg = nentitiesg[PMMG_IO_RequiredQuad]; + 
nqparg = nentitiesg[PMMG_IO_ParallelQuad]; + nereqg = nentitiesg[PMMG_IO_RequiredTetra]; + neparg = nentitiesg[PMMG_IO_ParallelTetra]; + nnorg = nentitiesg[PMMG_IO_Normal]; + ntang = nentitiesg[PMMG_IO_Tangent]; + + /* Arrays for bidimensional dataspaces */ + hsize_t hnp[2] = {np, 3}; + hsize_t hna[2] = {na, 2}; + hsize_t hnt[2] = {nt, 3}; + hsize_t hnquad[2] = {nquad, 4}; + hsize_t hne[2] = {ne, 4}; + hsize_t hnprism[2] = {nprism, 2}; + hsize_t hnnor[2] = {nnor, 3}; + hsize_t hntan[2] = {ntan, 3}; + hsize_t hnpg[2] = {npg, 3}; + hsize_t hnag[2] = {nag, 2}; + hsize_t hntg[2] = {ntg, 3}; + hsize_t hnquadg[2] = {nquadg, 4}; + hsize_t hneg[2] = {neg, 4}; + hsize_t hnprismg[2] = {nprismg, 2}; + hsize_t hnnorg[2] = {nnorg, 3}; + hsize_t hntang[2] = {ntang, 3}; + + PMMG_Set_meshSize(parmesh, np, ne, nprism, nt, nquad, na); + + /* Vertices, Normals and Tangents */ + if (load_entities[PMMG_IO_Vertex] && npg) { + + PMMG_MALLOC(parmesh, ppoint, 3 * np, double, "ppoint", goto free_and_return); + PMMG_MALLOC(parmesh, pref, np, int, "pref", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hnp, NULL); + dspace_file_id = H5Screate_simple(2, hnpg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Vertex], NULL, hnp, NULL); + dset_id = H5Dopen(grp_entities_id, "Vertices", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, ppoint); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hnp, NULL); + dspace_file_id = H5Screate_simple(1, hnpg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Vertex], NULL, hnp, NULL); + dset_id = H5Dopen(grp_entities_id, "VerticesRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + PMMG_Set_vertices(parmesh, ppoint, pref); + + PMMG_DEL_MEM(parmesh, ppoint, double, "ppoint"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (load_entities[PMMG_IO_Corner] && ncg) { + + PMMG_MALLOC(parmesh, pcr, nc, int, "pcr", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nc, NULL); + dspace_file_id = H5Screate_simple(1, &ncg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Corner], NULL, &nc, NULL); + dset_id = H5Dopen(grp_entities_id, "Corners", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pcr); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nc ; i++) { + PMMG_Set_corner(parmesh, pcr[i] - offset[2 * PMMG_IO_Vertex] + 1); + } + + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + + } + + if (load_entities[PMMG_IO_RequiredVertex] && npreqg) { + + PMMG_MALLOC(parmesh, preq, npreq, int, "preq", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &npreq, NULL); + dspace_file_id = H5Screate_simple(1, &npreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredVertex], NULL, &npreq, NULL); + dset_id = H5Dopen(grp_entities_id, "RequiredVertices", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < npreq ; i++) { + PMMG_Set_requiredVertex(parmesh, preq[i] - offset[2 * PMMG_IO_Vertex] + 1); + } + + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + + } + + if 
(load_entities[PMMG_IO_ParallelVertex] && npparg) { + + PMMG_MALLOC(parmesh, ppar, nppar, int, "ppar", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nppar, NULL); + dspace_file_id = H5Screate_simple(1, &npparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelVertex], NULL, &nppar, NULL); + dset_id = H5Dopen(grp_entities_id, "ParallelVertices", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nppar ; i++) { + ppt = &mesh->point[ppar[i] - offset[2 * PMMG_IO_Vertex] + 1]; + ppt->tag |= MG_PARBDY; + } + + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + + } + + if (load_entities[PMMG_IO_Normal] && nnorg) { + + PMMG_MALLOC(parmesh, pnor, 3 * nnor, double, "pnor", goto free_and_return); + PMMG_MALLOC(parmesh, pnorat, nnor, int, "pnorat", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hnnor, NULL); + dspace_file_id = H5Screate_simple(2, hnnorg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Normal], NULL, hnnor, NULL); + dset_id = H5Dopen(grp_entities_id, "Normals", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, pnor); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hnnor, NULL); + dspace_file_id = H5Screate_simple(1, hnnorg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Normal], NULL, hnnor, NULL); + dset_id = H5Dopen(grp_entities_id, "NormalsAtVertices", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pnorat); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nnor ; i++) { + PMMG_Set_normalAtVertex(parmesh, pnorat[i] - offset[2 * PMMG_IO_Vertex] + 1, pnor[3 * i], pnor[3 * i + 1], pnor[3 * i + 2]); + } + + PMMG_DEL_MEM(parmesh, pnor, double, "pnor"); + PMMG_DEL_MEM(parmesh, pnorat, int, "pnorat"); + + } + + if (load_entities[PMMG_IO_Tangent] && ntang) { + + PMMG_MALLOC(parmesh, ptan, 3 * ntan, double, "ptan", goto free_and_return); + PMMG_MALLOC(parmesh, ptanat, ntan, int, "ptanat", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hntan, NULL); + dspace_file_id = H5Screate_simple(2, hntang, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tangent], NULL, hntan, NULL); + dset_id = H5Dopen(grp_entities_id, "Tangents", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, ptan); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hntan, NULL); + dspace_file_id = H5Screate_simple(1, hntang, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tangent], NULL, hntan, NULL); + dset_id = H5Dopen(grp_entities_id, "TangentsAtVertices", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ptanat); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + PMMG_DEL_MEM(parmesh, ptan, double, "ptan"); + PMMG_DEL_MEM(parmesh, ptanat, int, "ptanat"); + + } + + } + + /* Edges */ + if (load_entities[PMMG_IO_Edge] && nag) { + + PMMG_MALLOC(parmesh, pent, 2 * na, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, na, int, "pref", goto free_and_return); + + dspace_mem_id = 
H5Screate_simple(2, hna, NULL); + dspace_file_id = H5Screate_simple(2, hnag, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Edge], NULL, hna, NULL); + dset_id = H5Dopen(grp_entities_id, "Edges", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hna, NULL); + dspace_file_id = H5Screate_simple(1, hnag, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Edge], NULL, hna, NULL); + dset_id = H5Dopen(grp_entities_id, "EdgesRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < na ; i++) { + PMMG_Set_edge(parmesh, pent[2 * i] - offset[PMMG_IO_Vertex] + 1, pent[2 * i + 1] - offset[PMMG_IO_Vertex] + 1, pref[i], i + 1); + } + + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (load_entities[PMMG_IO_Ridge] && nrg) { + + PMMG_MALLOC(parmesh, pcr, nr, int, "pcr", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nr, NULL); + dspace_file_id = H5Screate_simple(1, &nrg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Ridge], NULL, &nr, NULL); + dset_id = H5Dopen(grp_entities_id, "Ridges", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pcr); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nr ; i++) { + PMMG_Set_ridge(parmesh, pcr[i] - offset[2 * PMMG_IO_Edge] + 1); + } + + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + + } + + if (load_entities[PMMG_IO_RequiredEdge] && nedreqg) { + + PMMG_MALLOC(parmesh, preq, nedreq, int, "preq", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nedreq, NULL); + dspace_file_id = H5Screate_simple(1, &nedreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredEdge], NULL, &nedreq, NULL); + dset_id = H5Dopen(grp_entities_id, "RequiredEdges", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nedreq ; i++) { + PMMG_Set_requiredEdge(parmesh, preq[i] - offset[2 * PMMG_IO_Edge] + 1); + } + + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + + } + + if (load_entities[PMMG_IO_ParallelEdge] && nedparg) { + + PMMG_MALLOC(parmesh, ppar, nedpar, int, "ppar", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nedpar, NULL); + dspace_file_id = H5Screate_simple(1, &nedparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelEdge], NULL, &nedpar, NULL); + dset_id = H5Dopen(grp_entities_id, "ParallelEdges", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nedpar ; i++) { + pa = &mesh->edge[ppar[i] - offset[2 * PMMG_IO_Edge] + 1]; + pa->tag |= MG_PARBDY; + } + + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + + } + } + + /* Triangles */ + if (load_entities[PMMG_IO_Tria] && ntg) { + + PMMG_MALLOC(parmesh, pent, 3 * nt, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nt, int, "pref", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hnt, 
NULL); + dspace_file_id = H5Screate_simple(2, hntg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tria], NULL, hnt, NULL); + dset_id = H5Dopen(grp_entities_id, "Triangles", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hnt, NULL); + dspace_file_id = H5Screate_simple(1, hntg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tria], NULL, hnt, NULL); + dset_id = H5Dopen(grp_entities_id, "TrianglesRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nt ; i++) { + PMMG_Set_triangle(parmesh, pent[3 * i] - offset[2 * PMMG_IO_Vertex] + 1, + pent[3 * i + 1] - offset[2 * PMMG_IO_Vertex] + 1, + pent[3 * i + 2] - offset[2 * PMMG_IO_Vertex] + 1, + pref[i], i + 1); + } + + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (load_entities[PMMG_IO_RequiredTria] && ntreqg) { + + PMMG_MALLOC(parmesh, preq, ntreq, int, "preq", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &ntreq, NULL); + dspace_file_id = H5Screate_simple(1, &ntreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredTria], NULL, &ntreq, NULL); + dset_id = H5Dopen(grp_entities_id, "RequiredTriangles", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < ntreq ; i++) { + PMMG_Set_requiredTriangle(parmesh, preq[i] + offset[2 * PMMG_IO_Tria] + 1); + } + + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + + } + + if (load_entities[PMMG_IO_ParallelTria] && ntparg) { + + PMMG_MALLOC(parmesh, ppar, ntpar, int, "ppar", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &ntpar, NULL); + dspace_file_id = H5Screate_simple(1, &ntparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelTria], NULL, &ntpar, NULL); + dset_id = H5Dopen(grp_entities_id, "ParallelTriangles", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < ntpar ; i++) { + pt = &mesh->tria[ppar[i] - offset[2 * PMMG_IO_Tria] + 1]; + for (int j = 0 ; j < 3 ; j++) { + pt->tag[j] |= MG_PARBDY; + } + } + + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + + } + } + + + /* Quadrilaterals */ + if (load_entities[PMMG_IO_Quad] && nquadg) { + + PMMG_MALLOC(parmesh, pent, 4 * nquad, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nquad, int, "pref", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hnquad, NULL); + dspace_file_id = H5Screate_simple(2, hnquadg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Quad], NULL, hnquad, NULL); + dset_id = H5Dopen(grp_entities_id, "Quadrilaterals", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hnquad, NULL); + dspace_file_id = H5Screate_simple(1, hnquadg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Quad], 
NULL, hnquad, NULL); + dset_id = H5Dopen(grp_entities_id, "QuadrilateralsRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nquad ; i++) { + PMMG_Set_quadrilateral(parmesh, pent[4 * i] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 * i + 1] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 * i + 2] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 * i + 3] - offset[2 * PMMG_IO_Vertex] + 1, + pref[i], i + 1); + } + + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (load_entities[PMMG_IO_RequiredQuad] && nqreqg) { + + PMMG_MALLOC(parmesh, preq, nqreq, int, "preq", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nqreq, NULL); + dspace_file_id = H5Screate_simple(1, &nqreqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredQuad], NULL, &nqreq, NULL); + dset_id = H5Dopen(grp_entities_id, "RequiredQuadrilaterals", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nqreq ; i++) { + pq = &mesh->quadra[preq[i] - offset[2 * PMMG_IO_Quad] + 1]; + for (int j = 0 ; j < 4 ; j++) { + pq->tag[j] |= MG_REQ; + } + } + + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + + } + + if (load_entities[PMMG_IO_ParallelQuad] && nqparg) { + + PMMG_MALLOC(parmesh, ppar, nqpar, int, "ppar", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nqpar, NULL); + dspace_file_id = H5Screate_simple(1, &nqparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelQuad], NULL, &nqpar, NULL); + dset_id = H5Dopen(grp_entities_id, "ParallelQuadrilaterals", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nqpar ; i++) { + pq = &mesh->quadra[ppar[i] - offset[2 * PMMG_IO_Quad] + 1]; + for (int j = 0 ; j < 4 ; j++) { + pq->tag[j] |= MG_PARBDY; + } + } + + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + + } + } + + /* Tetrahedra */ + if (load_entities[PMMG_IO_Tetra] && neg) { + + PMMG_MALLOC(parmesh, pent, 4 * ne, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, ne, int, "pref", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hne, NULL); + dspace_file_id = H5Screate_simple(2, hneg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tetra], NULL, hne, NULL); + dset_id = H5Dopen(grp_entities_id, "Tetrahedra", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hne, NULL); + dspace_file_id = H5Screate_simple(1, hneg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Tetra], NULL, hne, NULL); + dset_id = H5Dopen(grp_entities_id, "TetrahedraRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < ne ; i++) { + PMMG_Set_tetrahedron(parmesh, pent[4 * i] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 * i + 1] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 * i + 2] - offset[2 * PMMG_IO_Vertex] + 1, + pent[4 
* i + 3] - offset[2 * PMMG_IO_Vertex] + 1, + pref[i], i + 1); + } + + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + if (load_entities[PMMG_IO_RequiredTetra] && nereqg) { + + PMMG_MALLOC(parmesh, preq, nereq, int, "preq", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nereq, NULL); + dspace_file_id = H5Screate_simple(1, &nereqg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_RequiredTetra], NULL, &nereq, NULL); + dset_id = H5Dopen(grp_entities_id, "RequiredTetrahedra", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, preq); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nereq ; i++) { + PMMG_Set_requiredTetrahedron(parmesh, preq[i] - offset[2 * PMMG_IO_Tetra] + 1); + } + + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + + } + + if (load_entities[PMMG_IO_ParallelTetra] && neparg) { + + PMMG_MALLOC(parmesh, ppar, nepar, int, "ppar", goto free_and_return); + + dspace_mem_id = H5Screate_simple(1, &nepar, NULL); + dspace_file_id = H5Screate_simple(1, &neparg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_ParallelTetra], NULL, &nepar, NULL); + dset_id = H5Dopen(grp_entities_id, "ParallelTetrahedra", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, ppar); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nepar ; i++) { + pe = &mesh->tetra[ppar[i] - offset[2 * PMMG_IO_Tetra] + 1]; + pe->tag |= MG_PARBDY; + } + + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + + } + } + + /* Prisms */ + if (load_entities[PMMG_IO_Prism] && nprismg) { + PMMG_MALLOC(parmesh, pent, 6 * nprism, int, "pent", goto free_and_return); + PMMG_MALLOC(parmesh, pref, nprism, int, "pref", goto free_and_return); + + dspace_mem_id = H5Screate_simple(2, hnprism, NULL); + dspace_file_id = H5Screate_simple(2, hnprismg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Prism], NULL, hnprism, NULL); + dset_id = H5Dopen(grp_entities_id, "Prisms", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pent); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + dspace_mem_id = H5Screate_simple(1, hnprism, NULL); + dspace_file_id = H5Screate_simple(1, hnprismg, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, &offset[2 * PMMG_IO_Prism], NULL, hnprism, NULL); + dset_id = H5Dopen(grp_entities_id, "PrismsRef", H5P_DEFAULT); + H5Dread(dset_id, H5T_NATIVE_INT, dspace_mem_id, dspace_file_id, dxpl_id, pref); + H5Dclose(dset_id); + H5Sclose(dspace_mem_id); + H5Sclose(dspace_file_id); + + for (int i = 0 ; i < nprism ; i++) { + PMMG_Set_prism(parmesh, pent[6 * i] - offset[2 * PMMG_IO_Vertex] + 1, + pent[6 * i + 1] - offset[2 * PMMG_IO_Vertex] + 1, + pent[6 * i + 2] - offset[2 * PMMG_IO_Vertex] + 1, + pent[6 * i + 3] - offset[2 * PMMG_IO_Vertex] + 1, + pent[6 * i + 4] - offset[2 * PMMG_IO_Vertex] + 1, + pent[6 * i + 5] - offset[2 * PMMG_IO_Vertex] + 1, + pref[i], i + 1); + } + + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + + } + + /* Print the number of entities */ + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," NUMBER OF VERTICES %lld\n", npg); + fprintf(stdout," NUMBER OF TETRAHEDRA %lld\n", neg); + if ( nprismg ) + fprintf(stdout," NUMBER OF PRISMS %lld\n",nprismg); + + if ( nag ) { 
+ fprintf(stdout," NUMBER OF EDGES %lld\n",nag); + if ( nrg ) + fprintf(stdout," NUMBER OF RIDGES %lld\n",nrg); + } + if ( ntg ) + fprintf(stdout," NUMBER OF TRIANGLES %lld\n",ntg); + if ( nquadg ) + fprintf(stdout," NUMBER OF QUADRILATERALS %lld\n",nquadg); + + + if ( npreqg || nedreqg || ntreqg || nereqg || nqreqg ) { + fprintf(stdout," NUMBER OF REQUIRED ENTITIES: \n"); + if ( npreqg ) + fprintf(stdout," VERTICES %lld \n",npreqg); + if ( nedreqg ) + fprintf(stdout," EDGES %lld \n",nedreqg); + if ( ntreqg ) + fprintf(stdout," TRIANGLES %lld \n",ntreqg); + if ( nqreqg ) + fprintf(stdout," QUADRILATERALS %lld \n",nqreqg); + if ( nereqg ) + fprintf(stdout," TETRAHEDRA %lld \n",nereqg); + } + if( ncg ) + fprintf(stdout," NUMBER OF CORNERS %lld \n",ncg); + + if ( npparg || nedparg || ntparg || neparg || nqparg ) { + fprintf(stdout," NUMBER OF PARALLEL ENTITIES: \n"); + if ( npparg ) + fprintf(stdout," VERTICES %lld \n",npparg); + if ( nedparg ) + fprintf(stdout," EDGES %lld \n",nedparg); + if ( neparg ) + fprintf(stdout," TRIANGLES %lld \n",ntparg); + if ( nqparg ) + fprintf(stdout," QUADRILATERALS %lld \n",nqparg); + if ( neparg ) + fprintf(stdout," TETRAHEDRA %lld \n",neparg); + } + } + + } + + return 1; + + free_and_return: + PMMG_DEL_MEM(parmesh, ppoint, double, "ppoint"); + PMMG_DEL_MEM(parmesh, pent, int, "pent"); + PMMG_DEL_MEM(parmesh, pref, int, "pref"); + PMMG_DEL_MEM(parmesh, pcr, int, "pcr"); + PMMG_DEL_MEM(parmesh, preq, int, "preq"); + PMMG_DEL_MEM(parmesh, ppar, int, "ppar"); + PMMG_DEL_MEM(parmesh, pnor, double, "pnor"); + PMMG_DEL_MEM(parmesh, pnorat, int, "pnorat"); + PMMG_DEL_MEM(parmesh, ptan, double, "ptan"); + PMMG_DEL_MEM(parmesh, ptanat, int, "ptanat"); + + return 0; + +} + +/** + * \param mesh pointer toward the mesh associated to the solution. + * \param sol pointer toward the solution structure to save. + * \param dspace_file_id identifier of the HDF5 data space in which is written the solution. + * \param dset_id identifier of the HDF5 group in which is written the solution. + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param np number of vertices. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel loading. + * \param imprim verbosity level + * + * \return 0 if fail, 1 otherwise + * + * Load at hdf5 format a given solution structure defined at vertices. 
+ * + */ +static int PMMG_loadSolAtVertices_hdf5(MMG5_pMesh mesh,MMG5_pSol sol, + hid_t dspace_file_id, + hid_t dset_id,hid_t dxpl_id, + MMG5_int np,hsize_t *offset,int imprim) { + + MMG5_pPoint ppt; + double *sol_buf; + hsize_t sol_offset[2] = {0, 0}; + hid_t dspace_mem_id; + + /* Compute the offset for parallel reading */ + hsize_t hns[2] = {np, sol->size}; + sol_offset[0] = offset[2 * PMMG_IO_Vertex]; + + /* Read the solution buffer */ + PMMG_MALLOC(mesh, sol_buf, np * sol->size, double, "sol_buf", return 0); + dspace_mem_id = H5Screate_simple(2, hns, NULL); + H5Sselect_hyperslab(dspace_file_id, H5S_SELECT_SET, sol_offset, NULL, hns, NULL); + H5Dread(dset_id, H5T_NATIVE_DOUBLE, dspace_mem_id, dspace_file_id, dxpl_id, sol_buf); + H5Sclose(dspace_mem_id); + + /* Set the solution */ + for (int k = 0 ; k < mesh->np ; k++) { + ppt = &mesh->point[k + 1]; + if (!MG_VOK(ppt)) continue; + for (int j = 0 ; j < sol->size ; j++) { + sol->m[1 + k * sol->size + j] = sol_buf[k * sol->size + j]; + } + } + + /* Print the solution stats */ + if ( imprim > PMMG_VERB_STEPS ) { + if ( sol->size == 1 ) + fprintf(stdout," NUMBER OF SCALAR VALUES %8d\n",sol->np); + else if ( sol->size == 3 ) + fprintf(stdout," NUMBER OF VECTOR VALUES %8d\n",sol->np); + else + fprintf(stdout," NUMBER OF TENSOR VALUES %8d\n",sol->np); + } + + PMMG_DEL_MEM(mesh,sol_buf,double,"sol_buf"); + return 1; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which is written the solution. + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of number of local entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel loading. + * + * \return 0 if fail, 1 otherwise + * + * Load at hdf5 format a given solution structure defined at vertices. + * + */ +static int PMMG_loadMetric_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *offset) { + int np; + MMG5_pMesh mesh; + MMG5_pSol met; + hsize_t hnsg[2] = {0, 0}; + hid_t dset_id; + hid_t dspace_file_id; + + assert ( parmesh->ngrp == 1 ); + + /* Init mmg variables*/ + mesh = parmesh->listgrp[0].mesh; + met = parmesh->listgrp[0].met; + np = nentitiesl[PMMG_IO_Vertex]; + + /* Get the metric size */ + hid_t tmp = H5Gopen(grp_sols_id, "Metric", H5P_DEFAULT); + if (tmp < 0) return -1; + + dset_id = H5Dopen(tmp, "SolAtVertices", H5P_DEFAULT); + + if (dset_id < 0) { + H5Gclose(tmp); + return -1; + } + dspace_file_id = H5Dget_space(dset_id); + H5Sget_simple_extent_dims(dspace_file_id, hnsg, NULL); + + /* Set the metric size */ + if (hnsg[1] == 1) + PMMG_Set_metSize(parmesh, MMG5_Vertex, np, MMG5_Scalar); + else if (hnsg[1] == 3 && mesh->info.lag != -1) + PMMG_Set_metSize(parmesh, MMG5_Vertex, np, MMG5_Vector); + else if (hnsg[1] == 6) + PMMG_Set_metSize(parmesh, MMG5_Vertex, np, MMG5_Tensor); + else { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr, "\n ## Error: %s: Wrong metric size/type \n", __func__); + } + return 0; + } + + int ier = PMMG_loadSolAtVertices_hdf5(mesh,met,dspace_file_id,dset_id,dxpl_id, + np,offset,parmesh->info.imprim); + + H5Sclose(dspace_file_id); + H5Dclose(dset_id); + H5Gclose(tmp); + + return ier; +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which is written the solution. + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). 
+ * \param nentitiesl array of number of local entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel loading. + * + * \return 0 if fail, 1 otherwise + * + * Load at hdf5 format a given solution structure defined at vertices. + * + */ +static int PMMG_loadLs_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, hid_t dxpl_id, + hsize_t *nentitiesl, hsize_t *offset) { + int np; + MMG5_pMesh mesh; + MMG5_pSol ls; + hsize_t hnsg[2] = {0, 0}; + hid_t dset_id; + hid_t dspace_file_id; + + assert ( parmesh->ngrp == 1 ); + + /* Init mmg variables*/ + mesh = parmesh->listgrp[0].mesh; + ls = parmesh->listgrp[0].ls; + np = nentitiesl[PMMG_IO_Vertex]; + + /* Get the level-set size */ + hid_t tmp = H5Gopen(grp_sols_id, "Ls", H5P_DEFAULT); + if (tmp < 0) { + H5Gclose(tmp); + return -1; + } + + dset_id = H5Dopen(tmp, "SolAtVertices", H5P_DEFAULT); + if (dset_id < 0) return -1; + dspace_file_id = H5Dget_space(dset_id); + H5Sget_simple_extent_dims(dspace_file_id, hnsg, NULL); + + /* Set the level-set size */ + if (hnsg[1] == 1) + PMMG_Set_metSize(parmesh, MMG5_Vertex, np, MMG5_Scalar); + else { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr, "\n ## Error: %s: Wrong level-set size/type \n", __func__); + } + return 0; + } + + int ier = PMMG_loadSolAtVertices_hdf5(mesh,ls,dspace_file_id,dset_id,dxpl_id, + np,offset,parmesh->info.imprim); + H5Sclose(dspace_file_id); + H5Dclose(dset_id); + H5Gclose(tmp); + + return ier; +} + + +/** + * \param parmesh pointer toward the parmesh structure. + * \param grp_sols_id identifier of the HDF5 group in which is written the solution. + * \param dxpl_id identifier of the dataset transfer property list (MPI-IO). + * \param nentitiesl array of number of local entities. + * \param offset array of size PMMG_IO_ENTITIES_size containing the offset for parallel loading. + * + * \return 0 if fail and we want the calling process to fail too, -1 if fail but + * we want to let the calling process continue, 0 if fields are succesfully + * readed. + * + * Load solutions fields at hdf5 format. + * + */ +static int PMMG_loadAllSols_hdf5(PMMG_pParMesh parmesh, hid_t grp_sols_id, + hid_t dxpl_id, hsize_t *nentitiesl, + hsize_t *offset) { + + /* Get the metric size */ + hid_t tmp = H5Gopen(grp_sols_id, "Fields", H5P_DEFAULT); + if ( tmp < 0) return -1; + + /* Input mesh has solution fields: hdf5 input is not yet implemented */ + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr, "\n ## Error: %s: Solution fields input not yet" + " implemented at hdf5 format.\n Ignored.\n", __func__); + } + + return -1; +} +#endif + + +int PMMG_loadMesh_hdf5(PMMG_pParMesh parmesh, const char *filename) { + return PMMG_loadMesh_hdf5_i(parmesh,parmesh->info.io_entities,filename); +} + +/** + * \param parmesh pointer toward the parmesh structure. + * \param load_entities array of 0s and 1s of size \ref PMMG_IO_ENTITIES_size + * to tell which entities to load and which not to. This array must be setted + * using the \ref PMMG_Set_defaultIOEntities and \ref PMMG_Set_IOEntities functions + * \param filename name of the HDF5 file. + * \return 0 if failed, 1 otherwise. + * + * Load the mesh data, the metric, and all the solutions from an HDF5 file in + * a distributed parmesh. + * + * \remark For internal use only + */ +int PMMG_loadMesh_hdf5_i(PMMG_pParMesh parmesh, int *load_entities, const char *filename) { + +#ifndef USE_HDF5 + + fprintf(stderr," ** HDF5 library not found. 
Unavailable file format.\n"); + return -1; + +#else + + int ier = 1; + int nullf = 0; /* Flag to remember if load_entities was NULL or not */ + hsize_t *nentities, *nentitiesl, *nentitiesg; + hsize_t *offset; + int npartitions; + hid_t file_id, grp_mesh_id, grp_part_id, grp_entities_id, grp_sols_id; /* Objects */ + hid_t fapl_id, dxpl_id; /* Property lists */ + MPI_Info info = MPI_INFO_NULL; + MPI_Comm read_comm; + int rank, nprocs, mpi_color; + mytime ctim[TIMEMAX]; + int8_t tim; + char stim[32]; + + /* Check arguments */ + if (parmesh->ngrp != 1) { + fprintf(stderr," ## Error: %s: you must have exactly 1 group in your parmesh.\n", + __func__); + return 0; + } + if (!filename || !*filename) { + fprintf(stderr," ## Error: %s: no HDF5 file name provided.\n", + __func__); + return 0; + } + + /* Check the load_entities argument */ + if (load_entities == NULL) { + nullf = 1; + PMMG_MALLOC(parmesh, load_entities, PMMG_IO_ENTITIES_size, int, "load_entities", ier = 0); + PMMG_Set_defaultIOEntities_i(load_entities); + } + if (!load_entities[PMMG_IO_Vertex] || !load_entities[PMMG_IO_Tetra]) { + fprintf(stderr, "\n ## Error: %s: load_entities: you must at least load the vertices and the tetra.\n", + __func__); + return 0; + } + + tminit(ctim, TIMEMAX); + chrono(ON, &ctim[0]); + + /* Set all pointers to NULL */ + nentities = nentitiesl = nentitiesg = offset = NULL; + + /* Set MPI variables */ + nprocs = parmesh->nprocs; + rank = parmesh->myrank; + + /* Shut HDF5 error stack */ + H5Eset_auto(H5E_DEFAULT, NULL, NULL); + + /** Open the file a first time to read npartin */ + + /* Create the property lists */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(fapl_id, parmesh->comm, info); /* Parallel access to the file */ + H5Pset_all_coll_metadata_ops(fapl_id, 1); /* Collective metadata read */ + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE); /* Collective dataset xfer operations */ + + /* Open the HDF5 file */ + HDF_CHECK( file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id), + fprintf(stderr,"\n ## Error: %s: Rank %d could not open the hdf5 file %s.\n", + __func__, rank, filename); + goto free_and_return ); + + /* Load the header (version, dimension, number of partitions and API mode) */ + ier = PMMG_loadHeader_hdf5(parmesh, file_id); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm), + goto free_and_return ); + + if (ier == 0) { + if (rank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Wrong mesh attributes in hdf5 file %s.\n", + __func__, filename); + } + goto free_and_return; + } + + /* Close the file and create a new communicator if there are less partitions + than MPI processes */ + H5Fclose(file_id); + H5Pclose(fapl_id); + + npartitions = parmesh->info.npartin; + + /* Reading more partitions than there are procs available is not supported yet */ + if (npartitions > nprocs) { + if (rank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Can't read %d partitions with %d procs yet.\n", + __func__, npartitions, nprocs); + } + return 0; + } + + /* Set the new communicator containing the procs reading the mesh */ + mpi_color = (rank < npartitions) ? 
1 : 0; + + MPI_CHECK( MPI_Comm_split(parmesh->comm, mpi_color, rank, &read_comm), + goto free_and_return ); + + /* Set MPI error handling */ + MPI_CHECK( MPI_Comm_set_errhandler(read_comm, MPI_ERRORS_RETURN), + goto free_and_return ); + + parmesh->info.read_comm = read_comm; + + /** Open the file a second (and final) time to actually read the mesh */ + + /* Set the file access property list with the new communicator */ + fapl_id = H5Pcreate(H5P_FILE_ACCESS); + H5Pset_fapl_mpio(fapl_id, read_comm, info); /* Parallel access to the file */ + H5Pset_all_coll_metadata_ops(fapl_id, 1); /* Collective metadata read */ + + /* Reopen the file with the new communicator */ + HDF_CHECK( file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id), + fprintf(stderr,"\n ## Error: %s: Rank %d could not open the hdf5 file %s.\n", + __func__, rank, filename); + goto free_and_return ); + + if (parmesh->info.imprim > PMMG_VERB_VERSION) { + fprintf(stdout, "\n %%%% %s OPENED \n", filename); + } + + /* Open the mesh group */ + HDF_CHECK( grp_mesh_id = H5Gopen(file_id, "Mesh", H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Rank %d could not open the /Mesh group in file %s.\n", + __func__, rank, filename); + goto free_and_return ); + + /** Load the partitioning information*/ + tim = 1; + chrono(ON, &ctim[tim]); + + /* Open the partitionning group */ + HDF_CHECK( grp_part_id = H5Gopen(grp_mesh_id, "Partitioning", H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Rank %d could not open the /Mesh/Partitioning group in file %s.\n", + __func__, rank, filename); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + /* Load the old partitioning of the mesh */ + PMMG_CALLOC(parmesh, nentities, PMMG_IO_ENTITIES_size * nprocs, hsize_t, "nentities", + goto free_and_return ); + PMMG_CALLOC(parmesh, nentitiesl, PMMG_IO_ENTITIES_size, hsize_t, "nentitiesl", + goto free_and_return ); + PMMG_CALLOC(parmesh, nentitiesg, PMMG_IO_ENTITIES_size, hsize_t, "nentitiesg", + goto free_and_return ); + + ier = PMMG_loadPartitioning_hdf5(parmesh, grp_part_id, dxpl_id, nentities, nentitiesl, nentitiesg); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, read_comm), + H5Gclose(grp_part_id); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + if (ier == 0) { + if (rank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not read mesh partitioning.\n", __func__); + } + H5Gclose(grp_part_id); + H5Gclose(grp_mesh_id); + goto free_and_return; + } + + H5Gclose(grp_part_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Read mesh partitioning. %s\n",stim); + } + + /** Compute the offset for parallel reading */ + tim = 2; + chrono(ON, &ctim[tim]); + + PMMG_CALLOC(parmesh, offset, 2 * PMMG_IO_ENTITIES_size, hsize_t, "offset", + goto free_and_return ); + + ier = PMMG_computeHDFoffset(parmesh, nentities, offset); + + /* We do not need the number of entities anymore */ + PMMG_DEL_MEM(parmesh, nentities, hsize_t, "nentities"); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Compute HDF5 read offsets. 
%s\n",stim); + } + + /** Load the mesh entities */ + tim = 3; + chrono(ON, &ctim[tim]); + + /* Each proc reads the part of the mesh that is assigned to him */ + HDF_CHECK( grp_entities_id = H5Gopen(grp_mesh_id, "MeshEntities", H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Rank %d could not open the /Mesh/MeshEntities group in file %s.\n", + __func__, parmesh->myrank, filename); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + ier = PMMG_loadMeshEntities_hdf5(parmesh, grp_entities_id, dxpl_id, nentitiesl, nentitiesg, offset, load_entities); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, read_comm), + H5Gclose(grp_entities_id); + H5Gclose(grp_mesh_id); + goto free_and_return ); + + if (!ier) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not read the mesh entities.\n", __func__); + } + H5Gclose(grp_entities_id); + H5Gclose(grp_mesh_id); + goto free_and_return; + } + + H5Gclose(grp_entities_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Read mesh entites. %s\n",stim); + } + + /* Close the mesh group */ + H5Gclose(grp_mesh_id); + + /* Deallocate the load_entities array if it was allocated in this function */ + if (nullf) PMMG_DEL_MEM(parmesh, load_entities, int, "load_entities"); + + /** Load the metric and the solutions */ + tim = 4; + chrono(ON, &ctim[tim]); + + /* Each proc reads the part of the solutions/metric that is assigned to him */ + HDF_CHECK( grp_sols_id = H5Gopen(file_id, "Solutions", H5P_DEFAULT), + fprintf(stderr,"\n ## Error: %s: Could not open the /Solutions group in file %s.\n", + __func__, filename); + goto free_and_return ); + + ier = PMMG_loadMetric_hdf5(parmesh, grp_sols_id, dxpl_id, nentitiesl, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, read_comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if ( ier == -1 ) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr," ** METRIC NOT FOUND. USE DEFAULT METRIC.\n"); + } + } + if ( ier == 0 ) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not load the metric.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + ier = PMMG_loadLs_hdf5(parmesh, grp_sols_id, dxpl_id, nentitiesl, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, read_comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if ( ier == 0 ) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not load the metric.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + + ier = PMMG_loadAllSols_hdf5(parmesh, grp_sols_id, dxpl_id, nentitiesl, offset); + + MPI_CHECK( MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MIN, read_comm), + H5Gclose(grp_sols_id); + goto free_and_return ); + + if ( ier==0 ) { + if (parmesh->myrank == parmesh->info.root) { + fprintf(stderr,"\n ## Error: %s: Could not load the solutions.\n",__func__); + } + H5Gclose(grp_sols_id); + goto free_and_return; + } + + H5Gclose(grp_sols_id); + + chrono(OFF, &ctim[tim]); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout," -- Read metric and solutions. 
%s\n",stim); + } + + /*------------------------- RELEASE ALL HDF5 IDs AND MEMORY -------------------------*/ + + H5Fclose(file_id); + H5Pclose(fapl_id); + H5Pclose(dxpl_id); + PMMG_DEL_MEM(parmesh, nentitiesl, hsize_t, "nentitiesl"); + PMMG_DEL_MEM(parmesh, nentitiesg, hsize_t, "nentitiesg"); + PMMG_DEL_MEM(parmesh, offset, hsize_t, "offset"); + + /* Very ugly : if the rank is above the number of partitions of the input mesh, + allocate an internal communicator of opposite type of API_mode. This is necessary + because all those processes wont enter the PMMG_preprocessMesh_distributed function. + Also set ngrp to 0 for loadBalancing to work properly. */ + if ( rank >= parmesh->info.npartin ) { + parmesh->ngrp = 0; + if (parmesh->info.API_mode == PMMG_APIDISTRIB_faces) + PMMG_CALLOC(parmesh, parmesh->int_node_comm, 1, PMMG_Int_comm, "int_node_comm", goto free_and_return); + else + PMMG_CALLOC(parmesh, parmesh->int_face_comm, 1, PMMG_Int_comm, "int_face_comm", goto free_and_return); + } + + chrono(OFF,&ctim[0]); + printim(ctim[0].gdif,stim); + if ( parmesh->info.imprim >= PMMG_VERB_STEPS ) { + fprintf(stdout,"\n LOAD_PARMESH_HDF5: ELAPSED TIME %s\n",stim); + } + + return 1; + + free_and_return: + H5Fclose(file_id); + H5Pclose(fapl_id); + H5Pclose(dxpl_id); + PMMG_DEL_MEM(parmesh, nentities, hsize_t, "nentities"); + PMMG_DEL_MEM(parmesh, nentitiesg, hsize_t, "nentitiesg"); + PMMG_DEL_MEM(parmesh, nentitiesl, hsize_t, "nentitiesl"); + PMMG_DEL_MEM(parmesh, offset, hsize_t, "offset"); + return 0; + +#endif + +} diff --git a/src/inoutcpp_pmmg.cpp b/src/inoutcpp_pmmg.cpp index fd736d45..4ac47208 100644 --- a/src/inoutcpp_pmmg.cpp +++ b/src/inoutcpp_pmmg.cpp @@ -32,9 +32,8 @@ * */ -#include "vtkparser.hpp" - #ifdef USE_VTK +#include "vtkparser.hpp" #include #include #endif @@ -43,7 +42,7 @@ int PMMG_loadVtuMesh_centralized(PMMG_pParMesh parmesh,const char *filename) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met, sol; int ier; if ( parmesh->myrank!=parmesh->info.root ) { @@ -52,13 +51,13 @@ int PMMG_loadVtuMesh_centralized(PMMG_pParMesh parmesh,const char *filename) { #ifndef USE_VTK - fprintf(stderr," ** VTK library not founded. Unavailable file format.\n"); + fprintf(stderr," ** VTK library not found. Unavailable file format.\n"); return -1; #else if ( parmesh->ngrp != 1 ) { - fprintf(stderr," ## Error: %s: you must have exactly 1 group in you parmesh.", + fprintf(stderr," ## Error: %s: you must have exactly 1 group in your parmesh.", __func__); return 0; } @@ -69,7 +68,7 @@ int PMMG_loadVtuMesh_centralized(PMMG_pParMesh parmesh,const char *filename) { assert ( mesh->info.imprim == parmesh->info.mmg_imprim ); mesh->info.imprim = MG_MAX ( parmesh->info.imprim, mesh->info.imprim ); - ier = MMG3D_loadVtuMesh(mesh,met,filename); + ier = MMG3D_loadVtuMesh(mesh,met,sol,filename); /* Restore the mmg verbosity to its initial value */ mesh->info.imprim = parmesh->info.mmg_imprim; @@ -85,31 +84,120 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename) { #ifndef USE_VTK if ( parmesh->myrank == parmesh->info.root ) { - fprintf(stderr," ** VTK library not founded. Unavailable file format.\n"); + fprintf(stderr," ** VTK library not found. 
Unavailable file format.\n"); } return -1; #else - char* mdata=NULL; // master file name + char* mdata=NULL; // master file name + char* sdata=NULL; // secondary file name + int i; MMG5_SAFE_CALLOC(mdata,strlen(filename)+6,char,return 0); + MMG5_SAFE_CALLOC(sdata,strlen(filename)+6,char,return 0); strcpy(mdata,filename); char *ptr = MMG5_Get_filenameExt(mdata); *ptr = '\0'; // get basename - sprintf( mdata, "%s.pvtu",mdata); + + // If the output *.pvtu filename has dots "." in the basename, + // replace them with dashes "-". + // Why? In VTK function SetFileName(filename), the first dot "." in the + // filename is interpreted as the extension start. So, whatever the + // user specifies after the first dot "." will be ignored by VTK. To overcome + // this, dots are replaced by dashes. + for(i=0;mdata[i]!='\0';i++) { + if(mdata[i]=='.') { + mdata[i] = '-'; + } + } + + snprintf( sdata,strlen(mdata)+6, "%s.pvtu",mdata); MMG5_pMesh mesh = parmesh->listgrp[0].mesh; MMG5_pSol met = parmesh->listgrp[0].met; // Warning : does it works with a communicator /= to MPI_COMM_WORLD? - vtkMPIController *vtkController = vtkMPIController::New(); + vtkSmartPointer vtkController = vtkSmartPointer::New(); + vtkController->Initialize(); + vtkMultiProcessController::SetGlobalController(vtkController); + + return MMG5_saveVtkMesh_i + (mesh,&met,sdata,1,0,parmesh->nprocs,parmesh->myrank,parmesh->info.root); + + MMG5_SAFE_FREE(mdata); + MMG5_SAFE_FREE(sdata); + +#endif + return 1; +} + +int PMMG_savePvtuMesh_and_allData(PMMG_pParMesh parmesh, const char * filename) { + + MMG5_pMesh mesh; + MMG5_pSol allSol[2]; + MMG5_pSol met = NULL; + MMG5_pSol field = NULL; + int metricData = 0; + +#ifndef USE_VTK + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr," ** VTK library not found. Unavailable file format.\n"); + } + return -1; + +#else + char* mdata=NULL; // master file name + char* sdata=NULL; // secondary file name + int i; + MMG5_SAFE_CALLOC(mdata,strlen(filename)+6,char,return 0); + MMG5_SAFE_CALLOC(sdata,strlen(filename)+6,char,return 0); + + strcpy(mdata,filename); + char *ptr = MMG5_Get_filenameExt(mdata); + *ptr = '\0'; // get basename + + // If the output *.pvtu filename has dots "." in the basename, + // replace them with dashes "-". + // Why? In VTK function SetFileName(filename), the first dot "." in the + // filename is interpreted as the extension start. So, whatever the + // user specifies after the first dot "." will be ignored by VTK. To overcome + // this, dots are replaced by dashes. + for(i=0;mdata[i]!='\0';i++) { + if(mdata[i]=='.') { + mdata[i] = '-'; + } + } + + snprintf( sdata,strlen(mdata)+6, "%s.pvtu",mdata); + + mesh = parmesh->listgrp[0].mesh; + // Add met at the end of field to be able to save everything in the pvtu file + if (parmesh->listgrp[0].met) { + met = parmesh->listgrp[0].met; + metricData = 1; + allSol[0] = met; + } + else { + allSol[0] = NULL; + } + if (parmesh->listgrp[0].field) { + field = parmesh->listgrp[0].field; + allSol[1] = field; + } + else { + allSol[1] = NULL; + } + + // Warning : does it works with a communicator /= to MPI_COMM_WORLD? 
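The loop above replaces every remaining dot of the basename by a dash because VTK's SetFileName() interprets the first '.' as the start of the extension, so anything the user writes after it would otherwise be dropped. A standalone sketch of the same sanitization follows; pvtu_output_name is a hypothetical helper, and ParMMG locates the extension with MMG5_Get_filenameExt(), approximated here with strrchr().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Strip the extension, replace any remaining '.' by '-', append ".pvtu". */
static char *pvtu_output_name(const char *filename) {
  size_t len = strlen(filename);
  char *base = malloc(len + 6);
  char *out  = malloc(len + 6);
  if (!base || !out) { free(base); free(out); return NULL; }

  strcpy(base, filename);
  char *ext = strrchr(base, '.');              /* extension start, if any */
  if (ext) *ext = '\0';

  for (char *p = base; *p; ++p)                /* VTK treats the first '.'   */
    if (*p == '.') *p = '-';                   /* as the extension start     */

  snprintf(out, len + 6, "%s.pvtu", base);
  free(base);
  return out;                                  /* e.g. "out.1.mesh" -> "out-1.pvtu" */
}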
+ vtkSmartPointer vtkController = vtkSmartPointer::New(); vtkController->Initialize(); vtkMultiProcessController::SetGlobalController(vtkController); return MMG5_saveVtkMesh_i - (mesh,&met,mdata,1,1,parmesh->nprocs,parmesh->myrank,parmesh->info.root); + (mesh,allSol,sdata,metricData,0,parmesh->nprocs,parmesh->myrank,parmesh->info.root); MMG5_SAFE_FREE(mdata); + MMG5_SAFE_FREE(sdata); #endif return 1; diff --git a/src/interactionmap_pmmg.c b/src/interactionmap_pmmg.c index 670d359e..638e2068 100644 --- a/src/interactionmap_pmmg.c +++ b/src/interactionmap_pmmg.c @@ -171,7 +171,13 @@ int PMMG_interactionMap(PMMG_pParMesh parmesh,int **interactions,int **interacti receivers = NULL; PMMG_MALLOC ( parmesh,receivers, nrecv_max*nprocs,int,"receivers" ,ier=0 ); PMMG_REALLOC ( parmesh,interact_list,nrecv_max,nneighs_max,int,"interact_list" ,ier=0 ); - memset ( interact_list,0, nrecv_max * sizeof(int) ); + + if ( nrecv_max ) { + /* calling memset on a non-allocatted array, leads to + * have a NULL pointer that is evaluated to True inside a if + * test */ + memset ( interact_list,0, nrecv_max * sizeof(int) ); + } idx = 0; for ( k=0; ksize; +#warning Luca: when surface adapt will be ready, distinguish BDY from PARBDY /** Freezed points: Copy the data stored in solution structure */ if ( (!oldMesh->info.renum) || !permNodGlob ) { @@ -574,6 +575,7 @@ int PMMG_interpMetricsAndFields_mesh( MMG5_pMesh mesh,MMG5_pMesh oldMesh, } } +#warning Luca: make this part consistent with metrics interpolation /** Field interpolation */ if ( mesh->nsols ) { for ( j=0; jnsols; ++j ) { diff --git a/src/libparmmg.c b/src/libparmmg.c index e9763633..f7ce2c4f 100644 --- a/src/libparmmg.c +++ b/src/libparmmg.c @@ -55,7 +55,7 @@ extern int (*PMMG_interp2bar)(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol oldMet,MMG int PMMG_check_inputData(PMMG_pParMesh parmesh) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; int k; if ( parmesh->info.imprim > PMMG_VERB_VERSION ) @@ -70,15 +70,12 @@ int PMMG_check_inputData(PMMG_pParMesh parmesh) fprintf(stderr, " ## Error: lagrangian mode unavailable (MMG3D_IPARAM_lag):\n"); return 0; - } else if ( mesh->info.iso ) { - fprintf(stderr," ## Error: level-set discretisation unavailable" - " (MMG3D_IPARAM_iso):\n"); - return 0; } else if ( mesh->info.optimLES && met->size==6 ) { fprintf(stdout," ## Error: strong mesh optimization for LES methods" " unavailable (MMG3D_IPARAM_optimLES) with an anisotropic metric.\n"); return 0; } + /* specific meshing */ if ( met->np ) { if ( mesh->info.optim ) { @@ -128,10 +125,17 @@ int PMMG_check_inputData(PMMG_pParMesh parmesh) int PMMG_preprocessMesh( PMMG_pParMesh parmesh ) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; + int8_t tim; + char stim[32]; + mytime ctim[TIMEMAX]; + + /* Chrono initialization */ + tminit(ctim,TIMEMAX); mesh = parmesh->listgrp[0].mesh; met = parmesh->listgrp[0].met; + ls = parmesh->listgrp[0].ls; assert ( ( mesh != NULL ) && ( met != NULL ) && "Preprocessing empty args"); @@ -139,24 +143,19 @@ int PMMG_preprocessMesh( PMMG_pParMesh parmesh ) MMG3D_Set_commonFunc(); /** Mesh scaling and quality histogram */ - if ( !MMG5_scaleMesh(mesh,met,NULL) ) { + if ( !MMG5_scaleMesh(mesh,met,ls) ) { return PMMG_LOWFAILURE; } - /* Don't reset the hmin value computed when unscaling the mesh */ - if ( !parmesh->info.sethmin ) { - mesh->info.sethmin = 1; - } - /* Don't reset the hmax value computed when unscaling the mesh */ - if ( !parmesh->info.sethmax ) { - mesh->info.sethmax = 1; - } + + /* Set mmg3d function pointers here to assign 
dosol */ + MMG3D_setfunc(mesh,met); + PMMG_setfunc(parmesh); /** specific meshing */ if ( mesh->info.optim && !met->np ) { if ( !MMG3D_doSol(mesh,met) ) { return PMMG_STRONGFAILURE; } - MMG5_solTruncatureForOptim(mesh,met); } if ( mesh->info.hsiz > 0. ) { @@ -165,28 +164,71 @@ int PMMG_preprocessMesh( PMMG_pParMesh parmesh ) } } - MMG3D_setfunc(mesh,met); - PMMG_setfunc(parmesh); + /* Don't reset the hmin value computed when unscaling the mesh */ + if ( !parmesh->info.sethmin ) { + mesh->info.sethmin = 1; + } + /* Don't reset the hmax value computed when unscaling the mesh */ + if ( !parmesh->info.sethmax ) { + mesh->info.sethmax = 1; + } if ( !MMG3D_tetraQual( mesh, met, 0 ) ) { return PMMG_STRONGFAILURE; } - if ( !PMMG_qualhisto(parmesh,PMMG_INQUA,1) ) { + if ( !PMMG_qualhisto(parmesh,PMMG_INQUA,1,parmesh->info.read_comm) ) { return PMMG_STRONGFAILURE; } + if ( !parmesh->info.pure_partitioning ) { + /* Discretization of the isovalue */ + if (mesh->info.iso) { + tim = 1; + chrono(ON,&(ctim[tim])); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout,"\n -- PHASE 1a: ISOVALUE DISCRETIZATION \n"); + } + if ( !MMG3D_mmg3d2(mesh,ls,met) ) { + return PMMG_STRONGFAILURE; + } + /* Update mesh->npi and mesh->nei to be equal to mesh->np and mesh->ne, respectively */ + mesh->npi = mesh->np; + mesh->nei = mesh->ne; + + chrono(OFF,&(ctim[tim])); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout," -- PHASE 1a COMPLETED %s\n",stim); + } + } + } + /** Mesh analysis */ if ( !MMG3D_analys(mesh) ) { return PMMG_STRONGFAILURE; } + if ( !parmesh->info.pure_partitioning ) { + + /* Check if the LS has led to a non-manifold topology */ + if ( mesh->info.iso && !MMG3D_chkmani(mesh) ) { + fprintf(stderr,"\n ## LS discretization: non-manifold initial topology. Exit program.\n"); + return PMMG_STRONGFAILURE; + } + else { + if ( parmesh->info.imprim > PMMG_VERB_VERSION && mesh->info.iso ) { + fprintf(stdout," LS discretization OK: no non-manifold topology.\n"); + } + } + } + if ( parmesh->info.imprim0 > PMMG_VERB_ITWAVES && (!mesh->info.iso) && met->m ) { - PMMG_prilen(parmesh,0,1); + PMMG_prilen(parmesh,0,1,parmesh->info.read_comm); } /** Mesh unscaling */ - if ( !MMG5_unscaleMesh(mesh,met,NULL) ) { + if ( !MMG5_unscaleMesh(mesh,met,ls) ) { return PMMG_STRONGFAILURE; } @@ -206,51 +248,74 @@ int PMMG_preprocessMesh( PMMG_pParMesh parmesh ) int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; + int8_t tim; + char stim[32]; + mytime ctim[TIMEMAX]; + MMG5_int *permtria; + int ier = PMMG_SUCCESS; + + /* Chrono initialization */ + tminit(ctim,TIMEMAX); mesh = parmesh->listgrp[0].mesh; met = parmesh->listgrp[0].met; + ls = parmesh->listgrp[0].ls; assert ( ( mesh != NULL ) && ( met != NULL ) && "Preprocessing empty args"); + // if (mesh->info.iso) { + // // Just print a warning saying that the feature is not implemented and + // // deallocate the isovalue structure (as the ls is not interpolated during the + // // remeshing step, the continuous integration tests will fail otherwise) + // if ( parmesh->myrank == parmesh->info.root) { + // fprintf(stdout,"Isovalue discretization is under development.\n"); + // } + // PMMG_DEL_MEM(mesh,parmesh->listgrp[0].ls->m,double,"ls structure"); + // parmesh->listgrp[0].ls->np = 0; + // } + + /** Check distributed API mode. 
Interface faces OR nodes need to be set by the - * user through the API interface at this point, meening that the + * user through the API interface at this point, meaning that the * corresponding external comm is set to the correct size, and filled with * local entity indices (for node comms, also itosend and itorecv arrays are * filled with local/global node IDs). */ - if( parmesh->nprocs >1 ) { + if ( parmesh->nprocs > 1 && parmesh->info.npartin > 1 ) { if( parmesh->info.API_mode == PMMG_APIDISTRIB_faces && !parmesh->next_face_comm ) { fprintf(stderr," ## Error: %s: parallel interface faces must be set through the API interface\n",__func__); - return PMMG_STRONGFAILURE; + ier = PMMG_STRONGFAILURE; } else if( parmesh->info.API_mode == PMMG_APIDISTRIB_nodes && !parmesh->next_node_comm ) { fprintf(stderr," ## Error: %s: parallel interface nodes must be set through the API interface\n",__func__); - return PMMG_STRONGFAILURE; + ier = PMMG_STRONGFAILURE; } } + /* Next functions involve MPI communications so we need to check now + that every proc suceeded in order to avoid deadlock */ + MPI_Allreduce(MPI_IN_PLACE, &ier, 1, MPI_INT, MPI_MAX, parmesh->info.read_comm); + + if (ier == PMMG_STRONGFAILURE) return ier; + /** Function setters (must be assigned before quality computation) */ MMG3D_Set_commonFunc(); /** Mesh scaling and quality histogram */ - if ( !MMG5_scaleMesh(mesh,met,NULL) ) { + if ( !MMG5_scaleMesh(mesh,met,ls) ) { return PMMG_LOWFAILURE; } - /* Don't reset the hmin value computed when unscaling the mesh */ - if ( !parmesh->info.sethmin ) { - mesh->info.sethmin = 1; - } - /* Don't reset the hmax value computed when unscaling the mesh */ - if ( !parmesh->info.sethmax ) { - mesh->info.sethmax = 1; - } - /** specific meshing */ + /* Set mmg3d function pointers here to assign dosol */ + MMG3D_setfunc(mesh,met); + PMMG_setfunc(parmesh); + + /** Specific meshing */ if ( mesh->info.optim && !met->np ) { + // Warning: doSol would need a clean // implementation along interfaces if ( !MMG3D_doSol(mesh,met) ) { return PMMG_STRONGFAILURE; } - MMG5_solTruncatureForOptim(mesh,met); } if ( mesh->info.hsiz > 0. ) { @@ -259,36 +324,56 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) } } - MMG3D_setfunc(mesh,met); - PMMG_setfunc(parmesh); +#warning hmin/hmax computed on each proc while we want a global value from the global bounding box and/or the global metric field... 
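The #warning above notes that hmin/hmax are currently derived from each rank's local mesh. One possible fix, sketched below, is to reduce the per-rank bounding boxes into a global one before deriving the sizes; the 0.01 and 0.5 factors are illustrative only and are not the values used by Mmg/ParMMG.

#include <mpi.h>
#include <math.h>
#include <float.h>

/* Sketch: build a global bounding box from per-rank coordinates (3 doubles
 * per point) and derive hmin/hmax from its diagonal. */
static void global_hmin_hmax(MPI_Comm comm, const double *coord, int np,
                             double *hmin, double *hmax) {
  double lo[3] = {  DBL_MAX,  DBL_MAX,  DBL_MAX };
  double hi[3] = { -DBL_MAX, -DBL_MAX, -DBL_MAX };

  for (int i = 0; i < np; i++)
    for (int d = 0; d < 3; d++) {
      double c = coord[3*i + d];
      if (c < lo[d]) lo[d] = c;
      if (c > hi[d]) hi[d] = c;
    }

  MPI_Allreduce(MPI_IN_PLACE, lo, 3, MPI_DOUBLE, MPI_MIN, comm);
  MPI_Allreduce(MPI_IN_PLACE, hi, 3, MPI_DOUBLE, MPI_MAX, comm);

  double diag = sqrt((hi[0]-lo[0])*(hi[0]-lo[0]) +
                     (hi[1]-lo[1])*(hi[1]-lo[1]) +
                     (hi[2]-lo[2])*(hi[2]-lo[2]));
  *hmin = 0.01 * diag;   /* illustrative factors, not Mmg's defaults */
  *hmax = 0.5  * diag;
}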
+ /* Don't reset the hmin value computed when unscaling the mesh */ + if ( !parmesh->info.sethmin ) { + mesh->info.sethmin = 1; + } + /* Don't reset the hmax value computed when unscaling the mesh */ + if ( !parmesh->info.sethmax ) { + mesh->info.sethmax = 1; + } + /* Note: Needed before ls discretization to include tetras with poor qualities + inside one or the other part of the level-set */ if ( !MMG3D_tetraQual( mesh, met, 0 ) ) { return PMMG_STRONGFAILURE; } if ( parmesh->info.imprim > PMMG_VERB_ITWAVES && (!mesh->info.iso) && met->m ) { +#warning Luca: check this function MMG3D_prilen(mesh,met,0); } /** Mesh unscaling */ - if ( !MMG5_unscaleMesh(mesh,met,NULL) ) { + if ( !MMG5_unscaleMesh(mesh,met,ls) ) { return PMMG_STRONGFAILURE; } - /** Mesh analysis I: check triangles, create xtetras */ - if ( !PMMG_analys_tria(parmesh,mesh) ) { - return PMMG_STRONGFAILURE; + /** Mesh analysis I: Needed to create communicators + * Check triangles, create xtetras */ + PMMG_CALLOC(parmesh,permtria,mesh->nt+1,MMG5_int,"permtria",return 0); + MMG5_int k; + for (k=0;k<=mesh->nt;k++) { + permtria[k] = k; + } + if ( parmesh->myrank < parmesh->info.npartin ) { + if ( !PMMG_analys_tria(parmesh,mesh,permtria) ) { + return PMMG_STRONGFAILURE; + } } - /* For both API modes, build communicators indices and set xtetra as PARBDY */ switch( parmesh->info.API_mode ) { case PMMG_APIDISTRIB_faces : /* 1) Set face communicators indexing */ - if( !PMMG_build_faceCommIndex( parmesh ) ) return 0; + if( !PMMG_build_faceCommIndex( parmesh, permtria ) ) return 0; + /* Convert tria index into iel face index (it needs a valid cc field in * each tria), and tag xtetra face as PARBDY before the tag is transmitted * to edges and nodes */ - PMMG_tria2elmFace_coords( parmesh ); + if ( parmesh->myrank < parmesh->info.npartin ) { + PMMG_tria2elmFace_coords( parmesh ); + } /* 2) Build node communicators from face ones (here because the mesh needs * to be unscaled) */ PMMG_parmesh_ext_comm_free( parmesh,parmesh->ext_node_comm,parmesh->next_node_comm); @@ -296,8 +381,12 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) parmesh->next_node_comm = 0; PMMG_DEL_MEM(parmesh, parmesh->int_node_comm,PMMG_Int_comm,"int node comm"); PMMG_CALLOC(parmesh,parmesh->int_node_comm,1,PMMG_Int_comm,"int node comm",return 0); - if ( !PMMG_build_nodeCommFromFaces(parmesh) ) return PMMG_STRONGFAILURE; + if ( !PMMG_build_nodeCommFromFaces(parmesh,parmesh->info.read_comm) ) { + return PMMG_STRONGFAILURE; + } + break; + case PMMG_APIDISTRIB_nodes : /* 1) Set node communicators indexing */ if( !PMMG_build_nodeCommIndex( parmesh ) ) return 0; @@ -306,16 +395,54 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) PMMG_DEL_MEM(parmesh, parmesh->ext_face_comm,PMMG_Ext_comm,"ext face comm"); parmesh->next_face_comm = 0; PMMG_DEL_MEM(parmesh, parmesh->int_face_comm,PMMG_Int_comm,"int face comm"); - if ( !PMMG_build_faceCommFromNodes(parmesh) ) return PMMG_STRONGFAILURE; + if ( !PMMG_build_faceCommFromNodes(parmesh,parmesh->info.read_comm) ) return PMMG_STRONGFAILURE; break; } + MMG5_SAFE_FREE( permtria ); - /** Mesh analysis II: geometrical analysis*/ - if ( !PMMG_analys(parmesh,mesh) ) { - return PMMG_STRONGFAILURE; + if ( !parmesh->info.pure_partitioning ) { + + /** Discretization of the isovalue */ + if (mesh->info.iso) { + + /* Destroy adja and adjat */ + MMG5_DEL_MEM(mesh,mesh->adja); + MMG5_DEL_MEM(mesh,mesh->adjt); + + tim = 1; + chrono(ON,&(ctim[tim])); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout,"\n -- 
PHASE 1a: ISOVALUE DISCRETIZATION \n"); + fprintf(stdout," -- under development \n"); + } + if ( !PMMG_ls(parmesh) ) { + return PMMG_STRONGFAILURE; + } + + chrono(OFF,&(ctim[tim])); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout,"\n -- PHASE 1a COMPLETED %s\n",stim); + } + + /** Mesh analysis Ib : After LS discretization + * Check triangles, create xtetras */ + if ( parmesh->myrank < parmesh->info.npartin ) { + if ( !PMMG_analys_tria(parmesh,mesh,permtria) ) { + return PMMG_STRONGFAILURE; + } + } + } } - if ( !PMMG_qualhisto(parmesh,PMMG_INQUA,0) ) { + /** Mesh analysis II: Perform surface analysis */ + if ( parmesh->myrank < parmesh->info.npartin ) { + if ( !PMMG_analys(parmesh,mesh,parmesh->info.read_comm) ) { + return PMMG_STRONGFAILURE; + } + } + + if ( !PMMG_qualhisto(parmesh,PMMG_INQUA,0,parmesh->info.read_comm) ) { return PMMG_STRONGFAILURE; } @@ -323,9 +450,9 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) MMG5_DEL_MEM(mesh,mesh->tria); mesh->nt = 0; - assert ( PMMG_check_extFaceComm ( parmesh ) ); + assert ( PMMG_check_extFaceComm ( parmesh,parmesh->info.read_comm ) ); assert ( PMMG_check_intFaceComm ( parmesh ) ); - assert ( PMMG_check_extNodeComm ( parmesh ) ); + assert ( PMMG_check_extNodeComm ( parmesh,parmesh->info.read_comm ) ); assert ( PMMG_check_intNodeComm ( parmesh ) ); return PMMG_SUCCESS; @@ -333,7 +460,7 @@ int PMMG_preprocessMesh_distributed( PMMG_pParMesh parmesh ) int PMMG_distributeMesh_centralized_timers( PMMG_pParMesh parmesh,mytime *ctim ) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; int ier,iresult; int8_t tim; char stim[32]; @@ -374,7 +501,8 @@ int PMMG_distributeMesh_centralized_timers( PMMG_pParMesh parmesh,mytime *ctim ) mesh = parmesh->listgrp[0].mesh; met = parmesh->listgrp[0].met; - if ( (ier==PMMG_STRONGFAILURE) && MMG5_unscaleMesh( mesh, met, NULL ) ) { + ls = parmesh->listgrp[0].ls; + if ( (ier==PMMG_STRONGFAILURE) && MMG5_unscaleMesh( mesh, met, ls ) ) { ier = PMMG_LOWFAILURE; } @@ -461,7 +589,7 @@ int PMMG_bdryBuild ( PMMG_pParMesh parmesh ) { return 1; } -int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { +int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh,MPI_Comm comm ) { PMMG_pInt_comm int_face_comm; PMMG_pExt_comm ext_face_comm; PMMG_pGrp grp; @@ -599,7 +727,7 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { } MPI_CHECK( - MPI_Allgather( &nglob,1,MPI_INT, &nglobvec[1],1,MPI_INT,parmesh->comm ), + MPI_Allgather( &nglob,1,MPI_INT, &nglobvec[1],1,MPI_INT,comm ), ier = 1 ); if( ier ) { PMMG_DEL_MEM(parmesh,nglobvec,int,"nglobvec"); @@ -659,7 +787,7 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { /** Compute a second numbering offsets among procs and apply it */ nglobvec[0] = nglobvec[parmesh->nprocs]; MPI_CHECK( - MPI_Allgather( &nglob,1,MPI_INT, &nglobvec[1],1,MPI_INT,parmesh->comm ), + MPI_Allgather( &nglob,1,MPI_INT, &nglobvec[1],1,MPI_INT,comm ), ier = 1 ); if( ier ) { PMMG_DEL_MEM(parmesh,nglobvec,int,"nglobvec"); @@ -780,7 +908,7 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_COMMUNICATORS_REF_TAG, itorecv,nitem,MPI_INT,color,MPI_COMMUNICATORS_REF_TAG, - parmesh->comm,&status),return 0 ); + comm,&status),return 0 ); /* Store the info in intvalues */ for( i = 0; i < nitem; i++ ) { @@ -831,7 +959,6 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { nitem = ext_face_comm->nitem; color = ext_face_comm->color_out; - 
PMMG_CALLOC(parmesh,ext_face_comm->itosend,nitem,int,"itosend",ier = 1); if( ier ) { for( k = 0; k < icomm; k++ ) { PMMG_DEL_MEM(parmesh,parmesh->ext_face_comm[k].itosend,int,"itosend"); @@ -844,7 +971,6 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { } itosend = ext_face_comm->itosend; - PMMG_CALLOC(parmesh,ext_face_comm->itorecv,nitem,int,"itorecv",ier = 1); if( ier ) { for( k = 0; k < icomm; k++ ) { PMMG_DEL_MEM(parmesh,parmesh->ext_face_comm[k].itosend,int,"itosend"); @@ -866,7 +992,7 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_COMMUNICATORS_REF_TAG, itorecv,nitem,MPI_INT,color,MPI_COMMUNICATORS_REF_TAG, - parmesh->comm,&status),return 0 ); + comm,&status),return 0 ); /* Assert that the sent/received values are the same*/ for( i = 0; i < nitem; i++ ) { @@ -920,14 +1046,23 @@ int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh ) { return 1; } -int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ +/** + * \param parmesh pointer to the parmesh. + * \param comm pointer to the mpi communicator. + * + * Compute global numbering for vertices. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. + */ +int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh,MPI_Comm comm ){ PMMG_pGrp grp; MMG5_pMesh mesh; MMG5_pPoint ppt; PMMG_pInt_comm int_node_comm; PMMG_pExt_comm ext_node_comm; - MPI_Request request; - MPI_Status status; + MPI_Request *request; + MPI_Status *status; int *intvalues,*iproc2comm; int *itosend,*itorecv,src,dst,tag; int nitem,color; @@ -941,14 +1076,28 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ /* Allocate internal communicator */ int_node_comm = parmesh->int_node_comm; - PMMG_MALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues",return 0); + PMMG_MALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); intvalues = int_node_comm->intvalues; /** Step 0: Count nowned nodes */ + /* register heap arrays */ + size_t iptr,nptr = 2; + void** ptr_int[2]; + ptr_int[0] = (void*)&iproc2comm; + ptr_int[1] = (void*)&offsets; + /* nullify them to allow to always call free() on them */ + for( iptr = 0; iptr < nptr; iptr++ ) { + *ptr_int[iptr] = NULL; + } + request = NULL; + status = NULL; + itosend = itorecv = NULL; /* Array to reorder communicators */ - PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm",return 0); + PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) iproc2comm[iproc] = PMMG_UNSET; @@ -961,6 +1110,7 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ /* Mark nodes with the owner color (overwritten by higher-rank procs) */ for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) { + /* Travel the communicators in increasing order of neighbour rank */ icomm = iproc2comm[iproc]; if( icomm == PMMG_UNSET ) continue; ext_node_comm = &parmesh->ext_node_comm[icomm]; @@ -968,13 +1118,21 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ /* Mark nodes */ for( i = 0; i < ext_node_comm->nitem; i++ ) { idx = ext_node_comm->int_comm_index[i]; + /* This affectation is right because we ensured that we travel the + * communicators in increasing order. Note that intvalues is not + * initialized before this stage so it containes fake values. 
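The "register heap arrays" idiom used above keeps every allocation reachable through a small table of pointer slots that are nullified up front, so a single cleanup call (PMMG_destroy_int in ParMMG) is safe on every error path, even for arrays that were never allocated. A generic sketch of the same idiom, with free_all() as a hypothetical stand-in for the ParMMG helper:

#include <stdlib.h>

/* Free every registered array and reset its pointer. */
static void free_all(void **ptr[], size_t n) {
  for (size_t i = 0; i < n; i++) { free(*ptr[i]); *ptr[i] = NULL; }
}

static int example(int nprocs) {
  int *iproc2comm = NULL, *offsets = NULL;
  void **ptr[2] = { (void**)&iproc2comm, (void**)&offsets };

  iproc2comm = malloc(nprocs * sizeof(int));
  if (!iproc2comm) { free_all(ptr, 2); return 0; }

  offsets = calloc(nprocs + 1, sizeof(int));
  if (!offsets) { free_all(ptr, 2); return 0; }   /* iproc2comm freed too */

  /* ... use the arrays ... */

  free_all(ptr, 2);
  return 1;
}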
*/ intvalues[idx] = color; } } /* Store owner in the point flag */ + MMG5_int np_overlap = 0; for( ip = 1; ip <= mesh->np; ip++ ) { ppt = &mesh->point[ip]; + if (ppt->tag & MG_OVERLAP) { + ++np_overlap; + continue; + } ppt->flag = parmesh->myrank; } @@ -986,15 +1144,17 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ } /* Count owned nodes */ - nowned = mesh->np; + nowned = mesh->np - np_overlap; for( idx = 0; idx < int_node_comm->nitem; idx++ ) { if( intvalues[idx] != parmesh->myrank ) nowned--; } /* Compute offsets on each proc */ - PMMG_CALLOC(parmesh,offsets,parmesh->nprocs+1,int,"offsets",return 0); + PMMG_CALLOC(parmesh,offsets,parmesh->nprocs+1,int,"offsets", + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); MPI_Allgather( &nowned,1,MPI_INT, - &offsets[1],1,MPI_INT,parmesh->comm ); + &offsets[1],1,MPI_INT,comm ); for( i = 1; i <= parmesh->nprocs; i++ ) offsets[i] += offsets[i-1]; @@ -1008,13 +1168,13 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ counter = 0; for( ip = 1; ip <= mesh->np; ip++ ) { ppt = &mesh->point[ip]; + if (ppt->tag & MG_OVERLAP) continue; if( ppt->flag != parmesh->myrank ) continue; ppt->tmp = ++counter+offsets[parmesh->myrank]; assert(ppt->tmp); } assert( counter == nowned ); - /** Step 2: Communicate global numbering */ /* Store numbering in the internal communicator */ @@ -1026,13 +1186,36 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ } /* Send-recv */ + PMMG_MALLOC(parmesh,request,parmesh->nprocs,MPI_Request, + "mpi request array", + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + for ( i=0; inprocs; ++i ) { + request[i] = MPI_REQUEST_NULL; + } + + PMMG_MALLOC(parmesh,status,parmesh->nprocs,MPI_Status, + "mpi status array", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + + for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; color = ext_node_comm->color_out; nitem = ext_node_comm->nitem; - PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend",return 0); - PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv",return 0); + PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); itosend = ext_node_comm->itosend; itorecv = ext_node_comm->itorecv; @@ -1047,13 +1230,24 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ itosend[i] = intvalues[idx]; assert(itosend[i]); } - MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag,comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } if ( parmesh->myrank == dst ) { - MPI_CHECK( 
MPI_Recv(itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); - for( i = 0; i < nitem; i++ ) assert(itorecv[i]); + MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag,comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + +#ifndef NDEBUG + for( i = 0; i < nitem; i++ ) { + assert(itorecv[i]); + } +#endif } } @@ -1084,13 +1278,25 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ #ifndef NDEBUG for( ip = 1; ip <= mesh->np; ip++ ) { ppt = &mesh->point[ip]; + if (ppt->tag & MG_OVERLAP) continue; assert(ppt->tmp > 0); assert(ppt->tmp <= offsets[parmesh->nprocs]); } #endif + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + + // Commented the 11/02/22 by Algiane: useless I think /* Don't free buffers before they have been received */ - MPI_CHECK( MPI_Barrier(parmesh->comm),return 0 ); + /* MPI_CHECK( MPI_Barrier(comm), */ + /* PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); */ + /* PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); */ + /* PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); */ + /* return 0 ); */ /* Free arrays */ for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { @@ -1098,8 +1304,11 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ PMMG_DEL_MEM(parmesh,ext_node_comm->itosend,int,"itosend"); PMMG_DEL_MEM(parmesh,ext_node_comm->itorecv,int,"itorecv"); } - PMMG_DEL_MEM(parmesh,offsets,int,"offsets"); - PMMG_DEL_MEM(parmesh,iproc2comm,int,"iproc2comm"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + + PMMG_destroy_int(parmesh,ptr_int,nptr,"vertGlobNum"); + PMMG_DEL_MEM(parmesh,int_node_comm->intvalues,int,"intvalues"); return 1; } @@ -1107,19 +1316,25 @@ int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ){ /** * \param parmesh pointer toward parmesh structure * \param idx_glob global IDs of interface nodes + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * * Create non-consecutive global IDs (starting from 1) for nodes on parallel * interfaces. 
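The global numbering above follows a classic scheme: each rank counts the entities it owns, the counts are gathered with MPI_Allgather, and an exclusive prefix sum turns them into per-rank offsets so that owned entities can be numbered consecutively. A minimal sketch of that step (error handling omitted):

#include <mpi.h>
#include <stdlib.h>

/* offsets[0] = 0; offsets[i] = total number of entities owned by ranks 0..i-1.
 * Rank r then numbers its owned entities offsets[r]+1 .. offsets[r+1]. */
static int *compute_offsets(MPI_Comm comm, int nowned) {
  int nprocs;
  MPI_Comm_size(comm, &nprocs);

  int *offsets = calloc(nprocs + 1, sizeof(int));
  MPI_Allgather(&nowned, 1, MPI_INT, &offsets[1], 1, MPI_INT, comm);
  for (int i = 1; i <= nprocs; i++) offsets[i] += offsets[i - 1];

  return offsets;
}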
* + * \todo clean parallel error handling (without MPI_abort call and without deadlocks) + * */ -int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { +int PMMG_color_commNodes( PMMG_pParMesh parmesh, MPI_Comm comm ) { PMMG_pInt_comm int_node_comm; PMMG_pExt_comm ext_node_comm; PMMG_pGrp grp; MMG5_pMesh mesh; MMG5_pPoint ppt; - MPI_Request request; - MPI_Status status; + MPI_Request *request; + MPI_Status *status; int *intvalues,*itosend,*itorecv,*iproc2comm; int color,nitem; int *offsets,label; @@ -1130,17 +1345,36 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { grp = &parmesh->listgrp[0]; mesh = grp->mesh; + /* register heap arrays */ + size_t iptr,nptr = 3; + void** ptr_int[3]; + + ptr_int[0] = (void*)&parmesh->int_node_comm->intvalues; + ptr_int[1] = (void*)&iproc2comm; + ptr_int[2] = (void*)&offsets; + + /* nullify them to allow to always call free() on them */ + for( iptr = 0; iptr < nptr; iptr++ ) { + *ptr_int[iptr] = NULL; + } + request = NULL; + status = NULL; + itosend = itorecv = NULL; + /* Allocate internal communicator */ int_node_comm = parmesh->int_node_comm; - PMMG_CALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues",return 0); + PMMG_CALLOC(parmesh,int_node_comm->intvalues,int_node_comm->nitem,int,"intvalues", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); intvalues = int_node_comm->intvalues; /* Array to reorder communicators */ - PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm",return 0); - + PMMG_MALLOC(parmesh,iproc2comm,parmesh->nprocs,int,"iproc2comm", + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); for( iproc = 0; iproc < parmesh->nprocs; iproc++ ) iproc2comm[iproc] = PMMG_UNSET; + /* Reorder communicators and count max (theoretically) owned nodes * (each rank owns nodes on the interface with lower-rank procs). */ nitem = 0; @@ -1153,9 +1387,13 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { } /* Compute offsets on each proc */ - PMMG_CALLOC(parmesh,offsets,parmesh->nprocs+1,int,"offsets",return 0); + PMMG_CALLOC(parmesh,offsets,parmesh->nprocs+1,int,"offsets", + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + MPI_Allgather( &nitem,1,MPI_INT, - &offsets[1],1,MPI_INT,parmesh->comm ); + &offsets[1],1,MPI_INT,comm ); + for( i = 1; i <= parmesh->nprocs; i++ ) offsets[i] += offsets[i-1]; @@ -1184,13 +1422,35 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { /** * 2) Communicate global numbering to the ghost copies. 
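Step 2 propagates the owner's numbering to the ghost copies with one non-blocking send per neighbour and a final MPI_Waitall, which is why the single MPI_Request of the previous version is replaced by a request array. A self-contained sketch of that exchange pattern follows; the neighbour list, buffers and tag are placeholders, not ParMMG's communicator layout.

#include <mpi.h>
#include <stdlib.h>

/* Lower-rank side of each pair sends, higher-rank side receives; a single
 * Waitall completes all pending sends before the buffers are reused. */
static int exchange_ghosts(MPI_Comm comm, int nneigh, const int *neigh,
                           int **sendbuf, int **recvbuf, const int *nitem,
                           int myrank) {
  MPI_Request *req = malloc(nneigh * sizeof(MPI_Request));
  for (int k = 0; k < nneigh; k++) req[k] = MPI_REQUEST_NULL;

  for (int k = 0; k < nneigh; k++) {
    int color = neigh[k];
    int src = (myrank < color) ? myrank : color;
    int dst = (myrank < color) ? color : myrank;
    int tag = 100;                               /* placeholder tag */
    if (myrank == src)
      MPI_Isend(sendbuf[k], nitem[k], MPI_INT, dst, tag, comm, &req[k]);
    if (myrank == dst)
      MPI_Recv(recvbuf[k], nitem[k], MPI_INT, src, tag, comm, MPI_STATUS_IGNORE);
  }

  MPI_Waitall(nneigh, req, MPI_STATUSES_IGNORE); /* sends done, buffers reusable */
  free(req);
  return 1;
}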
*/ + PMMG_MALLOC(parmesh,request,parmesh->nprocs,MPI_Request, + "mpi request array", + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + for ( i=0; inprocs; ++i ) { + request[i] = MPI_REQUEST_NULL; + } + + PMMG_MALLOC(parmesh,status,parmesh->nprocs,MPI_Status, + "mpi status array", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; color = ext_node_comm->color_out; nitem = ext_node_comm->nitem; - PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend",return 0); - PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv",return 0); + PMMG_CALLOC(parmesh,ext_node_comm->itosend,nitem,int,"itosend", + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + PMMG_CALLOC(parmesh,ext_node_comm->itorecv,nitem,int,"itorecv", + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); itosend = ext_node_comm->itosend; itorecv = ext_node_comm->itorecv; @@ -1205,17 +1465,28 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { itosend[i] = intvalues[idx]; assert(itosend[i]); } - MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag,comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); } if ( parmesh->myrank == dst ) { - MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag,comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); #ifndef DNDEBUG for( i = 0; i < nitem; i++ ) assert(itorecv[i]); #endif } } + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); /* Store recv buffer in the internal communicator */ for( iproc = parmesh->myrank+1; iproc < parmesh->nprocs; iproc++ ){ @@ -1232,7 +1503,6 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { } } - /* 3) Retrieve numbering from the internal communicator */ for( i = 0; i < grp->nitem_int_node_comm; i++ ){ ip = grp->node2int_node_comm_index1[i]; @@ -1244,6 +1514,10 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { #ifndef DNDEBUG /* Check */ + for ( i=0; inext_node_comm; ++i ) { + request[i] = MPI_REQUEST_NULL; + } + for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; color = ext_node_comm->color_out; @@ -1263,15 +1537,20 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { itosend[i] = intvalues[idx]; 
assert(itosend[i]); } - MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag, - parmesh->comm,&request),return 0 ); + MPI_CHECK( MPI_Isend(itosend,nitem,MPI_INT,dst,tag,comm,&request[color]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); } if ( parmesh->myrank == dst ) { - MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag, - parmesh->comm,&status),return 0 ); + MPI_CHECK( MPI_Recv(itorecv,nitem,MPI_INT,src,tag,comm,&status[0]), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); for( i = 0; i < nitem; i++ ) assert(itorecv[i]); } - } /* Store recv buffer in the internal communicator */ @@ -1286,22 +1565,33 @@ int PMMG_color_commNodes( PMMG_pParMesh parmesh ) { assert( itorecv[i] == intvalues[idx] ); } } + MPI_CHECK( MPI_Waitall(parmesh->nprocs,request,status), + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + #endif + // Commented the 11/02/22 by Algiane: useless I think /* Don't free buffers before they have been received */ - MPI_CHECK( MPI_Barrier(parmesh->comm),return 0 ); + /* MPI_CHECK( MPI_Barrier(comm), */ + /* PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); */ + /* PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); */ + /* PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); */ + /* return 0 ); */ /* Free arrays */ - PMMG_DEL_MEM(parmesh,offsets,int,"offsets"); - PMMG_DEL_MEM(parmesh,iproc2comm,int,"iproc2comm"); - for( icomm = 0; icomm < parmesh->next_node_comm; icomm++ ) { ext_node_comm = &parmesh->ext_node_comm[icomm]; PMMG_DEL_MEM(parmesh,ext_node_comm->itosend,int,"itosend"); PMMG_DEL_MEM(parmesh,ext_node_comm->itorecv,int,"itorecv"); } - PMMG_DEL_MEM(parmesh,int_node_comm->intvalues,int,"intvalues"); + PMMG_DEL_MEM(parmesh,request,MPI_Request,"mpi requests"); + PMMG_DEL_MEM(parmesh,status,MPI_Status,"mpi_status"); + + PMMG_destroy_int(parmesh,ptr_int,nptr,"color_comm_nodes"); return 1; } @@ -1328,18 +1618,24 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { iresult = 1; + if ( parmesh->niter == 0 || parmesh->info.pure_partitioning ) { + /* set parmesh->iter to allow saving of mesh communicators */ + parmesh->iter = 0; + } + switch ( parmesh->info.fmtout ) { case ( PMMG_UNSET ): /* No output */ break; case ( MMG5_FMT_VtkPvtu ): case ( PMMG_FMT_Distributed ): case ( PMMG_FMT_DistributedMeditASCII ): case ( PMMG_FMT_DistributedMeditBinary ): + case ( PMMG_FMT_HDF5 ): /* Distributed Output */ tim = 1; chrono(ON,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf( stdout,"\n -- PHASE 3 : MESH PACKED UP\n" ); + fprintf( stdout,"\n -- PHASE 3 : MESH PACKED UP\n" ); } ier = PMMG_bdryBuild ( parmesh ); @@ -1355,14 +1651,14 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { if( parmesh->info.globalNum ) { - ier = PMMG_Compute_verticesGloNum( parmesh ); + ier = PMMG_Compute_verticesGloNum( parmesh,parmesh->comm ); if( !ier ) { if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { fprintf(stdout,"\n\n\n -- WARNING: IMPOSSIBLE TO COMPUTE NODE GLOBAL NUMBERING\n\n\n"); } } - ier = PMMG_Compute_trianglesGloNum( parmesh ); + ier = 
PMMG_Compute_trianglesGloNum( parmesh,parmesh->comm ); if( !ier ) { if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { fprintf(stdout,"\n\n\n -- WARNING: IMPOSSIBLE TO COMPUTE TRIANGLE GLOBAL NUMBERING\n\n\n"); @@ -1375,7 +1671,7 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { chrono(OFF,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { printim(ctim[tim].gdif,stim); - fprintf( stdout," -- PHASE 3 COMPLETED. %s\n",stim ); + fprintf( stdout," -- PHASE 3 COMPLETED. %s\n",stim ); } break; @@ -1385,7 +1681,7 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { tim = 1; chrono(ON,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf( stdout,"\n -- PHASE 3 : MERGE MESHES OVER PROCESSORS\n" ); + fprintf( stdout,"\n -- PHASE 3 : MERGE MESHES OVER PROCESSORS\n" ); } ier = PMMG_merge_parmesh( parmesh ); @@ -1402,7 +1698,7 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { chrono(OFF,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { printim(ctim[tim].gdif,stim); - fprintf( stdout," -- PHASE 3 COMPLETED. %s\n",stim ); + fprintf( stdout," -- PHASE 3 COMPLETED. %s\n",stim ); } /** Boundaries reconstruction: by all the procs if the merge has failed, @@ -1410,7 +1706,7 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { tim = 2; chrono(ON,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf( stdout,"\n -- PHASE 4 : MESH PACKED UP\n" ); + fprintf( stdout,"\n -- PHASE 4 : MESH PACKED UP\n" ); } if ( (!iresult) || (!parmesh->myrank) ) { @@ -1433,7 +1729,7 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { chrono(OFF,&(ctim[tim])); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { printim(ctim[tim].gdif,stim); - fprintf( stdout," -- PHASE 4 COMPLETED. %s\n",stim ); + fprintf( stdout," -- PHASE 4 COMPLETED. %s\n",stim ); } } } @@ -1442,9 +1738,17 @@ int PMMG_parmmglib_post(PMMG_pParMesh parmesh) { } int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) { + return PMMG_parmmg_centralized(parmesh); +} + +int PMMG_parmmgls_centralized(PMMG_pParMesh parmesh) { + return PMMG_parmmg_centralized(parmesh); +} + +int PMMG_parmmg_centralized(PMMG_pParMesh parmesh) { PMMG_pGrp grp; MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; int ier; int ierlib; mytime ctim[TIMEMAX]; @@ -1474,6 +1778,18 @@ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) { } } + /* I/O check: if an input ls name is provided but the output one is not, + compute automatically an output ls name. */ + if ( parmesh->lsin && *parmesh->lsin ) { + ier = PMMG_Set_outputLsName(parmesh,NULL); + if ( !ier ) { + fprintf(stdout," ## Warning: %s: rank %d: an input field name is" + " provided without an output one.\n" + " : the saving process may fail.\n", + __func__,parmesh->myrank); + } + } + /* Distribute the mesh */ ier = PMMG_distributeMesh_centralized_timers( parmesh, ctim ); if( ier != PMMG_SUCCESS ) return ier; @@ -1481,6 +1797,7 @@ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) { grp = &parmesh->listgrp[0]; mesh = grp->mesh; met = grp->met; + ls = grp->ls; /** Remeshing */ tim = 3; @@ -1490,16 +1807,21 @@ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) { met->size < 6 ? 
"ISOTROPIC" : "ANISOTROPIC" ); } - ier = PMMG_parmmglib1(parmesh); - MPI_Allreduce( &ier, &ierlib, 1, MPI_INT, MPI_MAX, parmesh->comm ); + if ( !parmesh->info.pure_partitioning ) { + ier = PMMG_parmmglib1(parmesh); + MPI_Allreduce( &ier, &ierlib, 1, MPI_INT, MPI_MAX, parmesh->comm ); - chrono(OFF,&(ctim[tim])); - printim(ctim[tim].gdif,stim); - if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf(stdout," -- PHASE 2 COMPLETED. %s\n",stim); + chrono(OFF,&(ctim[tim])); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout," -- PHASE 2 COMPLETED. %s\n",stim); + } + if ( ierlib == PMMG_STRONGFAILURE ) { + return ierlib; + } } - if ( ierlib == PMMG_STRONGFAILURE ) { - return ierlib; + else { + ierlib = 0; } ier = PMMG_parmmglib_post(parmesh); @@ -1517,8 +1839,16 @@ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh) { } int PMMG_parmmglib_distributed(PMMG_pParMesh parmesh) { + return PMMG_parmmg_distributed(parmesh); +} + +int PMMG_parmmgls_distributed(PMMG_pParMesh parmesh) { + return PMMG_parmmg_distributed(parmesh); +} + +int PMMG_parmmg_distributed(PMMG_pParMesh parmesh) { MMG5_pMesh mesh; - MMG5_pSol met; + MMG5_pSol met,ls; int ier,iresult,ierlib; mytime ctim[TIMEMAX]; int8_t tim; @@ -1580,41 +1910,74 @@ int PMMG_parmmglib_distributed(PMMG_pParMesh parmesh) { ier = PMMG_preprocessMesh_distributed( parmesh ); mesh = parmesh->listgrp[0].mesh; met = parmesh->listgrp[0].met; - if ( (ier==PMMG_STRONGFAILURE) && MMG5_unscaleMesh( mesh, met, NULL ) ) { + ls = parmesh->listgrp[0].ls; + if ( (ier==PMMG_STRONGFAILURE) && (parmesh->nprocs == parmesh->info.npartin) && MMG5_unscaleMesh( mesh, met, ls ) ) { ier = PMMG_LOWFAILURE; } } - else { ier = PMMG_SUCCESS; } + else { + ier = PMMG_SUCCESS; + } MPI_Allreduce( &ier, &iresult, 1, MPI_INT, MPI_MAX, parmesh->comm ); + if ( iresult!=PMMG_SUCCESS ) { return iresult; } - chrono(OFF,&(ctim[tim])); - if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - printim(ctim[tim].gdif,stim); - fprintf(stdout," -- PHASE 1 COMPLETED. %s\n",stim); + if ( parmesh->info.pure_partitioning ) { + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr,"\n ## Error: %s: Pure repartitioning from distributed" + " input not implemented.\n",__func__); + } + return PMMG_STRONGFAILURE; } - /** Remeshing */ - tim = 3; - chrono(ON,&(ctim[tim])); - if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf( stdout,"\n -- PHASE 2 : %s MESHING\n", - met->size < 6 ? "ISOTROPIC" : "ANISOTROPIC" ); + /* I/O check: if the mesh was loaded with nprocs != npartin (for example from + hdf5 file), call loadBalancing before the remeshing loop to make sure no + proc has an empty mesh (nprocs > npartin) and the load is well balanced + (nprocs < npartin). */ + if ( parmesh->nprocs != parmesh->info.npartin ) { + ier = PMMG_loadBalancing(parmesh,PMMG_REDISTRIBUTION_graph_balancing); } - ier = PMMG_parmmglib1(parmesh); - MPI_Allreduce( &ier, &ierlib, 1, MPI_INT, MPI_MAX, parmesh->comm ); + /* I.O check: if the mesh was loaded from an HDF5 file with nprocs > npartin, + the ranks [npart, nprocs - 1] have parmesh->ngrp == 0, so they did not enter + in PMMG_preprocessMesh_distributed and their function pointers were not set. + Set them now and reset parmesh->ngrp = 1. 
*/ + if ( parmesh->myrank >= parmesh->info.npartin ) { + MMG3D_Set_commonFunc(); + MMG3D_setfunc(parmesh->listgrp[0].mesh, parmesh->listgrp[0].met); + PMMG_setfunc(parmesh); + parmesh->ngrp = 1; + } chrono(OFF,&(ctim[tim])); - printim(ctim[tim].gdif,stim); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { - fprintf(stdout," -- PHASE 2 COMPLETED. %s\n",stim); + printim(ctim[tim].gdif,stim); + fprintf(stdout," -- PHASE 1 COMPLETED. %s\n",stim); } - if ( ierlib == PMMG_STRONGFAILURE ) { - return ierlib; + + if ( !parmesh->info.pure_partitioning ) { + /** Remeshing */ + tim = 3; + chrono(ON,&(ctim[tim])); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf( stdout,"\n -- PHASE 2 : %s MESHING\n", + parmesh->listgrp[0].met->size < 6 ? "ISOTROPIC" : "ANISOTROPIC" ); + } + + ier = PMMG_parmmglib1(parmesh); + MPI_Allreduce( &ier, &ierlib, 1, MPI_INT, MPI_MAX, parmesh->comm ); + + chrono(OFF,&(ctim[tim])); + printim(ctim[tim].gdif,stim); + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stdout," -- PHASE 2 COMPLETED. %s\n",stim); + } + if ( ierlib == PMMG_STRONGFAILURE ) { + return ierlib; + } } ier = PMMG_parmmglib_post(parmesh); diff --git a/src/libparmmg.h b/src/libparmmg.h index c9befaea..e6d939fa 100644 --- a/src/libparmmg.h +++ b/src/libparmmg.h @@ -34,7 +34,6 @@ #define _PMMGLIB_H #include "libparmmgtypes.h" -#include "metis.h" #if defined(c_plusplus) || defined(__cplusplus) extern "C" { @@ -60,6 +59,7 @@ enum PMMG_Param { PMMG_IPARAM_mmgDebug, /*!< [1/0], Turn on/off debug mode */ PMMG_IPARAM_angle, /*!< [1/0], Turn on/off angle detection */ PMMG_IPARAM_iso, /*!< [1/0], Level-set meshing */ + PMMG_IPARAM_isosurf, /*!< [1/0], Level-set meshing along boundaries */ PMMG_IPARAM_lag, /*!< [-1/0/1/2], Lagrangian option */ PMMG_IPARAM_opnbdy, /*!< [0/1], Enable preservation of open boundaries */ PMMG_IPARAM_optim, /*!< [1/0], Optimize mesh keeping its initial edge sizes */ @@ -70,6 +70,7 @@ enum PMMG_Param { PMMG_IPARAM_nomove, /*!< [1/0], Avoid/allow point relocation */ PMMG_IPARAM_nosurf, /*!< [1/0], Avoid/allow surface modifications */ PMMG_IPARAM_numberOfLocalParam,/*!< [n], Number of local parameters */ + PMMG_IPARAM_purePartitioning, /*!< [0/1], Turn off/on pure mesh partitioning (no ls insertion, no remeshing) */ PMMG_IPARAM_anisosize, /*!< [1/0], Turn on/off anisotropic metric creation when no metric is provided */ PMMG_IPARAM_octree, /*!< [n], Specify the max number of points per octree cell (DELAUNAY) */ PMMG_IPARAM_meshSize, /*!< [n], Target mesh size of Mmg (advanced use) */ @@ -106,6 +107,7 @@ enum PMMG_Param { * a pointer toward a pointer toward a parmesh * the \a PMMG_ARG_pMesh keyword to initialize a \a mesh pointer inside your \a parmesh * the \a PMMG_ARG_pMet keyword to initialize a \a metric pointer inside your \a parmesh + * the \a PMMG_ARG_pLs keyword to initialize a \a level-set pointer inside your \a parmesh * the \a PMMG_ARG_dim keyword to set the mesh dimension * the \a PMMG_ARG_MPIComm keyword to set the MPI Communicator in which parmmg will work * the \a PMMG_ARG_end keyword to end the list of variadic args. @@ -144,6 +146,46 @@ enum PMMG_Param { **/ int PMMG_parmmglib_distributed(PMMG_pParMesh parmesh); +/** + * \param parmesh pointer toward the parmesh structure (boundary entities are + * stored into MMG5_Tria, MMG5_Edge... 
structures) + * + * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can + * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and + * we can't return a centralized and unscaled mesh. + * + * Main program for the parallel isovalue discretisation library for distributed + * meshes + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_parmmgls_distributed(parmesh,retval)\n + * > MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + **/ +int PMMG_parmmgls_distributed(PMMG_pParMesh parmesh); + +/** + * \param parmesh pointer toward the parmesh structure (boundary entities are + * stored into MMG5_Tria, MMG5_Edge... structures) + * + * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can + * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and + * we can't return a centralized and unscaled mesh. + * + * Main program for the parallel isovalue discretisation library and remesh + * library for distributed meshes + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_parmmg_distributed(parmesh,retval)\n + * > MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + **/ +int PMMG_parmmg_distributed(PMMG_pParMesh parmesh); + /** * \param parmesh pointer toward the parmesh structure (boundary entities are * stored into MMG5_Tria, MMG5_Edge... structures) @@ -163,6 +205,46 @@ int PMMG_parmmglib_distributed(PMMG_pParMesh parmesh); **/ int PMMG_parmmglib_centralized(PMMG_pParMesh parmesh); +/** + * \param parmesh pointer toward the parmesh structure (boundary entities are + * stored into MMG5_Tria, MMG5_Edge... structures) + * + * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can + * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and + * we can't return a centralized and unscaled mesh. + * + * Main program for the parallel isovalue discretisation library for centralized + * meshes + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_parmmgls_centralized(parmesh,retval)\n + * > MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + **/ +int PMMG_parmmgls_centralized(PMMG_pParMesh parmesh); + +/** + * \param parmesh pointer toward the parmesh structure (boundary entities are + * stored into MMG5_Tria, MMG5_Edge... structures) + * + * \return \ref PMMG_SUCCESS if success, \ref PMMG_LOWFAILURE if fail but we can + * return a centralized and unscaled mesh or \ref PMMG_STRONGFAILURE if fail and + * we can't return a centralized and unscaled mesh. + * + * Main program for the parallel isovalue discretisation library and remesh + * library for centralized meshes + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_parmmg_centralized(parmesh,retval)\n + * > MMG5_DATA_PTR_T,INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + **/ +int PMMG_parmmg_centralized(PMMG_pParMesh parmesh); + /* init file names */ /** * \param parmesh pointer toward a parmesh structure. @@ -308,6 +390,24 @@ int PMMG_Set_outputSolsName(PMMG_pParMesh parmesh, const char* solout); */ int PMMG_Set_outputMetName(PMMG_pParMesh parmesh, const char* metout); +/** + * \param parmesh pointer toward a parmesh structure. + * \param lsout name of the output level-set file. + * \return 0 if failed, 1 otherwise. + * + * Set the name of output level-set file. 
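+ *
+ * A minimal usage sketch (the output name below is only illustrative and
+ * error handling is left to the caller):
+ * \code
+ * if ( !PMMG_Set_outputLsName(parmesh,"mesh.o.ls.sol") ) {
+ *   /* the output level-set name could not be stored */
+ * }
+ * \endcode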
+ * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SET_OUTPUTLSNAME(parmesh,lsout,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: lsout\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ +int PMMG_Set_outputLsName(PMMG_pParMesh parmesh, const char* lsout); + /** * \param parmesh pointer toward the parmesh structure. * \param comm MPI communicator for ParMmg @@ -426,7 +526,7 @@ int PMMG_Set_iparameter(PMMG_pParMesh parmesh, int iparam, int val); * > END SUBROUTINE\n * */ -int PMMG_Set_dparameter(PMMG_pParMesh parmesh, int iparam, double val); +int PMMG_Set_dparameter(PMMG_pParMesh parmesh, int dparam, double val); /** * \param parmesh pointer toward the parmesh structure. @@ -1900,6 +2000,25 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog); * */ int PMMG_loadSol_centralized(PMMG_pParMesh parmesh,const char *filename); +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return -1 data invalid, 0 no file, 1 ok. + * + * Load distributed displacement, level-set or metric field depending on the + * option setted. The solution file must contains only 1 solution. + * Insert rank index to the mesh name. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_LOADSOL_DISTRIBUTED(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_loadSol_distributed(PMMG_pParMesh parmesh,const char *filename); /** * \param parmesh pointer toward the parmesh structure. * \param filename name of file. @@ -1917,6 +2036,24 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog); * */ int PMMG_loadAllSols_centralized(PMMG_pParMesh parmesh,const char *filename); +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return -1 data invalid, 0 no file, 1 ok. + * + * Load 1 or more distributed solutions in a solution file at medit file format. + * Insert rank index in the file name. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_LOADALLSOLS_DSITRIBUTED(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_loadAllSols_distributed(PMMG_pParMesh parmesh,const char *filename); /** * \param parmesh pointer toward the parmesh structure. * \param filename pointer toward the name of file. @@ -1996,7 +2133,45 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog); * \param filename name of file. * \return 0 if failed, 1 otherwise. * - * Write 1 or more than 1 solution in a file at medit format. + * Write level-set in a file at medit format. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVELS_CENTRALIZED(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_saveLs_centralized(PMMG_pParMesh parmesh, const char *filename); + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return 0 if failed, 1 otherwise. 
+ * + * Write level-set in a file at medit format for a distributed + * mesh (insert rank index to filename). + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVELS_DISTRIBUTED(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_saveLs_distributed(PMMG_pParMesh parmesh, const char *filename); + + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return 0 if failed, 1 otherwise. + * + * Write 1 or more than 1 solution field in a file at medit format. * * \remark Fortran interface: * > SUBROUTINE PMMG_SAVEALLSOLS_CENTRALIZED(parmesh,filename,strlen,retval)\n @@ -2009,8 +2184,152 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog); */ int PMMG_saveAllSols_centralized(PMMG_pParMesh parmesh, const char *filename); +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return 0 if failed, 1 otherwise. + * + * Write 1 or more than 1 solution field in a file at medit format for a distributed + * mesh (insert rank index to filename). + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVEALLSOLS_DISTRIBUTED(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_saveAllSols_distributed(PMMG_pParMesh parmesh, const char *filename); + +/** + * \param parmesh pointer toward parmesh steructure. + * \return 0 if failed, 1 otherwise. + * + * Set the default entities to save into an hdf5 file. + * + * For now, used only by hdf5 I/Os. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SET_DEFAULTIOENTITIES(parmesh,retval)\n + * > MMG5_DATA_PTR_T , INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_Set_defaultIOEntities(PMMG_pParMesh parmesh); + +/** + * \param parmesh pointer toward parmesh steructure. + * \param target type of entity for which we want to enable/disable saving. + * target value has to be one of the PMMG_IO_entities values. + * \pararm enable saving if PMMG_ON is passed, disable it if PMMG_OFF is passed. + * \return 0 if failed, 1 otherwise. + * + * Enable or disable entities to save depending on the \a val value. + * + * Passing \ref PMMG_IO_Required as \a target value allows to modify behaviour + * for all required entites. + * + * Passing \ref PMMG_IO_Parallel as \a target value allows to modify behaviour + * for all parallel entites. + * + * For now, used only by hdf5 I/Os. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SET_IOENTITIES(parmesh,target,val,retval)\n + * > MMG5_DATA_PTR_T , INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(IN) :: target,val\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_Set_IOEntities(PMMG_pParMesh parmesh, int target, int val); + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of the HDF5 and XDMF files (can have no extention, .h5 or .xdmf extension). + * \return 0 if failed, 1 otherwise. + * + * Write the mesh data, the metric, and all the solutions in an HDF5 file, + * aswell as an XDMF file for visualisation. This function is to be used for + * distributed meshes. 
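+ *
+ * A possible call sequence (the file name is illustrative; the entity
+ * selection shown here is one choice among others, see the functions
+ * referenced below):
+ * \code
+ * if ( !PMMG_Set_defaultIOEntities(parmesh) )                    return 0;
+ * /* for instance, skip the parallel entities in the output file */
+ * if ( !PMMG_Set_IOEntities(parmesh,PMMG_IO_Parallel,PMMG_OFF) ) return 0;
+ * if ( !PMMG_saveMesh_hdf5(parmesh,"mesh.o.h5") )                return 0;
+ * \endcode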
+ * + * The entities that have to be saved can be setted using the + * \ref PMMG_Set_defaultIOEntities and \ref PMMG_Set_IOEntities + * functions. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVEMESH_HDF5(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T , INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_saveMesh_hdf5(PMMG_pParMesh parmesh,const char *filename); + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of the HDF5 file. + * \return 0 if failed, 1 otherwise. + * + * Load the mesh data, the metric, and all the solutions from an HDF5 file in + * a distributed parmesh. + * + * The entities that have to be saved can be setted using the + * \ref PMMG_Set_defaultIOEntities and \ref PMMG_Set_IOEntities + * functions. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_LOADMESH_HDF5(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_loadMesh_hdf5(PMMG_pParMesh parmesh,const char *filename); + +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return 0 if failed, 1 otherwise. + * + * Write mesh and 0 or 1 data at pvtu Vtk file format (.pvtu extension). + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVEPVTUMESH(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); +/** + * \param parmesh pointer toward the parmesh structure. + * \param filename name of file. + * \return 0 if failed, 1 otherwise. + * + * Write mesh and a list of data fields at pvtu Vtk file format (.pvtu extension). 
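+ *
+ * Minimal sketch (the output name is illustrative):
+ * \code
+ * if ( !PMMG_savePvtuMesh_and_allData(parmesh,"mesh.o.pvtu") ) {
+ *   /* pvtu output failed */
+ * }
+ * \endcode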
+ * + * \remark Fortran interface: + * > SUBROUTINE PMMG_SAVEPVTUMESH_AND_ALLDATA(parmesh,filename,strlen,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > CHARACTER(LEN=*), INTENT(IN) :: filename\n + * > INTEGER, INTENT(IN) :: strlen\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_savePvtuMesh_and_allData(PMMG_pParMesh parmesh, const char * filename); + /** * \param parmesh pointer toward the parmesh structure * \param next_comm number of communicators @@ -2189,7 +2508,7 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); * \remark Fortran interface: * > SUBROUTINE PMMG_GET_ITHNODECOMMUNICATORSIZE(parmesh,ext_comm_index,color_out,nitem,retval)\n * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n - * > INTEGER, INTENT(OUT) :: ext_comm_index\n + * > INTEGER, INTENT(IN) :: ext_comm_index\n * > INTEGER, INTENT(OUT) :: color_out\n * > INTEGER, INTENT(OUT) :: nitem\n * > INTEGER, INTENT(OUT) :: retval\n @@ -2210,7 +2529,7 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); * \remark Fortran interface: * > SUBROUTINE PMMG_GET_ITHFACECOMMUNICATORSIZE(parmesh,ext_comm_index,color_out,nitem,retval)\n * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n - * > INTEGER, INTENT(OUT) :: ext_comm_index\n + * > INTEGER, INTENT(IN) :: ext_comm_index\n * > INTEGER, INTENT(OUT) :: color_out\n * > INTEGER, INTENT(OUT) :: nitem\n * > INTEGER, INTENT(OUT) :: retval\n @@ -2219,14 +2538,17 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); int PMMG_Get_ithFaceCommunicatorSize(PMMG_pParMesh parmesh, int ext_comm_index, int *color_out, int *nitem); /** * \param parmesh pointer toward the parmesh structure - * \param ext_comm_index index of the communicator * \param local_index array of local mesh IDs of interface entities * \return 0 if failed, 1 otherwise. * + * \warning Non callable from a fortran code as Fortran cannot assign a **int + * with differing allocations on each index. + * \ref PMMG_Get_ithNodeCommunicator_nodes should be used instead. + * * Get the nodes on a parallel interface. * * \remark Fortran interface: - * > SUBROUTINE PMMG_GET_ITHNODECOMMUNICATOR_NODES(parmesh,local_index,retval)\n + * > SUBROUTINE PMMG_GET_NODECOMMUNICATOR_NODES(parmesh,local_index,retval)\n * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n * > INTEGER, DIMENSION(*), INTENT(OUT) :: local_index\n * > INTEGER, INTENT(OUT) :: retval\n @@ -2234,6 +2556,25 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); * */ int PMMG_Get_NodeCommunicator_nodes(PMMG_pParMesh parmesh, int** local_index); +/** + * \param parmesh pointer toward the parmesh structure + * \param ext_comm_index index of the communicator + * \param local_index array of local mesh IDs of specified interface entities + * \return 0 if failed, 1 otherwise. + * + * Get the nodes on a parallel interface for a given node communicator. + * To be used for Fortran users in place of \ref PMMG_Get_NodeCommunicator_nodes. 
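+ *
+ * A C usage sketch for one communicator (\a icomm and the plain malloc
+ * allocation are illustrative; the item count is assumed to come from
+ * \ref PMMG_Get_ithNodeCommunicatorSize):
+ * \code
+ * int color_out,nitem;
+ * if ( !PMMG_Get_ithNodeCommunicatorSize(parmesh,icomm,&color_out,&nitem) )
+ *   return 0;
+ * int *loc = (int*)malloc(nitem*sizeof(int));
+ * if ( !PMMG_Get_ithNodeCommunicator_nodes(parmesh,icomm,loc) ) return 0;
+ * /* loc[k] is the local index of the k-th node shared with color_out */
+ * free(loc);
+ * \endcode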
+ * + * \remark Fortran interface: + * > SUBROUTINE PMMG_GET_ITHNODECOMMUNICATOR_NODES(parmesh,ext_comm_index,local_index,retval)\n + * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n + * > INTEGER, INTENT(IN) :: ext_comm_index\n + * > INTEGER, DIMENSION(*), INTENT(OUT) :: local_index\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_Get_ithNodeCommunicator_nodes(PMMG_pParMesh parmesh, int ext_comm_index, int* local_index); /** * \param parmesh pointer toward the parmesh structure * \param ext_comm_index index of the communicator @@ -2243,7 +2584,7 @@ int PMMG_savePvtuMesh(PMMG_pParMesh parmesh, const char * filename); * Get the faces on a parallel interface. * * \remark Fortran interface: - * > SUBROUTINE PMMG_GET_ITHFACECOMMUNICATOR_NODES(parmesh,local_index,retval)\n + * > SUBROUTINE PMMG_GET_FACECOMMUNICATOR_FACES(parmesh,local_index,retval)\n * > MMG5_DATA_PTR_T, INTENT(INOUT) :: parmesh\n * > INTEGER, DIMENSION(*), INTENT(OUT) :: local_index\n * > INTEGER, INTENT(OUT) :: retval\n @@ -2553,6 +2894,55 @@ void PMMG_setfunc( PMMG_pParMesh parmesh ); */ int PMMG_printCommunicator( PMMG_pParMesh parmesh,const char *filename ); +/** + * \param parmesh pointer toward the parmesh structure. + * \param ktri index of the boundary triangle. + * \param ktet pointer toward an integer that will contains the tetra index. + * \param iface pointer toward the triangle in \a ktet. + * + * \return 0 if fail, 1 otherwise + * + * Fill \a ktet by the indice of a tetra to which belong a boundary triangle + * and \a iface by the indice of the triangle in the tetra. + * + * \remark Fortran interface: + * > SUBROUTINE PMMG_GET_TETFROMTRIA(parmesh,ktri,ktet,iface,retval)\n + * > MMG5_DATA_PTR_T, INTENT(IN) :: parmesh\n + * > INTEGER, INTENT(IN) :: ktri\n + * > INTEGER, INTENT(OUT) :: ktet,iface\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_Get_tetFromTria(PMMG_pParMesh parmesh, int ktri, int *ktet, int *iface); + +/** + * \param parmesh pointer toward the parmesh structure. + * \param ktri index of the boundary triangle. + * \param ktet array of size 2 that will contain the indices of the tetra + * (filled by the function). + * \param iface pointer toward an array of size 2 that will contains the indices + * of the faces of the tetras \a ktet[i] that corresponds to the boundary tria + * \a ktri. + * + * \return 0 if fail, 1 otherwise + * + * Fill \a ktet by the indices of the tetra to which belong a boundary triangle + * and \a iface by the indices of the faces of the tetras that correspond to the + * triangle. Fill ktet[1] and iface[1] by 0 if the triangle belongs to 1 tetra only. 
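+ *
+ * A sketch of a typical call (\a ktri is assumed to be a valid boundary
+ * triangle index):
+ * \code
+ * int ktet[2],iface[2];
+ * if ( PMMG_Get_tetsFromTria(parmesh,ktri,ktet,iface) ) {
+ *   /* ktet[0]/iface[0] are always filled; ktet[1] and iface[1] are 0 if
+ *    * the triangle has only one adjacent tetrahedron */
+ * }
+ * \endcode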
+ * + * \remark Fortran interface: + * > SUBROUTINE PMMG_GET_TETSFROMTRIA(parmesh,ktri,ktet,iface,retval)\n + * > MMG5_DATA_PTR_T, INTENT(IN) :: parmesh\n + * > INTEGER, INTENT(IN) :: ktri\n + * > INTEGER, DIMENSION(2), INTENT(OUT) :: ktet,iface\n + * > INTEGER, INTENT(OUT) :: retval\n + * > END SUBROUTINE\n + * + */ + int PMMG_Get_tetsFromTria(PMMG_pParMesh parmesh, int ktri, int ktet[2], int iface[2]); + + #if defined(c_plusplus) || defined(__cplusplus) } #endif diff --git a/src/libparmmg1.c b/src/libparmmg1.c index e2f845d8..8fb56c50 100644 --- a/src/libparmmg1.c +++ b/src/libparmmg1.c @@ -34,6 +34,7 @@ * */ #include "parmmg.h" +#include "mmgexterns_private.h" /** * \param grp pointer toward the group in which we want to update the list of @@ -188,8 +189,7 @@ int PMMG_packTetra( PMMG_pParMesh parmesh, int igrp ) { * * \return 0 if fail, 1 otherwise * - * Pack the sparse meshes of each group and create triangles and edges before - * getting out of library + * Pack the sparse meshes of each group * */ int PMMG_packParMesh( PMMG_pParMesh parmesh ) @@ -274,10 +274,12 @@ int PMMG_packParMesh( PMMG_pParMesh parmesh ) __func__); return 0; } +#ifndef NDEBUG if ( !MMG5_chkmsh(mesh,1,1) ) { fprintf(stderr," ## Problem. Invalid mesh.\n"); return 0; } +#endif } } @@ -400,7 +402,7 @@ int PMMG_update_face2intInterfaceTetra( PMMG_pParMesh parmesh, int igrp, } } - /** Step 2: Travel through the \a facesData array, get int the hash table the + /** Step 2: Travel through the \a facesData array, get in the hash table the * index of the element to which belong the face and update the face * communicator */ face2int_face_comm_index1 = grp->face2int_face_comm_index1; @@ -566,6 +568,16 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) /** Set inputMet flag */ parmesh->info.inputMet = 0; + +#ifndef NDEBUG + for ( i=0; ingrp; ++i ) { + if ( !MMG5_chkmsh(parmesh->listgrp[i].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + for ( i=0; ingrp; ++i ) { met = parmesh->listgrp[i].met; if ( met && met->m ) { @@ -618,11 +630,21 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) /** Reset the boundary fields between the old mesh size and the new one (Mmg * uses this fields assiming they are setted to 0)/ */ + + permNodGlob = NULL; + for ( i=0; ingrp; ++i ) { mesh = parmesh->listgrp[i].mesh; if ( !mesh ) continue; +#ifndef NDEBUG + if ( !MMG5_chkmsh(mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } +#endif + memset(&mesh->xtetra[mesh->xt+1],0,(mesh->xtmax-mesh->xt)*sizeof(MMG5_xTetra)); memset(&mesh->xpoint[mesh->xp+1],0,(mesh->xpmax-mesh->xp)*sizeof(MMG5_xPoint)); @@ -664,13 +686,22 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) met = parmesh->listgrp[i].met; field = parmesh->listgrp[i].field; +#ifndef NDEBUG + if ( !MMG5_chkmsh(mesh,1,1) ) { + fprintf(stderr," ## Problem. 
Invalid mesh.\n"); + return 0; + } +#endif + +#warning Luca: until analysis is not ready #ifdef USE_POINTMAP - for( k = 1; k <= mesh->np; k++ ) + for( k = 1; k <= mesh->np; k++ ) { mesh->point[k].src = k; + } #endif /* Reset the value of the fem mode */ - mesh->info.fem = parmesh->info.fem; + mesh->info.fem = parmesh->info.setfem; if ( (!mesh->np) && (!mesh->ne) ) { /* Empty mesh */ @@ -691,7 +722,13 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) #ifdef USE_SCOTCH /* Allocation of the array that will store the node permutation */ - PMMG_MALLOC(parmesh,permNodGlob,mesh->np+1,int,"node permutation", + // npi stores the number of points when we enter Mmg, np stores the + // number of points after adatptation. + // In theorie, here np == npi + + assert ( mesh->np == mesh->npi ); + + PMMG_MALLOC(parmesh,permNodGlob,mesh->npi+1,int,"node permutation", PMMG_scotch_message(&warnScotch) ); if ( permNodGlob ) { for ( k=1; k<=mesh->np; ++k ) { @@ -705,7 +742,6 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) /* renumerotation if available: no need to renum the field here (they * will be interpolated) */ - assert ( mesh->npi==mesh->np ); if ( permNodGlob ) { if ( !MMG5_scotchCall(mesh,met,NULL,permNodGlob) ) { @@ -721,6 +757,11 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) mesh->tetra[k].mark = mesh->mark; mesh->tetra[k].flag = mesh->base; } + /* Reinitialisation of point flags because mesh->base has been reseted + * and scalem expects point flag to be lower or equal to mesh->base */ + for ( k=1 ; k<=mesh->npmax ; k++ ) { + mesh->point[k].flag = mesh->base; + } /** Call the remesher */ /* Here we need to scale the mesh */ @@ -733,6 +774,7 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) } } + #ifdef PATTERN ier = MMG5_mmg3d1_pattern( mesh, met, permNodGlob ); #else @@ -758,7 +800,7 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) } if ( parmesh->iter < parmesh->niter-1 && (!parmesh->info.inputMet) ) { - /* Delete the metrec computed by Mmg except at last iter */ + /* Delete the metric computed by Mmg except at last iter */ PMMG_DEL_MEM(mesh,met->m,double,"internal metric"); } @@ -799,14 +841,15 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) goto strong_failed; } +#ifdef USE_SCOTCH + PMMG_DEL_MEM(parmesh,permNodGlob,int,"node permutation"); +#endif + if ( !ier ) { break; } + } /* Reset the mesh->gap field in case Mmg have modified it */ mesh->gap = MMG5_GAP; - -#ifdef USE_SCOTCH - PMMG_DEL_MEM(parmesh,permNodGlob,int,"node permutation"); -#endif } MPI_Allreduce( &ier, &ieresult, 1, MPI_INT, MPI_MIN, parmesh->comm ); @@ -855,21 +898,12 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) if ( !parmesh->info.nobalancing ) { /** Load balancing of the output mesh */ - - /* Store user repartitioning mode */ - int repartitioning_mode; - repartitioning_mode = parmesh->info.repartitioning; - /* Load balance using mesh groups graph */ - parmesh->info.repartitioning = PMMG_REDISTRIBUTION_graph_balancing; - ier = PMMG_loadBalancing(parmesh); - - /* Repristinate user repartitioning mode */ - parmesh->info.repartitioning = repartitioning_mode; + ier = PMMG_loadBalancing(parmesh,PMMG_REDISTRIBUTION_graph_balancing); } } else { /** Standard parallel mesh repartitioning */ - ier = PMMG_loadBalancing(parmesh); + ier = PMMG_loadBalancing(parmesh,parmesh->info.repartitioning); } @@ -907,7 +941,7 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) } #endif - ier = PMMG_qualhisto( parmesh, PMMG_OUTQUA, 0 ); + ier = PMMG_qualhisto( parmesh, PMMG_OUTQUA, 0, parmesh->comm ); MPI_Allreduce( &ier, &ieresult, 1, MPI_INT, MPI_MIN, 
parmesh->comm ); if ( !ieresult ) { @@ -942,6 +976,15 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) ier = PMMG_merge_grps(parmesh,0); MPI_Allreduce( &ier, &ieresult, 1, MPI_INT, MPI_MIN, parmesh->comm ); +#ifndef NDEBUG + for (int k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + if ( parmesh->info.imprim > PMMG_VERB_STEPS ) { chrono(OFF,&(ctim[tim])); printim(ctim[tim].gdif,stim); @@ -961,7 +1004,7 @@ int PMMG_parmmglib1( PMMG_pParMesh parmesh ) if ( parmesh->info.imprim0 > PMMG_VERB_ITWAVES && !parmesh->info.iso && parmesh->iter>0 ) { assert ( parmesh->listgrp[0].met->m ); - PMMG_prilen(parmesh,1,0); + PMMG_prilen(parmesh,1,0,parmesh->comm); } PMMG_CLEAN_AND_RETURN(parmesh,ier_end); diff --git a/src/libparmmg_tools.c b/src/libparmmg_tools.c index a051ef98..6e5a16b3 100644 --- a/src/libparmmg_tools.c +++ b/src/libparmmg_tools.c @@ -31,32 +31,19 @@ */ #include "parmmg.h" -/*! Helper macro used only in this file: copies the contents of fromV[fromC] - * to toV[toC] updates toC */ -#define ARGV_APPEND(parmesh,fromV,toV,fromC,toC,msg,on_failure) do { \ - PMMG_MALLOC(parmesh, toV[ toC ], strlen( fromV[ fromC ] ) + 1, char, msg, \ - on_failure); \ - memcpy( toV[ toC ], fromV[ fromC ], (strlen( fromV[ fromC ] ) + 1)*sizeof(char) ); \ - ++toC; \ - }while(0) - -/** - * \param parmesh pointer to pmmg structure - * \param mmgArgv pointer to argv like buffer - * \param mmgArgc pointer to argc like buffer - * \param argc actual argc value - * - * Free the allocations of the custom created argc/argv wrapper that is passed - * to mmg to parse the command line options - */ -static void -PMMG_argv_cleanup( PMMG_pParMesh parmesh, char **mmgArgv, int mmgArgc, int argc ) -{ - int i; - for ( i = 0; i < mmgArgc; ++i ) - PMMG_DEL_MEM(parmesh, mmgArgv[i],char, "Deallocating mmgargv[i]: " ); - PMMG_DEL_MEM(parmesh, mmgArgv,char*, "Deallocating mmgargv: " ); -} +#define PMMG_UNRECOGNIZED_ARG \ + do { \ + PMMG_ERROR_ARG("\nUnrecognized option %s\n",pmmgArgv,i); \ + } while(0) + +#define PMMG_ERROR_ARG(mess,argv_s,i) \ + do { \ + RUN_ON_ROOT_AND_BCAST( \ + fprintf(stderr,mess,argv_s[i]) && \ + fprintf(stderr,"Please, run %s -h command to get help.\n",argv_s[0]) && \ + 0 ,parmesh->info.root,parmesh->myrank, \ + ret_val = 0;goto clean ); \ + } while(0) int PMMG_defaultValues( PMMG_pParMesh parmesh ) { @@ -119,8 +106,16 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog ) fprintf(stdout,"-in file input triangulation\n"); fprintf(stdout,"-out file output triangulation\n"); fprintf(stdout,"-sol file load level-set, displacement or metric file\n"); + fprintf(stdout,"-met file load metric file\n"); fprintf(stdout,"-field file load sol field to interpolate from init onto final mesh\n"); + fprintf(stdout,"-f file load parameter file\n"); + fprintf(stdout,"-noout do not write output triangulation\n"); + fprintf(stdout,"-centralized-output centralized output (Medit format only)\n"); + fprintf(stdout,"-distributed-output distributed output (Medit format only)\n"); + + fprintf(stdout,"\n** Mode specifications (mesh adaptation by default)\n"); + fprintf(stdout,"-ls val create mesh of isovalue val (0 if no argument provided)\n"); fprintf(stdout,"\n** Parameters\n"); fprintf(stdout,"-niter val number of remeshing iterations\n"); @@ -129,6 +124,7 @@ int PMMG_usage( PMMG_pParMesh parmesh, char * const prog ) fprintf(stdout,"-nlayers val number of layers for interface displacement\n"); fprintf(stdout,"-groups-ratio val 
allowed imbalance between current and desired groups size\n"); fprintf(stdout,"-nobalance switch off load balancing of the output mesh\n"); + fprintf(stdout,"-pure-partitioning perform only mesh partitioning (no level-set insertion or remeshing"); //fprintf(stdout,"-ar val angle detection\n"); //fprintf(stdout,"-nr no angle detection\n"); @@ -172,110 +168,172 @@ int PMMG_parsar( int argc, char *argv[], PMMG_pParMesh parmesh ) { int val,i = 0; int ret_val = 1; - int mmgArgc = 0; - char** mmgArgv = NULL; + int mmgArgc = 0, pmmgArgc = 0; + char **mmgArgv = NULL,**pmmgArgv = NULL; assert ( parmesh->ngrp == 1 && "Not available for more than 1 group per proc.\n"); - /** Parse arguments specific to parMmg then add to mmgArgv the mmg arguments - * and call the mmg3d parser. */ + /** First step: search if user want to see the default parameters values or is + * asking for help */ for ( i = 1; i < argc; ++i ) { if ( !strcmp( argv[ i ],"-val" ) ) { - RUN_ON_ROOT_AND_BCAST( PMMG_defaultValues(parmesh),0, - parmesh->myrank,ret_val=0; goto fail_mmgargv); - ret_val = 0; - goto fail_mmgargv; + RUN_ON_ROOT_AND_BCAST( (PMMG_defaultValues(parmesh) && 0),0, + parmesh->myrank,ret_val=0; goto clean); } else if ( ( !strcmp( argv[ i ],"-?" ) ) || ( !strcmp( argv[ i ],"-h" ) ) ) { - RUN_ON_ROOT_AND_BCAST( PMMG_usage(parmesh, argv[0]),0, - parmesh->myrank,ret_val=0; goto fail_mmgargv); - ret_val = 0; - goto fail_mmgargv; + RUN_ON_ROOT_AND_BCAST( (PMMG_usage(parmesh, argv[0]) && 0),0, + parmesh->myrank,ret_val=0; goto clean); } } + + /** Second step: intercept ParMmg args that exists in Mmg but asks for a + * specific treatment ( m, v, d) */ + /* Create a new set of argc/argv variables adding only the the cl options that mmg has to process Overallocating as they are at most argc. 
Trying to avoid the overallocation is not worth any effort, these are ~kb */ - PMMG_MALLOC(parmesh, mmgArgv, argc, char*, " copy of argv for mmg: ", - ret_val = 0; goto fail_mmgargv); + MMG5_SAFE_MALLOC( mmgArgv, argc, char*,ret_val = 0; goto clean); /* First argument is always argv[0] ie prog name */ i = 0; - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, " mmgArgv[0] for mmg: ", - ret_val = 0; goto fail_proc); + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); i = 1; while ( i < argc ) { if ( *argv[i] == '-' ) { switch( argv[i][1] ) { - case 'c': - if ( !strcmp(argv[i],"-centralized-output") ) { - /* force centralized output: only relevant using medit distributed - * input or library call */ - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_distributedOutput,0) ) { + case 'm': + if ( !strcmp(argv[i],"-m") ) { + /* memory */ + if ( ++i < argc && isdigit( argv[i][0] ) ) { + if ( ( atoi(argv[ i ]) > MMG5_memSize() ) || ( atoi(argv[ i ]) < 0 ) ) { + fprintf( stderr, + "\nErroneous mem size requested (%s)\n",argv[i] ); + ret_val = 0; + goto clean; + } + else { + parmesh->info.mem = atoi( argv[i] ); + PMMG_parmesh_SetMemGloMax( parmesh ); + } + PMMG_parmesh_SetMemMax( parmesh ); + } else { + PMMG_ERROR_ARG("\nMissing argument option %s\n",argv,i-1); + } + } + else { + /* Arg starts by '-m' but doesn't have to be intercepted: Append to + * list of args to send to Mmg */ + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); + } + break; + + case 'd': + if ( !strcmp(argv[i],"-d") ) { + /* debug */ + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_debug,1) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + /* Arg starts by '-d' but doesn't have to be intercepted: Append to + * list of args to send to Mmg */ + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); } break; - case 'f': - if ( !strcmp(argv[i],"-field") ) { - if ( ++i < argc && isascii(argv[i][0]) && argv[i][0]!='-' ) { - if ( ! 
PMMG_Set_inputSolsName(parmesh,argv[i]) ) { - RUN_ON_ROOT_AND_BCAST( PMMG_usage(parmesh, argv[0]),0, - parmesh->myrank,ret_val=0; goto fail_mmgargv); + case 'v': /* verbosity */ + if ( !strcmp(argv[i],"-v") ) { + if ( ++i < argc && ( isdigit(argv[i][0]) || + (argv[i][0]=='-' && isdigit(argv[i][1])) ) ) { + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_verbose,atoi(argv[i])) ) { ret_val = 0; - goto fail_mmgargv; + goto clean; } } else { - RUN_ON_ROOT_AND_BCAST( PMMG_usage(parmesh, argv[0]),0, - parmesh->myrank,ret_val=0; goto fail_mmgargv); - ret_val = 0; - goto fail_mmgargv; + i--; + PMMG_ERROR_ARG("\nMissing argument option for %s\n",mmgArgv,i); } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + /* Arg starts by '-v' but doesn't have to be intercepted: Append to + * list of args to send to Mmg */ + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); } break; - case 'h': - if ( !strcmp(argv[i],"-hmin") && ++i < argc ) { - if ( !PMMG_Set_dparameter(parmesh,PMMG_DPARAM_hmin,atof(argv[i])) ) { + default: + /* Arg starts by '-' but doesn't have to be intercepted: Append to list + * of args to send to Mmg */ + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); + break; + } + } + else { + /* Arg doesn't start with '-': Append to list of args to send to Mmg */ + MMG_ARGV_APPEND(argv, mmgArgv, i, mmgArgc,ret_val = 0; goto clean); + } + ++i; + } + + /** Third step: Let Mmg parse args it knows among remaining and append in + pmmgArgv structure unknown ones */ + MMG5_SAFE_MALLOC( pmmgArgv, mmgArgc, char*,ret_val = 0; goto clean); + + i = 0; + + MMG_ARGV_APPEND(argv, pmmgArgv, i, pmmgArgc,ret_val = 0; goto clean); + MMG5_pMesh mesh = parmesh->listgrp[0].mesh; + MMG5_pSol met = parmesh->listgrp[0].met; + MMG5_pSol sol = parmesh->listgrp[0].ls; // Ok for now as // disp is not planned + MMG3D_storeknownar(mmgArgc,mmgArgv,mesh,met,sol,&pmmgArgc,pmmgArgv); + + /** Fourth step: parse remaining args with parmmg */ + i = 1; + while ( i < pmmgArgc ) { + if ( *pmmgArgv[i] == '-' ) { + switch( pmmgArgv[i][1] ) { + case 'c': + if ( !strcmp(pmmgArgv[i],"-centralized-output") ) { + /* force centralized output: only relevant using medit distributed + * input or library call */ + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_distributedOutput,0) ) { ret_val = 0; - goto fail_proc; + goto clean; } } - else if ( !strcmp(argv[i],"-hmax") && ++i < argc ) { - if ( !PMMG_Set_dparameter(parmesh,PMMG_DPARAM_hmax,atof(argv[i])) ) { - ret_val = 0; - goto fail_proc; + else { + PMMG_UNRECOGNIZED_ARG; + } + break; + case 'f': + if ( !strcmp(pmmgArgv[i],"-field") ) { + if ( ++i < pmmgArgc && isascii(pmmgArgv[i][0]) && pmmgArgv[i][0]!='-' ) { + if ( ! 
PMMG_Set_inputSolsName(parmesh,pmmgArgv[i]) ) { + fprintf(stderr,"\nUnable to set filename for %s\n",pmmgArgv[i-1]); + ret_val = 0; + goto clean; + } + } + else { + PMMG_ERROR_ARG("\nMissing filename for %s\n",pmmgArgv,i-1); } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + PMMG_UNRECOGNIZED_ARG; } break; case 'g': - if ( !strcmp(argv[i],"-groups-ratio") ) { + if ( !strcmp(pmmgArgv[i],"-groups-ratio") ) { - if ( ++i < argc ) { - if ( isdigit(argv[i][0]) ) { + if ( ++i < pmmgArgc ) { + if ( isdigit(pmmgArgv[i][0]) ) { - if ( !PMMG_Set_dparameter(parmesh,PMMG_DPARAM_groupsRatio,atof(argv[i])) ) { + if ( !PMMG_Set_dparameter(parmesh,PMMG_DPARAM_groupsRatio,atof(pmmgArgv[i])) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { @@ -283,30 +341,25 @@ int PMMG_parsar( int argc, char *argv[], PMMG_pParMesh parmesh ) } } else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); - ret_val = 0; - goto fail_proc; + PMMG_ERROR_ARG("\nMissing argument option %s\n",pmmgArgv,i-1); } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + PMMG_UNRECOGNIZED_ARG; } break; case 'm': - if ( !strcmp(argv[i],"-mmg-v") ) { - + if ( !strcmp(pmmgArgv[i],"-mmg-v") ) { /* Mmg verbosity */ - if ( ++i < argc ) { - if ( isdigit(argv[i][0]) || - (argv[i][0]=='-' && isdigit(argv[i][1])) ) { - val = atoi(argv[i]); + if ( ++i < pmmgArgc ) { + if ( isdigit(pmmgArgv[i][0]) || + (pmmgArgv[i][0]=='-' && isdigit(pmmgArgv[i][1])) ) { + val = atoi(pmmgArgv[i]); if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_mmgVerbose,val) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { @@ -314,256 +367,329 @@ int PMMG_parsar( int argc, char *argv[], PMMG_pParMesh parmesh ) } } else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); - ret_val = 0; - goto fail_proc; + PMMG_ERROR_ARG("\nMissing argument option %s\n",pmmgArgv,i-1); } } - else if ( !strcmp(argv[i],"-mesh-size") ) { + else if ( !strcmp(pmmgArgv[i],"-mesh-size") ) { /* Remesher target mesh size */ - if ( ++i < argc ) { - if ( isdigit(argv[i][0]) ) { - val = atoi(argv[i]); - - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_meshSize,val) ) { - ret_val = 0; - goto fail_proc; - } - } - else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); + if ( ++i < pmmgArgc && isdigit(pmmgArgv[i][0]) ) { + val = atoi(pmmgArgv[i]); + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_meshSize,val) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); - ret_val = 0; - goto fail_proc; + PMMG_ERROR_ARG("\nMissing argument option %s\n",pmmgArgv,i-1); } } - else if ( !strcmp(argv[i],"-metis-ratio") ) { + else if ( !strcmp(pmmgArgv[i],"-metis-ratio") ) { /* Number of metis super nodes per mesh */ - if ( ++i < argc ) { - if ( isdigit(argv[i][0]) ) { - val = atoi(argv[i]); + if ( ++i < pmmgArgc && isdigit(pmmgArgv[i][0]) ) { + val = atoi(pmmgArgv[i]); - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_metisRatio,val) ) { - ret_val = 0; - goto fail_proc; - } - } - else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_metisRatio,val) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); - ret_val = 0; - goto fail_proc; + PMMG_ERROR_ARG("\nMissing argument option 
%s\n",pmmgArgv,i-1); } } - else if ( !strcmp(argv[i],"-mmg-d") ) { - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_mmgDebug,val) ) { - ret_val = 0; - goto fail_proc; - } - } - else if ( !strcmp(argv[i],"-m") ) { - /* memory */ - if ( ++i < argc && isdigit( argv[i][0] ) ) { - if ( ( atoi(argv[ i ]) > MMG5_memSize() ) || ( atoi(argv[ i ]) < 0 ) ) { - fprintf( stderr, - "\nErroneous mem size requested (%s)\n",argv[i] ); - ret_val = 0; - goto fail_proc; - } - else { - parmesh->info.mem = atoi( argv[i] ); - PMMG_parmesh_SetMemGloMax( parmesh ); - } - PMMG_parmesh_SetMemMax( parmesh ); - } else { - fprintf( stderr, "\nMissing argument option %c\n", argv[i-1][1] ); + else if ( !strcmp(pmmgArgv[i],"-mmg-d") ) { + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_mmgDebug,1) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else { - /* else : what happens with -met option... to treat */ - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + PMMG_UNRECOGNIZED_ARG; } break; case 'n': /* number of adaptation iterations */ - if ( ( 0 == strncmp( argv[i], "-niter", 5 ) ) && ( ( i + 1 ) < argc ) ) { + if ( ( 0 == strncmp( pmmgArgv[i], "-niter", 5 ) ) && ( ( i + 1 ) < pmmgArgc ) ) { ++i; - if ( isdigit( argv[i][0] ) && ( atoi( argv[i] ) >= 0 ) ) { - parmesh->niter = atoi( argv[i] ); + if ( isdigit( pmmgArgv[i][0] ) && ( atoi( pmmgArgv[i] ) >= 0 ) ) { + parmesh->niter = atoi( pmmgArgv[i] ); } else { parmesh->niter = PMMG_NITER; fprintf( stderr, - "\nWrong number of adaptation iterations (%s).\n",argv[i]); + "\nWrong number of adaptation iterations (%s).\n",pmmgArgv[i]); ret_val = 0; - goto fail_proc; + goto clean; } - } else if ( ( 0 == strncmp( argv[i], "-nlayers", 5 ) ) && ( ( i + 1 ) < argc ) ) { + } else if ( ( 0 == strncmp( pmmgArgv[i], "-nlayers", 5 ) ) && ( ( i + 1 ) < pmmgArgc ) ) { ++i; - if ( isdigit( argv[i][0] ) && ( atoi( argv[i] ) > 0 ) ) { - parmesh->info.ifc_layers = atoi( argv[i] ); + if ( isdigit( pmmgArgv[i][0] ) && ( atoi( pmmgArgv[i] ) > 0 ) ) { + parmesh->info.ifc_layers = atoi( pmmgArgv[i] ); } else { parmesh->info.ifc_layers = PMMG_MVIFCS_NLAYERS; fprintf( stderr, - "\nWrong number of layers for interface displacement (%s).\n",argv[i]); + "\nWrong number of layers for interface displacement (%s).\n",pmmgArgv[i]); ret_val = 0; - goto fail_proc; + goto clean; } - } else if ( 0 == strncmp( argv[i], "-nobalance", 9 ) ) { + } else if ( 0 == strncmp( pmmgArgv[i], "-nobalance", 9 ) ) { parmesh->info.nobalancing = MMG5_ON; - } else if ( 0 == strncmp( argv[i], "-nofem", 5 ) ) { - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_nofem,1) ) { - ret_val = 0; - goto fail_proc; - } - } else if ( 0 == strncmp( argv[i], "-noout", 5 ) ) { + } else if ( 0 == strncmp( pmmgArgv[i], "-noout", 5 ) ) { parmesh->info.fmtout = PMMG_UNSET; - } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + } + else { + PMMG_UNRECOGNIZED_ARG; } break; case 'd': - if ( !strcmp(argv[i],"-distributed-output") ) { + if ( !strcmp(pmmgArgv[i],"-distributed-output") ) { /* force distributed output: only relevant using medit centralized * input or library call */ if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_distributedOutput,1) ) { ret_val = 0; - goto fail_proc; - } - } - else if ( !strcmp(argv[i],"-d") ) { - /* debug */ - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_debug,1) ) { - ret_val = 0; - goto fail_proc; + goto clean; } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " 
adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + PMMG_UNRECOGNIZED_ARG; } break; -#ifdef USE_SCOTCH - case 'r': - if ( !strcmp(argv[i],"-rn") ) { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); - } - break; -#endif - case 's': - if ( 0 == strncmp( argv[i], "-surf", 4 ) ) { - parmesh->listgrp[0].mesh->info.nosurf = 0; + case 'p': + if ( !strcmp(pmmgArgv[i],"-pure-partitioning") ) { + /* Only perform partitionning of intput data */ + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_purePartitioning,1) ) { + ret_val = 0; + goto clean; + } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc ); + PMMG_UNRECOGNIZED_ARG; } break; + case 'v': /* verbosity */ - if ( ++i < argc ) { - if ( isdigit(argv[i][0]) || - (argv[i][0]=='-' && isdigit(argv[i][1])) ) { - if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_verbose,atoi(argv[i])) ) { + if ( ++i < pmmgArgc ) { + if ( isdigit(pmmgArgv[i][0]) || + (pmmgArgv[i][0]=='-' && isdigit(pmmgArgv[i][1])) ) { + if ( !PMMG_Set_iparameter(parmesh,PMMG_IPARAM_verbose,atoi(pmmgArgv[i])) ) { ret_val = 0; - goto fail_proc; + goto clean; } } else i--; } else { - fprintf(stderr,"\nMissing argument option %c\n",argv[i-1][1]); + fprintf(stderr,"\nMissing argument option %s\n",pmmgArgv[i-1]); ret_val = 0; - goto fail_proc; + goto clean; } break; default: - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc); - + PMMG_UNRECOGNIZED_ARG; break; } } else { - ARGV_APPEND(parmesh, argv, mmgArgv, i, mmgArgc, - " adding to mmgArgv for mmg: ", - ret_val = 0; goto fail_proc); + if ( parmesh->meshin == NULL && mesh->namein == NULL ) { + if ( !PMMG_Set_inputMeshName(parmesh,pmmgArgv[i]) ) { + ret_val = 0; + goto clean; + } + } + else if ( parmesh->meshout == NULL && mesh->nameout == NULL ) { + if ( !PMMG_Set_outputMeshName(parmesh,pmmgArgv[i]) ) { + ret_val = 0; + goto clean; + } + } + else { + PMMG_ERROR_ARG("\nArgument %s ignored\n",pmmgArgv,i); + } } ++i; } - // parmmg finished parsing arguments, the rest will be handled by mmg3d - if ( 1 != MMG3D_parsar( mmgArgc, mmgArgv, - parmesh->listgrp[0].mesh, - parmesh->listgrp[0].met, - NULL ) ) { + /** Step 5: Transfer options parsed by Mmg toward ParMmg (if needed) and raise + * errors for unsupported options */ + parmesh->info.iso = parmesh->listgrp[0].mesh->info.iso; + parmesh->info.setfem = parmesh->listgrp[0].mesh->info.setfem; + parmesh->info.sethmin = parmesh->listgrp[0].mesh->info.sethmin; + parmesh->info.sethmax = parmesh->listgrp[0].mesh->info.sethmax; + + if ( parmesh->listgrp[0].mesh->info.isosurf ) { + + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr," ## Error: Splitting boundaries on isovalue not yet" + " implemented."); + } ret_val = 0; - goto fail_proc; + goto clean; + } + + if ( parmesh->listgrp[0].mesh->info.lag >=0 ) { + + if ( parmesh->myrank == parmesh->info.root ) { + fprintf(stderr," ## Error: Lagrangian motion not yet implemented."); + } + ret_val = 0; + goto clean; } if( parmesh->listgrp[0].mesh->info.opnbdy ) { - fprintf(stderr," ## Warning: Surface adaptation not supported with opnbdy." - "\nSetting nosurf on.\n"); - if ( !MMG3D_Set_iparameter(parmesh->listgrp[0].mesh,NULL,MMG3D_IPARAM_nosurf,1) ) return 0; + if ( parmesh->info.root == parmesh->myrank ) { + fprintf(stderr," ## Warning: Surface adaptation not supported with opnbdy." 
+ "\nSetting nosurf on.\n"); + } + if ( !MMG3D_Set_iparameter(parmesh->listgrp[0].mesh,NULL,MMG3D_IPARAM_nosurf,1) ) { + ret_val = 0; + goto clean; + } } - /* Store mesh names into the parmesh if needed */ - if ( !parmesh->meshin ) { - assert ( parmesh->listgrp[0].mesh->namein ); - PMMG_Set_name(parmesh,&parmesh->meshin, - parmesh->listgrp[0].mesh->namein,"mesh.mesh"); + /** Step 6: Sychronize parmesh and mesh file names */ + if ( parmesh->meshin ) { + /* Input mesh name provided without -in command line arg */ + assert ( mesh->namein ); } - if ( !parmesh->meshout ) { - assert ( parmesh->listgrp[0].mesh->nameout ); - PMMG_Set_name(parmesh,&parmesh->meshout, - parmesh->listgrp[0].mesh->nameout,"mesh.o.mesh"); + else { + if ( mesh->namein ) { + /* Input mesh name provided with -in command line arg */ + PMMG_Set_name(parmesh,&parmesh->meshin,mesh->namein,"mesh.mesh"); + } + else { + /* Input mesh name not provided */ + if ( parmesh->myrank==parmesh->info.root ) { + fprintf(stderr,"\nMissing input mesh name.\n"); + fprintf(stderr,"Please, run %s -h command to get help.\n",argv[0]); + } + ret_val = 0; + goto clean; + } } - if ( (!parmesh->metin) && parmesh->listgrp[0].met && parmesh->listgrp[0].met->namein ) { - PMMG_Set_name(parmesh,&parmesh->metin, - parmesh->listgrp[0].met->namein,"mesh.sol"); + + if ( parmesh->meshout ) { + /* Output mesh name provided without -out command line arg */ + assert ( mesh->nameout ); } - if ( (!parmesh->metout) && parmesh->listgrp[0].met && parmesh->listgrp[0].met->nameout ) { - PMMG_Set_name(parmesh,&parmesh->metout, - parmesh->listgrp[0].met->nameout,"mesh.o.sol"); + else { + if ( mesh->nameout ) { + /* Output mesh name provided with -out command line arg */ + PMMG_Set_name(parmesh,&parmesh->meshout,mesh->nameout,"mesh.o.mesh"); + } + else { + /* Output mesh name not provided */ + char *data; + MMG5_SAFE_CALLOC(data,strlen(parmesh->meshin)+3,char,return 0); + strncpy(data,parmesh->meshin,strlen(parmesh->meshin)+3); + + char *ext = MMG5_Get_filenameExt(data); + if ( ext && !strncmp ( ext,".h5",strlen(".h5") ) ) { + /* .h5 extension is unknown by Mmg: fix this */ + *ext = '\0'; + strcat(data,".o.h5"); + MMG5_Set_outputMeshName( mesh,data ); + } + else { + /* Let Mmg deal automatically with all other file formats */ + MMG5_Set_outputMeshName( mesh,"" ); + } + MMG5_SAFE_FREE(data); + + assert ( mesh->nameout ); + PMMG_Set_name(parmesh,&parmesh->meshout, + parmesh->listgrp[0].mesh->nameout,"mesh.o.mesh"); + } + } + + /* Metric and solution names are always directly parsed inside met, ls and + * disp field: in adaptation mode, if the metric name is provided using the + * -sol arg and if a ls/disp structure is allocated inside the parmesh (for + * now we deal only with the ls case), the metric name has been stored into + * the ls/disp name and we have to transfer it in the metric structure. 
*/ + if ( met->namein==NULL && + !(mesh->info.iso || mesh->info.isosurf || mesh->info.lag>=0) ) { + + if ( sol->namein ) { + /* A solution name has been provided using -sol option (facultative) */ + if ( !MMG3D_Set_inputSolName(mesh,met,sol->namein) ) { + RUN_ON_ROOT_AND_BCAST( (PMMG_usage(parmesh, argv[0]) && 0),0, + parmesh->myrank,ret_val=0; goto clean); + } + MMG5_DEL_MEM(mesh,sol->namein); + } } - if ( (!parmesh->lsin) && parmesh->listgrp[0].ls && parmesh->listgrp[0].ls->namein ) { + + /* If no input solution name has been parse, assign default name to the + * suitable data structure (metin in adp mode, lsin in ls mode, dispin in lag + * mode) */ + MMG5_pSol tmp = NULL; + if ( mesh->info.iso || mesh->info.isosurf ) { + tmp = parmesh->listgrp[0].ls; + } + else if ( mesh->info.lag >=0 ) { + tmp = parmesh->listgrp[0].disp; + } + else { + tmp = parmesh->listgrp[0].met; + } + assert ( tmp ); + + if ( tmp->namein == NULL ) { + if ( !MMG3D_Set_inputSolName(mesh,tmp,"") ) { + ret_val = 0; + goto clean; + } + } + + /* Assign default output metric name */ + if ( met->nameout == NULL ) { + if ( !MMG3D_Set_outputSolName(mesh,met,"") ) + return 0; + } + + /* Assign default output level-set name */ + if ( parmesh->listgrp[0].ls->nameout == NULL ) { + if ( !MMG3D_Set_outputSolName(mesh,parmesh->listgrp[0].ls,"") ) + return 0; + } + + /* Transfer solution names into the parmesh */ + assert ( !parmesh->metin ); + assert ( !parmesh->metout ); + assert ( !parmesh->lsin ); + assert ( !parmesh->lsout ); + assert ( !parmesh->dispin ); + + if ( met && met->namein ) { + PMMG_Set_name(parmesh,&parmesh->metin,met->namein,"mesh.sol"); + } + if ( parmesh->listgrp[0].ls && parmesh->listgrp[0].ls->namein ) { PMMG_Set_name(parmesh,&parmesh->lsin, parmesh->listgrp[0].ls->namein,"mesh.sol"); } - if ( (!parmesh->dispin) && parmesh->listgrp[0].disp && parmesh->listgrp[0].disp->namein ) { + if ( parmesh->listgrp[0].disp && parmesh->listgrp[0].disp->namein ) { PMMG_Set_name(parmesh,&parmesh->dispin, parmesh->listgrp[0].disp->namein,"mesh.sol"); } -fail_proc: - PMMG_argv_cleanup( parmesh, mmgArgv, mmgArgc, argc ); -fail_mmgargv: + if ( met && met->nameout ) { + PMMG_Set_name(parmesh,&parmesh->metout,met->nameout,"mesh.o.sol"); + } + + if ( parmesh->listgrp[0].ls && parmesh->listgrp[0].ls->nameout ) { + PMMG_Set_name(parmesh,&parmesh->lsout,parmesh->listgrp[0].ls->nameout,"mesh.o.sol"); + } + +clean: + MMG5_argv_cleanup( mmgArgv, mmgArgc ); + MMG5_argv_cleanup( pmmgArgv, pmmgArgc ); + return ret_val; } @@ -575,7 +701,9 @@ int PMMG_parsop ( PMMG_pParMesh parmesh ) MMG5_pMesh mesh; int ier; - assert ( parmesh->ngrp == 1 && "distributed input not yet implemented" ); + /* We may have ngrp=0 if distributed inputs have been provided on a different + * number of processes than the ones used for computation */ + assert ( parmesh->ngrp <= 1 && "more than one group per rank not implemented"); mesh = parmesh->listgrp[0].mesh; /* Set mmg verbosity to the max between the Parmmg verbosity and the mmg verbosity */ @@ -810,6 +938,8 @@ int PMMG_printCommunicator( PMMG_pParMesh parmesh,const char* filename ) { /** Step 3: file saving */ if ( !bin ) { + fprintf(fid,"\nNumberOfPartitions\n%d\n",parmesh->nprocs); + if( parmesh->info.API_mode == PMMG_APIDISTRIB_faces ) { ncomm = parmesh->next_face_comm; fprintf(fid,"\nParallelTriangleCommunicators\n%d\n",ncomm); @@ -892,3 +1022,13 @@ int PMMG_printCommunicator( PMMG_pParMesh parmesh,const char* filename ) { return 1; } + +int PMMG_Get_tetFromTria(PMMG_pParMesh parmesh, int ktri, int* ktet, int* 
iface ){ + assert ( parmesh->ngrp == 1 ); + return(MMG3D_Get_tetFromTria(parmesh->listgrp[0].mesh, ktri, ktet, iface)); +} + +int PMMG_Get_tetsFromTria(PMMG_pParMesh parmesh, int ktri, int ktet[2], int iface[2] ){ + assert ( parmesh->ngrp == 1 ); + return(MMG3D_Get_tetsFromTria(parmesh->listgrp[0].mesh, ktri, ktet, iface)); +} diff --git a/src/libparmmg_toolsf.c b/src/libparmmg_toolsf.c index ab99cee6..679dc060 100644 --- a/src/libparmmg_toolsf.c +++ b/src/libparmmg_toolsf.c @@ -58,3 +58,25 @@ FORTRAN_NAME(PMMG_PRINTCOMMUNICATOR,pmmg_printcommunicator, return; } + +/** + * See \ref PMMG_Get_tetFromTria function in \ref libarpmmg.h file. + */ +FORTRAN_NAME(PMMG_GET_TETFROMTRIA,pmmg_get_tetfromtria, + (PMMG_pParMesh *parmesh,int *ktri, int *ktet, int *iface,int *retval), + (parmesh,ktri,ktet,iface,retval)) { + + *retval = PMMG_Get_tetFromTria(*parmesh,*ktri,ktet,iface); + return; +} + +/** + * See \ref PMMG_Get_tetsFromTria function in \ref libparmmg.h file. + */ +FORTRAN_NAME(PMMG_GET_TETSFROMTRIA,pmmg_get_tetsfromtria, + (PMMG_pParMesh *parmesh,int *ktri, int ktet[2], int iface[2],int *retval), + (parmesh,ktri,ktet,iface,retval)) { + + *retval = PMMG_Get_tetsFromTria(*parmesh,*ktri,ktet,iface); + return; +} diff --git a/src/libparmmgtypes.h b/src/libparmmgtypes.h index 0421194c..c506d13c 100644 --- a/src/libparmmgtypes.h +++ b/src/libparmmgtypes.h @@ -32,7 +32,7 @@ #ifndef _LIBPARMMGTYPES_H #define _LIBPARMMGTYPES_H -#include "mmg/mmg3d/libmmgtypes.h" +#include "mmg/common/libmmgtypes.h" #include "pmmgversion.h" #include @@ -65,6 +65,13 @@ */ #define PMMG_FAILURE 4 +/** + * \def PMMG_TMPFAILURE + * + * Return failure not yet handled by developers: MPI_abort is called but should + * be removed with a clean error handling with no deadlocks. + */ +#define PMMG_TMPFAILURE PMMG_STRONGFAILURE /** * \def PMMG_ARG_start @@ -235,6 +242,16 @@ */ #define PMMG_UNSET -1 +/** + * \def PMMG_ON + */ +#define PMMG_ON 1 + +/** + * \def PMMG_OFF + */ +#define PMMG_OFF 0 + /** * \def PMMG_GAP * @@ -243,6 +260,36 @@ */ #define PMMG_GAP 0.2 +/** + * \enum PMMG_IO_entities + * \brief Type of mesh entities that are saved in/loaded from HDF5 files. 
+ */ +enum PMMG_IO_entities { + PMMG_IO_Vertex, + PMMG_IO_Edge, + PMMG_IO_Tria, + PMMG_IO_Quad, + PMMG_IO_Tetra, + PMMG_IO_Prism, + PMMG_IO_Corner, + PMMG_IO_RequiredVertex, + PMMG_IO_ParallelVertex, + PMMG_IO_Ridge, + PMMG_IO_RequiredEdge, + PMMG_IO_ParallelEdge, + PMMG_IO_RequiredTria, + PMMG_IO_ParallelTria, + PMMG_IO_RequiredQuad, + PMMG_IO_ParallelQuad, + PMMG_IO_RequiredTetra, + PMMG_IO_ParallelTetra, + PMMG_IO_Normal, + PMMG_IO_Tangent, + PMMG_IO_ENTITIES_size, // Number of type of entities that can be saved + PMMG_IO_Required, // To enable/disable saving of all type of required entites + PMMG_IO_Parallel // To enable/disable saving of all type of parallel entites +}; + /** * Types */ @@ -316,10 +363,13 @@ typedef struct { int imprim0; /*!< ParMmg verbosity of the zero rank */ int mem; /*!< memory asked by user */ int iso; /*!< ls mode (not yet available) */ + int isosurf; /*!< ls mode on boundaries only (not yet available) */ + int lag; /*!< lagrangian motion (not yet available) */ int root; /*!< MPI root rank */ - int fem; /*!< fem mesh (no elt with more than 1 bdy face */ + int setfem; /*!< fem mesh (no elt with more than 1 bdy face */ int mmg_imprim; /*!< 1 if the user has manually setted the mmg verbosity */ int repartitioning; /*!< way to perform mesh repartitioning */ + int pure_partitioning; /*!< enable/disable pure mesh partitioning mode */ int ifc_layers; /*!< nb of layers for interface displacement */ double grps_ratio; /*!< allowed imbalance ratio between current and demanded groups size */ int nobalancing; /*!< switch off final load balancing */ @@ -329,12 +379,33 @@ typedef struct { int target_mesh_size; /*!< target mesh size for Mmg */ int API_mode; /*!< use faces or nodes information to build communicators */ int globalNum; /*!< compute nodes and triangles global numbering in output */ + int globalVNumGot; /*!< have global nodes actually been calculated */ + int globalTNumGot; /*!< have global triangles actually been calculated */ int fmtout; /*!< store the output format asked */ + int io_entities[PMMG_IO_ENTITIES_size]; /* Array to store entities to save in some I/O formats */ int8_t sethmin; /*!< 1 if user set hmin, 0 otherwise (needed for multiple library calls) */ int8_t sethmax; /*!< 1 if user set hmin, 0 otherwise (needed for multiple library calls) */ - uint8_t inputMet; /* 1 if User prescribe a metric or a size law */ + uint8_t inputMet; /*!< 1 if User prescribe a metric or a size law */ + int npartin; /*!< nb of partitions of the mesh in the input HDF5 file */ + MPI_Comm read_comm; /*!< MPI comm containing the procs that read the mesh (HDF5 input) */ } PMMG_Info; +/** + * \struct PMMG_overlap + * \brief Overlap structure. 
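+ *
+ * An index-translation sketch (assumptions: the overlap entries are stored in
+ * \a parmesh->overlap and \a hash_in2out is addressed by the local point index
+ * on color_in, returning 0 when the point is not part of the overlap):
+ * \code{.c}
+ * PMMG_pOverlap ovlp   = &parmesh->overlap[i];      // overlap toward the remote color_out
+ * int           ip_out = ovlp->hash_in2out[ip_in];  // index of local point ip_in on color_out
+ * \endcode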
+ */ +typedef struct { + int color_in; /*!< Color of the hosting processor */ + int color_out; /*!< Color of the remote processor */ + int np_in2out; /*!< Nbr of points sends from color_in to color_out */ + int np_out2in; /*!< Nbr of points receives on color_in from color_out */ + int nt_in2out; /*!< Nbr of tetra sends from color_in to color_out */ + int nt_out2in; /*!< Nbr of tetra receives on color_in from color_out */ + int *hash_in2out; /*!< Hash table to find pts index on color_out from pts index on color_in */ + int *hash_out2in; /*!< Hash table to find pts index on color_in from pts index on color_out */ + +} PMMG_Overlap; +typedef PMMG_Overlap * PMMG_pOverlap; /** * \struct PMMG_ParMesh @@ -356,17 +427,19 @@ typedef struct { /* file names */ char *meshin,*meshout; char *metin,*metout; - char *lsin; + char *lsin,*lsout; char *dispin; char *fieldin,*fieldout; /* grp */ - int ngrp; /*!< Number of grp */ + int ngrp; /*!< Number of grp used inside the parmesh. It can + * differ from listgrp size (for example if inputs have + * been provided on a different number of processes than + * the ones used for computation) */ PMMG_pGrp listgrp; /*!< List of grp */ int nold_grp; /*!< Number of old grp */ PMMG_pGrp old_listgrp; /*!< List of old grp */ - /* internal communicators */ PMMG_pInt_comm int_node_comm; /*!< Internal node communicator (only one PMMG_Int_comm, it is not an array) */ PMMG_pInt_comm int_edge_comm; /*!< Internal edge communicator */ @@ -380,6 +453,9 @@ typedef struct { int next_face_comm; /*!< Number of external face communicator */ PMMG_pExt_comm ext_face_comm; /*!< External communicators (in increasing order w.r. to the remote proc index) */ + /* overlap variables */ + PMMG_pOverlap overlap; /*!< Overlap variables */ + /* global variables */ int ddebug; //! Debug level int iter; //! Current adaptation iteration diff --git a/src/linkedlist_pmmg.c b/src/linkedlist_pmmg.c index f682cd28..6c3f9ade 100644 --- a/src/linkedlist_pmmg.c +++ b/src/linkedlist_pmmg.c @@ -533,7 +533,13 @@ int PMMG_sort_iarray( PMMG_pParMesh parmesh,int *array1,int *array2,int *oldIdx, } /* Sort lists based on values in array2, in ascending order */ - qsort(cell,nitem,sizeof(PMMG_lnkdCell),PMMG_compare_cell2); + if ( nitem ) { + /* as memset, calling qsort on a non-allocatted array, leads to + * have a NULL pointer that is evaluated to True inside a if + * test */ + qsort(cell,nitem,sizeof(PMMG_lnkdCell),PMMG_compare_cell2); + } + /* Permute arrays and deallocate lists */ if ( array1 ) { diff --git a/src/loadbalancing_pmmg.c b/src/loadbalancing_pmmg.c index 0fff8180..c2c882c4 100644 --- a/src/loadbalancing_pmmg.c +++ b/src/loadbalancing_pmmg.c @@ -32,16 +32,19 @@ * */ #include "parmmg.h" +#include "mmgexterns_private.h" + /** * \param parmesh pointer toward a parmesh structure + * \param partitioning_mode strategy to use for repartitioning * * \return 1 if success, 0 if fail but we can save the meshes, -1 if we cannot. * * Load balancing of the mesh groups over the processors. * */ -int PMMG_loadBalancing(PMMG_pParMesh parmesh) { +int PMMG_loadBalancing(PMMG_pParMesh parmesh,int partitioning_mode) { MMG5_pMesh mesh; int ier,ier_glob,igrp,ne; mytime ctim[5]; @@ -83,11 +86,30 @@ int PMMG_loadBalancing(PMMG_pParMesh parmesh) { for ( igrp=0; igrp < parmesh->ngrp; igrp++ ) ne += parmesh->listgrp[igrp].mesh->ne; + int k; +#ifndef NDEBUG + for (k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. 
Invalid mesh.\n"); + return 0; + } + } +#endif + if ( ier ) { /** Split the ngrp groups of listgrp into a higher number of groups */ - ier = PMMG_split_n2mGrps(parmesh,PMMG_GRPSPL_DISTR_TARGET,1); + ier = PMMG_split_n2mGrps(parmesh,PMMG_GRPSPL_DISTR_TARGET,1,partitioning_mode); } +#ifndef NDEBUG + for ( k=0; kngrp; ++k ) { + if ( !MMG5_chkmsh(parmesh->listgrp[k].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + /* There is mpi comms in distribute_grps thus we don't want that one proc * enters the function and not the other proc */ MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, parmesh->comm); @@ -108,7 +130,18 @@ int PMMG_loadBalancing(PMMG_pParMesh parmesh) { chrono(ON,&(ctim[tim])); } - ier = PMMG_distribute_grps(parmesh); +#ifndef NDEBUG + int i; + for ( i=0; ingrp; ++i ) { + if ( !MMG5_chkmsh(parmesh->listgrp[i].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + + ier = PMMG_distribute_grps(parmesh,partitioning_mode); + if ( ier <= 0 ) { fprintf(stderr,"\n ## Group distribution problem.\n"); } @@ -130,12 +163,21 @@ int PMMG_loadBalancing(PMMG_pParMesh parmesh) { chrono(ON,&(ctim[tim])); } +#ifndef NDEBUG + for ( i=0; ingrp; ++i ) { + if ( !MMG5_chkmsh(parmesh->listgrp[i].mesh,1,1) ) { + fprintf(stderr," ## Problem. Invalid mesh.\n"); + return 0; + } + } +#endif + if ( ier ) { - /** Redistribute the ngrp groups of listgrp into a higher number of groups */ - ier = PMMG_split_n2mGrps(parmesh,PMMG_GRPSPL_MMG_TARGET,0); + /** Redistribute the ngrp groups of listgrp into a lower number of groups */ + ier = PMMG_split_n2mGrps(parmesh,PMMG_GRPSPL_MMG_TARGET,0,partitioning_mode); if ( ier<=0 ) fprintf(stderr,"\n ## Problem when splitting into a lower number of groups.\n"); - } + } // Algiane: Optim: is this reduce needed? MPI_Allreduce( &ier, &ier_glob, 1, MPI_INT, MPI_MIN, parmesh->comm); @@ -151,6 +193,15 @@ int PMMG_loadBalancing(PMMG_pParMesh parmesh) { } } +#ifndef NDEBUG + for (i=0; ingrp; ++i ) { + if ( !MMG5_chkmsh(parmesh->listgrp[i].mesh,1,1) ) { + fprintf(stderr," ## Problem. 
Invalid mesh.\n"); + return 0; + } + } +#endif + if ( parmesh->info.imprim > PMMG_VERB_DETQUAL ) { chrono(OFF,&(ctim[tim])); printim(ctim[tim].gdif,stim); diff --git a/src/locate_pmmg.c b/src/locate_pmmg.c index 5a1b1702..c5317914 100644 --- a/src/locate_pmmg.c +++ b/src/locate_pmmg.c @@ -536,6 +536,7 @@ int PMMG_locatePoint_foundConvex( MMG5_pMesh mesh,MMG5_pPoint ppt,int *kfound, int *adjt,l,i,k,kmin,updated; double hmin; +#warning Luca: check distance computation adjt = &mesh->adjt[3*(*kfound-1)+1]; hmin = *h; @@ -590,7 +591,7 @@ int PMMG_locatePointBdy( MMG5_pMesh mesh,MMG5_pPoint ppt, MMG5_pTria ptr,ptr1; int *adjt,j,i,k,k1,kprev,step,closestTria,stuck,backward; int iloc; - double vol,eps,h,closestDist; + double vol,eps,h=DBL_MAX,closestDist; static int mmgWarn0=0,mmgWarn1=0; int ier; @@ -726,6 +727,7 @@ int PMMG_locatePointBdy( MMG5_pMesh mesh,MMG5_pPoint ppt, * \param mesh pointer to the background mesh structure * \param ppt pointer to the point to locate * \param faceAreas oriented face areas of the all tetrahedra in the mesh + * \param barycoord barycentric coordinate of the closest point * \param idxTet pointer to the index of the found tetrahedron * \param closestTet pointer to the index of the closest tetrahedron * \param closestDist pointer to the distance from the closest tetrahedron diff --git a/src/ls_pmmg.c b/src/ls_pmmg.c new file mode 100644 index 00000000..80e510a7 --- /dev/null +++ b/src/ls_pmmg.c @@ -0,0 +1,2245 @@ +/* ============================================================================= +** This file is part of the parmmg software package for parallel tetrahedral +** mesh modification. +** Copyright (c) Bx INP/Inria/UBordeaux, 2017- +** +** parmmg is free software: you can redistribute it and/or modify it +** under the terms of the GNU Lesser General Public License as published +** by the Free Software Foundation, either version 3 of the License, or +** (at your option) any later version. +** +** parmmg is distributed in the hope that it will be useful, but WITHOUT +** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +** FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +** License for more details. +** +** You should have received a copy of the GNU Lesser General Public +** License and of the GNU General Public License along with parmmg (in +** files COPYING.LESSER and COPYING). If not, see +** . Please read their terms carefully and +** use this copy of the parmmg distribution only if you accept them. +** ============================================================================= +*/ + +/** + * \file ls_pmmg.c + * \brief Create implicit surface in distribuited mesh. + * \author Cécile Dobrzynski (Bx INP/Inria/UBordeaux) + * \author Algiane Froehly (InriaSoft) + * \author Laetitia Mottet (UBordeaux) + * \version 1 + * \copyright GNU Lesser General Public License. + * + * Functions to perform the level-set discretization in parallel + * + */ + +#include "parmmg.h" +#include "mmgexterns_private.h" + +/** + * \param hash pointer to the hash table of edges. + * \param a index of the first extremity of the edge. + * \param b index of the second extremity of the edge. + * \return the index of point stored along \f$[a;b]\f$. + * + * Find the index of point stored along \f$[a;b]\f$. + * \note In ParMmg, hash_pmmg.c: PMMG_hashGet_all gets k and s at the same time; + * \note PMMG_hashGet_all might be moved here if needed one day in mmg. 
+ * + */ +static inline +int PMMG_hashMark_splitEdge(MMG5_Hash *hash,MMG5_int a,MMG5_int b) { + MMG5_hedge *ph; + MMG5_int key; + MMG5_int ia,ib; + + if ( !hash->item ) return 0; + + ia = MG_MIN(a,b); + ib = MG_MAX(a,b); + key = (MMG5_KA*(int64_t)ia + MMG5_KB*(int64_t)ib) % hash->siz; + ph = &hash->item[key]; + + if ( !ph->a ) return 0; + if ( ph->a == ia && ph->b == ib ) return ph->k; + while ( ph->nxt ) { + ph = &hash->item[ph->nxt]; + if ( ph->a == ia && ph->b == ib ) return ph->k; + } + return 0; +} + +#ifndef NDEBUG +/* Checks over external communicators consistenc, involving comms, are not + * called */ +#define ERR_RULE return 0 + +#else +/* Checks over external communicators consistenc, involving comms, are called */ +#define ERR_RULE MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) + +#endif + +/** + * \param parmesh pointer toward a parmesh structure + * \param mesh pointer toward the mesh structure. + * \param sol pointer toward the level-set values. + * \param met pointer toward a metric (non-mandatory). + * + * \return 1 if success, 0 if fail. + * + * Proceed to discretization of the implicit function carried by sol into mesh, + * once values of sol have been snapped/checked + * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * + */ +int PMMG_cuttet_ls(PMMG_pParMesh parmesh){ + MMG5_pTetra pt,pt0; + MMG5_pxTetra pxt,pxt0; + MMG5_pPoint p0,p1; + MMG5_Hash hash; + + PMMG_pExt_comm ext_node_comm,ext_edge_comm,ext_face_comm; + PMMG_pInt_comm int_node_comm, int_edge_comm, int_face_comm; + PMMG_pGrp grp; + MMG5_pSol field; + MMG5_pSol psl; + MMG5_pMat mat; + MMG5_pMesh mesh; + MMG5_pSol met,sol; + + MMG5_int ne_init,ne_tmp; + MMG5_int i,j,k,k0; + MMG5_int ie,idx_tmp; + MMG5_int pos,pos_edge,pos_node,pos_face; + MMG5_int ip0,ip1,np,nb,ns; + MMG5_int src; + MMG5_int refext,refint,ref; + MMG5_int vGlobNum[4],vx[6]; + MMG5_int tetra_sorted[3], node_sorted[3]; + MMG5_int *ne_tmp_tab,*vGlobNum_tab; + + static int8_t mmgWarn = 0; + int8_t ia; + int8_t npneg,nface_added; + int8_t already_split; + + const uint8_t *taued=NULL; + uint8_t tau[4]; + uint8_t imin0,imin2; + + double c[3],v0,v1,s; + + int i_commf; + int ifac,iploc; + int flag; + int ier; + + int nitem_grp_node_firstalloc,nitem_grp_face_firstalloc; + int nitem_ext_face_init; + int next_node_comm,next_face_comm,next_edge_comm; + + int nitem_int_node, nitem_grp_node, nitem_ext_node; + int nitem_int_edge, nitem_grp_edge, nitem_ext_edge; + int nitem_int_face, nitem_grp_face, nitem_ext_face; + int nitem_grp_face_tmp; + + int color_in_node,color_out_node; + int color_in_edge,color_out_edge; + + int idx_edge_ext,idx_edge_int,idx_edge_mesh; + int idx_face_ext,idx_face_int,val_face; + + /* Ensure only one group on each proc */ + if ( !parmesh->ngrp ) { + return 1; + } + + assert ( parmesh->ngrp == 1 && "Implemented for 1 group per rank" ); + + mesh = parmesh->listgrp[0].mesh; + met = parmesh->listgrp[0].met; + sol = parmesh->listgrp[0].ls; + + /* Initialization */ + grp = &parmesh->listgrp[0]; + field = grp->field; + next_node_comm = parmesh->next_node_comm; // Number of communicator for nodes + next_edge_comm = parmesh->next_edge_comm; // Number of communicator for edges + next_face_comm = parmesh->next_face_comm; // Number of communicator for faces + nitem_grp_node = grp->nitem_int_node_comm; // Number of initial total nodes in internal node communicator + nitem_grp_edge = grp->nitem_int_edge_comm; + nitem_grp_face = grp->nitem_int_face_comm; // Number of initial total faces in internal 
node communicator + int_node_comm = parmesh->int_node_comm; // Internal node communicator + int_edge_comm = parmesh->int_edge_comm; // Internal edge communicator + int_face_comm = parmesh->int_face_comm; // Internal face communicator + + nitem_int_node = int_node_comm->nitem; + nitem_int_edge = int_edge_comm->nitem; + nitem_int_face = int_face_comm->nitem; + + ne_init = mesh->ne; // Initial number of tetra - before ls - needed in step 6.3 + + + /* Create an overlap to check if edges along the partition interfaces have to + * be split in multi-material mode (it allows to check if an edge belonging to + * a "nosplit" material on a partition belongs to a split material on another + * partition and thus has to be split) to maintains the mesh consistency. + */ + + // Remark: The overlap creation uses the point flags that are used in the + // current function too so it cannot be called further. + ier = 1; + if ( !PMMG_create_overlap(parmesh,parmesh->info.read_comm) ) { + ier = 0; + } + + /** STEP 1 - Reset flags */ + for (k=1; k<=mesh->np; k++) + mesh->point[k].flag = 0; + + for (k=1; k<=mesh->ne; k++) + mesh->tetra[k].flag = 0; + + /** STEP 2 - Approximate the number nb of intersection points on edges */ + nb = 0; + + /* Loop over tetra */ + for (k=1; k<=mesh->ne; k++) { + pt = &mesh->tetra[k]; + + if ( !MG_EOK(pt) ) { + continue; + } + + if ( pt->tag & MG_OVERLAP ) { + continue; + } + + /* Loop over edges */ + for (ia=0; ia<6; ia++) { + + /* Get the points defining the edges */ + ip0 = pt->v[MMG5_iare[ia][0]]; + ip1 = pt->v[MMG5_iare[ia][1]]; + p0 = &mesh->point[ip0]; + p1 = &mesh->point[ip1]; + + /* If both points have flag, then pass as these points have been treated */ + if ( p0->flag && p1->flag ) continue; + + /* Otherwise take the values at these points */ + v0 = sol->m[ip0]; + v1 = sol->m[ip1]; + + /* If the points are not already exactly on the level-set + and does not have the same sign, then this edge needs to be split */ + if ( fabs(v0) > MMG5_EPSD2 && fabs(v1) > MMG5_EPSD2 && v0*v1 < 0.0 ) { + /* If the points have not been treated yet, assign a new flag and increase nb value */ + if ( !p0->flag ) { + p0->flag = ++nb; + } + if ( !p1->flag ) { + p1->flag = ++nb; + } + } + } + } + if ( ier && !nb ) { + /* Succeed but no overlap to create */ + return ier; + } + + /* TODO:: test if the number of point proc by proc is correct */ + // Cannot be done here as it is an approximation. Otherwise, need to robustify step 2 above. 
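+  /* Local diagnostic that needs no communication (a sketch: any collective
+   * check, e.g. an MPI_Allreduce of nb over parmesh->comm, would have to be
+   * placed before the early return above to avoid deadlocks). */
+  if ( parmesh->ddebug ) {
+    fprintf(stdout,"   rank %d: approx. %d edges to split\n",
+            parmesh->myrank,(int)nb);
+  }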
+#ifndef NDEBUG + /* TODO */ +#endif + + /** STEP 3 - Memory allocation */ + /* STEP 3.1 - Initialize hash table for edges */ + if ( !MMG5_hashNew(mesh,&hash,nb,7*nb) ) { + ier = 0; + } + + /* STEP 3.2 - Realloc internal node communicators */ + PMMG_REALLOC(parmesh, grp->node2int_node_comm_index1, + nitem_grp_node+nitem_grp_edge, + nitem_grp_node, + int,"Allocation of node2int_node_comm_index1", + ier = 0); + PMMG_REALLOC(parmesh, grp->node2int_node_comm_index2, + nitem_grp_node+nitem_grp_edge, + nitem_grp_node, + int,"Allocation of node2int_node_comm_index2", + ier = 0); + nitem_grp_node_firstalloc = nitem_grp_node+nitem_grp_edge; + + /* STEP 3.3 - Realloc internal face communicators */ + PMMG_REALLOC(parmesh, grp->face2int_face_comm_index1, + 3*nitem_grp_face, + nitem_grp_face, + int,"Allocation of face2int_face_comm_index1", + ier = 0); + PMMG_REALLOC(parmesh, grp->face2int_face_comm_index2, + 3*nitem_grp_face, + nitem_grp_face, + int,"Allocation of face2int_face_comm_index2", + ier = 0); + nitem_grp_face_firstalloc = 3*nitem_grp_face; + + /* STEP 3.4 - Realloc external node communicator */ + for ( k=0; knext_node_comm; k++ ) { + ext_node_comm = &parmesh->ext_node_comm[k]; + nitem_ext_node = ext_node_comm->nitem; + PMMG_REALLOC(parmesh,ext_node_comm->int_comm_index, + nitem_ext_node+2*nb, + nitem_ext_node, + int,"Allocation of external node communicator", + ier = 0); + ext_node_comm->nitem_to_share = nitem_ext_node+2*nb; + } + + /* STEP 3.5 - Realloc external face communicator */ + for ( k=0; knext_face_comm; k++ ) { + ext_face_comm = &parmesh->ext_face_comm[k]; + nitem_ext_face = ext_face_comm->nitem; + PMMG_REALLOC(parmesh,ext_face_comm->int_comm_index, + nitem_ext_face+2*nitem_ext_face, + nitem_ext_face, + int,"Allocation of external face communicator", + ier = 0); + ext_face_comm->nitem_to_share = nitem_ext_face+2*nitem_ext_face; + } + + /* STEP 3.6 - Allocate all the other variables needed to be allocated ! */ + PMMG_CALLOC( parmesh,vGlobNum_tab,4*(nitem_grp_face),MMG5_int,"vGlobNum_tab",ier = 0 ); + PMMG_CALLOC( parmesh,ne_tmp_tab,nitem_grp_face+1,MMG5_int,"ne_tmp_tab",ier = 0 ); + + + if ( !ier ) { + /* One alloc has failed: Avoid segfault or deadlock */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /** STEP 4 - Identify required edges. Put hash.item[key].k = -1. This step + * assumes that the required tags are consistent through the partitions. */ + /* Loop over tetra */ + for (k=1; k<=mesh->ne; k++) { + pt = &mesh->tetra[k]; + if ( !MG_EOK(pt) ) continue; + + /* Check whether the tetra with reference ref should be split and, if yes, + * use the "s" field of the hashed edge to mark parallel edges that have to + * be split. + * + * If an edge or face is at the interface of 2 partitions and on one + * partition, the domain is marked as noSplit and on another, the domain is + * split, the overlap allows to mark the parallel edges that have to be + * split and to ensure the consistency of split along the interface. */ + int is_split = MMG5_isSplit(mesh,pt->ref,&refint,&refext); + + /** Step 4.1 - Identification of edges belonging to a required tet */ + /* Add required edges of required tetras to the hash table. The + overlap has to be ignored because we don't want to hash the edges of the + overlap and interface edges are marked as required so they will be added + to the hash table in the next step. 
*/ + if ( (pt->tag & MG_REQ) && !(pt->tag & MG_OVERLAP) ) { + np = -1; + /* Loop over the edges */ + for (ia=0; ia<6; ia++) { + ip0 = pt->v[MMG5_iare[ia][0]]; + ip1 = pt->v[MMG5_iare[ia][1]]; + + /* Add an edge to the edge table with hash.item[key].k = -1 */ + if ( !MMG5_hashEdge(mesh,&hash,ip0,ip1,np) ) { + ier = -1; + } + } + continue; + } + + /** Step 4.2 - Identification of edges belonging to a (par)boundary or being + * explicitely required. Here overlap tetra are automatically ignored + * because the xt field is not transferred in the overlap. */ + /* If the xtetra associated to this tetra exists */ + if ( !pt->xt ) continue; + + /* Point towards the xtetra corresponding to the tetra... */ + pxt = &mesh->xtetra[pt->xt]; + /* ... then loop over the faces */ + for (ia=0; ia<4; ia++) { + + /* (a) If the face is not a boundary MG_BDY, then continue */ + if ( !(pxt->ftag[ia] & MG_BDY) ) continue; + + /* (a) otherwise loop over the edges */ + for (j=0; j<3; j++) { + + /* (b) If the edge is not required, then continue */ + if ( !(pxt->tag[ MMG5_iarf[ia][j] ] & MG_REQ) ) continue; + + /* (b) otherwise get the extremity of the edge ... */ + ip0 = pt->v[MMG5_idir[ia][MMG5_inxt2[j]]]; + ip1 = pt->v[MMG5_idir[ia][MMG5_iprv2[j]]]; + np = -1; + + /* (c) ... and add an edge to the edge table with hash.item[key].k = -1 */ + if ( !MMG5_hashEdge(mesh,&hash,ip0,ip1,np) ) { + ier = -1; + } + } + } + } + + /** Use the overlap to check which parallel edges have to be split and store + * this info in the s field of the hash table. */ + for (k=1; k<=mesh->ne; k++) { + pt = &mesh->tetra[k]; + if ( !MG_EOK(pt) ) continue; + + /* Check whether the tetra with reference ref should be split and, if yes, + * use the "s" field of the hashed edge to mark parallel edges that have to + * be split. + * + * If an edge or face is at the interface of 2 partitions and on one + * partition, the domain is marked as noSplit and on another, the domain is + * split, the overlap allows to mark the parallel edges that have to be + * split and to ensure the consistency of split along the interface. */ + int is_split = MMG5_isSplit(mesh,pt->ref,&refint,&refext); + + if ( !is_split ) { + continue; + } + /* Loop over the edges and mark them as belonging to a "split" reference. */ + for (ia=0; ia<6; ia++) { + ip0 = pt->v[MMG5_iare[ia][0]]; + ip1 = pt->v[MMG5_iare[ia][1]]; + + /* Remark: only edges that already exist in the hash table are updated, + * thus only required edges are updated. As all parallel edges are + * required and already added to the hash table, we will be able to store + * the required info. In other cases (edges that doesn't exist in the hash + * table), the PMMG_hashUpdate_s function return PMMG_FAILURE, which is + * expected and harmless as we will not need to check if the edge is split + * or not. */ + PMMG_hashUpdate_s(&hash,ip0,ip1,1); + } + } + + + /* Delete overlap */ + if ( !PMMG_delete_overlap(parmesh,parmesh->info.read_comm) ) { + ier = MG_MIN(ier,0); + } + + + if ( ier < 1 ) { + /* Avoid deadlock in comms in build_edgeComm */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /** STEP 5 - Create points at iso-value. 
Fill or update edge hash table */ + /** STEP 5.1 - Create new points located on parallel interfaces */ + /* Internal edge communicator - intvalues stores: + - point position in the shared buffer of points (as in node2int_node_comm_index2) + if the edge is split + - otherwise, -1 + */ + PMMG_CALLOC(parmesh,int_edge_comm->intvalues,nitem_int_edge,int, + "int_edge_comm intvalues", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + + /* Loop on the internal edge communicator */ + for (i=0; i < nitem_grp_edge; i++) { + + ie = grp->edge2int_edge_comm_index1[i]; // id of edge + pos = grp->edge2int_edge_comm_index2[i]; // position in int_edge_comm->intvalues + int_edge_comm->intvalues[pos] = -1; // initialization at -1 + + /* Find extremities of this edge */ + ip0 = mesh->edge[ie].a; + ip1 = mesh->edge[ie].b; + + MMG5_int dummy,is_split; + if ( PMMG_SUCCESS != PMMG_hashGet_all(&hash,ip0,ip1,&dummy,&is_split) ) { + ier = 0; + } + + if ( !is_split ) { + /* The parallel edge belongs to a domain that is not split */ + continue; + } + + /* STEP 5.1.1 - Create a new point if this edge needs to be split */ + /* Check the ls value at the edge nodes */ + p0 = &mesh->point[ip0]; + p1 = &mesh->point[ip1]; + v0 = sol->m[ip0]; + v1 = sol->m[ip1]; + + /* Check if the edge should be split */ + /* If one of the points is exactly on the level set, the point exists already, pass */ + if ( fabs(v0) < MMG5_EPSD2 || fabs(v1) < MMG5_EPSD2 ) continue; + /* If the points have the same sign, no need to split the edge, pass */ + else if ( MG_SMSGN(v0,v1) ) continue; + /* If one or the other point has never been treated, pass */ + else if ( !p0->flag || !p1->flag ) continue; + + /* Define the weighting factor */ + s = v0 / (v0-v1); + s = MG_MAX(MG_MIN(s,1.0-MMG5_EPS),MMG5_EPS); + + /* Find the coordinates of the new points using the weighting factor */ + c[0] = p0->c[0] + s*(p1->c[0]-p0->c[0]); + c[1] = p0->c[1] + s*(p1->c[1]-p0->c[1]); + c[2] = p0->c[2] + s*(p1->c[2]-p0->c[2]); + + /* Create a new point with coordinates c, tags MG_PARBDY+MG_NOSURF+MG_REQ + and source src. Return the new number of points np in this partition */ +#ifdef USE_POINTMAP + src = p0->src; +#else + src = 1; +#endif + np = MMG3D_newPt(mesh,c,MG_PARBDY+MG_NOSURF+MG_REQ,src); + + /* Update internal communicators of node and edge. 
For int edge comm + intvalues stores the point position in the shared buffer of point as in + node2int_node_comm_index2 */ + grp->node2int_node_comm_index1[nitem_grp_node]=np; // Add this new point in int node comm index1 + grp->node2int_node_comm_index2[nitem_grp_node]=nitem_int_node; // Position in int node comm + int_edge_comm->intvalues[pos] = nitem_int_node; // In int edge comm, assign position of node in int comm + nitem_int_node += 1; + nitem_grp_node += 1; + grp->nitem_int_node_comm = nitem_grp_node; + int_node_comm->nitem = nitem_int_node; + + /* STEP 5.1.2 - Update hash table, met, sol and field for the new point */ + /* Memory allocation for sol and met */ + if ( !np ) { + MMG5_int oldnpmax = mesh->npmax; + MMG3D_POINT_REALLOC(mesh,sol,np,MMG5_GAP, + fprintf(stderr,"\n ## Error: %s: unable to" + " allocate a new point\n",__func__); + MMG5_INCREASE_MEM_MESSAGE(); + ier = 0,c,0,src); + if( met && ier ) { + if( met->m ) { + MMG5_ADD_MEM(mesh,(met->size*(mesh->npmax-met->npmax))*sizeof(double), + "larger solution", + MMG5_SAFE_RECALLOC(mesh->point,mesh->npmax+1,oldnpmax+1,MMG5_Point,,); + mesh->memCur -= (mesh->npmax - oldnpmax)*sizeof(MMG5_Point); + mesh->npmax = oldnpmax; + mesh->np = mesh->npmax-1; + mesh->npnil = 0; + ier = 0 ); + + if ( ier ) { + MMG5_SAFE_REALLOC(met->m,met->size*(met->npmax+1), + met->size*(mesh->npmax+1), + double,"larger solution", + MMG5_SAFE_RECALLOC(mesh->point,mesh->npmax+1,oldnpmax+1,MMG5_Point,,); + mesh->memCur -= (mesh->npmax - oldnpmax)*sizeof(MMG5_Point); + mesh->npmax = oldnpmax; + mesh->np = mesh->npmax-1; + mesh->npnil = 0; + ier = 0); + } + met->npmax = mesh->npmax; + } + } + } + + if ( ier ) { + /* For this new point, add the value of the solution, i.e. the isovalue 0 */ + sol->m[np] = 0; + + /* If user provide a metric, interpolate it at the new point */ + if ( met && met->m ) { + if ( met->size > 1 ) { + ier = MMG3D_intmet33_ani_edge(met,ip0,ip1,np,s); + } + else { + ier = MMG5_intmet_iso_edge(met,ip0,ip1,np,s); + } + if ( ier <= 0 ) { + /* Unable to compute the metric */ + fprintf(stderr,"\n ## Error: %s: unable to" + " interpolate the metric during the level-set" + " discretization\n",__func__); + ier = 0; + } + } + + /* If user provide fields, interpolate them at the new point */ + if ( mesh->nsols ) { + for ( j=0; jnsols; ++j ) { + psl = field + j; + if ( field->size > 1 ) { + ier = MMG3D_intmet33_ani_edge(psl,ip0,ip1,np,s); + } + else { + ier = MMG5_intmet_iso_edge(psl,ip0,ip1,np,s); + } + if ( ier <= 0 ) { + /* Unable to compute fields */ + fprintf(stderr,"\n ## Error: %s: unable to" + " interpolate fields during the level-set" + " discretization\n",__func__); + ier = 0; + } + } + } + + /* Update hash table */ + // Remark: to call succesfully hashUpdate, the edge must already exist in + // the hash table + MMG5_hashUpdate(&hash,ip0,ip1,np); + } + + if ( !ier ) { + /* Avoid too long list of errors in case of failure */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + } + + /** STEP 5.2 - Update external node communicator */ + /* Loop over the external edge comm */ + for (i=0; i < next_edge_comm; i++) { + + /* Get external edge communicator information */ + ext_edge_comm = &parmesh->ext_edge_comm[i]; // External edge communicator + color_in_edge = ext_edge_comm->color_in; // Color of the hosting proc - this proc + color_out_edge = ext_edge_comm->color_out; // Color of the remote proc - the proc to exchange with + nitem_ext_edge = ext_edge_comm->nitem; // Nbr of edges in common between these 2 procs + + /* Loop over the edges in the 
external edge communicator */ + for (j=0; j < nitem_ext_edge; j++) { + + /* Get the position of the edge and node in internal communicators */ + pos_edge = ext_edge_comm->int_comm_index[j]; + pos_node = int_edge_comm->intvalues[pos_edge]; + + /* If pos_node < 0, this edge j is not split, so ignore it */ + if (pos_node < 0) continue; + + /* Update the external node communicator */ + /* Loop over the external node comm to find the appropriate one to be updated */ + for (k=0; k < next_node_comm; k++) { + ext_node_comm = &parmesh->ext_node_comm[k]; // External node communicator + color_in_node = ext_node_comm->color_in; // Color of the hosting proc - this proc + color_out_node = ext_node_comm->color_out; // Color of the remote proc - the proc to exchange with + assert(color_in_node == color_in_edge); // Ensure that the hosting proc is the same + + /* While color_out_node and color_out_edge are different, continue */ + if (color_out_node != color_out_edge) continue; + + /* If color_out of edge and node comm are the same - Update external node communicator */ + nitem_ext_node = ext_node_comm->nitem; // Initial nbr of nodes in common between these 2 procs + ext_node_comm->int_comm_index[nitem_ext_node] = pos_node; // Add the node to the external node comm + ext_node_comm->nitem = nitem_ext_node + 1; // Updated nbr of nodes in common between these 2 procs + break; + } + } + } + + /** STEP 5.3 - Create all the other new points located elsewhere and update hash table */ + /* Loop over tetra k */ + for (k=1; k<=mesh->ne; k++) { + pt = &mesh->tetra[k]; + if ( !MG_EOK(pt) ) continue; + + /* Loop over the edges ia */ + for (ia=0; ia<6; ia++) { + /* Find extremities of this edge and np (value stored in hash.item[key].k) */ + ip0 = pt->v[MMG5_iare[ia][0]]; + ip1 = pt->v[MMG5_iare[ia][1]]; + np = MMG5_hashGet(&hash,ip0,ip1); + + /* If np>0 (i.e. hash.item[key].k != [0;-1]), this edge has already been + * split or is required (user-required or //), pass to the next edge */ + if ( np>0 ) continue; + + + /* Check whether an entity with reference ref should be split */ + if ( !MMG5_isSplit(mesh,pt->ref,&refint,&refext) ) { + continue; + } + + /* STEP 5.3.1 - Create a new point if this edge needs to be split */ + /* Check the ls value at the edge nodes */ + p0 = &mesh->point[ip0]; + p1 = &mesh->point[ip1]; + v0 = sol->m[ip0]; + v1 = sol->m[ip1]; + + /* Check if the edge should be split */ + /* If one of the points is exactly on the level set, the point exists already, pass */ + if ( fabs(v0) < MMG5_EPSD2 || fabs(v1) < MMG5_EPSD2 ) continue; + /* If the points have the same sign, no need to split the edge, pass */ + else if ( MG_SMSGN(v0,v1) ) continue; + /* If one or the other point has never been treated, pass */ + else if ( !p0->flag || !p1->flag ) continue; + + /* If np is = -1; then npneg is = 1 */ + npneg = (np<0); + + /* Define the weighting factor */ + s = v0 / (v0-v1); + s = MG_MAX(MG_MIN(s,1.0-MMG5_EPS),MMG5_EPS); + + /* Find the coordinates of the new points using the weighting factor */ + c[0] = p0->c[0] + s*(p1->c[0]-p0->c[0]); + c[1] = p0->c[1] + s*(p1->c[1]-p0->c[1]); + c[2] = p0->c[2] + s*(p1->c[2]-p0->c[2]); + + /* Create a new point with coordinates c, tag 0 and source src. 
+ Return the new number of points np in this partition */ +#ifdef USE_POINTMAP + src = p0->src; +#else + src = 1; +#endif + np = MMG3D_newPt(mesh,c,0,src); + + /* STEP 5.3.2 - Update of met, sol and field for the new point */ + /* Memory allocation for sol and met */ + if ( !np ) { + MMG5_int oldnpmax = mesh->npmax; + MMG3D_POINT_REALLOC(mesh,sol,np,MMG5_GAP, + fprintf(stderr,"\n ## Error: %s: unable to" + " allocate a new point\n",__func__); + MMG5_INCREASE_MEM_MESSAGE(); + ier = 0 + ,c,0,src); + if( ier && met ) { + if( met->m ) { + MMG5_ADD_MEM(mesh,(met->size*(mesh->npmax-met->npmax))*sizeof(double), + "larger solution", + MMG5_SAFE_RECALLOC(mesh->point,mesh->npmax+1,oldnpmax+1,MMG5_Point,,); + mesh->memCur -= (mesh->npmax - oldnpmax)*sizeof(MMG5_Point); + mesh->npmax = oldnpmax; + mesh->np = mesh->npmax-1; + mesh->npnil = 0; + ier = 0); + + if ( ier ) { + MMG5_SAFE_REALLOC(met->m,met->size*(met->npmax+1), + met->size*(mesh->npmax+1), + double,"larger solution", + MMG5_SAFE_RECALLOC(mesh->point,mesh->npmax+1,oldnpmax+1,MMG5_Point,,); + mesh->memCur -= (mesh->npmax - oldnpmax)*sizeof(MMG5_Point); + mesh->npmax = oldnpmax; + mesh->np = mesh->npmax-1; + mesh->npnil = 0; + ier = 0); + met->npmax = mesh->npmax; + } + } + } + } + + if ( ier ) { + /* For this new point, add the value of the solution, i.e. the isovalue 0 */ + sol->m[np] = 0; + + /* If user provide a metric, interpolate it at the new point */ + if ( met && met->m ) { + if ( met->size > 1 ) { + ier = MMG3D_intmet33_ani(mesh,met,k,ia,np,s); + } + else { + ier = MMG5_intmet_iso(mesh,met,k,ia,np,s); + } + if ( ier <= 0 ) { + /* Unable to compute the metric */ + fprintf(stderr,"\n ## Error: %s: unable to" + " interpolate the metric during the level-set" + " discretization\n",__func__); + ier = 0; + } + } + + /* If user provide fields, interpolate them at the new point */ + if ( mesh->nsols ) { + for ( j=0; jnsols; ++j ) { + psl = field + j; + if ( field->size > 1 ) { + ier = MMG3D_intmet33_ani(mesh,psl,k,ia,np,s); + } + else { + ier = MMG5_intmet_iso(mesh,psl,k,ia,np,s); + } + if ( ier <= 0 ) { + /* Unable to compute fields */ + fprintf(stderr,"\n ## Error: %s: unable to" + " interpolate fields during the level-set" + " discretization\n",__func__); + ier = 0; + } + } + } + } + + if ( !ier ) { + /* Avoid too long list of errors in case of failure */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /* STEP 5.2.3 - Update edge hash table */ + /* If this edge is required, then inform the user it is split anyway + and update the hash: hash.item[key].k = - 1 becomes = np */ + if ( npneg ) { + /* We split a required edge */ + if ( !mmgWarn ) { + mmgWarn = 1; + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) { + fprintf(stderr," ## Warning: %s: the level-set intersect at least" + " one required entity. 
Required entity ignored.\n\n",__func__); + } + } + MMG5_hashUpdate(&hash,ip0,ip1,np); + } + /* Otherwise add the edge to be split into hash table */ + else { + MMG5_hashEdge(mesh,&hash,ip0,ip1,np); + } + } + } + + /** Check the internal node communicator */ + assert( PMMG_check_intNodeComm( parmesh ) ); + + /** Check the external node communicator */ + assert( PMMG_check_extNodeComm( parmesh,parmesh->info.read_comm ) ); + + /** STEP 6 - Split according to tets flags */ + /** STEP 6.1 - Compute global node vertices */ + if ( !PMMG_Compute_verticesGloNum( parmesh,parmesh->info.read_comm ) ) { + fprintf(stderr,"\n ## Error: impossible to compute node global numbering\n"); + } + + /** STEP 6.2 - Do the splitting for tetra on parallel interface */ + ns = 0; // Number of total split on this proc + ier = 1; // Error + idx_tmp = 0; // Index of an already split tetra to recover info stored in ne_tmp_tab and vGlobNum_tab + nitem_grp_face_tmp = nitem_grp_face; + + /* Allocate internal face comm */ + PMMG_CALLOC(parmesh,int_face_comm->intvalues,3*nitem_int_face,int, + "int_face_comm intvalues",ERR_RULE); + + /* Loop over the internal faces communicator */ + for (i=0; i < nitem_grp_face; i++) { + + /* Get the position of the face in internal communicator and the value of the face */ + val_face = grp->face2int_face_comm_index1[i]; + pos = grp->face2int_face_comm_index2[i]; + + /* Initialize interval face comm at -1 */ + int_face_comm->intvalues[3*pos] = -1; + int_face_comm->intvalues[3*pos+1] = -1; + int_face_comm->intvalues[3*pos+2] = -1; + + /* Find the local tetra, the face and node associated */ + k = val_face/12; // Index of the tetra on this proc + ifac = (val_face%12)/3; // Index of the face + iploc = (val_face%12)%3; // Index of the node + + /* Get the tetra k and xtetra associated */ + pt = &mesh->tetra[k]; + assert ( MG_EOK(pt) && "Invalid tetra stored in the communicator" ); + + pxt = &mesh->xtetra[pt->xt]; + + /* STEP 6.2.1 - Find global numbering and split pattern of the tetra */ + /* If the tetra has already a flag (flag !=0) - then it has already been processed */ + already_split = 0; + if ( pt->flag ) { + /* If flag>0, we need to update the face communicators; */ + if ( pt->flag > 0 ) { + already_split = 1; // Identify the tetra as already split + idx_tmp = pt->mark; // Index of ne_tmp stored in ne_tmp_tab + ne_tmp = ne_tmp_tab[idx_tmp]; // Old total number of tetras just after the split of this old tetra + /* Get global num of the old tetra vertices before the split */ + for (j=0; j<4; j++) { + vGlobNum[j] = vGlobNum_tab[idx_tmp*4+j]; + } + } + /* Otherwise, if flag equals to -1 (flag<0), the tetra does not need to be split. Pass to the next tetra. */ + else if ( pt->flag < 0 ) { + continue; + } + } + /* Otherwise, if flag=0, the tetra has never been processed. 
*/ + else { + /* Get the split pattern: loop over the edges, get hash.item[key].k */ + memset(vx,0,6*sizeof(MMG5_int)); + for (ia=0; ia<6; ia++) { + vx[ia] = MMG5_hashGet(&hash,pt->v[MMG5_iare[ia][0]],pt->v[MMG5_iare[ia][1]]); + + if ( vx[ia] > 0 ) { + MG_SET(pt->flag,ia); + } + + } + + /* Get and store global num of the tetra vertices */ + for (j=0; j<4; j++) { + vGlobNum[j] = mesh->point[pt->v[j]].tmp; + vGlobNum_tab[ns*4+j] = vGlobNum[j]; + } + } + + /* Get the split pattern stored in flag */ + flag = pt->flag; + + /* Initialize tetra_sorted and node_sorted at -1 */ + for ( j=0; j<3; ++j ) { + tetra_sorted[j] = -1; + node_sorted[j] = -1; + } + + /* STEP 6.2.2 - If not already done, split the tetra according to the flag */ + switch (flag) { + case 1: case 2: case 4: case 8: case 16: case 32: // 1 edge split + if (!already_split) { + ier = MMG5_split1(mesh,met,k,vx,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + mesh->tetra[k].mark = ns; // Split number to recover info later if needed + ne_tmp_tab[ns] = mesh->ne; // Total number of tetras after this split + ne_tmp = mesh->ne; // Total number of tetras after this split + ns++; // Incremente the total number of split + } + + /* Find the tetras and nodes defining the face ifac of tetra k */ + MMG3D_split1_cfg(flag,tau,&taued); // Compute tau + PMMG_split1_sort(mesh,k,ifac,tau,ne_tmp,tetra_sorted,node_sorted); // Find tetra_sorted and node_sorted + break; + + case 48: case 24: case 40: case 6: case 34: case 36: // 2 edges (same face) split + case 20: case 5: case 17: case 9: case 3: case 10: + if (!already_split) { + ier = MMG5_split2sf_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + mesh->tetra[k].mark = ns; // Split number to recover info later if needed + ne_tmp_tab[ns] = mesh->ne; // Total number of tetras after this split + ne_tmp = mesh->ne; // Total number of tetras after this split + ns++; // Incremente the total number of split + } + + /* Find the tetras and nodes defining the face ifac of tetra k */ + MMG3D_split2sf_cfg(flag,vGlobNum,tau,&taued,&imin0); // Compute tau and imin0 + PMMG_split2sf_sort(mesh,k,ifac,tau,imin0,ne_tmp,tetra_sorted,node_sorted); // Find tetra_sorted and node_sorted + break; + + case 7: case 25: case 42: case 52: // 3 edges on conic configuration split + if (!already_split) { + ier = MMG5_split3cone_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + mesh->tetra[k].mark = ns; // Split number to recover info later if needed + ne_tmp_tab[ns] = mesh->ne; // Total number of tetras after this split + ne_tmp = mesh->ne; // Total number of tetras after this split + ns++; // Incremente the total number of split + } + + /* Find the tetras and nodes defining the face ifac of tetra k */ + MMG3D_split3cone_cfg(flag,vGlobNum,tau,&taued,&imin0,&imin2); // Compute tau, imin0 and imin2 + PMMG_split3cone_sort(mesh,k,ifac,tau,imin0,imin2,ne_tmp,tetra_sorted,node_sorted); // Find tetra_sorted and node_sorted + break; + + case 30: case 45: case 51: // 4 edges on opposite configuration split + if (!already_split) { + ier = MMG5_split4op_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + mesh->tetra[k].mark = ns; // Split number to recover info later if needed + ne_tmp_tab[ns] = mesh->ne; // Total number of tetras after this split + ne_tmp = mesh->ne; // Total 
number of tetras after this split + ns++; // Incremente the total number of split + } + + /* Find the tetras and nodes defining the face ifac of tetra k */ + MMG3D_split4op_cfg(flag,vGlobNum,tau,&taued,&imin0,&imin2); // Compute tau, imin0 and imin2 + PMMG_split4op_sort(mesh,k,ifac,tau,imin0,imin2,ne_tmp,tetra_sorted,node_sorted); // Find tetra_sorted and node_sorted + break; + + default: // This tetra does not need to be split and is processed for the first time + assert(pt->flag == 0); + pt->flag = -1; // Put this flag to -1 to specify that this tetra has been processed + PMMG_nosplit_sort(mesh,k,ifac,tetra_sorted,node_sorted); + break; + } + + if ( !ier ) { + ERR_RULE; + } + + if (mesh->tetra[k].flag != -1) { + /* STEP 6.2.3 - Update tag of edges in xtetra with MG_PARBDY */ + for (j=0; j<3; j++) { + k0 = tetra_sorted[j]; + if (k0 != -1) { + pt0 = &mesh->tetra[k0]; + pxt0 = &mesh->xtetra[pt0->xt]; + for (k=0; k<3; k++) { + ia = MMG5_iarf[ifac][k]; + pxt0->tag[ia] |= MG_PARBDY; + } + } + } + + /* STEP 6.2.4 - Update internal face communicators */ + /* (a) Update the first face located at i - Modify only index1 - index2 stays the same */ + grp->face2int_face_comm_index1[i] = 12*tetra_sorted[0]+3*ifac+node_sorted[0]; + int_face_comm->intvalues[3*pos] = pos; + + /* (b) Update the communicators for the potential 2 other faces */ + nface_added = 0; + for (j=0; j<2; j++) { + if ( tetra_sorted[j+1] != -1) { + grp->face2int_face_comm_index1[nitem_grp_face_tmp+j] = 12*tetra_sorted[j+1]+3*ifac+node_sorted[j+1]; + grp->face2int_face_comm_index2[nitem_grp_face_tmp+j] = nitem_grp_face_tmp+j; + int_face_comm->intvalues[3*pos+j+1] = nitem_grp_face_tmp+j; + nface_added += 1; + } + } + + /* (c) Update the total number of faces */ + nitem_grp_face_tmp += nface_added; + int_face_comm->nitem = nitem_grp_face_tmp; + } + else if (mesh->tetra[k].flag == -1) { + /* STEP 6.2.5 - Update internal face communicators for tetra not split */ + /* As the tetra is not split, the tetra index has not changed. + Moreover, the node of face ifac is chosen to be the node with highest + coordinates, so it should not have been changed either. 
However, to be + sure we are not missing a case, we still update face2int_face_comm_index1 */ + grp->face2int_face_comm_index1[i] = 12*tetra_sorted[0]+3*ifac+node_sorted[0]; + } + } + + /** STEP 6.3 - Update internal and external face communicator */ + /* Update number of element in internal face comm*/ + nitem_grp_face = nitem_grp_face_tmp; + grp->nitem_int_face_comm = nitem_grp_face; + int_face_comm->nitem = nitem_grp_face; + + /* Update external face comm */ + for (k=0; k < next_face_comm; k++) { + + /* Get current external face communicator */ + ext_face_comm = &parmesh->ext_face_comm[k]; // External face communicator + nitem_ext_face = ext_face_comm->nitem; // Nbr of faces in common between these 2 procs + nitem_ext_face_init = ext_face_comm->nitem; // Initial nbr of faces in common between these 2 procs + + /* Loop over the faces in the external face communicator */ + for (i=0; i < nitem_ext_face_init; i++) { + pos = ext_face_comm->int_comm_index[i]; + + /* Loop over the potential 3 faces created after the split */ + for (j=0; j < 3; j++) { + pos_face = int_face_comm->intvalues[3*pos+j]; + if (pos_face < 0) continue; // If pos_face=-1, there is not extra face to add + /* The first face is located at i in the ext face comm */ + if (j==0) { + ext_face_comm->int_comm_index[i] = pos_face; + } + /* The next faces are added at the end of the ext face comm */ + else { + ext_face_comm->int_comm_index[nitem_ext_face] = pos_face; + nitem_ext_face += 1; + } + } + } + ext_face_comm->nitem = nitem_ext_face; // Update nbr of face in ext face comm + } + + /** Check the internal face communicator */ + assert( PMMG_check_intFaceComm( parmesh ) ); + + /** Check the external face communicator */ + assert( PMMG_check_extFaceComm( parmesh,parmesh->info.read_comm ) ); + + /** STEP 6.4 - Do the splitting for tetra located elsewhere */ + /* Loop over tetra */ + for (k=1; k<=ne_init; k++) { + + /* Get the tetra k and xtetra associated */ + pt = &mesh->tetra[k]; + pxt = &mesh->xtetra[pt->xt]; + if ( !MG_EOK(pt) ) continue; + + /* If the tetra has already a flag (flag !=0) - it has already been processed. Pass to the next tetra. 
*/ + if (pt->flag) continue; + + /* STEP 6.4.1 - Find global numbering and split pattern of the tetra */ + /* Get the split pattern: loop over the edges, get hash.item[key].k */ + memset(vx,0,6*sizeof(MMG5_int)); + for (ia=0; ia<6; ia++) { + vx[ia] = MMG5_hashGet(&hash,pt->v[MMG5_iare[ia][0]],pt->v[MMG5_iare[ia][1]]); + if ( vx[ia] > 0 ) + MG_SET(pt->flag,ia); + } + + /* Get global num of the tetra vertices */ + for (j=0; j<4; j++) { + vGlobNum[j] = mesh->point[pt->v[j]].tmp; + } + + /* Get the split pattern stored in flag */ + flag = pt->flag; + + /* STEP 6.4.2 - If not already done, split the tetra according to the flag */ + switch (flag) { + case 1: case 2: case 4: case 8: case 16: case 32: // 1 edge split + ier = MMG5_split1(mesh,met,k,vx,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + ns++; // Incremente the total number of split + break; + + case 48: case 24: case 40: case 6: case 34: case 36: // 2 edges (same face) split + case 20: case 5: case 17: case 9: case 3: case 10: + ier = MMG5_split2sf_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + ns++; // Incremente the total number of split + break; + + case 7: case 25: case 42: case 52: // 3 edges on conic configuration split + ier = MMG5_split3cone_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + ns++; // Incremente the total number of split + break; + + case 30: case 45: case 51: // 4 edges on opposite configuration split + ier = MMG5_split4op_globNum(mesh,met,k,vx,vGlobNum,1); + mesh->tetra[k].flag = flag; // Re-flag tetra k as the flag has been reset in the split + ns++; // Incremente the total number of split + break; + + default: + assert(pt->flag == 0); + pt->flag = -1; // Put this flag to -1 to specify that this tetra has been processed + break; + } + + if ( !ier ) return 0; + } + + /** STEP 7 - Deallocation/Allocation of memory and reset of some fields */ + /* Delete the tables storing imin0, imin2 and ne_tmp_tab */ + PMMG_DEL_MEM(parmesh,vGlobNum_tab,MMG5_int,"vGlobNum_tab"); + PMMG_DEL_MEM(parmesh,ne_tmp_tab,MMG5_int,"ne_tmp_tab"); + + /* Delete the edges hash table */ + PMMG_DEL_MEM(parmesh,int_edge_comm->intvalues,int,"edge intvalues"); + PMMG_DEL_MEM(parmesh,int_face_comm->intvalues,int,"face intvalues"); + + /* Delete internal communicators */ + MMG5_DEL_MEM(mesh,hash.item); + + /* Realloc internal node communicators to exact final size */ + PMMG_REALLOC(parmesh, grp->node2int_node_comm_index1, + nitem_grp_node, + nitem_grp_node_firstalloc, + int,"Allocation of node2int_node_comm_index1", return 0); + PMMG_REALLOC(parmesh, grp->node2int_node_comm_index2, + nitem_grp_node, + nitem_grp_node_firstalloc, + int,"Allocation of node2int_node_comm_index2", return 0); + + /* Realloc internal face communicators to exact final size */ + PMMG_REALLOC(parmesh, grp->face2int_face_comm_index1, + nitem_grp_face, + nitem_grp_face_firstalloc, + int,"Allocation of face2int_face_comm_index1", return 0); + PMMG_REALLOC(parmesh, grp->face2int_face_comm_index2, + nitem_grp_face, + nitem_grp_face_firstalloc, + int,"Allocation of face2int_face_comm_index2", return 0); + + /* Realloc external node communicator to exact final size */ + for ( k=0; knext_node_comm; k++ ) { + ext_node_comm = &parmesh->ext_node_comm[k]; + PMMG_REALLOC(parmesh,ext_node_comm->int_comm_index, + ext_node_comm->nitem, + ext_node_comm->nitem_to_share, + int,"Re-allocation of 
external node communicator",return 0); + ext_node_comm->nitem_to_share = 0; + } + + /* Realloc external face communicator to exact final size */ + for ( k=0; knext_face_comm; k++ ) { + ext_face_comm = &parmesh->ext_face_comm[k]; + PMMG_REALLOC(parmesh,ext_face_comm->int_comm_index, + ext_face_comm->nitem, + ext_face_comm->nitem_to_share, + int,"Re-allocation of external face communicator",return 0); + ext_face_comm->nitem_to_share = 0; + } + + /* Reset mark and flag in mesh->tetra */ + for (k=1; k<=ne_init; k++) { + mesh->tetra[k].mark = 0; + mesh->tetra[k].flag = 0; + } + + return ns; +} + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra that we do not split + * \param ifac local index of the face located on a parallel boundary + * \param tetra_sorted indices of tetra + * sorted by increasing order of their global node index + * \param node_sorted for each tetras in tetra_sorted: local index of the node + * on ifac having the minimum global node index + * + * Find the node on ifac with the minimum global node index + * for the tetra that we do not split and store the local index in node_sorted. + * + */ +void PMMG_nosplit_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,MMG5_int *tetra_sorted,MMG5_int *node_sorted) { + MMG5_int v_t0[3], v_t1[3], v_t2[3]; + + /* STEP 1 - Find the indices of the tetra */ + /* The tetra k is not split + Note that 2 faces of the initial tetra are divided into 2 + and 2 faces are not divided. */ + + /* STEP 1.1 - Index of the first tetra */ + /* Tetra #0 created by MMG5_split1 */ + tetra_sorted[0] = k; + + /* Store index of the node with highest coordinates in node_sorted[0] + and sort the vertices by increasing order in v_t0 */ + node_sorted[0]=PMMG_sort_vertices(mesh,tetra_sorted[0],v_t0,ifac); + + /* STEP 1.2 - Index of the second tetra */ + tetra_sorted[1] = -1; + v_t1[0] = v_t1[1] = v_t1[2] = -1; + + /* STEP 1.3 - Index of the third tetra */ + /* There is no third tetra created by MMG5_split1 - we assign -1 */ + tetra_sorted[2] = -1; + v_t2[0] = v_t2[1] = v_t2[2] = -1; + + /* STEP 2 - Sort these tetras by their global indices */ + PMMG_sort_tetra(tetra_sorted,node_sorted,v_t0,v_t1,v_t2); + + return; +} + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra that we have split with MMG5_split1 + * \param ifac local index of the face located on a parallel boundary + * that we have split with MMG5_split1 + * \param tau vertices permutation + * \param ne_tmp number of tetra created after MMG5_split1 + * \param tetra_sorted indices of tetras (defining ifac after the split MMG5_split1) + * sorted by increasing order of their global node index + * \param node_sorted for each tetras in tetra_sorted: local index of the node + * on ifac having the minimum global node index + * + * Sort the tetras created by MMG5_split1 defining the face ifac. + * Find the node on ifac with the minimum global node index + * for each tetra in tetra_sorted and store the local index in node_sorted. + * + */ +void PMMG_split1_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4], + MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted) { + MMG5_int v_t0[3], v_t1[3], v_t2[3]; + + /* STEP 1 - Find the indices of the new tetras defining the face ifac */ + /* The 2 tetras created by MMG5_split1 are at + mesh.tetra[k] (tetra #0) and [ne_tmp] (tetra #1) + Note that 2 faces of the initial tetra are divided into 2 + and 2 faces are not divided. 
*/ + + /* STEP 1.1 - Index of the first tetra */ + /* Tetra #0 created by MMG5_split1 */ + tetra_sorted[0] = k; + /* Except for the following: treta #1 created by MMG5_split1 */ + if ( ifac==tau[0] ) tetra_sorted[0] = ne_tmp; + + /* Store index of the node with highest coordinates in node_sorted[0] + and sort the vertices by increasing order in v_t0 */ + node_sorted[0]=PMMG_sort_vertices(mesh,tetra_sorted[0],v_t0,ifac); + + /* STEP 1.2 - Index of the second tetra */ + /* Tetra #1 created by MMG5_split1 */ + tetra_sorted[1] = ne_tmp; + /* Except for the following: no more tetra define ifac - we assign -1 */ + if ( ifac == tau[0] ) tetra_sorted[1] = -1; + if ( ifac == tau[1] ) tetra_sorted[1] = -1; + + if ( tetra_sorted[1] != -1 ) { + /* Store index of the node with highest coordinates in node_sorted[1] + and sort the vertices by increasing order in v_t1 */ + node_sorted[1]=PMMG_sort_vertices(mesh,tetra_sorted[1],v_t1,ifac); + } + else { + v_t1[0] = v_t1[1] = v_t1[2] = -1; + } + + /* STEP 1.3 - Index of the third tetra */ + /* There is no third tetra created by MMG5_split1 - we assign -1 */ + tetra_sorted[2] = -1; + v_t2[0] = v_t2[1] = v_t2[2] = -1; + + /* STEP 2 - Sort these tetras by their global indices */ + PMMG_sort_tetra(tetra_sorted,node_sorted,v_t0,v_t1,v_t2); + + return; +} + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra that we have split with MMG5_split2sf_globNum + * \param ifac local index of the face located on a parallel boundary + * that we have split with MMG5_split2sf_globNum + * \param tau vertices permutation + * \param imin minimal index of vertices \a tau[1] and \a tau[2] + * \param ne_tmp number of tetra created after MMG5_split2sf_globNum + * \param tetra_sorted indices of tetras (defining ifac after the split MMG5_split2sf_globNum) + * sorted by increasing order of their global node index + * \param node_sorted for each tetras in tetra_sorted: local index of the node on ifac having + * the minimum global node index + * + * Sort the tetras created by MMG5_split2sf_globNum defining the face ifac. + * Find the node on ifac with the minimum global node index + * for each tetra in tetra_sorted and store the local index in node_sorted. + * + */ +void PMMG_split2sf_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],int imin, + MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted) { + MMG5_int v_t0[3], v_t1[3], v_t2[3]; + + /* STEP 1 - Find the indices of the new tetras defining the face ifac */ + /* The 3 tetras created by MMG3D_split2sf_globNum are at + mesh.tetra[k] (tetra #0), [ne_tmp-1] (tetra #1) and [ne_tmp] (tetra #2) + Note that 1 face of the initial tetra is divided into 3; + 2 faces are divided into 2 and 1 face is not divided. 
*/ + + /* STEP 1.1 - Index of the first tetra */ + /* Tetra #0 created by MMG5_split2sf_globNum */ + tetra_sorted[0] = k; + /* Except for the following: treta #1 or #2 created by MMG5_split2sf_globNum */ + if ( (imin==tau[1]) && (ifac==tau[3]) ) tetra_sorted[0] = ne_tmp; + if ( (imin==tau[2]) && (ifac==tau[3]) ) tetra_sorted[0] = ne_tmp-1; + + /* Store index of the node with highest coordinates in node_sorted[0] + and sort the vertices by increasing order in v_t0 */ + node_sorted[0]=PMMG_sort_vertices(mesh,tetra_sorted[0],v_t0,ifac); + + /* STEP 1.2 - Index of the second tetra */ + /* Tetra #1 created by MMG5_split2sf_globNum */ + tetra_sorted[1] = ne_tmp-1; + /* Except for the following: treta #2 created by MMG5_split2sf_globNum or no more tetra */ + if ( ifac == tau[1] ) tetra_sorted[1] = ne_tmp; + if ( ifac == tau[3] ) tetra_sorted[1] = -1; + + if ( tetra_sorted[1] != -1 ) { + /* Store index of the node with highest coordinates in node_sorted[1] + and sort the vertices by increasing order in v_t1 */ + node_sorted[1]=PMMG_sort_vertices(mesh,tetra_sorted[1],v_t1,ifac); + } + else { + v_t1[0] = v_t1[1] = v_t1[2] = -1; + } + + /* STEP 1.3 - Index of the third tetra */ + /* Tetra #2 created by MMG5_split2sf_globNum */ + tetra_sorted[2] = ne_tmp; + /* Except for the following: no more tetra define ifac */ + if ( ifac != tau[0] ) tetra_sorted[2] = -1; + + if ( tetra_sorted[2] != -1 ) { + /* Store index of the node with highest coordinates in node_sorted[2] + and sort the vertices by increasing order in v_t2 */ + node_sorted[2]=PMMG_sort_vertices(mesh,tetra_sorted[2],v_t2,ifac); + } + else{ + v_t2[0] = v_t2[1] = v_t2[2] = -1; + } + + /* STEP 2 - Sort these tetras by their global indices */ + PMMG_sort_tetra(tetra_sorted,node_sorted,v_t0,v_t1,v_t2); + + return; +} + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra that we have split with MMG5_split3cone_globNum + * \param ifac local index of the face located on a parallel boundary + * that we have split with MMG5_split3cone_globNum + * \param tau vertices permutation + * \param ia first condition to choose the appropriate split in MMG5_split3cone_globNum + * \param ib second condition to choose the appropriate split in MMG5_split3cone_globNum + * \param ne_tmp number of tetra created after MMG5_split3cone_globNum + * \param tetra_sorted indices of tetras (defining ifac after the split MMG5_split3cone_globNum) + * sorted by increasing order of their global node index + * \param node_sorted for each tetras in tetra_sorted: local index of the node on ifac having + * the minimum global node index + * + * Sort the tetras created by MMG5_split3cone_globNum defining the face ifac. + * Find the node on ifac with the minimum global node index + * for each tetra in tetra_sorted and store the local index in node_sorted. + * + */ +void PMMG_split3cone_sort(MMG5_pMesh mesh, MMG5_int k,int ifac,uint8_t tau[4],int ia,int ib, + MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted) { + MMG5_int v_t0[3], v_t1[3], v_t2[3]; + + /* STEP 1 - Find the indices of the new tetras defining the face ifac */ + /* The 4 tetras created by MMG3D_split3cone_globNum are at + mesh.tetra[k] (tetra #0), [ne_tmp-2] (tetra #1), + [ne_tmp-1] (tetra #2) and [ne_tmp] (tetra #3) + Note that 3 faces of the initial tetra are divided into 3 + and 1 face is not divided. 
*/ + + /* STEP 1.1 - Index of the first tetra */ + /* Tetra #0 created by MMG5_split3cone_globNum */ + tetra_sorted[0] = k; + /* Except for the following: treta #3 created by MMG5_split3cone_globNum */ + if ( ifac == tau[0] ) tetra_sorted[0] = ne_tmp; + + /* Store index of the node with highest coordinates in node_sorted[0] + and sort the vertices by increasing order in v_t0 */ + node_sorted[0]=PMMG_sort_vertices(mesh,tetra_sorted[0],v_t0,ifac); + + /* STEP 1.2 - Index of the second tetra */ + /* Tetra #1 created by MMG5_split3cone_globNum */ + tetra_sorted[1] = ne_tmp-2; + /* Except for the following: treta #2 created by MMG5_split3cone_globNum or no more tetra */ + if ( (ia==tau[1]) && (ifac == tau[1]) ) tetra_sorted[1] = ne_tmp-1; + if ( (ia==tau[2]) && (ifac == tau[2]) ) tetra_sorted[1] = ne_tmp-1; + if ( (ia==tau[3]) && (ifac == tau[3]) ) tetra_sorted[1] = ne_tmp-1; + if ( ifac == tau[0] ) tetra_sorted[1] = -1; + + if ( tetra_sorted[1] != -1 ) { + /* Store index of the node with highest coordinates in node_sorted[1] + and sort the vertices by increasing order in v_t1 */ + node_sorted[1]=PMMG_sort_vertices(mesh,tetra_sorted[1],v_t1,ifac); + } + else { + v_t1[0] = v_t1[1] = v_t1[2] = -1; + } + + /* STEP 1.3 - Index of the third tetra */ + /* Tetra #3 created by MMG5_split3cone_globNum */ + tetra_sorted[2] = ne_tmp; + /* Except for the following: treta #2 by MMG5_split3cone_globNum or no more tetra */ + if ( (ia==tau[1]) && (ib==tau[2]) && (ifac == tau[3]) ) tetra_sorted[2] = ne_tmp-1; + if ( (ia==tau[1]) && (ib==tau[3]) && (ifac == tau[2]) ) tetra_sorted[2] = ne_tmp-1; + if ( (ia==tau[2]) && (ib==tau[1]) && (ifac == tau[3]) ) tetra_sorted[2] = ne_tmp-1; + if ( (ia==tau[2]) && (ib==tau[3]) && (ifac == tau[1]) ) tetra_sorted[2] = ne_tmp-1; + if ( (ia==tau[3]) && (ib==tau[1]) && (ifac == tau[2]) ) tetra_sorted[2] = ne_tmp-1; + if ( (ia==tau[3]) && (ib==tau[2]) && (ifac == tau[1]) ) tetra_sorted[2] = ne_tmp-1; + if ( ifac == tau[0] ) tetra_sorted[2] = -1; + + if ( tetra_sorted[2] != -1 ) { + /* Store index of the node with highest coordinates in node_sorted[2] + and sort the vertices by increasing order in v_t2 */ + node_sorted[2]=PMMG_sort_vertices(mesh,tetra_sorted[2],v_t2,ifac); + } + else{ + v_t2[0] = v_t2[1] = v_t2[2] = -1; + } + + /* STEP 2 - Sort these tetras by their global indices */ + PMMG_sort_tetra(tetra_sorted,node_sorted,v_t0,v_t1,v_t2); + + return; +} + + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra that we have split with MMG5_split4op_globNum + * \param ifac local index of the face located on a parallel boundary + * that we have split with MMG5_split4op_globNum + * \param tau vertices permutation + * \param imin01 minimal index of vertices \a tau[0] and \a tau[1] + * \param imin23 minimal index of vertices \a tau[2] and \a tau[3] + * \param ne_tmp number of tetra created after MMG5_split4op_globNum + * \param tetra_sorted indices of tetras (defining ifac after the split MMG5_split4op_globNum) + * sorted by increasing order of their global node index + * \param node_sorted for each tetras in tetra_sorted: local index of the node on ifac having + * the minimum global node index + * + * Sort the tetras created by MMG5_split4op_globNum defining the face ifac. + * Find the node on ifac with the minimum global node index + * for each tetra in tetra_sorted and store the local index in node_sorted. 
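 *
 * For reference, an editorial sketch (not part of the patch) of the ordering
 * used for tetra_sorted: the records are compared lexicographically on their
 * three sorted global vertex labels, which is exactly what
 * PMMG_compare_3ints_array and PMMG_sort_tetra below implement with at most
 * three compare-swaps. It is wrapped here as a qsort-style comparator for
 * illustration only; the struct and function names are hypothetical.
 *
 *   #include <stdlib.h>
 *
 *   typedef struct {
 *     long tetra;    // index of the sub-tetra defining ifac
 *     int  node;     // local index of its reference node on ifac
 *     long key[3];   // its three global vertex labels, already sorted
 *   } face_record_sketch;
 *
 *   static int cmp_face_record(const void *pa, const void *pb) {
 *     const face_record_sketch *a = (const face_record_sketch *)pa;
 *     const face_record_sketch *b = (const face_record_sketch *)pb;
 *     for (int i = 0; i < 3; i++) {
 *       if (a->key[i] < b->key[i]) return -1;
 *       if (a->key[i] > b->key[i]) return  1;
 *     }
 *     return 0;
 *   }
 *
 *   // usage: qsort(records, nrec, sizeof(face_record_sketch), cmp_face_record);
 *   // a face is defined by at most 3 sub-tetras here, so the hand-written
 *   // compare-swap version below is just as simple and avoids the indirection.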
+ * + */ +void PMMG_split4op_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],int imin01,int imin23, + MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted) { + MMG5_int v_t0[3], v_t1[3], v_t2[3]; + + /* STEP 1 - Find the indices of the new tetras defining the face ifac */ + /* The 6 tetras created by MMG5_split4op_globNum are at + mesh.tetra[k] (tetra #0), [ne_tmp-4] (tetra #1), [ne_tmp-3] (tetra #2), + [ne_tmp-2] (tetra #3), [ne_tmp-1] (tetra #4) and [ne_tmp] (tetra #5) + Note that all the 4 faces of the initial tetra are divided into 3. */ + + /* STEP 1.1 - Index of the first tetra */ + /* Tetra #0 created by MMG5_split4op_globNum */ + tetra_sorted[0] = k; + /* Except for the following: treta #2 created by MMG5_split4op_globNum */ + if ( (imin01==tau[0]) && (imin23==tau[2]) && (ifac == tau[1]) ) tetra_sorted[0] = ne_tmp-3; + if ( (imin01==tau[1]) && (imin23==tau[2]) && (ifac == tau[0]) ) tetra_sorted[0] = ne_tmp-3; + if ( (imin01==tau[0]) && (imin23==tau[3]) && (ifac == tau[1]) ) tetra_sorted[0] = ne_tmp-3; + if ( (imin01==tau[1]) && (imin23==tau[3]) && (ifac == tau[0]) ) tetra_sorted[0] = ne_tmp-3; + + /* Store index of the node with highest coordinates in node_sorted[0] + and sort the vertices by increasing order in v_t0 */ + node_sorted[0]=PMMG_sort_vertices(mesh,tetra_sorted[0],v_t0,ifac); + + /* STEP 1.2 - Index of the second tetra */ + /* Tetra #3 created by MMG5_split4op_globNum */ + tetra_sorted[1] = ne_tmp-2; + /* Except for the following: treta #1 or #2 created by MMG5_split4op_globNum */ + if (ifac == tau[2]) { + tetra_sorted[1] = ne_tmp-4; + if ( imin01 == tau[1]) tetra_sorted[1] = ne_tmp-3; + } + else if (ifac==tau[3]) { + tetra_sorted[1] = ne_tmp-3; + if ( imin01 == tau[1]) tetra_sorted[1] = ne_tmp-4; + } + + /* Store index of the node with highest coordinates in node_sorted[1] + and sort the vertices by increasing order in v_t1 */ + node_sorted[1]=PMMG_sort_vertices(mesh,tetra_sorted[1],v_t1,ifac); + + /* STEP 1.3 - Index of the third tetra */ + /* Tetra #5 created by MMG5_split4op_globNum */ + tetra_sorted[2] = ne_tmp; + /* Except for the following: treta #3 or #4 created by MMG5_split4op_globNum */ + if ( (imin23==tau[2]) && (ifac == tau[0]) ) tetra_sorted[2] = ne_tmp-1; + if ( (imin23==tau[3]) && (ifac == tau[1]) ) tetra_sorted[2] = ne_tmp-1; + if ( (imin23==tau[2]) && (ifac == tau[2]) ) tetra_sorted[2] = ne_tmp-2; + if ( (imin23==tau[3]) && (ifac == tau[3]) ) tetra_sorted[2] = ne_tmp-2; + + /* Store index of the node with highest coordinates in node_sorted[2] + and sort the vertices by increasing order in v_t2 */ + node_sorted[2]=PMMG_sort_vertices(mesh,tetra_sorted[2],v_t2,ifac); + + /* STEP 2 - Sort these tetras by their global indices */ + PMMG_sort_tetra(tetra_sorted,node_sorted,v_t0,v_t1,v_t2); + + return; +} + +/** + * \param mesh pointer toward the mesh structure + * \param k index of the tetra + * \param v_t table of a triangle vertices + * \param ifac local index of the face located on a parallel boundary + * + * \return iploc index (0,1 or 2) the node with highest coordinates defining triangle ifac + * + * Find the node index (0,1 or 2) with highest coordinates defining triangle ifac + * and sort the vertices by increasing order. 
+ * + */ +int PMMG_sort_vertices(MMG5_pMesh mesh,MMG5_int k,MMG5_int *v_t,int ifac) { + MMG5_pTetra pt; + int iploc; + + /* Pointer to the tetra structure */ + pt = &mesh->tetra[k]; + + /* Local node indices of the 3 vertices on ifac */ + v_t[0] = pt->v[MMG5_idir[ifac][0]]; + v_t[1] = pt->v[MMG5_idir[ifac][1]]; + v_t[2] = pt->v[MMG5_idir[ifac][2]]; + + /* Index [0,1,2] of the node with highest coordinates defining triangle ifac */ + iploc = PMMG_tria_highestcoord(mesh,v_t); + + /* Global node indices of the 3 vertices on ifac */ + v_t[0] = mesh->point[pt->v[MMG5_idir[ifac][0]]].tmp; + v_t[1] = mesh->point[pt->v[MMG5_idir[ifac][1]]].tmp; + v_t[2] = mesh->point[pt->v[MMG5_idir[ifac][2]]].tmp; + + /* Sort the vertices by increasing order of the global node indices */ + PMMG_swap_vertices(v_t); + + return iploc; + +} + +/** + * \param tetra Indices of the tetras to be sorted + * \param node Indices of the nodes associated with the tetras + * \param v_t0 First tetra: indices of triangle vertices + * \param v_t1 Second tetra: indices of triangle vertices + * \param v_t2 Third tetra: indices of triangle vertices + * + * Sort the tetras in increasing order based on vertices indices + * Sort accordingly to the tetra sorting, the array storing the indices of nodes + * + */ +void PMMG_sort_tetra(MMG5_int *tetra,MMG5_int *node,MMG5_int *v_t0,MMG5_int *v_t1,MMG5_int *v_t2) { + /* Sorting using conditional statements */ + if ( v_t1[0] != -1 ) { + if (PMMG_compare_3ints_array(v_t0, v_t1) > 0) { + PMMG_swap_ints(&tetra[0], &tetra[1]); + PMMG_swap_ints(&node[0], &node[1]); + PMMG_swap_3int_arrays(v_t0, v_t1); + } + if ( v_t2[0] != -1 ) { + if (PMMG_compare_3ints_array(v_t1, v_t2) > 0) { + PMMG_swap_ints(&tetra[1], &tetra[2]); + PMMG_swap_ints(&node[1], &node[2]); + PMMG_swap_3int_arrays(v_t1, v_t2); + } + if (PMMG_compare_3ints_array(v_t0, v_t1) > 0) { + PMMG_swap_ints(&tetra[0], &tetra[1]); + PMMG_swap_ints(&node[0], &node[1]); + PMMG_swap_3int_arrays(v_t0, v_t1); + } + } + } +} + +/** + * \param v_t table of a triangle vertices + * + * Sort the vertices of the triangle in increasing order + * + */ +void PMMG_swap_vertices(MMG5_int *v_t) { + /* Sorting using conditional statements */ + if (v_t[0] > v_t[1]) { + PMMG_swap_ints(&v_t[0], &v_t[1]); + } + if (v_t[1] > v_t[2]) { + PMMG_swap_ints(&v_t[1], &v_t[2]); + } + if (v_t[0] > v_t[1]) { + PMMG_swap_ints(&v_t[0], &v_t[1]); + } +} + +/** + * \param a table of the 3 vertices of first triangle + * \param b table of the 3 vertices of second triangle + * + * \return -1 if a < b + * \return 0 if a = b + * \return +1 if a > b + * + * Compare vertices of 2 triangles to sort the triangle by increasing order + * of their vertices indices + * + */ +int PMMG_compare_3ints_array(int *a, int *b) { + MMG5_int result; + if (a[0] > b[0]) return 1; + else if (a[0] < b[0]) return -1; + + if (a[1] > b[1]) return 1; + else if (a[1] < b[1]) return -1; + + if (a[2] > b[2]) return 1; + else if (a[2] < b[2]) return -1; + + return 0; +} + +/** + * \param a first integer to swap + * \param b second integer to swap + * + * Swap the integer a and b + * + */ +void PMMG_swap_ints(int *a, int *b) { + MMG5_int temp = *a; + *a = *b; + *b = temp; +} + +/** + * \param a first array of 3 integers to swap + * \param b second array of 3 integers to swap + * + * Swap the array of 3 integers a and b + * + */ +void PMMG_swap_3int_arrays(int *a, int *b) { + for ( int i = 0; i < 3; i++ ) { + MMG5_int temp = a[i]; + a[i] = b[i]; + b[i] = temp; + } +} + +/** + * \param parmesh pointer toward a parmesh 
structure
+ * \param mesh pointer toward the mesh
+ * \param sol pointer toward the level-set
+ *
+ * \return 1 if success, 0 otherwise
+ *
+ * \todo Fill the function
+ *
+ * Removal of small parasitic components (bubbles of material, etc) with volume
+ * less than mesh->info.rmc (default VOLFRAC) * volume of the mesh.
+ *
+ */
+int PMMG_rmc(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_pSol sol){
+  if ( parmesh->info.imprim > PMMG_VERB_VERSION )
+    fprintf(stdout,"\n      ## TODO:: PMMG_rmc.\n");
+  return 1;
+}
+
+/**
+ * \param parmesh pointer toward a parmesh structure
+ * \param comm MPI communicator for ParMmg
+ *
+ * \return 1 if success, 0 if fail.
+ *
+ * Snap the values of the level-set function that are very close to 0 to
+ * exactly 0, and prevent nonmanifold patterns from being generated.
+ *
+ * \todo all MPI_Abort have to be removed and replaced by a clean error handling
+ * without deadlocks.
+ *
+ */
+int PMMG_snpval_ls(PMMG_pParMesh parmesh,MPI_Comm comm) {
+
+  MMG5_pTetra    pt;
+  MMG5_pPoint    p0;
+  PMMG_pInt_comm int_comm;
+  PMMG_pExt_comm ext_comm;
+  PMMG_pGrp      grp;
+  MPI_Status     status;
+  MMG5_pMesh     mesh;
+  MMG5_pSol      sol;
+
+  int nitem_ext,next_comm,nitem_ToShare_ToSend,nitem_ToShare_ToRecv;
+  int color_in,color_out;
+  int idx_ext,idx_int;
+  int icomm,i,ip,idx;
+  double *tmp;
+  MMG5_int k,nc,ns,ncg;
+  double *rtosend,*rtorecv,*doublevalues;
+  int    *itosend,*itorecv,*intvalues;
+  int ier = 1;  // Initialize error
+
+  if ( parmesh->info.imprim > PMMG_VERB_VERSION )
+    fprintf(stdout,"\n      ## TODO:: PMMG_snpval_ls.\n");
+
+  /* Ensure only one group on each proc */
+  assert(parmesh->ngrp == 1 && "more than one group per rank not implemented");
+
+  /* Get external node communicator information */
+  grp       = &parmesh->listgrp[0];
+  mesh      = parmesh->listgrp[0].mesh;
+  sol       = parmesh->listgrp[0].ls;
+  next_comm = parmesh->next_node_comm;  // Nbr of external node communicators
+  int_comm  = parmesh->int_node_comm;   // Internal node communicator
+
+  /** STEP 1 - Create tetra adjacency */
+  if ( !MMG3D_hashTetra(mesh,1) ) {
+    fprintf(stderr,"\n  ## Error: %s: hashing problem (1). Exit program.\n",
+            __func__);
+    ier = 0;
+  }
+
+  /* Reset point flags and s */
+  for (k=1; k<=mesh->np; k++) {
+    mesh->point[k].flag = 0;
+    mesh->point[k].s    = -1;
+  }
+
+  /* Memory allocation */
+  PMMG_CALLOC(parmesh,int_comm->intvalues,int_comm->nitem,int,"intvalues",ier = 0);
+  MMG5_ADD_MEM(mesh,(mesh->npmax+1)*sizeof(double),"temporary table",
+               fprintf(stderr,"  Exit program.\n"); ier = 0);
+
+  if ( ier ) {
+    MMG5_SAFE_CALLOC(tmp,mesh->npmax+1,double,ier = 0);
+  }
+
+  if ( !ier ) {
+    /* Comms of step 6 will fail */
+    MPI_Abort(parmesh->comm, PMMG_TMPFAILURE);
+  }
+
+  /** STEP 2 - Identify the proc owner of interface points */
+  /* Store the point index in the internal communicator intvalues */
+  for( i = 0; i < grp->nitem_int_node_comm; i++ ){
+    ip  = grp->node2int_node_comm_index1[i];
+    idx = grp->node2int_node_comm_index2[i];
+    int_comm->intvalues[idx] = ip;
+  }
+
+  /* Store in mesh->point.s the highest rank to which this point belongs */
+  for (icomm=0; icomm<next_comm; icomm++) {
+    ext_comm  = &parmesh->ext_node_comm[icomm]; // External node communicator
+    color_in  = ext_comm->color_in;             // Color of this partition Pcolor_in
+    color_out = ext_comm->color_out;            // Color of the remote partition Pcolor_out
+    nitem_ext = ext_comm->nitem;                // Nbr of nodes in common between Pcolor_in and Pcolor_out
+
+    /* Loop over the nodes in the external node communicator */
+    for (i=0; i < nitem_ext; i++) {
+      /* Get the indices of the nodes in internal communicators */
+      idx = ext_comm->int_comm_index[i];
+      ip  = int_comm->intvalues[idx];
+      p0  = &mesh->point[ip];
+
+      if (color_out > p0->s)
+        p0->s = color_out;
+      if (color_in > p0->s)
+        p0->s = color_in;
+    }
+  }
+
+  /** STEP 3 - Include tetras with very poor quality that are connected to the negative part */
+  for (k=1; k<=mesh->ne; k++) {
+    pt = &mesh->tetra[k];
+    if ( !pt->v[0] ) continue;
+    if ( pt->tag & MG_OVERLAP) continue; // Ignore overlap tetra
+    if ( pt->qual < MMG5_EPS ) {
+
+      if ( parmesh->ddebug ) {
+        fprintf(stdout, "  ## Info: %s: rank %d: tetra %d has bad qual (%f < 1e-6)\n",
+                __func__,parmesh->myrank,k,pt->qual);
+      }
+
+      for (i=0; i<4; i++) {
+        ip = pt->v[i];
+        if ( sol->m[ip] < 1000.0*MMG5_EPS ) break;
+      }
+      if ( i < 4 ) {
+        for (i=0; i<4; i++) {
+          ip = pt->v[i];
+          sol->m[ip] = -1000.0*MMG5_EPS;
+        }
+      }
+    }
+  }
+
+  /** STEP 4 - Snap the values of sol that are close to 0 to exactly 0 */
+  ns = 0;
+  for (k=1; k<=mesh->np; k++) {
+    p0 = &mesh->point[k];
+    if ( !MG_VOK(p0) ) continue;
+    if (p0->tag & MG_OVERLAP) continue; // Ignore overlap points
+    /* Snap points in the interior of the partition and
+       interface points whose proc owner is this proc color */
+    if ( (p0->s == -1) || (p0->s == parmesh->myrank)) {
+      if ( fabs(sol->m[k]) < MMG5_EPS ) {
+        if ( mesh->info.ddebug )
+          fprintf(stderr,"  ## Warning: %s: snapping value at vertex %" MMG5_PRId "; "
+                  "previous value: %E.\n",__func__,k,fabs(sol->m[k]));
+
+        tmp[k] = ( fabs(sol->m[k]) < MMG5_EPSD ) ?
+ (-100.0*MMG5_EPS) : sol->m[k]; + + if ( parmesh->ddebug ) { + fprintf(stderr, " ## Warning: %s: rank %d: snapping value at " + "vertex %d, s=%d, tmp=%f, sol=%f \n",__func__, + parmesh->myrank,k,p0->s,tmp[k],sol->m[k]); + } + + p0->flag = 1; + sol->m[k] = 0; + ns++; + } + } + } + + /** STEP 5 - Check snapping did not lead to a nonmanifold situation */ + ncg = 0; + do { + nc = 0; + /* Check snapping did not lead to a nonmanifold situation */ + for (k=1; k<=mesh->ne; k++) { + pt = &mesh->tetra[k]; + if ( !MG_EOK(pt) ) continue; + if (pt->tag & MG_OVERLAP) continue; // Ignore overlap tetra + for (i=0; i<4; i++) { + ip = pt->v[i]; + p0 = &mesh->point[ip]; + // if (p0->tag & MG_PARBDY) continue; + if ( p0->flag == 1 ) { + if ( parmesh->ddebug ) { + fprintf(stdout, " ## Info: %s: rank %d: call MMG3D_ismaniball:\n" + " Tetra=%d, Point=%d, maniball=%d \n", + __func__,parmesh->myrank,k,ip,MMG3D_ismaniball(mesh,sol,k,i)); + } + if ( !MMG3D_ismaniball(mesh,sol,k,i) ) { + if ( tmp[ip] < 0.0 ) + sol->m[ip] = -100.0*MMG5_EPS; + else + sol->m[ip] = +100.0*MMG5_EPS; + + p0->flag = -1; + nc++; + } + } + } + } + ncg += nc; + } + while ( nc ); + + /** TODO :: STEP 6 - Transfer data of snap point to the other proc */ + // nitem_ToShare_ToSend = 0; // Nbr of nodes in common between Pcolor_in and Pcolor_out + // PMMG_CALLOC(parmesh,intvalues, mesh->np, int, "intvalues", ier = 0); + // PMMG_CALLOC(parmesh,doublevalues, mesh->np, double, "doublevalues", ier = 0); + + // if ( !ier ) { + // /* Avoid deadlock in comms */ + // MPI_Abort(parmesh->comm, PMMG_TMPFAILURE); + // } + + // for (k=1; k<=mesh->np; k++) { + // p0 = &mesh->point[k]; + // if ( !MG_VOK(p0) ) continue; + // if ( !(p0->tag & MG_PARBDY) ) continue; // If the point is not MG_PARBDY, ignore it + // if ( !(p0->tag & MG_OVERLAP) ) continue; // If the point is not MG_OVERLAP, ignore it + + // /* If this point is MG_PARBDY or MG_OVERLAP and has been modified on this partition (p0->flag==-1) */ + // if ( (p0->flag == -1) ) { + // intvalues[nitem_ToShare_ToSend] = ip; + // doublevalues[nitem_ToShare_ToSend] = sol->m[ip]; + // nitem_ToShare_ToSend +=1; + + // } + // } + + // for (icomm=0; icommext_node_comm[icomm]; // External node communicator + // color_in = ext_comm->color_in; // Color of this partition Pcolor_in + // color_out = ext_comm->color_out; // Color of the remote partition Pcolor_out + // nitem_ext = ext_comm->nitem; // Nbr of nodes in common between Pcolor_in and Pcolor_out + // nitem_ToShare_ToSend = 0; // Nbr of nodes in common between Pcolor_in and Pcolor_out + + // itosend = ext_comm->itosend; + // rtosend = ext_comm->rtosend; + // itorecv = ext_comm->itorecv; + // rtorecv = ext_comm->rtorecv; + + // PMMG_CALLOC(parmesh,itosend, nitem_ext, int, "itosend", ier = 0); + // PMMG_CALLOC(parmesh,rtosend, nitem_ext, double, "rtosend", ier = 0); + + // if ( !ier ) { + // /* Avoid deadlock in comms */ + // MPI_Abort(parmesh->comm, PMMG_TMPFAILURE); + // } + + // /* Loop over the nodes in the external node communicator **/ + // for (i=0; i < nitem_ext; i++) { + // /* Get the indices of the nodes in internal communicators */ + // idx_ext = ext_comm->int_comm_index[i]; + // idx_int = grp->node2int_node_comm_index2[idx_ext]; + // ip = grp->node2int_node_comm_index1[idx_int]; + // p0 = &mesh->point[ip]; + + // /* If this point has been treated on this partition, send the data to the other */ + // if ( (p0->s == color_in) & (p0->flag == -1)) { + // itosend[nitem_ToShare_ToSend] = ip; + // rtosend[nitem_ToShare_ToSend] = sol->m[ip]; + // 
nitem_ToShare_ToSend +=1; + + // } + // } + + // /* Communication */ + // // PMMG_CALLOC(parmesh,ext_comm->nitem_to_share,1,int,"nitem_to_share",ier = 0); + // MPI_CHECK( + // MPI_Sendrecv(&nitem_ToShare_ToSend,1,MPI_INT,color_out,MPI_LS_TAG+2, + // &nitem_ToShare_ToRecv,1,MPI_INT,color_out,MPI_LS_TAG+2, + // comm,&status),MPI_Abort(parmesh->comm, PMMG_TMPFAILURE) ); + + // ext_comm->nitem_to_share = nitem_ToShare_ToRecv; + + // PMMG_CALLOC(parmesh,itorecv, nitem_ToShare_ToRecv, int, "itorecv", ier = 0); + // PMMG_CALLOC(parmesh,rtorecv, nitem_ToShare_ToRecv, double, "rtorecv", ier = 0); + + // if ( !ier ) { + // /* Avoid deadlock in comms */ + // MPI_Abort(parmesh->comm, PMMG_TMPFAILURE); + // } + + // MPI_CHECK( + // MPI_Sendrecv(itosend,nitem_ToShare_ToSend,MPI_INT,color_out,MPI_LS_TAG, + // itorecv,nitem_ToShare_ToRecv,MPI_INT,color_out,MPI_LS_TAG, + // comm,&status), MPI_Abort(parmesh->comm, PMMG_TMPFAILURE) ); + // MPI_CHECK( + // MPI_Sendrecv(rtosend,nitem_ToShare_ToSend,MPI_DOUBLE,color_out,MPI_LS_TAG+1, + // rtorecv,nitem_ToShare_ToRecv,MPI_DOUBLE,color_out,MPI_LS_TAG+1, + // comm,&status), MPI_Abort(parmesh->comm, PMMG_TMPFAILURE) ); + + // } + + if ( (parmesh->info.imprim > PMMG_VERB_DETQUAL || parmesh->ddebug) && ns+ncg > 0 ) + fprintf(stdout," rank %d: %8" MMG5_PRId " points snapped, %" MMG5_PRId + " corrected\n",parmesh->myrank,ns,ncg); + + /* Reset point flags */ + for (k=1; k<=mesh->np; k++) + mesh->point[k].flag = 0; + + /* memory free */ + MMG5_DEL_MEM(mesh,mesh->adja); + MMG5_DEL_MEM(mesh,tmp); + + return 1; +} + +/** + * \param parmesh pointer toward a parmesh structure + * \param mesh pointer toward the mesh structure. + * \param sol pointer toward the level-set. + * \param met pointer toward a metric (optionnal). + * + * \return 0 if fail, 1 otherwise. + * + * Create implicit surface in mesh. + * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. + * + */ +int PMMG_ls(PMMG_pParMesh parmesh) { + char str[16]=""; + MMG5_HGeom hpar; + MMG5_pMesh mesh; + MMG5_pSol met,sol; + MMG5_int k; + int ier = 1; + + mesh = parmesh->listgrp[0].mesh; + met = parmesh->listgrp[0].met; + sol = parmesh->listgrp[0].ls; + + /* Set function pointers */ + /** \todo TODO :: Surface ls and alias functions */ + if ( mesh->info.isosurf ) { + fprintf(stderr," ## Error: Splitting boundaries on isovalue not yet" + " implemented. Exit program.\n"); + ier = 0; + } + + if ( parmesh->info.imprim > PMMG_VERB_VERSION ) + fprintf(stdout," ** ISOSURFACE EXTRACTION %s\n",str); + + if ( mesh->nprism || mesh->nquad ) { + fprintf(stderr,"\n ## Error: Isosurface extraction not available with" + " hybrid meshes. Exit program.\n"); + ier = 0; + } + + /* Modify the value of the level-set to work with the 0 level-set */ + for (k=1; k<= sol->np; k++) + sol->m[k] -= mesh->info.ls; + + /* Create overlap */ + if ( !ier ) { + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /* Implementation in progress: call this part of code only when the + * PMMG_SNAPVAL environment variable is enabled */ + const char* dev_snap = getenv("PMMG_SNAPVAL"); + + if ( dev_snap ) { + if ( !PMMG_create_overlap(parmesh,parmesh->info.read_comm) ) { + /* To avoid deadlocks in snpval_ls */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /** \todo TODO :: Snap values of level set function if needed */ + if ( !PMMG_snpval_ls(parmesh,parmesh->info.read_comm) ) { + fprintf(stderr,"\n ## Problem with implicit function. 
Exit program.\n"); + /* To avoid deadlocks in parbdyTria */ + ier = 0; + } + + /* Delete overlap */ + if ( !PMMG_delete_overlap(parmesh,parmesh->info.read_comm) ) { + fprintf(stderr,"\n ## Impossible to delete overlap. Exit program.\n"); + ier = 0; + } + } + + /* Create table of adjacency for tetra */ + if ( !MMG3D_hashTetra(mesh,1) ) { + fprintf(stderr,"\n ## Hashing problem. Exit program.\n"); + ier = 0; + } + + /* Reset the mesh->info.isoref field everywhere */ + if ( ier && !MMG3D_resetRef_ls(mesh) ) { + fprintf(stderr,"\n ## Problem in resetting references. Exit program.\n"); + ier = 0; + } + + /* Tag parallel triangles on material interfaces as boundary. */ + if ( !ier ) { + /* Avoid deadlock in comms in parbdyTria */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + if( !PMMG_parbdyTria( parmesh ) ) { + fprintf(stderr,"\n ## Unable to recognize parallel triangles on material interfaces." + " Exit program.\n"); + ier = 0; + } + + /* Check the compatibility of triangle orientation with tetra faces */ + if ( !MMG5_bdryPerm(mesh) ) { + fprintf(stderr,"\n ## Boundary orientation problem. Exit program.\n"); + ier = 0; + } + + /* Identify surface mesh. Clean triangle array: remove useless or double + triangles and add the missing ones. Remark: spurious boundary triangles + across parallel interface cannot be removed by the serial function but will + not be stored inthe xtetra by the MMG5_bdrySet function during analysis. + This may create inconsistencies between edge and point tags. + */ + if ( ier && !MMG5_chkBdryTria(mesh) ) { + fprintf(stderr,"\n ## Boundary problem. Exit program.\n"); + ier = 0; + } + + /* Build hash table for initial edges: gather tag infos from edges and + * triangles and store these infos in tria. Skip non PARBDYBDY // edges. */ + if ( ier && !MMG5_hGeom(mesh) ) { + fprintf(stderr,"\n ## Hashing problem (0). Exit program.\n"); + ier = 0; + } + + /* Set the triangles references to the tetrahedra faces and edges */ + if ( ier && !MMG5_bdrySet(mesh) ) { + fprintf(stderr,"\n ## Problem in setting boundary. 
Exit program.\n"); + ier = 0; + } + + /** \todo TODO :: Removal of small parasitic components */ + if ( mesh->info.rmc > 0 && ier ) { + PMMG_rmc(parmesh,mesh,sol); + fprintf(stdout,"\n ## Warning: rmc option not implemented yet for ParMmg.\n"); + ier = 0; + } + +#ifdef USE_POINTMAP + /* Initialize source point with input index */ + MMG5_int ip; + for( ip = 1; ip <= mesh->np; ip++ ) { + if ( (!MG_VOK(&mesh->point[ip])) ) continue; + mesh->point[ip].src = ip; + } +#endif + + /* Compute vertices global numerotation + This step is needed to compute the edge communicator */ + + if ( !ier ) { + /* Avoid deadlock in comms in compute_verticesGloNum */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + if ( !PMMG_Compute_verticesGloNum( parmesh,parmesh->info.read_comm ) ) { + fprintf(stderr,"\n ## Warning: impossible to compute node global numbering.\n"); + ier = 0; + } + + /* Hash parallel edges + This step is needed to compute the edge communicator */ + if( ier && (PMMG_hashPar_fromFaceComm( parmesh,&hpar ) != PMMG_SUCCESS) ) { + fprintf(stderr,"\n ## Warning: impossible to compute the hash parallel edge.\n"); + ier = 0; + } + + if ( !ier ) { + /* Avoid deadlock in comms in build_edgeComm */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /* Build edge communicator */ + if( !PMMG_build_edgeComm( parmesh,mesh,&hpar,parmesh->info.read_comm ) ) { + fprintf(stderr,"\n ## Warning: Impossible to build edge communicator.\n"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + assert ( PMMG_check_extEdgeComm ( parmesh,parmesh->info.read_comm ) ); + + /** Discretization of the implicit function - Cut tetra */ + if ( 0 >= PMMG_cuttet_ls(parmesh) ) { + fprintf(stderr,"\n ## Problem in discretizing implicit function. Exit program.\n"); + ier = 0; + } + + /* Delete outdated arrays */ + MMG5_DEL_MEM(mesh,mesh->adja); + MMG5_DEL_MEM(mesh,mesh->adjt); + MMG5_DEL_MEM(mesh,mesh->tria); + + /* The function MMG5_hGeom is in charge to set mesh->na=0 if not already. + Here mesh->na is modified by the creation of the edge comm PMMG_build_edgeComm and is + set equal to the number of // edges. + Unfortunately here PMMG_build_edgeComm cannot be called before MMG5_hGeom. + Hence, mesh->na is then set equal to 0 here because: + 1. later in the analysis, mesh->na needs to be equal to 0 and; + 2. at this stage, the edge comm does not exist anymore, so mesh->na should be 0 */ + mesh->na = 0; + mesh->nt = 0; + + /* Update mesh->npi and mesh->nei to be equal to mesh->np and mesh->ne, respectively */ + mesh->npi = mesh->np; + mesh->nei = mesh->ne; + + /* Set ref to tetra according to the sign of the level-set */ + if ( ier && !MMG3D_setref_ls(mesh,sol) ) { + fprintf(stderr,"\n ## Problem in setting references. 
Exit program.\n"); + ier = 0; + } + + /* Clean old bdy analysis */ + for ( k=1; k<=mesh->np; ++k ) { + if ( mesh->point[k].tag & MG_BDY ) { + mesh->point[k].tag &= ~MG_BDY; + } + if ( mesh->point[k].tag & MG_PARBDYBDY ) { + mesh->point[k].tag &= ~MG_PARBDYBDY; + } + } + + /* Clean memory */ + MMG5_DEL_MEM(mesh,sol->m); + + if ( !ier ) { + /* Avoid deadlock in comms in build_edgeComm */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /* Check communicators */ + assert ( PMMG_check_extFaceComm ( parmesh,parmesh->info.read_comm ) ); + assert ( PMMG_check_intFaceComm ( parmesh ) ); + assert ( PMMG_check_extNodeComm ( parmesh,parmesh->info.read_comm ) ); + assert ( PMMG_check_intNodeComm ( parmesh ) ); + + /* Dealloc edge comm as it is not up-to-date */ + MMG5_DEL_MEM(mesh,hpar.geom); + PMMG_edge_comm_free( parmesh ); + + return 1; +} diff --git a/src/mergemesh_pmmg.c b/src/mergemesh_pmmg.c index 59c65f6b..5eac1077 100644 --- a/src/mergemesh_pmmg.c +++ b/src/mergemesh_pmmg.c @@ -190,7 +190,7 @@ int PMMG_check_solsNpmax(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol ls, */ static inline int PMMG_realloc_pointAndSols(MMG5_pMesh mesh,MMG5_pSol met,MMG5_pSol ls, - MMG5_pSol disp,MMG5_pSol field,double *c,int16_t tag,int src) { + MMG5_pSol disp,MMG5_pSol field,double *c,uint16_t tag,int src) { MMG5_pSol psl; int ip = 0; int oldnpmax = mesh->npmax; @@ -1097,11 +1097,10 @@ int PMMG_gather_parmesh( PMMG_pParMesh parmesh, PMMG_pInt_comm *rcv_int_node_comm, int **rcv_next_node_comm, PMMG_pExt_comm **rcv_ext_node_comm ) { - - size_t pack_size_tot; - int *rcv_pack_size,ier,ier_glob,k,*displs,ier_pack; + size_t pack_size_tot,next_disp,*displs,buf_idx; + int *rcv_pack_size,ier,ier_glob,k,ier_pack; int nprocs,root,pack_size; - char *rcv_buffer,*buffer,*ptr; + char *rcv_buffer,*ptr_to_free,*buffer; nprocs = parmesh->nprocs; root = parmesh->info.root; @@ -1120,7 +1119,7 @@ int PMMG_gather_parmesh( PMMG_pParMesh parmesh, /** 1: Memory alloc */ if ( parmesh->myrank == root ) { PMMG_MALLOC( parmesh, rcv_pack_size ,nprocs,int,"rcv_pack_size",ier=0); - PMMG_MALLOC( parmesh, displs ,nprocs,int,"displs for gatherv",ier=0); + PMMG_MALLOC( parmesh, displs ,nprocs,size_t,"displs for gatherv",ier=0); PMMG_CALLOC( parmesh, (*rcv_grps) ,nprocs,PMMG_Grp,"rcv_grps",ier=0); PMMG_MALLOC( parmesh, (*rcv_int_node_comm) ,nprocs,PMMG_Int_comm,"rcv_int_comm" ,ier=0); PMMG_MALLOC( parmesh, (*rcv_next_node_comm),nprocs,int,"rcv_next_comm" ,ier=0); @@ -1144,16 +1143,33 @@ int PMMG_gather_parmesh( PMMG_pParMesh parmesh, if ( parmesh->myrank == root ) { displs[0] = 0; for ( k=1; kcomm ),ier=0 ); - PMMG_DEL_MEM(parmesh,ptr,char,"buffer to send"); + /* Here the gatherv call has been replaced by a send/recv to avoid errors when + * displacements overflow the INT_MAX value */ + if (parmesh->myrank == root) { + int i; + for ( i = 0; i < nprocs; ++i ) { + if ( i != root ) { + MPI_CHECK( + MPI_Recv(rcv_buffer + displs[i], rcv_pack_size[i], MPI_CHAR, i, + MPI_MERGEMESH_TAG, parmesh->comm, MPI_STATUS_IGNORE), + ier = 0); + } + } + } else { + MPI_CHECK( + MPI_Send(buffer_to_send, pack_size, MPI_CHAR, root, MPI_MERGEMESH_TAG,parmesh->comm), + ier = 0); + } /** 4: Unpack parmeshes */ #ifndef NDEBUG @@ -1185,7 +1224,6 @@ int PMMG_gather_parmesh( PMMG_pParMesh parmesh, #endif if ( parmesh->myrank == root ) { - ptr = rcv_buffer; for ( k=0; kmesh ); - memcpy ( &mesh->info,&grp_1->mesh->info,sizeof(MMG5_Info) ); + if ( !PMMG_copy_mmgInfo ( &grp_1->mesh->info,&mesh->info ) ) return 0; /** Recover mesh name */ @@ -1541,6 +1581,9 @@ int 
PMMG_mergeParmesh_rcvParMeshes ( PMMG_pParMesh parmesh,PMMG_pGrp rcv_grps, if ( parmesh->lsin ) { MMG3D_Set_inputSolName (mesh,ls, parmesh->lsin); } + if ( parmesh->lsout ) { + MMG3D_Set_outputSolName (mesh,ls, parmesh->lsout); + } } if ( disp ) { if ( parmesh->dispin ) { @@ -1588,6 +1631,7 @@ int PMMG_merge_parmesh( PMMG_pParMesh parmesh ) { /** Step 1: Allocate internal communicator buffer and fill it: the * intvalues array contains the indices of the matching nodes on the proc. */ +#warning MEMORY: small inconsistency /* Internal comm allocation */ int_node_comm = parmesh->int_node_comm; diff --git a/src/metis_pmmg.c b/src/metis_pmmg.c index 5b05e5ef..8d542bea 100644 --- a/src/metis_pmmg.c +++ b/src/metis_pmmg.c @@ -31,6 +31,7 @@ */ #include "metis_pmmg.h" #include "linkedlist_pmmg.h" +#include "mmg3dexterns_private.h" /** * \param parmesh pointer toward the parmesh structure. @@ -56,10 +57,12 @@ int PMMG_saveGraph( PMMG_pParMesh parmesh,idx_t *xadj,idx_t *adjncy, idx_t istart,istop,iadj,nadj; int ip,k,j,jel; - PMMG_CALLOC(parmesh,sname,strlen(filename)+9,char,"file name prefix",return 0); - PMMG_CALLOC(parmesh,smesh,strlen(filename)+15,char,"mesh file name",return 0); - PMMG_CALLOC(parmesh,ssol,strlen(filename)+15,char,"sol file name",return 0); - sprintf(sname,"%s-P%02d-I%02d",filename,parmesh->myrank,parmesh->iter); + size_t snamelen = strlen(filename)+1+4*sizeof(int); + PMMG_CALLOC(parmesh,sname,snamelen,char,"file name prefix",return 0); + PMMG_CALLOC(parmesh,smesh,snamelen+5,char,"mesh file name",return 0); + PMMG_CALLOC(parmesh,ssol,snamelen+4,char,"sol file name",return 0); + + snprintf(sname,snamelen,"%s-P%02d-I%02d",filename,parmesh->myrank,parmesh->iter); strcpy(smesh,sname); strcat(smesh,".mesh"); strcpy(ssol,sname); @@ -978,6 +981,10 @@ int PMMG_graph_parmeshGrps2parmetis( PMMG_pParMesh parmesh,idx_t **vtxdist, nitem = ext_face_comm->nitem; color = ext_face_comm->color_out; + /* If the communicator is empty, dont try to communicate. This case happens when + the mesh was loaded from an HDF5 file with more procs than partitions. 
*/ + if ( !nitem ) continue; + PMMG_CALLOC(parmesh,ext_face_comm->itosend,nitem,int,"itosend array", goto fail_6); itosend = ext_face_comm->itosend; @@ -1165,7 +1172,8 @@ int PMMG_graph_parmeshGrps2parmetis( PMMG_pParMesh parmesh,idx_t **vtxdist, /* Print graph to file */ /* FILE* fid; char filename[48]; - sprintf(filename,"graph_proc%d",parmesh->myrank); + int len = PMMG_count_digits(parmesh->myrank); + snprintf(filename,11+len*sizeof(int),"graph_proc%d",parmesh->myrank); fid = fopen(filename,"w"); for( k = 0; k < parmesh->nprocs+1; k++ ) fprintf(fid,"%d\n",(*vtxdist)[k]); @@ -1284,6 +1292,8 @@ int PMMG_part_meshElts2metis( PMMG_pParMesh parmesh, idx_t* part, idx_t nproc ) xadj = adjncy = vwgt = adjwgt = NULL; + if (!nelt) return 1; + /* Set contiguity of partitions if using Metis also for graph partitioning */ METIS_SetDefaultOptions(options); options[METIS_OPTION_CONTIG] = ( parmesh->info.contiguous_mode && @@ -1477,7 +1487,7 @@ int PMMG_part_parmeshGrps2metis( PMMG_pParMesh parmesh,idx_t* part,idx_t nproc ) /* Print graph to file */ /* FILE* fid; char filename[48]; - sprintf(filename,"part_centralized"); + snprintf(filename,17,"part_centralized"); fid = fopen(filename,"w"); for( iproc = 0; iproc < vtxdist[nproc]; iproc++ ) fprintf(fid,"%d\n",part_seq[iproc]); diff --git a/src/moveinterfaces_pmmg.c b/src/moveinterfaces_pmmg.c index 264f6728..d308654e 100644 --- a/src/moveinterfaces_pmmg.c +++ b/src/moveinterfaces_pmmg.c @@ -688,6 +688,7 @@ int PMMG_check_reachability( PMMG_pParMesh parmesh,int *counter ) { itosend[i] = intvalues[idx] ; } +#warning Luca: change this tag MPI_CHECK( MPI_Sendrecv(itosend,nitem,MPI_INT,color,MPI_PARMESHGRPS2PARMETIS_TAG+1, itorecv,nitem,MPI_INT,color,MPI_PARMESHGRPS2PARMETIS_TAG+1, @@ -1280,6 +1281,10 @@ int PMMG_set_ifcDirection( PMMG_pParMesh parmesh,int **displsgrp,int **mapgrp ) for( k=0; knamein && ls->nameout ) { + /* If the m array of the level-set is not allocated, the level-set field will + * not be allocated after redistribution so no need to share its name info */ + if ( ls && ls->m && ls->namein && ls->nameout ) { idx += (strlen(ls->namein) + 1) * sizeof(char); idx += (strlen(ls->nameout) + 1) * sizeof(char); } @@ -126,7 +128,10 @@ int PMMG_mpisizeof_filenames ( PMMG_pGrp grp ) { /* disp */ idx += sizeof(int); // metin idx += sizeof(int); // metout - if ( disp && disp->namein && disp->nameout ) { + /* If the m array of the displacement field is not allocated, the disp field + * will not be allocated after redistribution so no need to share its name + * info */ + if ( disp && disp->m && disp->namein && disp->nameout ) { idx += (strlen(disp->namein) + 1) * sizeof(char); idx += (strlen(disp->nameout) + 1) * sizeof(char); } @@ -206,6 +211,10 @@ int PMMG_mpisizeof_infos ( MMG5_Info *info ) { idx += info->nmat*sizeof(int); // mat->ref idx += info->nmat*sizeof(int); // mat->rin idx += info->nmat*sizeof(int); // mat->rex + assert( info->invmat.lookup); + idx += sizeof(int); // invmat->offset + idx += sizeof(int); // invmat->size + idx += info->invmat.size*sizeof(int); // invmat->lookup } /* local parameters */ @@ -635,7 +644,9 @@ int PMMG_mpipack_filenames ( PMMG_pGrp grp,char **buffer ) { tmp += metout_s * sizeof(char); } - if ( ls && ls->namein && ls->nameout ) { + /* If the m array of the level-set is not allocated, the level-set field will + * not be allocated after redistribution so no need to share its name info */ + if ( ls && ls->m && ls->namein && ls->nameout ) { lsin_s = (strlen(ls->namein) + 1); lsout_s = (strlen(ls->nameout) + 1); if ( lsin_s 
> MMG5_FILENAME_LEN_MAX || lsout_s > MMG5_FILENAME_LEN_MAX ) { @@ -659,7 +670,10 @@ int PMMG_mpipack_filenames ( PMMG_pGrp grp,char **buffer ) { tmp += lsout_s * sizeof(char); } - if ( disp && disp->namein && disp->nameout ) { + /* If the m array of the displacement field is not allocated, the disp field + * will not be allocated after redistribution so no need to share its name + * info */ + if ( disp && disp->m && disp->namein && disp->nameout ) { dispin_s = (strlen(disp->namein) + 1); dispout_s = (strlen(disp->nameout) + 1); if ( dispin_s > MMG5_FILENAME_LEN_MAX || dispout_s > MMG5_FILENAME_LEN_MAX ) { @@ -781,6 +795,12 @@ void PMMG_mpipack_infos ( MMG5_Info *info,char **buffer ) { *( (int *) tmp) = info->mat[k].rin; tmp += sizeof(int); *( (int *) tmp) = info->mat[k].rex; tmp += sizeof(int); } + assert( info->invmat.lookup); + *( (int *) tmp) = info->invmat.offset; tmp += sizeof(int); + *( (int *) tmp) = info->invmat.size; tmp += sizeof(int); + for ( k=0; kinvmat.size; ++k ) { + *( (int *) tmp) = info->invmat.lookup[k]; tmp += sizeof(int); + } } /* local parameters */ @@ -1112,6 +1132,8 @@ int PMMG_mpipack_grp ( PMMG_pGrp grp,char **buffer ) { * buffer pointer at the end of the written area. The parmesh groups must have * been merged before entering this function. * + * \remark the \a buffer pointer is modified (shifted) thus, after this + * function, it cannot be used for deallocation anymore */ int PMMG_mpipack_parmesh ( PMMG_pParMesh parmesh ,char **buffer ) { PMMG_pGrp grp; diff --git a/src/mpiunpack_pmmg.c b/src/mpiunpack_pmmg.c index 7d2c27bd..a7b32edb 100644 --- a/src/mpiunpack_pmmg.c +++ b/src/mpiunpack_pmmg.c @@ -92,10 +92,12 @@ int PMMG_mpiunpack_meshSizes ( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int igrp, ier_grp = MMG3D_Init_mesh(MMG5_ARG_start, MMG5_ARG_ppMesh,&(grp->mesh), MMG5_ARG_ppMet ,&(grp->met), + MMG5_ARG_ppLs ,&(grp->ls), MMG5_ARG_end); mesh = grp->mesh; met = grp->met; + ls = grp->ls; /* Set maximum memory */ mesh->memMax = parmesh->memGloMax; @@ -410,21 +412,26 @@ int PMMG_copy_filenames ( PMMG_pParMesh parmesh,PMMG_pGrp grp,int *ier,int ier_m if ( !MMG5_Set_outputSolName( mesh,met, parmesh->metout ) ) { *ier = 0; } } - if ( ier_ls ) { + if ( ier_ls && ls ) { /* ls structure is allocated */ if ( parmesh->lsin && *parmesh->lsin ) { if ( !MMG5_Set_inputSolName( mesh, ls, parmesh->lsin ) ) { *ier = 0; } } + if ( parmesh->lsout && *parmesh->lsout ) { + if ( !MMG5_Set_outputSolName( mesh, ls, parmesh->lsout ) ) { + *ier = 0; + } + } } - if ( ier_disp ) { + if ( ier_disp && disp ) { /* disp structure is allocated */ if ( parmesh->dispin && *parmesh->dispin ) { if ( !MMG5_Set_inputSolName( mesh, disp, parmesh->dispin ) ) { *ier = 0; } } } - if ( ier_field ) { + if ( ier_field && field ) { /* field structure is allocated */ for ( is=0; isfieldin ) { @@ -457,7 +464,7 @@ int PMMG_copy_filenames ( PMMG_pParMesh parmesh,PMMG_pGrp grp,int *ier,int ier_m */ static void PMMG_mpiunpack_infos ( MMG5_Info *info,char **buffer,int *ier,int ier_mesh ) { - int k,nmat,npar; + int k,nmat,npar,invsize; if ( ier_mesh ) { /** Mesh infos */ @@ -517,6 +524,19 @@ void PMMG_mpiunpack_infos ( MMG5_Info *info,char **buffer,int *ier,int ier_mesh *buffer += sizeof(int8_t); *buffer += 3*sizeof(int); } + + info->invmat.offset = *( (int *) *buffer); *buffer += sizeof(int); + info->invmat.size = *( (int *) *buffer); *buffer += sizeof(int); + MMG5_SAFE_CALLOC(info->invmat.lookup,info->invmat.size,int, *ier = 0); + if ( *ier ) { + for ( k=0; kinvmat.size; ++k ) { + info->invmat.lookup[k] = *( (int *) 
*buffer); + *buffer += sizeof(int); + } + } + else { + *buffer += info->invmat.size*sizeof(int); + } } /* local parameters */ @@ -552,10 +572,15 @@ void PMMG_mpiunpack_infos ( MMG5_Info *info,char **buffer,int *ier,int ier_mesh *buffer += 7*sizeof(uint8_t); if ( nmat ) { + /* mat */ *buffer += nmat*sizeof(int8_t); *buffer += nmat*sizeof(int); *buffer += nmat*sizeof(int); *buffer += nmat*sizeof(int); + /* invmat */ + *buffer += sizeof(int); + invsize = *( (int *) *buffer); *buffer += sizeof(int); + *buffer += invsize*sizeof(int); } /* local parameters */ @@ -1092,6 +1117,9 @@ int PMMG_mpiunpack_grp ( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int igrp,char * * pointer toward a buffer of type "x". Then we can get the variable value by * dereferencing the adress of the buffer. * + * \remark the \a buffer pointer is modified (shifted) thus, after this + * function, it cannot be used for deallocation anymore + * */ int PMMG_mpiunpack_parmesh ( PMMG_pParMesh parmesh,PMMG_pGrp listgrp,int igrp, PMMG_pInt_comm int_node_comm, diff --git a/src/overlap_pmmg.c b/src/overlap_pmmg.c new file mode 100644 index 00000000..cc7829f8 --- /dev/null +++ b/src/overlap_pmmg.c @@ -0,0 +1,846 @@ +/* ============================================================================= +** This file is part of the parmmg software package for parallel tetrahedral +** mesh modification. +** Copyright (c) Bx INP/Inria/UBordeaux, 2017- +** +** parmmg is free software: you can redistribute it and/or modify it +** under the terms of the GNU Lesser General Public License as published +** by the Free Software Foundation, either version 3 of the License, or +** (at your option) any later version. +** +** parmmg is distributed in the hope that it will be useful, but WITHOUT +** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +** FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +** License for more details. +** +** You should have received a copy of the GNU Lesser General Public +** License and of the GNU General Public License along with parmmg (in +** files COPYING.LESSER and COPYING). If not, see +** . Please read their terms carefully and +** use this copy of the parmmg distribution only if you accept them. +** ============================================================================= +*/ + +/** + * \file overlap_pmmg.c + * \brief Create and delete overlap. + * \author Cécile Dobrzynski (Bx INP/Inria/UBordeaux) + * \author Algiane Froehly (InriaSoft) + * \author Laetitia Mottet (UBordeaux) + * \version 1 + * \copyright GNU Lesser General Public License. + * + * Functions to create and delete the overlap + * + */ + +#include "parmmg.h" +#include "mmgexterns_private.h" +#include "inlined_functions_3d_private.h" + +/** + * \param parmesh pointer toward a parmesh structure + * \param comm MPI communicator for ParMmg + * + * \return 1 if success, 0 if fail. + * + * Create the overlap. The overlap consists in sending to and receiving from + * neighbour partitions one extra layer of point and associated tetra. + * + * \remark Data transferred between partitions: + * - mesh->point.c, mesh->point.tag and mesh->point.ref + * - mesh->tetra.v and mesh->tetra.ref + * - mesh->ls + * Data NOT transferred between partitions: + * - Other mesh->point and mesh->tetra fields + * - mesh->xtetra fields + * + * \todo all MPI_Abort have to be removed and replaced by a clean error handling + * without deadlocks. 
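 *
 * Editorial sketch (not part of the patch): the per-entity payload carried by
 * the overlap exchange, gathered in two structs for readability. The code
 * below ships the same information as separate flat arrays
 * (pointCoord*_ToSend, pointTag*_ToSend, pointRef*_ToSend, ls*_ToSend,
 * tetraVertices_ToSend, tetraRef_ToSend), one MPI_Sendrecv per array; the
 * struct names here are hypothetical.
 *
 *   #include <stdint.h>
 *
 *   typedef struct {
 *     double   c[3];   // mesh->point.c   : coordinates
 *     uint16_t tag;    // mesh->point.tag : point tag
 *     int      ref;    // mesh->point.ref : point reference
 *     double   ls;     // ls->m[ip]       : level-set value at the point
 *   } overlap_point_sketch;
 *
 *   typedef struct {
 *     int v[4];        // mesh->tetra.v   : the four vertex indices
 *     int ref;         // mesh->tetra.ref : tetra reference
 *   } overlap_tetra_sketch;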
+ * + */ +int PMMG_create_overlap(PMMG_pParMesh parmesh, MPI_Comm comm) { + + /* Local variables + Remark: *Interior_*: pts not tagged MG_PARBDY + *PBDY_* : pts tagged MG_PARBDY + *in2out: data from color_in sends to color_out + *out2in: data on color_in receives from color_out */ + PMMG_pInt_comm int_comm; + PMMG_pExt_comm ext_comm,ext_comm_ter; + PMMG_pGrp grp; + MMG5_pMesh mesh; + MMG5_pSol ls; + PMMG_pOverlap overlap; + MPI_Status status; + MMG5_pPoint p0; + MMG5_pTetra pt; + + int ier = 1; // Initialize error + int local_mpi_tag; // Tag used for MPI comm + int i,j,k,r; + int idx, ip, ip_in,ip_out,ip_ter,pos; + int icomm,icomm_ter; + int ref; + int duplicated_point; + int nitem_ext,nitem_ext_ter,next_comm; + int color_in,color_out,color_ter; + double coord[3],ls_val; + uint16_t tag; + + int *n_ToSend, *n_ToRecv; // Tables to send/receive nbr of points and tetras + int *hash_in2out, *hash_out2in; // Hash table needed in structure PMMG_pOverlap + double *lsInterior_ToSend, *lsPBDY_ToSend, *lsInterior_ToRecv, *lsPBDY_ToRecv; // LS values to send/receive + + /* Number of tetras */ + int nt_initial; // Initial tetras nbr on Pcolor_in + int ntTot_in2out; // Total tetras nbr from Pcolor_in sends to Pcolor_out + int ntTot_out2in; // Total tetras nbr on Pcolor_in receives from Pcolor_out + + /* Tetras vertices index and ref to send/receive */ + int *tetraVertices_ToSend; // Indices of tetra vertices from Pcolor_in sends to Pcolor_out + int *tetraVertices_ToRecv_inIdx; // Indices of tetra vertices from Pcolor_out on Pcolor_in + int *tetraVertices_ToRecv_outIdx; // Indices of tetra vertices from Pcolor_out on Pcolor_out + int *tetraRef_ToSend, *tetraRef_ToRecv; + int *tetraVerticesSeen_ToSend, *tetraVerticesSeen_ToRecv; // Flag tetra vertices + + /* Number of points */ + int np_in, np_out; // Nbr of pts on Pcolor_in or Pcolor_out + int npInterior_in2out; // Nbr of pts not tagged MG_PARBDY from Pcolor_in sends to Pcolor_out + int npPBDY_in2out; // Nbr of pts tagged MG_PARBDY from Pcolor_in sends to Pcolor_out + int npInterior_out2in; // Nbr of pts not tagged MG_PARBDY from Pcolor_out receives on Pcolor_in + int npPBDY_out2in; // Nbr of pts tagged MG_PARBDY from Pcolor_out receives on Pcolor_in + int npTot_in2out; // Total nbr of pts from Pcolor_in sends to Pcolor_out npTot=npInterior+npPBDY + int npTot_out2in; // Total nbr of pts from Pcolor_out receives on Pcolor_in npTot=npInterior+npPBDY + + /* Points coordinates, tag, index and ref to send/receive */ + double *pointCoordInterior_ToSend, *pointCoordPBDY_ToSend; + double *pointCoordInterior_ToRecv, *pointCoordPBDY_ToRecv; + uint16_t *pointTagInterior_ToSend, *pointTagPBDY_ToSend; + uint16_t *pointTagInterior_ToRecv, *pointTagPBDY_ToRecv; + int *pointRefInterior_ToSend, *pointRefPBDY_ToSend; + int *pointRefInterior_ToRecv, *pointRefPBDY_ToRecv; + int *pointIdxPBDY_ToSend, *pointIdxInterface_ToSend; + int *pointIdxPBDY_ToRecv, *pointIdxInterface_ToRecv; + + /* Data needed to identify MG_PARBDY pts located on Pcolor_ter + and ensure they are added only once in Pcolor_in */ + int ndataPBDY_in2out, ndataPBDY_out2in, ndataPBDY_added; // Nbr of MG_PARBDY points + int *dataPBDY_ToSend, *dataPBDY_ToRecv, *dataPBDY_AlreadyAdded; // Data to identify MG_PARBDY points + + if ( !parmesh->ngrp ) { + /* No deadlock because no global comms */ + return 1; + } + + if ( parmesh->info.imprim > PMMG_VERB_STEPS ) + fprintf(stdout," Create Overlap\n"); + + /* Creation of overlap works only when there is one group */ + /* Ensure only one group on each proc */ + assert ( 
parmesh->ngrp == 1 + && "Overlap not implemented for more than 1 group per rank"); + + /* Global initialization */ + grp = &parmesh->listgrp[0]; + mesh = parmesh->listgrp[0].mesh; + ls = parmesh->listgrp[0].ls; + next_comm = parmesh->next_node_comm; // Nbr of external node communicators + int_comm = parmesh->int_node_comm; // Internal node communicator + nt_initial = mesh->ne; + ndataPBDY_added = 0; + + /* Reset flags */ + for (k=1; k<=mesh->np; k++) + mesh->point[k].flag = 0; + + /* Global allocation memory */ + PMMG_CALLOC(parmesh,int_comm->intvalues,int_comm->nitem,int,"intvalues", ier = 0 ); + PMMG_CALLOC(parmesh,parmesh->overlap,next_comm,PMMG_Overlap,"overlap",ier = 0); + PMMG_CALLOC(parmesh,dataPBDY_AlreadyAdded,5*mesh->np,int,"dataPBDY_AlreadyAdded",ier = 0); + + PMMG_CALLOC(parmesh,n_ToSend,6,int,"n_ToSend",ier = 0); + PMMG_CALLOC(parmesh,n_ToRecv,6,int,"n_ToRecv",ier = 0); + PMMG_CALLOC(parmesh,pointCoordInterior_ToSend, 3*mesh->np, double, "pointCoordInterior_ToSend",ier = 0); + PMMG_CALLOC(parmesh,pointCoordPBDY_ToSend, 3*mesh->np, double, "pointCoordPBDY_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointTagInterior_ToSend, mesh->np, uint16_t,"pointTagInterior_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointTagPBDY_ToSend, mesh->np, uint16_t,"pointTagPBDY_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointRefInterior_ToSend, mesh->np, int, "pointRefInterior_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointRefPBDY_ToSend, mesh->np, int, "pointRefPBDY_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointIdxPBDY_ToSend, mesh->np, int, "pointIdxPBDY_ToSend", ier = 0); + PMMG_CALLOC(parmesh,pointIdxInterface_ToSend, mesh->np, int, "pointIdxInterface_ToSend",ier = 0); + PMMG_CALLOC(parmesh,tetraVertices_ToSend, 4*mesh->ne, int, "tetraVertices_ToSend", ier = 0); + PMMG_CALLOC(parmesh,tetraVerticesSeen_ToSend, 4*mesh->ne, int, "tetraVerticesSeen_ToSend", ier = 0); + PMMG_CALLOC(parmesh,tetraRef_ToSend, mesh->ne, int, "tetraRef_ToSend", ier = 0); + PMMG_CALLOC(parmesh,lsInterior_ToSend, mesh->np, double, "lsInterior_ToSend", ier = 0); + PMMG_CALLOC(parmesh,lsPBDY_ToSend, mesh->np, double, "lsPBDY_ToSend", ier = 0); + + if ( !ier ) { + /* One alloc has failed: Avoid segfault or deadlock */ + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE); + } + + /** STEP 1 - Store point index in internal communicator intvalues. + Like we have only one group (see the assert at the beginning of the function), + we can store directly the id of the node in the internal comm int_comm->intvalue + as we know it won't be overwrite by another group. Indeed, entities shared + by the groups have a unique position in this buffer. In short, the next steps + work because we have only one group when we are in this function. 
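
     Editorial sketch (simplified types, illustrative only, not part of the
     patch) of how the two indirections are combined: index1[i] is a local
     point index, index2[i] is its slot in the internal communicator, and each
     external-communicator entry stores such a slot, so a rank can recover the
     local index of every node it shares with a given neighbour.

       typedef struct { int nitem; int *index1, *index2; } grp_sketch;
       typedef struct { int nitem; int *intvalues;       } int_comm_sketch;
       typedef struct { int nitem; int *int_comm_index;  } ext_comm_sketch;

       static void fill_and_lookup(const grp_sketch *grp, int_comm_sketch *ic,
                                   const ext_comm_sketch *ec, int *shared_points) {
         // 1) store each group-local point index at its internal-communicator slot
         for (int i = 0; i < grp->nitem; i++)
           ic->intvalues[ grp->index2[i] ] = grp->index1[i];
         // 2) walk one external communicator and recover the local point indices
         for (int i = 0; i < ec->nitem; i++)
           shared_points[i] = ic->intvalues[ ec->int_comm_index[i] ];
       }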
**/ + for( i = 0; i < grp->nitem_int_node_comm; i++ ){ + ip = grp->node2int_node_comm_index1[i]; + pos = grp->node2int_node_comm_index2[i]; + int_comm->intvalues[pos] = ip; + } + + /* Loop over the number of external node communicator */ + for (icomm=0; icommnp; + + /* Get external node communicator information */ + ext_comm = &parmesh->ext_node_comm[icomm]; // External node communicator + color_in = ext_comm->color_in; // Color of this partition Pcolor_in + color_out = ext_comm->color_out; // Color of the remote partition Pcolor_out + nitem_ext = ext_comm->nitem; // Nbr of nodes in common between Pcolor_in and Pcolor_out + + /* Overlap variables */ + overlap = &parmesh->overlap[icomm]; + overlap->color_in = color_in; + overlap->color_out = color_out; + + /** STEP 2 - Identify nodes at the interface between Pcolor_in and Pcolor_out + a) Assign flag -1 to these nodes + b) Store the local point indices in pointIdxInterface_ToSend + to be able to fill hash_out2in + Note: The following works because we have only one group in this function + (see the assert at the beginning of the function) and we have been able + to store the local index of node **/ + /* Loop over the nodes in the external node communicator */ + for (i=0; i < nitem_ext; i++) { + /* Get the index of the node stored in internal communicator */ + pos = ext_comm->int_comm_index[i]; + ip = int_comm->intvalues[pos]; + + /* Add the flag -1 to this point */ + p0 = &mesh->point[ip]; + p0->flag = -1; + pointIdxInterface_ToSend[i] = ip; + } + + /** STEP 3 - Identification of points and tetras to send to Pcolor_out + tetraVerticesSeen_ToSend: flag the tetra vertices using following rules + +1 means the vertex is seen for the first time + 0 means the vertex has been already seen (from another tetra) + OR is on the interface between Pcolor_in and Pcolor_out + OR is tagged MG_PARBDY (special treatments for those) **/ + /* Loop over the tetra on this partition, i.e. Pcolor_in */ + for (k=1; k<=nt_initial; k++) { + pt = &mesh->tetra[k]; + if (!MG_EOK(pt)) continue; + + /* If one vertex if flag -1, assign MG_OVERLAP to this tetra */ + for (i=0; i<4; i++) { + ip = pt->v[i]; + p0 = &mesh->point[ip]; + if ( p0->flag < 0 ) { + pt->tag |= MG_OVERLAP; + break; + } + } + + /* If tetra has not been identified as MG_OVERLAP, then ignore it */ + if ( !(pt->tag & MG_OVERLAP) ) continue; + + tetraRef_ToSend[ntTot_in2out] = pt->ref; + + /* Loop over the vertices of this tetra, all the nodes belong to the overlap + that we want to send to Pcolor_out */ + for (i=0; i<4; i++) { + ip = pt->v[i]; + p0 = &mesh->point[ip]; + tetraVertices_ToSend[4*ntTot_in2out+i] = ip; + + /* If this node has never been seen before for the partition on Pcolor_out + (i.e. p0->flag!=color_out+1) and is not on interface between Pcolor_in + and Pcolor_out (p0->flag>=0): + a) in VerticesSeen_ToSend : assign 1 if this node is not tagged MG_PARBDY; + assign 0 otherwise (special treatment for those). + b) store its coordinates in pointCoord*_ToSend, its tag in + pointTag*_ToSend; its ref in pointRef*_ToSend; its level-set value + in ls*_ToSend. 
`*` being either `Interior` for pts not tagged + MG_PARBDY or `PBDY` for pts tagged MG_PARBDY */ + if ( (p0->flag>=0) && (p0->flag!=color_out+1) ) { + + /* Update flag of this vertex to identify it has already been seen */ + p0->flag = color_out+1; + + /* If this vertex is not tagged MG_PARBDY, store needed info in *Interior* to be sent to Pcolor_out */ + if (!(p0->tag & MG_PARBDY)) { + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 1; // Vertex seen for the first time + pointCoordInterior_ToSend[3*npInterior_in2out] = p0->c[0]; + pointCoordInterior_ToSend[3*npInterior_in2out+1] = p0->c[1]; + pointCoordInterior_ToSend[3*npInterior_in2out+2] = p0->c[2]; + pointTagInterior_ToSend[npInterior_in2out] = p0->tag; + pointRefInterior_ToSend[npInterior_in2out] = p0->ref; + lsInterior_ToSend[npInterior_in2out] = ls->m[ip]; + npInterior_in2out++; + } + /* If this vertex is tagged MG_PARBDY, store needed info in *PBDY* to + be sent to Pcolor_out. pointIdxPBDY_ToSend stores MG_PARBDY points + except the ones at interface between Pcolor_in and Pcolor_out */ + else { + /* If this vertex is tagged MG_PARBDY, store it in pointIdxPBDY_ToSend + pointIdxPBDY_ToSend stores the points w/ MG_PARBDY tag except the ones + at interface between Pcolor_in and Pcolor_out */ + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 0; // Vertex tagged MG_PARBDY (special treatments for those) + pointCoordPBDY_ToSend[3*npPBDY_in2out] = p0->c[0]; + pointCoordPBDY_ToSend[3*npPBDY_in2out+1] = p0->c[1]; + pointCoordPBDY_ToSend[3*npPBDY_in2out+2] = p0->c[2]; + pointTagPBDY_ToSend[npPBDY_in2out] = p0->tag; + pointRefPBDY_ToSend[npPBDY_in2out] = p0->ref; + pointIdxPBDY_ToSend[npPBDY_in2out] = ip; + lsPBDY_ToSend[npPBDY_in2out] = ls->m[ip]; + npPBDY_in2out++; + } + } + /* If this node is on the interface between Pcolor_in and Pcolor_out (p0->flag=-1), + this node will be treated separately, assign 0 in tetraVerticesSeen_ToSend + OR + If this node has been seen before in another tetra (p0->flag = color_out+1), + assign 0 in tetraVerticesSeen_ToSend */ + else { + tetraVerticesSeen_ToSend[4*ntTot_in2out+i] = 0; + } + } + ntTot_in2out++; + /* Remove the tag MG_OVERLAP : here it is used as a way to identify the + tetra to be sent. Later, tetras having the tag MG_OVERLAP will actually + be tetras received from Pcolor_out */ + pt->tag &= ~MG_OVERLAP; + } + + /* The total number of point to send to Pcolor_out npTot_in2out is + the nbr of point not tagged MG_PARBDY npInterior_in2out + plus the ones tagged MG_PARBDY npPBDY_in2out */ + npTot_in2out = npInterior_in2out + npPBDY_in2out; + + /* Reinitialize points flags and tag **/ + for (i=0; i < nitem_ext; i++) { + pos = ext_comm->int_comm_index[i]; + ip = int_comm->intvalues[pos]; + p0 = &mesh->point[ip]; + p0->flag = 0; + if (p0->tag & MG_OVERLAP) p0->tag &=~ MG_OVERLAP; + } + + /** STEP 4 - Special treatment for nodes with tag MG_PARBDY. + Identification of the points at the interface between Pcolor_in + and another Pcolor_ter (!=Pcolor_out) to be sent to Pcolor_out. 
+ Array `dataPBDY_ToSend` (for points with MG_PARBDY) stores + - Pcolor_ter: the other partition w/ which this node is shared + (Pcolor_ter!=Pcolor_out) + - The position of the point in the external communicator + - The local index in the mesh + NB: This part of the algo might be optimised **/ + + /* Allocate the variables dataPBDY_ToSend */ + PMMG_CALLOC(parmesh,dataPBDY_ToSend,3*npPBDY_in2out*parmesh->nprocs,int,"dataPBDY_ToSend", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + + /* Loop over the nodes with MG_PARBDY tags to store useful data to identify + them in dataPBDY_ToSend */ + for (i=0; ipoint[ip]; + + /* Loop over the partitions having nodes in common w/ Pcolor_in */ + for (icomm_ter=0; icomm_terext_node_comm[icomm_ter]; // External node communicator + color_ter = ext_comm_ter->color_out; // Color of the remote partition Pcolor_ter + nitem_ext_ter = ext_comm_ter->nitem; // Nbr of nodes in common between Pcolor_in and Pcolor_ter + + assert( (color_ter != color_out) && "Unexpected case: duplicated communicator?" ); + + /* Loop over the nodes in the external node communicator Pcolor_ter */ + for (j=0; j < nitem_ext_ter; j++) { + /* Get the indices of the nodes in internal communicators */ + pos = ext_comm_ter->int_comm_index[j]; + ip_ter = int_comm->intvalues[pos]; + + if ( !(ip==ip_ter) ) continue; + + /* Each time the node ip is found being shared w/ another + partition store Pcolor_ter and the position j of this node + in the external comm of Pcolor_ter and the local index of the point */ + dataPBDY_ToSend[3*ndataPBDY_in2out] = color_ter; + dataPBDY_ToSend[3*ndataPBDY_in2out+1] = j; // position in external communicator + dataPBDY_ToSend[3*ndataPBDY_in2out+2] = ip; // index of point on this partition + ndataPBDY_in2out++; + break; + } + } + } + + /** STEP 5 - Store all the different sizes to exchange **/ + n_ToSend[0] = npInterior_in2out;// Nbr of interior point from Pcolor_in to send to Pcolor_out + n_ToSend[1] = npPBDY_in2out; // Nbr of MG_PARBDY point from Pcolor_in to send to Pcolor_out + n_ToSend[2] = npTot_in2out; // Total nbr of points from Pcolor_in to send to Pcolor_out + n_ToSend[3] = ndataPBDY_in2out; // Nbr of data for MG_PARBDY points from Pcolor_in to send to Pcolor_out + n_ToSend[4] = np_in; // Total nbr of points on mesh Pcolor_in + n_ToSend[5] = ntTot_in2out; // Total nbr of tetras from Pcolor_in to send to Pcolor_out + + /** STEP 6 - Send and Receive all the data from the other partitions **/ + // TODO :: Improve number of communications + /* STEP 6.1 - First send/receive the different sizes to exchange */ + MPI_CHECK( + MPI_Sendrecv(n_ToSend,6,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + n_ToRecv,6,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + npInterior_out2in = n_ToRecv[0]; + npPBDY_out2in = n_ToRecv[1]; + npTot_out2in = n_ToRecv[2]; + ndataPBDY_out2in = n_ToRecv[3]; + np_out = n_ToRecv[4]; + ntTot_out2in = n_ToRecv[5]; + + /* Fill overlap variables*/ + overlap->np_in2out = npTot_in2out; // Total pts nbr sends from Pcolor_in to Pcolor_out + overlap->np_out2in = npTot_out2in; // Total pts nbr receives on Pcolor_in from Pcolor_out + overlap->nt_in2out = ntTot_in2out; // Total tetra nbr sends from Pcolor_in to Pcolor_out + overlap->nt_out2in = ntTot_out2in; // Total tetra nbr receives on Pcolor_in from Pcolor_out + + /* STEP 6.2 - Alloc hash table */ + hash_in2out = overlap->hash_in2out; + hash_out2in = overlap->hash_out2in; + PMMG_CALLOC(parmesh,hash_in2out,np_in 
+npTot_out2in+1,int,"hash_in2out", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + PMMG_CALLOC(parmesh,hash_out2in,np_out +1,int,"hash_out2in", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE);); + + /* STEP 6.3 - Send and receive all the other data */ + /* Send/receive indices of tetras vertices */ + PMMG_CALLOC(parmesh,tetraVertices_ToRecv_outIdx,4*ntTot_out2in,int,"tetraVertices_ToRecv_outIdx", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + PMMG_CALLOC(parmesh,tetraVertices_ToRecv_inIdx, 4*ntTot_out2in,int,"tetraVertices_ToRecv_inIdx", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + + MPI_CHECK( + MPI_Sendrecv(tetraVertices_ToSend, 4*ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraVertices_ToRecv_outIdx,4*ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive flag to know if tetra vertex has already been seen */ + PMMG_CALLOC(parmesh,tetraVerticesSeen_ToRecv,4*ntTot_out2in,int, + "tetraVerticesSeen_ToRecv",MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); + MPI_CHECK( + MPI_Sendrecv(tetraVerticesSeen_ToSend,4*ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraVerticesSeen_ToRecv,4*ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive tetras refs */ + PMMG_CALLOC(parmesh,tetraRef_ToRecv,ntTot_out2in,int,"tetraRef_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(tetraRef_ToSend,ntTot_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + tetraRef_ToRecv,ntTot_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive points indices */ + PMMG_CALLOC(parmesh,pointIdxInterface_ToRecv,nitem_ext,int,"pointIdxInterface_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointIdxInterface_ToSend,nitem_ext,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointIdxInterface_ToRecv,nitem_ext,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointIdxPBDY_ToRecv,npPBDY_out2in,int,"pointIdxPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointIdxPBDY_ToSend,npPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointIdxPBDY_ToRecv,npPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive points tag */ + PMMG_CALLOC(parmesh,pointTagInterior_ToRecv,npInterior_out2in,uint16_t,"pointTagInterior_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointTagInterior_ToSend,npInterior_in2out,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointTagInterior_ToRecv,npInterior_out2in,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointTagPBDY_ToRecv,npPBDY_out2in,uint16_t,"pointTagPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointTagPBDY_ToSend,npPBDY_in2out,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointTagPBDY_ToRecv,npPBDY_out2in,MPI_UINT16_T,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + 
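+  /* Each exchange in this step uses its own tag offset (MPI_OVERLAP_TAG+local_mpi_tag)
+     and local_mpi_tag is incremented after every send/receive pair, so the
+     successive messages exchanged with Pcolor_out cannot be mismatched. */
+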
local_mpi_tag++; + + /* Send/receive points references */ + PMMG_CALLOC(parmesh,pointRefInterior_ToRecv,npInterior_out2in,int,"pointRefInterior_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointRefInterior_ToSend,npInterior_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointRefInterior_ToRecv,npInterior_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointRefPBDY_ToRecv,npPBDY_out2in,int,"pointRefPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointRefPBDY_ToSend,npPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointRefPBDY_ToRecv,npPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive points coordinates */ + PMMG_CALLOC(parmesh,pointCoordInterior_ToRecv,3*npInterior_out2in,double,"pointCoordInterior_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointCoordInterior_ToSend,3*npInterior_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointCoordInterior_ToRecv,3*npInterior_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,pointCoordPBDY_ToRecv,3*npPBDY_out2in,double,"pointCoordPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(pointCoordPBDY_ToSend,3*npPBDY_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + pointCoordPBDY_ToRecv,3*npPBDY_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive data needed to identify MG_PARBDY nodes */ + PMMG_CALLOC(parmesh,dataPBDY_ToRecv,3*ndataPBDY_out2in,int,"dataPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(dataPBDY_ToSend,3*ndataPBDY_in2out,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + dataPBDY_ToRecv,3*ndataPBDY_out2in,MPI_INT,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /* Send/receive LS values */ + PMMG_CALLOC(parmesh,lsInterior_ToRecv,npInterior_out2in,double,"lsInterior_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(lsInterior_ToSend,npInterior_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + lsInterior_ToRecv,npInterior_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + PMMG_CALLOC(parmesh,lsPBDY_ToRecv,npPBDY_out2in,double,"lsPBDY_ToRecv", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + MPI_CHECK( + MPI_Sendrecv(lsPBDY_ToSend,npPBDY_in2out,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + lsPBDY_ToRecv,npPBDY_out2in,MPI_DOUBLE,color_out,MPI_OVERLAP_TAG+local_mpi_tag, + comm,&status),MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + local_mpi_tag++; + + /** STEP 7 - Fill the overlap hash tables with interface points **/ + for (i=0; i < nitem_ext; i++) { + ip_out = pointIdxInterface_ToRecv[i]; // Point index on Pcolor_out (the other partition) + ip_in = pointIdxInterface_ToSend[i]; // Point index on Pcolor_in (this partition) + hash_out2in[ip_out] = ip_in; // From index on Pcolor_out, I found index on Pcolor_in + hash_in2out[ip_in] = ip_out; // From index on 
Pcolor_in, I found index on Pcolor_out
+  }
+
+  /** STEP 8 - Add nodes from Pcolor_out having MG_PARBDY tag to:
+   *    a) the local mesh on Pcolor_in
+   *    b) the overlap hash tables (hash_out2in and hash_in2out)
+   *  Create variable dataPBDY_AlreadyAdded storing:
+   *    (1) color_out: neighbouring partition
+   *    (2) color_ter: third partition w/ which the MG_PARBDY node is shared
+   *        (Pcolor_ter!=Pcolor_out)
+   *    (3) k: position of node ip in external communicator Pcolor_out-Pcolor_ter
+   *    (4) ip_in: local index of node on Pcolor_in
+   *    (5) ip_out: local index of node on Pcolor_out
+   *  Steps are as follows:
+   *    8.1. Identify if duplicated_point = 0 or = 1 based on data in dataPBDY_AlreadyAdded
+   *    8.2. If duplicated_point = 0, then we add this new point to the mesh,
+   *         otherwise do nothing
+   *    8.3. For both duplicated_point = 0 and = 1, the hash tables (to find the
+   *         correspondence between indices) are updated with the information of this point
+   *    8.4. For both duplicated_point = 0 and = 1, dataPBDY_AlreadyAdded is
+   *         updated with the information of this point.
+   **/
+  mesh->xpmax = MG_MAX( (MMG5_int)(1.5*mesh->xp),mesh->npmax);
+
+  /* Loop over the points having MG_PARBDY tag to add them to mesh on Pcolor_in */
+  for (i=0; i < npPBDY_out2in; i++) {
+    ip_out   = pointIdxPBDY_ToRecv[i]; // Index of point on Pcolor_out
+    coord[0] = pointCoordPBDY_ToRecv[3*i];
+    coord[1] = pointCoordPBDY_ToRecv[3*i+1];
+    coord[2] = pointCoordPBDY_ToRecv[3*i+2];
+    tag      = pointTagPBDY_ToRecv[i] | MG_OVERLAP; // Add the tag MG_OVERLAP to this point
+    ref      = pointRefPBDY_ToRecv[i];
+    ls_val   = lsPBDY_ToRecv[i];
+
+    /* Loop over the array dataPBDY_ToRecv to find the point ip_out */
+    for (j=0; j < ndataPBDY_out2in; j++) {
+
+      color_ter = dataPBDY_ToRecv[3*j];
+      k         = dataPBDY_ToRecv[3*j+1]; // Position in external communicator Pcolor_out-Pcolor_ter
+      ip        = dataPBDY_ToRecv[3*j+2]; // Index of point on Pcolor_out
+
+      if ( !(ip == ip_out) ) continue;
+
+      /* STEP 8.1 - Search in dataPBDY_AlreadyAdded whether this point has already
+         been added to the mesh
+           - if this point exists already, then duplicated_point = 1;
+           - otherwise duplicated_point = 0 */
+      duplicated_point = 0;
+      for (r=0; r < ndataPBDY_added+1; r++) {
+        /* If the tuple (Pcolor_out-Pcolor_ter) is the right one */
+        if ( (color_ter == dataPBDY_AlreadyAdded[5*r]) && (color_out == dataPBDY_AlreadyAdded[5*r+1]) ) {
+          /* And the point is at the same position in external comm */
+          if ( k == dataPBDY_AlreadyAdded[5*r+2] ) {
+            /* then this point has already been added */
+            ip_in = dataPBDY_AlreadyAdded[5*r+3];
+            duplicated_point = 1;
+            break;
+          }
+        }
+        /* If this node ip_out from Pcolor_out has already been added:
+           get the index ip_in it has on this partition Pcolor_in */
+        if ( (color_out == dataPBDY_AlreadyAdded[5*r]) && (ip_out == dataPBDY_AlreadyAdded[5*r+4])) {
+          ip_in = dataPBDY_AlreadyAdded[5*r+3];
+          duplicated_point = 1;
+          break;
+        }
+      }
+
+      /* STEP 8.2 - If this point has not been added yet (duplicated_point = 0),
+         then add it to the mesh; otherwise do nothing */
+      if (duplicated_point==0) {
+        ip_in = MMG3D_newPt(mesh,coord,tag,1);
+        mesh->point[ip_in].ref = ref; // Add ref
+        mesh->point[ip_in].xp  = 0;   // Assign 0 to xp
+        ls->m[ip_in] = ls_val;
+      }
+
+      /* STEP 8.3 - Update the hash tables to know the correspondence of
+         local indices on color_in and color_out */
+      hash_out2in[ip_out] = ip_in;  // From index on color_out, I found index on color_in
+      hash_in2out[ip_in]  = ip_out; // From index on color_in, I found index on color_out
+
+      /* STEP 8.4 - Update dataPBDY_AlreadyAdded data necessary to
identify which + MG_PARBDY points have already been treated */ + dataPBDY_AlreadyAdded[5*ndataPBDY_added] = color_out; + dataPBDY_AlreadyAdded[5*ndataPBDY_added+1]= color_ter; + dataPBDY_AlreadyAdded[5*ndataPBDY_added+2]= k; + dataPBDY_AlreadyAdded[5*ndataPBDY_added+3]= ip_in; + dataPBDY_AlreadyAdded[5*ndataPBDY_added+4]= ip_out; + ndataPBDY_added++; + } + } + + /** STEP 9 - Add all the other nodes from Pcolor_out to: + * a) the local mesh on Pcolor_in + * b) the overlap hash tables (hash_out2in and hash_in2out) **/ + j=0; + + for (i=0; i < 4*ntTot_out2in; i++) { + ip_out=tetraVertices_ToRecv_outIdx[i]; // This point has the index ip_out on Pcolor_out + + if (tetraVerticesSeen_ToRecv[i]==1) { + coord[0] = pointCoordInterior_ToRecv[3*j]; + coord[1] = pointCoordInterior_ToRecv[3*j+1]; + coord[2] = pointCoordInterior_ToRecv[3*j+2]; + ref = pointRefInterior_ToRecv[j]; + tag = pointTagInterior_ToRecv[j] | MG_OVERLAP; // Add the tag MG_OVERLAP to this point + ls_val = lsInterior_ToRecv[j]; + j += 1; + + /* New overlapping node is created. src is set equal to 1 by default, + but the value does not really matter because this point will then be + deleted by PMMG_delete_overlap once the overlap is not needed anymore */ + ip_in = MMG3D_newPt(mesh,coord,tag,1); + mesh->point[ip_in].ref = ref; // Assign ref + mesh->point[ip_in].xp = 0; // Assign 0 to xp + ls->m[ip_in] = ls_val; + hash_out2in[ip_out] = ip_in; // From index on color_out, I found index on color_in + hash_in2out[ip_in] = ip_out; // From index on color_in, I found index on color_out + tetraVertices_ToRecv_inIdx[i]=ip_in; // This point has the index ip_in on Pcolor_in + } + else{ + tetraVertices_ToRecv_inIdx[i]=hash_out2in[ip_out]; // Find the local index of this point from hash table + } + } + + /** STEP 10 - Add the tetra to the mesh */ + for (i=0; i < ntTot_out2in; i++) { + /* Create a new tetra*/ + k = MMG3D_newElt(mesh); + if ( !k ) { + MMG3D_TETRA_REALLOC(mesh,k,mesh->gap, + fprintf(stderr,"\n ## Error: %s: unable to allocate" + " a new element.\n",__func__); + MMG5_INCREASE_MEM_MESSAGE(); + fprintf(stderr," Exit program.\n"); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE) ); + } + pt = &mesh->tetra[k]; + + /* Add vertices index, ref and tag to this new tetra */ + pt->v[0] = tetraVertices_ToRecv_inIdx[4*i]; + pt->v[1] = tetraVertices_ToRecv_inIdx[4*i+1]; + pt->v[2] = tetraVertices_ToRecv_inIdx[4*i+2]; + pt->v[3] = tetraVertices_ToRecv_inIdx[4*i+3]; + pt->ref = tetraRef_ToRecv[i]; + pt->tag |= MG_OVERLAP; + } + + if ( parmesh->info.imprim > PMMG_VERB_ITWAVES ) + fprintf(stdout, " part %d sends %d pts and %d tetra to part %d\n", + color_in,npTot_in2out,ntTot_in2out,color_out); + + /* Reset memory*/ + memset(n_ToSend,0x00,6*sizeof(int)); + memset(n_ToRecv,0x00,6*sizeof(int)); + + memset(pointCoordInterior_ToSend,0x00,3*npInterior_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,pointCoordInterior_ToRecv,double,"pointCoordInterior_ToRecv"); + + memset(pointCoordPBDY_ToSend,0x00,3*npPBDY_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,pointCoordPBDY_ToRecv,double,"pointCoordPBDY_ToRecv"); + + memset(pointTagInterior_ToSend,0x00,npInterior_in2out*sizeof(uint16_t)); + PMMG_DEL_MEM(parmesh,pointTagInterior_ToRecv,uint16_t,"pointTagInterior_ToRecv"); + + memset(pointTagPBDY_ToSend,0x00,npPBDY_in2out*sizeof(uint16_t)); + PMMG_DEL_MEM(parmesh,pointTagPBDY_ToRecv,uint16_t,"pointTagPBDY_ToRecv"); + + memset(pointRefInterior_ToSend,0x00,npInterior_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointRefInterior_ToRecv,int,"pointRefInterior_ToRecv"); + + 
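+  /* Note: the *_ToSend buffers are only reset here (memset) because they are
+     reused for the next neighbouring partition and freed only once, after the
+     loop over the external communicators; the *_ToRecv buffers are freed directly. */
+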
memset(pointRefPBDY_ToSend,0x00,npPBDY_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointRefPBDY_ToRecv,int,"pointRefPBDY_ToRecv"); + + memset(pointIdxInterface_ToSend,0x00,nitem_ext*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointIdxInterface_ToRecv,int,"pointIdxInterface_ToRecv"); + + memset(pointIdxPBDY_ToSend,0x00,npPBDY_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,pointIdxPBDY_ToRecv,int,"pointIdxPBDY_ToRecv"); + + memset(tetraVertices_ToSend,0x00,4*ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraVertices_ToRecv_inIdx, int,"tetraVertices_ToRecv_inIdx"); + PMMG_DEL_MEM(parmesh,tetraVertices_ToRecv_outIdx,int,"tetraVertices_ToRecv_outIdx"); + + memset(tetraVerticesSeen_ToSend,0x00,4*ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraVerticesSeen_ToRecv,int,"tetraVerticesSeen_ToRecv"); + + memset(tetraRef_ToSend,0x00,ntTot_in2out*sizeof(int)); + PMMG_DEL_MEM(parmesh,tetraRef_ToRecv,int,"tetraRef_ToRecv"); + + memset(lsInterior_ToSend,0x00,npInterior_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,lsInterior_ToRecv,double,"lsInterior_ToRecv"); + + memset(lsPBDY_ToSend,0x00,npPBDY_in2out*sizeof(double)); + PMMG_DEL_MEM(parmesh,lsPBDY_ToRecv,double,"lsPBDY_ToRecv"); + + PMMG_DEL_MEM(parmesh,dataPBDY_ToSend,int,"dataPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,dataPBDY_ToRecv,int,"dataPBDY_ToRecv"); + } + + /* Deallocate memory*/ + PMMG_DEL_MEM(parmesh,n_ToSend,int,"n_ToSend"); + PMMG_DEL_MEM(parmesh,n_ToRecv,int,"n_ToRecv"); + PMMG_DEL_MEM(parmesh,pointCoordInterior_ToSend,double,"pointCoordInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointCoordPBDY_ToSend,double,"pointCoordPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointTagInterior_ToSend,uint16_t,"pointTagInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointTagPBDY_ToSend,uint16_t,"pointTagPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointRefInterior_ToSend,int,"pointRefInterior_ToSend"); + PMMG_DEL_MEM(parmesh,pointRefPBDY_ToSend,int,"pointRefPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,pointIdxInterface_ToSend,int,"pointIdxInterface_ToSend"); + PMMG_DEL_MEM(parmesh,pointIdxPBDY_ToSend,int,"pointIdxPBDY_ToSend"); + PMMG_DEL_MEM(parmesh,tetraVertices_ToSend,int,"tetraVertices_ToSend"); + PMMG_DEL_MEM(parmesh,tetraVerticesSeen_ToSend,int,"tetraVerticesSeen_ToSend"); + PMMG_DEL_MEM(parmesh,tetraRef_ToSend,int,"tetraRef_ToSend"); + PMMG_DEL_MEM(parmesh,lsInterior_ToSend,double,"lsInterior_ToSend"); + PMMG_DEL_MEM(parmesh,lsPBDY_ToSend,double,"lsPBDY_ToSend"); + + PMMG_DEL_MEM(parmesh,dataPBDY_AlreadyAdded,int,"dataPBDY_AlreadyAdded"); + PMMG_DEL_MEM(parmesh,int_comm->intvalues,int,"intvalues"); + + if ( parmesh->info.imprim > PMMG_VERB_ITWAVES ) + fprintf(stdout, " part %d has %d pts and %d tetras after overlap creation\n", + color_in,mesh->np,mesh->ne); + + return 1; +} + +/** + * \param parmesh pointer toward a parmesh structure + * \param comm MPI communicator for ParMmg + * + * \return 1 if success, 0 if fail. 
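+ *
+ * Typical calling sequence (sketch, the exact call site is an assumption): the
+ * overlap created by PMMG_create_overlap is removed by this function once the
+ * overlapping entities (tagged MG_OVERLAP) are not needed anymore, e.g.
+ *   if ( !PMMG_create_overlap(parmesh,comm) ) return 0;
+ *   ... work on the mesh with its overlap ...
+ *   if ( !PMMG_delete_overlap(parmesh,comm) ) return 0;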
+ * + * Delete the overlap points and tetras present in the mesh + * + */ +int PMMG_delete_overlap(PMMG_pParMesh parmesh, MPI_Comm comm) { + + /* Local variables */ + MMG5_pMesh mesh; + MMG5_pTetra pt; + MMG5_pPoint ppt; + + int i; + + if ( parmesh->info.imprim > PMMG_VERB_STEPS ) { + fprintf(stdout," Delete Overlap\n"); + } + + if ( !parmesh->ngrp ) { + return 1; + } + + /* Delete overlap works only when there is one group */ + /* Ensure only one group on each proc */ + assert ( parmesh->ngrp == 1 + && "Overlap not implemented for more than 1 group per rank" ); + + /* Global initialization */ + mesh = parmesh->listgrp[0].mesh; + + /* Step 1 - Delete tetras with tag MG_OVERLAP */ + for (i=mesh->ne; i > 0; i--) { + pt = &mesh->tetra[i]; + if ( !(pt->tag & MG_OVERLAP) ) continue; + if ( !MMG3D_delElt(mesh,i) ) return 0; + } + + /* Step 2 - Delete points with tag MG_OVERLAP */ + for (i=mesh->np; i > 0; i--) { + ppt = &mesh->point[i]; + if ( !(ppt->tag & MG_OVERLAP) ) continue; + MMG3D_delPt(mesh,i); + } + + if ( parmesh->info.imprim > PMMG_VERB_ITWAVES ) + fprintf(stdout, " part %d has %d pts and %d tetras after overlap deletion\n", + parmesh->myrank,mesh->np,mesh->ne); + + return 1; +} diff --git a/src/parmmg.c b/src/parmmg.c index fc84f8b9..c542542d 100644 --- a/src/parmmg.c +++ b/src/parmmg.c @@ -24,7 +24,7 @@ /** * \file parmmg.c * \brief main file for the parmmg application - * \author Cécile Dobrzynski (Bx INP/Inria) + * \author Cecile Dobrzynski (Bx INP/Inria) * \author Algiane Froehly (Inria) * \version 5 * \copyright GNU Lesser General Public License. @@ -56,6 +56,7 @@ static void PMMG_endcod() { * * Main program for PARMMG executable: perform parallel mesh adaptation. * + * \todo refactoring to improve readibility */ int main( int argc, char *argv[] ) { @@ -94,6 +95,7 @@ int main( int argc, char *argv[] ) /* Allocate the main pmmg struct and assign default values */ if ( 1 != PMMG_Init_parMesh( PMMG_ARG_start, PMMG_ARG_ppParMesh,&parmesh, + PMMG_ARG_pLs, PMMG_ARG_dim,3, PMMG_ARG_MPIComm,MPI_COMM_WORLD, PMMG_ARG_end) ) { @@ -107,6 +109,7 @@ int main( int argc, char *argv[] ) if ( 1 != MMG3D_Free_names(MMG5_ARG_start, MMG5_ARG_ppMesh, &parmesh->listgrp[0].mesh, MMG5_ARG_ppMet, &parmesh->listgrp[0].met, + MMG5_ARG_ppLs, &parmesh->listgrp[0].ls, MMG5_ARG_end) ) PMMG_RETURN_AND_FREE( parmesh, PMMG_STRONGFAILURE ); @@ -114,6 +117,7 @@ int main( int argc, char *argv[] ) if ( !PMMG_parmesh_SetMemMax(parmesh) ) PMMG_RETURN_AND_FREE( parmesh, PMMG_STRONGFAILURE ); + /* Read command line */ if ( 1 != PMMG_parsar( argc, argv, parmesh ) ) PMMG_RETURN_AND_FREE( parmesh, PMMG_STRONGFAILURE ); @@ -144,13 +148,13 @@ int main( int argc, char *argv[] ) ptr = MMG5_Get_filenameExt(parmesh->meshin); - fmtin = MMG5_Get_format(ptr,MMG5_FMT_MeditASCII); + fmtin = PMMG_Get_format(ptr,MMG5_FMT_MeditASCII); /* Compute default output format */ ptr = MMG5_Get_filenameExt(parmesh->meshout); /* Format from output mesh name */ - fmtout = MMG5_Get_format(ptr,fmtin); + fmtout = PMMG_Get_format(ptr,fmtin); distributedInput = 0; @@ -238,13 +242,11 @@ int main( int argc, char *argv[] ) if ( grp->mesh->info.lag >= 0 || grp->mesh->info.iso ) { /* displacement or isovalue are mandatory */ if( !distributedInput ) { - iermesh = ( PMMG_loadSol_centralized( parmesh, NULL ) ); + iermesh = ( PMMG_loadSol_centralized( parmesh, parmesh->lsin ) ); } else { - printf(" ## Error: Distributed input not yet implemented for displacement.\n"); - //int ier_loc = PMMG_loadSol_distributed( parmesh, NULL ); - //MPI_Allreduce( &ier_loc, 
&iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); - iermesh = 0; + int ier_loc = PMMG_loadSol_distributed( parmesh, parmesh->lsin ); + MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); } if ( iermesh < 1 ) { if ( rank == parmesh->info.root ) { @@ -272,20 +274,35 @@ int main( int argc, char *argv[] ) } } /* In iso mode: read metric if any */ - if ( grp->mesh->info.iso && parmesh->metin ) { - if ( !distributedInput ) { - iermesh = PMMG_loadMet_centralized( parmesh, parmesh->metin ); + if ( grp->mesh->info.iso) { + if ( parmesh->metin ) { + if ( !distributedInput ) { + iermesh = PMMG_loadMet_centralized( parmesh, parmesh->metin ); + } + else { + int ier_loc = PMMG_loadMet_distributed( parmesh, parmesh->metin ); + MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); + } + if ( -1 == iermesh ) { + if ( rank == parmesh->info.root ) { + fprintf(stderr,"\n ## ERROR: UNABLE TO LOAD METRIC.\n"); + } + ier = 0; + goto check_mesh_loading; + } } else { - int ier_loc = PMMG_loadMet_distributed( parmesh, parmesh->metin ); - MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); - } - if ( -1 == iermesh ) { - if ( rank == parmesh->info.root ) { - fprintf(stderr,"\n ## ERROR: UNABLE TO LOAD METRIC.\n"); + /* Give a name to the metric if not provided for distributed metric output */ + if ( !MMG5_Set_inputSolName(grp->mesh,grp->met,"") ) { + fprintf(stdout," ## WARNING: Unable to give a name to the metric.\n"); + } + else { + ier = PMMG_Set_name(parmesh,&parmesh->metin,grp->met->namein,"mesh.sol"); + if (!ier) { + fprintf(stdout," ## ERROR: Unable to give a name to the metric.\n"); + PMMG_RETURN_AND_FREE( parmesh, PMMG_LOWFAILURE ); + } } - ier = 0; - goto check_mesh_loading; } } @@ -295,18 +312,26 @@ int main( int argc, char *argv[] ) iermesh = PMMG_loadAllSols_centralized(parmesh,parmesh->fieldin); } else { - //int ier_loc = PMMG_loadAllSols_distributed(parmesh,parmesh->fieldin); - //MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); - printf(" ## Error: Distributed fields input not yet implemented.\n"); - iermesh = 0; + int ier_loc = PMMG_loadAllSols_distributed(parmesh,parmesh->fieldin); + MPI_Allreduce( &ier_loc, &iermesh, 1, MPI_INT, MPI_MIN, parmesh->comm); } if ( iermesh < 1 ) { + if ( rank == parmesh->info.root ) { + fprintf(stderr,"\n ## ERROR: UNABLE TO LOAD FIELDS.\n"); + } ier = 0; goto check_mesh_loading; } } break; + case PMMG_FMT_HDF5: + ier = PMMG_loadMesh_hdf5( parmesh, parmesh->meshin ); + parmesh->info.fmtout = fmtout; + distributedInput = 1; + + break; + default: if ( rank == parmesh->info.root ) { fprintf(stderr," ** I/O AT FORMAT %s NOT IMPLEMENTED.\n",MMG5_Get_formatName(fmtin) ); @@ -340,11 +365,11 @@ int main( int argc, char *argv[] ) } else if ( !distributedInput ) { /* Parallel remeshing starting from a centralized mesh */ - ier = PMMG_parmmglib_centralized(parmesh); + ier = PMMG_parmmg_centralized(parmesh); } else { /* Parallel remeshing starting from a distributed mesh */ - ier = PMMG_parmmglib_distributed(parmesh); + ier = PMMG_parmmg_distributed(parmesh); } /** Check result and save output files */ @@ -366,11 +391,35 @@ int main( int argc, char *argv[] ) fprintf(stdout,"\n -- WRITING DATA FILE %s..meshb\n",basename); MMG5_SAFE_FREE ( basename ); } + else if ( parmesh->info.fmtout == MMG5_FMT_VtkPvtu ) { + char *basename = MMG5_Remove_ext ( parmesh->meshout,".pvtu" ); + int i, rename=0; + for(i=0;basename[i]!='\0';i++) { + if(basename[i]=='.') { + basename[i] = '-'; + rename = 1; + } + } + 
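+        /* The loop above replaced any dot inside the basename by a dash
+         * (presumably to avoid ambiguous extensions in the generated .pvtu/.vtu
+         * file names); the warning below tells the user when the output name
+         * had to be changed. */
+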
fprintf(stdout,"\n -- WRITING DATA FILES %s.pvtu\n",basename); + if (rename) fprintf(stdout," ## WARNING: Filename has been changed: " + "%s => %s.pvtu\n",parmesh->meshout,basename); + MMG5_SAFE_FREE ( basename ); + } else { fprintf(stdout,"\n -- WRITING DATA FILE %s\n",parmesh->meshout); } + if (grp->field) { + fprintf(stdout," Writing mesh, metric and fields.\n"); + } + else { + fprintf(stdout," Writing mesh and metric.\n"); + } } + /* Initialize ierSave to 1 because for centralized output it will be assigned + * only on root rank. */ + ierSave = 1; + if ( parmesh->listgrp && parmesh->listgrp[0].mesh ) { grp = &parmesh->listgrp[0]; @@ -381,9 +430,17 @@ int main( int argc, char *argv[] ) printf(" ... SKIPPING!\n"); } break; + case ( MMG5_FMT_VtkPvtu ): - PMMG_savePvtuMesh(parmesh,parmesh->meshout); + if (grp->field) { + ier = PMMG_savePvtuMesh_and_allData(parmesh,parmesh->meshout); + } + else{ + ier = PMMG_savePvtuMesh(parmesh,parmesh->meshout); + } + MPI_Allreduce( &ier, &ierSave, 1, MPI_INT, MPI_MIN, parmesh->comm ); break; + case ( MMG5_FMT_GmshASCII ): case ( MMG5_FMT_GmshBinary ): case ( MMG5_FMT_VtkVtu ): case ( MMG5_FMT_VtkVtk ): @@ -400,44 +457,63 @@ int main( int argc, char *argv[] ) strlen(parmesh->meshout)+1,char,"",); strcat(parmesh->meshout,".mesh"); } + case ( PMMG_FMT_DistributedMeditBinary): assert ( parmesh->meshout ); - ierSave = PMMG_saveMesh_distributed(parmesh,parmesh->meshout); - if ( ierSave ) { + ier = PMMG_saveMesh_distributed(parmesh,parmesh->meshout); + if ( ier ) { if ( parmesh->listgrp[0].met && parmesh->listgrp[0].met->m ) { - ierSave = PMMG_saveMet_distributed(parmesh,parmesh->metout); + ier = PMMG_saveMet_distributed(parmesh,parmesh->metout); } } - if ( ierSave && grp->field ) { - fprintf(stderr," ## Error: %s: PMMG_saveAllSols_distributed function" - " not yet implemented." 
- " Ignored.\n",__func__); + if ( ier && grp->field ) { + ier = PMMG_saveAllSols_distributed(parmesh,parmesh->fieldout); } - MPI_Allreduce( &ierSave, &ier, 1, MPI_INT, MPI_MIN, parmesh->comm ); - if ( !ier ) { - PMMG_RETURN_AND_FREE(parmesh,PMMG_STRONGFAILURE); + if ( ier && grp->ls ) { + /* Warning: if the ls has the same name than the metric (default case + * when no input metric in ls mode), if the ls is not deallocated, the + * metric file is overwritten */ + ier = PMMG_saveLs_distributed(parmesh,parmesh->lsout); } + + MPI_Allreduce( &ier, &ierSave, 1, MPI_INT, MPI_MIN, parmesh->comm ); + break; + + case ( PMMG_FMT_HDF5 ): + ier = PMMG_saveMesh_hdf5(parmesh,parmesh->meshout); + MPI_Allreduce( &ier, &ierSave, 1, MPI_INT, MPI_MIN, parmesh->comm ); + + break; + default: ierSave = PMMG_saveMesh_centralized(parmesh,parmesh->meshout); - if ( !ierSave ) { - PMMG_RETURN_AND_FREE(parmesh,PMMG_STRONGFAILURE); + + if ( ierSave && parmesh->listgrp[0].met && parmesh->listgrp[0].met->m ) { + ierSave = PMMG_saveMet_centralized(parmesh,parmesh->metout); } - if ( parmesh->listgrp[0].met && parmesh->listgrp[0].met->m ) { - if ( !PMMG_saveMet_centralized(parmesh,parmesh->metout) ) { - PMMG_RETURN_AND_FREE(parmesh,PMMG_STRONGFAILURE); - } + + if ( ierSave && grp->field ) { + ierSave = PMMG_saveAllSols_centralized(parmesh,parmesh->fieldout); } - if ( grp->field && !PMMG_saveAllSols_centralized(parmesh,parmesh->fieldout) ) { - PMMG_RETURN_AND_FREE(parmesh,PMMG_STRONGFAILURE); + if ( ierSave && grp->ls ) { + /* Warning: if the ls has the same name than the metric (default case + * when no input metric in ls mode), if the ls is not deallocated, the + * metric file is overwritten */ + ierSave = PMMG_saveLs_centralized(parmesh,parmesh->lsout); } break; } } + /* Check output success */ + if ( ierSave<1 ) { + PMMG_RETURN_AND_FREE(parmesh,PMMG_STRONGFAILURE); + } + chrono(OFF,&PMMG_ctim[tim]); if ( parmesh->info.imprim > PMMG_VERB_VERSION ) fprintf(stdout," -- WRITING COMPLETED\n"); diff --git a/src/parmmg.h b/src/parmmg.h index 118cf7d4..c53f71b1 100644 --- a/src/parmmg.h +++ b/src/parmmg.h @@ -44,10 +44,13 @@ #include #include #include +#include "hdf_pmmg.h" +#include "metis.h" #include "libparmmg.h" #include "interpmesh_pmmg.h" -#include "mmg3d.h" +#include "libmmg3d.h" +#include "libmmg3d_private.h" #ifdef __cplusplus extern "C" { @@ -202,6 +205,7 @@ enum PMMG_Format { PMMG_FMT_Distributed, /*!< Distributed Setters/Getters */ PMMG_FMT_DistributedMeditASCII, /*!< Distributed ASCII Medit (.mesh) */ PMMG_FMT_DistributedMeditBinary, /*!< Distributed Binary Medit (.meshb) */ + PMMG_FMT_HDF5, /*!< HDF5 format */ PMMG_FMT_Unknown, /*!< Unrecognized */ }; @@ -269,6 +273,9 @@ static const int PMMG_MVIFCS_NLAYERS = 2; if ( parmesh->listgrp[kgrp].met ) \ parmesh->listgrp[kgrp].met->npi = parmesh->listgrp[kgrp].met->np; \ \ + if ( parmesh->listgrp[kgrp].ls ) \ + parmesh->listgrp[kgrp].ls->npi = parmesh->listgrp[kgrp].ls->np; \ + \ if ( parmesh->listgrp[kgrp].mesh ) { \ for ( ksol=0; ksollistgrp[kgrp].mesh->nsols; ++ksol ) { \ parmesh->listgrp[kgrp].field[ksol].npi = parmesh->listgrp[kgrp].field[ksol].np; \ @@ -282,12 +289,14 @@ static const int PMMG_MVIFCS_NLAYERS = 2; #define ERROR_AT(msg1,msg2) \ - fprintf( stderr, msg1 msg2 " function: %s, file: %s, line: %d \n", \ - __func__, __FILE__, __LINE__ ) + fprintf( stderr, "%s %s function: %s, file: %s, line: %d \n", \ + msg1, msg2, __func__, __FILE__, __LINE__ ) #define MEM_CHK_AVAIL(mesh,bytes,msg) do { \ if ( (mesh)->memCur + (bytes) > (mesh)->memMax ) { \ - ERROR_AT(msg," 
Exceeded max memory allowed: "); \ + char diag[1024]; \ + snprintf(diag, 1024, " Allocation of %ld bytes exceeds max %ld: ", bytes, (mesh)->memMax); \ + ERROR_AT(msg, diag); \ stat = PMMG_FAILURE; \ } else if ( (mesh)->memCur + (bytes) < 0 ) { \ ERROR_AT(msg," Tried to free more mem than allocated: " ); \ @@ -396,13 +405,13 @@ static const int PMMG_MVIFCS_NLAYERS = 2; } \ } while(0) -#define PMMG_RECALLOC(mesh,ptr,newsize,oldsize,type,msg,on_failure) do { \ - int my_stat = PMMG_SUCCESS; \ - \ - PMMG_REALLOC(mesh,ptr,newsize,oldsize,type,msg,my_stat=PMMG_FAILURE;on_failure;); \ - if ( (my_stat == PMMG_SUCCESS ) && ((newsize) > (oldsize)) ) { \ - memset( (ptr) + oldsize, 0, ((size_t)((newsize)-(oldsize)))*sizeof(type)); \ - } \ +#define PMMG_RECALLOC(mesh,ptr,newsize,oldsize,type,msg,on_failure) do { \ + int my_stat = PMMG_SUCCESS; \ + \ + PMMG_REALLOC(mesh,ptr,newsize,oldsize,type,msg,my_stat=PMMG_FAILURE;on_failure;); \ + if ( (my_stat == PMMG_SUCCESS ) && ((newsize) > (oldsize)) ) { \ + memset( (ptr) + oldsize, 0, ((size_t)((newsize)-(oldsize)))*sizeof(type)); \ + } \ } while(0) @@ -438,12 +447,38 @@ void PMMG_Analys_Init_SurfNormIndex( MMG5_pTetra pt ); int PMMG_Analys_Get_SurfNormalIndex( MMG5_pTetra pt,int ifac,int i ); int PMMG_boulernm(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_Hash *hash,int start,int ip,int *ng,int *nr); int PMMG_boulen(PMMG_pParMesh parmesh,MMG5_pMesh mesh,int start,int ip,int iface,double t[3]); -int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh); -int PMMG_analys(PMMG_pParMesh parmesh,MMG5_pMesh mesh); +int PMMG_analys_tria(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_int *permtria); +int PMMG_chkBdryTria(MMG5_pMesh mesh, MMG5_int* permtria); +int PMMG_analys(PMMG_pParMesh parmesh,MMG5_pMesh mesh,MPI_Comm comm); int PMMG_update_analys(PMMG_pParMesh parmesh); -int PMMG_hashPar( MMG5_pMesh mesh,MMG5_HGeom *pHash ); -int PMMG_hashPar_pmmg( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ); +int PMMG_hashParTag_fromXtet( MMG5_pMesh mesh,MMG5_HGeom *pHash ); +int PMMG_hashPar_fromFaceComm( PMMG_pParMesh parmesh,MMG5_HGeom *pHash ); int PMMG_hashOldPar_pmmg( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_Hash *hash ); +int PMMG_hashUpdate_s(MMG5_Hash *hash,MMG5_int ip0,MMG5_int ip1, MMG5_int s); +MMG5_int PMMG_hashGet_all(MMG5_Hash *hash,MMG5_int a,MMG5_int b,MMG5_int *k,MMG5_int *s); + +/* Overlap functions */ +int PMMG_create_overlap(PMMG_pParMesh parmesh,MPI_Comm comm); +int PMMG_delete_overlap(PMMG_pParMesh parmesh,MPI_Comm comm); + +/* Isovalue discretization functions */ +int PMMG_ls(PMMG_pParMesh parmesh); +int PMMG_cuttet_ls(PMMG_pParMesh parmesh); +int PMMG_resetRef_ls(PMMG_pParMesh parmesh,MMG5_pMesh mesh); +int PMMG_setref_ls(PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol sol); +int PMMG_snpval_ls(PMMG_pParMesh parmesh,MPI_Comm comm); + +void PMMG_nosplit_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,MMG5_int *tetra_sorted,MMG5_int *node_sorted); +void PMMG_split1_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted); +void PMMG_split2sf_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],int imin,MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted); +void PMMG_split3cone_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],int ia,int ib,MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted); +void PMMG_split4op_sort(MMG5_pMesh mesh,MMG5_int k,int ifac,uint8_t tau[4],int imin01,int imin23,MMG5_int ne_tmp,MMG5_int *tetra_sorted,MMG5_int *node_sorted); +int 
PMMG_sort_vertices(MMG5_pMesh mesh,MMG5_int k,MMG5_int *v_t,int ifac); +void PMMG_sort_tetra(MMG5_int *tetra,MMG5_int *node,MMG5_int *v_t0,MMG5_int *v_t1,MMG5_int *v_t2); +void PMMG_swap_vertices(MMG5_int *a); +void PMMG_swap_ints(int *a, int *b); +void PMMG_swap_3int_arrays(int *a, int *b); +int PMMG_compare_3ints_array(int *a, int *b); /* Internal library */ void PMMG_setfunc( PMMG_pParMesh parmesh ); @@ -459,9 +494,9 @@ int PMMG_split_grps( PMMG_pParMesh parmesh,int grpIdOld,int ngrp,idx_t *part,int /* Load Balancing */ int PMMG_interactionMap(PMMG_pParMesh parmesh,int **interactions,int **interaction_map); int PMMG_transfer_all_grps(PMMG_pParMesh parmesh,idx_t *part,int); -int PMMG_distribute_grps( PMMG_pParMesh parmesh ); -int PMMG_loadBalancing( PMMG_pParMesh parmesh ); -int PMMG_split_n2mGrps( PMMG_pParMesh,int,int ); +int PMMG_distribute_grps( PMMG_pParMesh parmesh,int partitioning_mode ); +int PMMG_loadBalancing( PMMG_pParMesh parmesh,int partitioning_mode ); +int PMMG_split_n2mGrps( PMMG_pParMesh,int,int,int ); double PMMG_computeWgt( MMG5_pMesh mesh,MMG5_pSol met,MMG5_pTetra pt,int ifac ); void PMMG_computeWgt_mesh( MMG5_pMesh mesh,MMG5_pSol met,int tag ); @@ -478,28 +513,31 @@ void PMMG_parmesh_ext_comm_free( PMMG_pParMesh,PMMG_pExt_comm,int); void PMMG_grp_comm_free( PMMG_pParMesh ,int**,int**,int*); void PMMG_node_comm_free( PMMG_pParMesh ); void PMMG_edge_comm_free( PMMG_pParMesh ); -int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh ); -int PMMG_color_commNodes( PMMG_pParMesh parmesh ); +int PMMG_Compute_verticesGloNum( PMMG_pParMesh parmesh,MPI_Comm comm ); +int PMMG_Compute_trianglesGloNum( PMMG_pParMesh parmesh,MPI_Comm comm ); +int PMMG_color_commNodes( PMMG_pParMesh parmesh,MPI_Comm comm ); void PMMG_tria2elmFace_flags( PMMG_pParMesh parmesh ); void PMMG_tria2elmFace_coords( PMMG_pParMesh parmesh ); +int PMMG_tria_highestcoord( MMG5_pMesh mesh, MMG5_int *v_t); int PMMG_build_nodeCommIndex( PMMG_pParMesh parmesh ); -int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh ); -int PMMG_build_nodeCommFromFaces( PMMG_pParMesh parmesh ); -int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh ); +int PMMG_build_faceCommIndex( PMMG_pParMesh parmesh, MMG5_int* permtria ); +int PMMG_build_nodeCommFromFaces( PMMG_pParMesh parmesh, MPI_Comm comm ); +int PMMG_build_faceCommFromNodes( PMMG_pParMesh parmesh, MPI_Comm comm ); int PMMG_build_simpleExtNodeComm( PMMG_pParMesh parmesh ); int PMMG_build_intNodeComm( PMMG_pParMesh parmesh ); -int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh ); -int PMMG_build_edgeComm( PMMG_pParMesh parmesh,MMG5_pMesh mesh,MMG5_HGeom *hpar ); +int PMMG_build_completeExtNodeComm( PMMG_pParMesh parmesh, MPI_Comm comm ); +int PMMG_build_edgeComm( PMMG_pParMesh,MMG5_pMesh,MMG5_HGeom *hpar,MPI_Comm); +int PMMG_build_completeExtEdgeComm( PMMG_pParMesh parmesh, MPI_Comm comm ); int PMMG_pack_faceCommunicators(PMMG_pParMesh parmesh); int PMMG_pack_nodeCommunicators(PMMG_pParMesh parmesh); /* Communicators checks */ int PMMG_check_intFaceComm( PMMG_pParMesh parmesh ); -int PMMG_check_extFaceComm( PMMG_pParMesh parmesh ); -int PMMG_check_intNodeComm( PMMG_pParMesh parmesh ); -int PMMG_check_extNodeComm( PMMG_pParMesh parmesh ); -int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh ); +int PMMG_check_extFaceComm( PMMG_pParMesh parmesh, MPI_Comm comm ); +int PMMG_check_intNodeComm( PMMG_pParMesh parmesh); +int PMMG_check_extNodeComm( PMMG_pParMesh parmesh, MPI_Comm comm ); +int PMMG_check_extEdgeComm( PMMG_pParMesh parmesh, MPI_Comm comm ); /* Tags */ void 
PMMG_tag_par_node(MMG5_pPoint ppt); @@ -511,6 +549,7 @@ void PMMG_untag_par_edge(MMG5_pxTetra pxt,int j); void PMMG_untag_par_face(MMG5_pxTetra pxt,int j); int PMMG_resetOldTag(PMMG_pParMesh parmesh); int PMMG_updateTag(PMMG_pParMesh parmesh); +void PMMG_updateTagRef_node(PMMG_pParMesh parmesh,MMG5_pMesh mesh); int PMMG_parbdySet( PMMG_pParMesh parmesh ); int PMMG_parbdyTria( PMMG_pParMesh parmesh ); @@ -553,6 +592,7 @@ int PMMG_updateMeshSize( PMMG_pParMesh parmesh,int fitMesh); void PMMG_parmesh_SetMemGloMax( PMMG_pParMesh parmesh ); void PMMG_parmesh_Free_Comm( PMMG_pParMesh parmesh ); void PMMG_parmesh_Free_Listgrp( PMMG_pParMesh parmesh ); +void PMMG_destroy_int( PMMG_pParMesh,void **ptr[],size_t,char*); int PMMG_clean_emptyMesh( PMMG_pParMesh parmesh, PMMG_pGrp listgrp, int ngrp ); int PMMG_resize_extComm ( PMMG_pParMesh,PMMG_pExt_comm,int,int* ); int PMMG_resize_extCommArray ( PMMG_pParMesh,PMMG_pExt_comm*,int,int*); @@ -561,8 +601,8 @@ int PMMG_resize_extCommArray ( PMMG_pParMesh,PMMG_pExt_comm*,int,int*); int PMMG_copy_mmgInfo ( MMG5_Info *info, MMG5_Info *info_cpy ); /* Quality */ -int PMMG_qualhisto( PMMG_pParMesh parmesh,int,int ); -int PMMG_prilen( PMMG_pParMesh parmesh,int8_t,int ); +int PMMG_qualhisto( PMMG_pParMesh parmesh,int,int,MPI_Comm comm ); +int PMMG_prilen( PMMG_pParMesh parmesh,int8_t,int,MPI_Comm comm ); int PMMG_tetraQual( PMMG_pParMesh parmesh,int8_t metRidTyp ); /* Variadic_pmmg.c */ @@ -571,6 +611,12 @@ int PMMG_Free_all_var(va_list argptr); const char* PMMG_Get_pmmgArgName(int typArg); +/* Private I/Os and APIs*/ +int PMMG_loadMesh_hdf5_i(PMMG_pParMesh parmesh, int *load_entities, const char *filename); +int PMMG_saveMesh_hdf5_i(PMMG_pParMesh parmesh, int *save_entities, const char *filename); +int PMMG_Set_defaultIOEntities_i(int io_entities[PMMG_IO_ENTITIES_size] ); +int PMMG_Set_IOEntities_i(int io_entities[PMMG_IO_ENTITIES_size], int target, int val); +int PMMG_Get_format( char *ptr, int fmt ); #ifdef __cplusplus } diff --git a/src/quality_pmmg.c b/src/quality_pmmg.c index 470a0a17..f0a16b30 100644 --- a/src/quality_pmmg.c +++ b/src/quality_pmmg.c @@ -23,7 +23,8 @@ #include "parmmg.h" #include -#include "inlined_functions_3d.h" +#include "inlined_functions_3d_private.h" +#include "mmgexterns_private.h" typedef struct { double min; @@ -148,12 +149,16 @@ static void PMMG_compute_lenStats( void* in1,void* out1,int *len, MPI_Datatype * * \param opt PMMG_INQUA if called before the Mmg call, PMMG_OUTQUA otherwise * \param isCentral 1 for centralized mesh (no parallel communication), 0 for * distributed mesh + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. 
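+ * (E.g. pass parmesh->info.read_comm for the preprocessing-stage call and
+ * parmesh->comm for the later calls.)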
* * \return 1 if success, 0 if fail; * * Print quality histogram among all group meshes and all processors */ -int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) +int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral, MPI_Comm comm ) { PMMG_pGrp grp; PMMG_pInt_comm int_node_comm; @@ -184,9 +189,9 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) iel_grp = 0; np = 0; ne = 0; - max = DBL_MIN; + max = max_cur = DBL_MIN; avg = 0.; - min = DBL_MAX; + min = min_cur = DBL_MAX; iel = 0; good = 0; med = 0; @@ -259,6 +264,10 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) nrid += nrid_cur; } + + if( int_node_comm ) + PMMG_DEL_MEM( parmesh,int_node_comm->intvalues,int,"intvalues" ); + if ( parmesh->info.imprim0 <= PMMG_VERB_VERSION ) return 1; @@ -272,13 +281,13 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) max_result = max; optimLES_result = optimLES; } else { - MPI_Reduce( &np, &np_result, 1, MPI_INT64_T, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &ne, &ne_result, 1, MPI_INT64_T, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &avg, &avg_result, 1, MPI_DOUBLE, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &med, &med_result, 1, MPI_INT, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &good, &good_result, 1, MPI_INT, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &max, &max_result, 1, MPI_DOUBLE, MPI_MAX, 0, parmesh->comm ); - MPI_Reduce( &optimLES,&optimLES_result,1,MPI_INT,MPI_MAX,0,parmesh->comm ); + MPI_Reduce( &np, &np_result, 1, MPI_INT64_T, MPI_SUM, 0, comm ); + MPI_Reduce( &ne, &ne_result, 1, MPI_INT64_T, MPI_SUM, 0, comm ); + MPI_Reduce( &avg, &avg_result, 1, MPI_DOUBLE, MPI_SUM, 0, comm ); + MPI_Reduce( &med, &med_result, 1, MPI_INT, MPI_SUM, 0, comm ); + MPI_Reduce( &good, &good_result, 1, MPI_INT, MPI_SUM, 0, comm ); + MPI_Reduce( &max, &max_result, 1, MPI_DOUBLE, MPI_MAX, 0, comm ); + MPI_Reduce( &optimLES,&optimLES_result,1,MPI_INT,MPI_MAX,0,comm ); } min_iel.min = min; @@ -299,9 +308,9 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) MPI_Type_commit( &mpi_iel_min_t ); MPI_Op_create( PMMG_min_iel_compute, 1, &iel_min_op ); - MPI_Reduce( &min_iel, &min_iel_result, 1, mpi_iel_min_t, iel_min_op, 0, parmesh->comm ); - MPI_Reduce( his, his_result, PMMG_QUAL_HISSIZE, MPI_INT, MPI_SUM, 0, parmesh->comm ); - MPI_Reduce( &nrid, &nrid_result, 1, MPI_INT, MPI_SUM, 0, parmesh->comm ); + MPI_Reduce( &min_iel, &min_iel_result, 1, mpi_iel_min_t, iel_min_op, 0, comm ); + MPI_Reduce( his, his_result, PMMG_QUAL_HISSIZE, MPI_INT, MPI_SUM, 0, comm ); + MPI_Reduce( &nrid, &nrid_result, 1, MPI_INT, MPI_SUM, 0, comm ); MPI_Type_free( &mpi_iel_min_t ); MPI_Op_free( &iel_min_op ); } @@ -339,9 +348,6 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) if ( !ier ) return 0; } - if( int_node_comm ) - PMMG_DEL_MEM( parmesh,int_node_comm->intvalues,int,"intvalues" ); - return 1; } @@ -361,15 +367,20 @@ int PMMG_qualhisto( PMMG_pParMesh parmesh, int opt, int isCentral ) * \param metRidTyp (to fill). * \param bd_in (to fill). * \param hl (to fill). + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. * - * \return 0 if fail, 1 otherwise. + * \return 2 without metric, 0 if fail, 1 otherwise. 
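+ * (Returning 2 lets the caller distinguish "no metric available, histogram
+ * skipped" from an allocation failure; see the handling in PMMG_prilen below.)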
* * Compute the required information to print the length histogram * */ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, double* avlen, double* lmin, double* lmax, int* ned, int* amin, int* bmin, int* amax, - int* bmax, int* nullEdge,int8_t metRidTyp, double** bd_in, int hl[9] ) + int* bmax, int* nullEdge,int8_t metRidTyp, double** bd_in, + int hl[9],MPI_Comm comm ) { PMMG_pGrp grp; PMMG_pInt_comm int_edge_comm; @@ -382,7 +393,7 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do double len; int i,k,ia,np,nq,n; int ref; - int16_t tag; + uint16_t tag; int8_t i0,i1,ier; static double bd[9]= {0.0, 0.3, 0.6, 0.7071, 0.9, 1.3, 1.4142, 2.0, 5.0}; @@ -395,11 +406,17 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do *amin = *amax = *bmin = *bmax = 0; *nullEdge = 0; + if ( (!met) || (!met->m) ) { + /* the functions that computes the edge length cannot be called without an + * allocated metric */ + return 2; + } + /* Hash parallel edges in the mesh */ - if ( PMMG_hashPar(mesh,&hpar) != PMMG_SUCCESS ) return 0; + if ( PMMG_hashParTag_fromXtet(mesh,&hpar) != PMMG_SUCCESS ) return 0; /* Build parallel edge communicator */ - if( !PMMG_build_edgeComm( parmesh,mesh,&hpar ) ) return 0; + if( !PMMG_build_edgeComm( parmesh,mesh,&hpar,comm ) ) return 0; /* Initialize internal communicator with current rank */ int_edge_comm = parmesh->int_edge_comm; @@ -459,11 +476,18 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do /* Remove edge from hash ; ier = 1 if edge has been found */ ier = MMG5_hashPop(&hash,np,nq); if( ier ) { - if ( (!metRidTyp) && met->size==6 && met->m ) { + assert ( met->m ); + if ( (!metRidTyp) && met->size==6 ) { + assert ( met->m ); + /* We pass here if metric is aniso without metRidTyp */ len = MMG5_lenSurfEdg33_ani(mesh,met,np,nq,(tag & MG_GEO)); } - else - len = MMG5_lenSurfEdg_iso(mesh,met,np,nq,0); + else { + /* We pass here if metric is aniso with metRidTyp or iso with + * allocated metric. Note that the lenSurfEdg function segfault if called + * with met==NULL or met->m==NULL */ + len = MMG5_lenSurfEdg(mesh,met,np,nq,0); + } if ( !len ) { @@ -524,6 +548,7 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do /* Remove edge from hash ; ier = 1 if edge has been found */ ier = MMG5_hashPop(&hash,np,nq); if( ier ) { + assert ( met->m ); if ( (!metRidTyp) && met->size==6 && met->m ) { len = MMG5_lenedg33_ani(mesh,met,ia,pt); } @@ -577,6 +602,10 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do * \param parmesh pointer to parmesh structure * \param metRidTyp Type of storage of ridges metrics: 0 for classic storage, * \param isCentral 1 for centralized mesh, 0 for distributed mesh. + * \param comm pointer toward the MPI communicator to use: when called before + * the first mesh balancing (at preprocessing stage) we have to use the + * read_comm communicator (i.e. the communicator used to provide the inputs). + * For all ather calls, comm has to be the communicator to use for computations. 
* * \return 1 if success, 0 if fail; * @@ -588,7 +617,7 @@ int PMMG_computePrilen( PMMG_pParMesh parmesh,MMG5_pMesh mesh, MMG5_pSol met, do * \warning for now, only callable on "merged" parmeshes (=1 group per parmesh) * */ -int PMMG_prilen( PMMG_pParMesh parmesh, int8_t metRidTyp, int isCentral ) +int PMMG_prilen( PMMG_pParMesh parmesh, int8_t metRidTyp, int isCentral, MPI_Comm comm ) { MMG5_pMesh mesh; MMG5_pSol met; @@ -642,28 +671,45 @@ int PMMG_prilen( PMMG_pParMesh parmesh, int8_t metRidTyp, int isCentral ) if ( parmesh->ngrp==1 ) { mesh = parmesh->listgrp[0].mesh; met = parmesh->listgrp[0].met; - if ( met && met->m ) { - if( isCentral ) - ier = MMG3D_computePrilen( mesh, met, - &lenStats.avlen, &lenStats.lmin, - &lenStats.lmax, &lenStats.ned, &lenStats.amin, - &lenStats.bmin, &lenStats.amax, &lenStats.bmax, - &lenStats.nullEdge, metRidTyp, &bd, lenStats.hl ); - else - ier = PMMG_computePrilen( parmesh, mesh, met, - &lenStats.avlen, &lenStats.lmin, - &lenStats.lmax, &lenStats.ned, &lenStats.amin, - &lenStats.bmin, &lenStats.amax, &lenStats.bmax, - &lenStats.nullEdge, metRidTyp, &bd, lenStats.hl ); + if( isCentral ) { + /* If metric is not allocated or if hash table alloc fails, the next + * function returns 0, which allows to detect that we cannot print the + * edge length histo. */ + ier = MMG3D_computePrilen( mesh, met, + &lenStats.avlen, &lenStats.lmin, + &lenStats.lmax, &lenStats.ned, &lenStats.amin, + &lenStats.bmin, &lenStats.amax, &lenStats.bmax, + &lenStats.nullEdge, metRidTyp, &bd, + lenStats.hl ); + } + else { + /* The next function returns 0 if the hash table alloc fails and 2 if + * called without metric (in this case we are not able to compute the edge + * lengths). It allows to detect: + * - if we can't print the histo due to an alloc error (ier = 0 on 1 MPI + * process at least) + * - if we can't print the histo because the metric is not allocated + * (ier=2 on all the MPI process) + * - if we can print the histo (metric is allocated on at least 1 MPI + * process and no MPI process fail, thus ier is at least 1 on all the + * MPI proc but may be 2 on some of them) + */ + ier = PMMG_computePrilen( parmesh, mesh, met, + &lenStats.avlen, &lenStats.lmin, + &lenStats.lmax, &lenStats.ned, &lenStats.amin, + &lenStats.bmin, &lenStats.amax, &lenStats.bmax, + &lenStats.nullEdge, metRidTyp, &bd, + lenStats.hl,comm ); } } if( isCentral ) ieresult = ier; else - MPI_Reduce( &ier, &ieresult,1, MPI_INT, MPI_MIN, parmesh->info.root, parmesh->comm ); + MPI_Reduce( &ier, &ieresult,1, MPI_INT, MPI_MIN, parmesh->info.root, comm ); - if ( !ieresult ) { + if ( (ieresult==0) || ieresult==2 ) { + /* We are not able to print the histogram */ MPI_Type_free( &mpi_lenStats_t ); MPI_Op_free( &mpi_lenStats_op ); return 0; @@ -672,7 +718,8 @@ int PMMG_prilen( PMMG_pParMesh parmesh, int8_t metRidTyp, int isCentral ) if( isCentral ) memcpy(&lenStats_result,&lenStats,sizeof(PMMG_lenStats)); else - MPI_Reduce( &lenStats, &lenStats_result, 1, mpi_lenStats_t, mpi_lenStats_op, 0, parmesh->comm ); + MPI_Reduce( &lenStats, &lenStats_result, 1, mpi_lenStats_t, mpi_lenStats_op, + 0, comm ); MPI_Type_free( &mpi_lenStats_t ); MPI_Op_free( &mpi_lenStats_op ); diff --git a/src/tag_pmmg.c b/src/tag_pmmg.c index 9cc8ad5f..38d88407 100644 --- a/src/tag_pmmg.c +++ b/src/tag_pmmg.c @@ -68,8 +68,8 @@ void PMMG_tag_par_edge(MMG5_pxTetra pxt,int j){ * Tag an edge as parallel. 
*/ int PMMG_tag_par_edge_hash(MMG5_pTetra pt,MMG5_HGeom hash,int ia){ - int ip0,ip1,getref; - int16_t gettag; + int ip0,ip1,getref; + uint16_t gettag; ip0 = pt->v[MMG5_iare[ia][0]]; ip1 = pt->v[MMG5_iare[ia][1]]; @@ -261,7 +261,9 @@ inline int PMMG_resetOldTag(PMMG_pParMesh parmesh) { * * \return 0 if fail, 1 otherwise * - * Update the tag on the points and tetra + * Update the parallel-related tags on the points and tetra: first, remove + * obsolete parallel markers; second, re-tag boundary entites; third, tag new + * parallel interfaces from the internal communicator. * */ int PMMG_updateTag(PMMG_pParMesh parmesh) { @@ -273,7 +275,7 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { MMG5_HGeom hash; int *node2int_node_comm0_index1,*face2int_face_comm0_index1; int grpid,iel,ifac,ia,ip0,ip1,k,j,i,getref; - int16_t gettag; + uint16_t gettag; int8_t isbdy; /* Loop on groups */ @@ -289,15 +291,22 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { pt = &mesh->tetra[k]; if ( !pt->xt ) continue; pxt = &mesh->xtetra[pt->xt]; - /* Untag parallel nodes */ + /* Untag parallel nodes: remove PARBDY, BDY, REQ, PARBDYBDY and NOSURF + * tag. Point not marked by NOSURF tag are required by the user: re-add + * the REQ tag. */ for ( j=0 ; j<4 ; j++ ) { ppt = &mesh->point[pt->v[j]]; PMMG_untag_par_node(ppt); } - /* Untag parallel edges */ + /* Untag parallel edges: remove PARBDY, BDY, REQ, PARBDYBDY and NOSURF + * tag. Point not marked by NOSURF tag are required by the user: re-add + * the REQ tag. */ for ( j=0 ; j<6 ; j++ ) PMMG_untag_par_edge(pxt,j); - /* Untag parallel faces */ + /* Untag parallel faces: remove PARBDY, BDY, REQ and NOSURF tags but nor + * PARBDYBDY one (used to recognize BDY faces whose BDY tag has been + * removed). Point not marked by NOSURF tag are required by the user: + * re-add the REQ tag. */ for ( j=0 ; j<4 ; j++ ) PMMG_untag_par_face(pxt,j); } @@ -346,14 +355,6 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { ip0 = pt->v[MMG5_iare[ia][0]]; ip1 = pt->v[MMG5_iare[ia][1]]; if( !MMG5_hTag( &hash, ip0, ip1, 0, MG_BDY ) ) return 0; - /* Constrain boundary if -nosurf option */ - if( mesh->info.nosurf ) { - if( !MMG5_hGet( &hash, ip0, ip1, &getref, &gettag ) ) return 0; - if( !(gettag & MG_REQ) ) { - /* do not add the MG_NOSURF tag on a required entity */ - if( !MMG5_hTag( &hash, ip0, ip1, 0, MG_REQ + MG_NOSURF ) ) return 0; - } - } } /* Tag face nodes */ for ( j=0 ; j<3 ; j++) { @@ -432,8 +433,27 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { * so remove the MG_NOSURF tag if the edge is truly required */ if( pxt->tag[j] & MG_REQ ) gettag &= ~MG_NOSURF; - /* set edge tag */ - pxt->tag[j] |= gettag; + + /* set edge tag (without NOSURF tag if the edge is required by the + * user): here we preserve the initial MG_REQ tag of each tetra, thus, + * potential inconsistencies will not be solved. + * + * A xtetra may have an edge that is boundary but doesn't belong to any + * boundary face: + * - if this edge is marked as MG_BDY, the edge tag should be + * consistent with edge tag stored from a boundary face and we have + * to maintain this consistency; + * + * - if this edge is not marked as MG_BDY (tag == 0), we are not able to + * know if the edge is ref or required or if it has any other tag so + * we are not able to maintain the tag consistency and we have to + * preserve the fact that the edge is not MG_BDY. 
+ * + */ + if ( (pxt->tag[j] & MG_BDY) || + ( (pxt->ftag[MMG5_ifar[j][0]] & MG_BDY) || (pxt->ftag[MMG5_ifar[j][1]] & MG_BDY) ) ) { + pxt->tag[j] |= gettag; + } } } PMMG_DEL_MEM( mesh, hash.geom, MMG5_hgeom, "Edge hash table" ); @@ -450,12 +470,58 @@ int PMMG_updateTag(PMMG_pParMesh parmesh) { return 1; } +/** + * \param parmesh pointer toward the parmesh structure. + * \param mesh pointer to the mesh structure + * + * Update the nodes tag with MG_REF if the edge in tetra is also MG_REF + * \remark When we enter this function, the tag MG_OLDPBDY is correctly assigned to xtetra.ftag only. + * However, we can have only an edge on the // interface (and no face). Therefore, we need + * to loop over all the edges of all the tetra. + * + */ +void PMMG_updateTagRef_node(PMMG_pParMesh parmesh, MMG5_pMesh mesh) { + MMG5_pTetra pt; + MMG5_pxTetra pxt; + MMG5_int k,ip0,ip1; + int ia; + + for ( k=1; k<=mesh->ne; k++ ) { + pt = &mesh->tetra[k]; + if ( !MG_EOK(pt) ) continue; + if ( !pt->xt ) continue; + pxt = &mesh->xtetra[pt->xt]; + for ( ia=0 ; ia<6 ; ia++ ) { + if ( pxt->tag[ia] & MG_REF) { + ip0 = pt->v[MMG5_iare[ia][0]]; + ip1 = pt->v[MMG5_iare[ia][1]]; + if ( !(mesh->point[ip0].tag & MG_REF) ) { + mesh->point[ip0].tag |= MG_REF; + } + if ( !(mesh->point[ip1].tag & MG_REF) ) { + mesh->point[ip1].tag |= MG_REF; + } + } + } + } +} + /** * \param parmesh pointer to parmesh structure. * \return 0 if fail, 1 if success. * * Check if faces on a parallel communicator connect elements with different * references, and tag them as a "true" boundary (thus PARBDYBDY). + * + * \remark: Edge tags are not updated along faces with the PARBDYBDY tag as + * it is not sufficient to maintain the consistency of PARBDYBDY tags through the mesh. + * Morover, even if we manage to have consistent tags inside one mesh, we will + * still have to synchronize the edge tags through the partition interfaces. In consequence, + * the PARBDYBDY tags may be not consistent throught the entire remeshing process. + * + * \todo all MPI_abort have to be removed and replaced by a clean error handling + * without deadlocks. 
+ * */ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { PMMG_pGrp grp; @@ -465,13 +531,11 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { MMG5_pTetra pt; MMG5_pxTetra pxt; MMG5_pPoint ppt; - MPI_Comm comm; MPI_Status status; int *face2int_face_comm_index1,*face2int_face_comm_index2; int *seenFace,*intvalues,*itosend,*itorecv; int ngrp,myrank,color,nitem,k,igrp,i,idx,ie,ifac; - comm = parmesh->comm; grp = parmesh->listgrp; myrank = parmesh->myrank; ngrp = parmesh->ngrp; @@ -479,11 +543,12 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { /* intvalues will be used to store tetra ref */ int_face_comm = parmesh->int_face_comm; PMMG_MALLOC(parmesh,int_face_comm->intvalues,int_face_comm->nitem,int, - "intvalues",return 0); + "intvalues", MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); intvalues = parmesh->int_face_comm->intvalues; /* seenFace will be used to recognize already visited faces */ - PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace",return 0); + PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); /** Fill the internal communicator with the first ref found */ for( igrp = 0; igrp < ngrp; igrp++ ) { @@ -535,11 +600,11 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { color = ext_face_comm->color_out; PMMG_CALLOC(parmesh,ext_face_comm->itosend,nitem,int,"itosend array", - return 0); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); itosend = ext_face_comm->itosend; PMMG_CALLOC(parmesh,ext_face_comm->itorecv,nitem,int,"itorecv array", - return 0); + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); itorecv = ext_face_comm->itorecv; for ( i=0; iinfo.read_comm,&status), + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); /* Store the info in intvalues */ for ( i=0; iinfo.opnbdy && pxt->ref[ifac]>0) ) { pxt->ftag[ifac] |= MG_PARBDYBDY; for( i = 0; i < 3; i++) @@ -604,7 +672,6 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { } } - /* Tag parallel points touched by simple MG_BDY faces as MG_PARBDYBDY * (a parallel surface can pinch a regular surface in just one point). * The same problem on edges is handled by MMG5_mmgHashTria. */ @@ -642,6 +709,10 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) { * * Check if faces on a parallel communicator connect elements with different * references, and tag them as a "true" boundary (thus PARBDYBDY). + * + * \todo clean parallel error handling (without MPI_abort call and without deadlocks) + * + * \todo manage opnbdy mode */ int PMMG_parbdyTria( PMMG_pParMesh parmesh ) { MMG5_Hash hash; @@ -660,26 +731,35 @@ int PMMG_parbdyTria( PMMG_pParMesh parmesh ) { comm = parmesh->comm; myrank = parmesh->myrank; - assert( parmesh->ngrp == 1 ); + + if ( !parmesh->ngrp ) { + return 1; + } + + assert( parmesh->ngrp == 1 && "Not implemented for multiple groups"); + mesh = grp->mesh; /* intvalues will be used to store tetra ref */ int_face_comm = parmesh->int_face_comm; PMMG_MALLOC(parmesh,int_face_comm->intvalues,int_face_comm->nitem,int, - "intvalues",return 0); + "intvalues",MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); intvalues = parmesh->int_face_comm->intvalues; /* seenFace will be used to recognize already visited faces */ - PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace",return 0); + PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace", + MPI_Abort(parmesh->comm,PMMG_TMPFAILURE)); /* Hash triangles */ - if ( ! MMG5_hashNew(mesh,&hash,0.51*mesh->nt,1.51*mesh->nt) ) return 0; + if ( ! 
@@ -642,6 +709,10 @@ int PMMG_parbdySet( PMMG_pParMesh parmesh ) {
  *
  * Check if faces on a parallel communicator connect elements with different
  * references, and tag them as a "true" boundary (thus PARBDYBDY).
+ *
+ * \todo Clean parallel error handling (without MPI_Abort calls and without deadlocks).
+ *
+ * \todo Manage the opnbdy mode.
  */
 int PMMG_parbdyTria( PMMG_pParMesh parmesh ) {
   MMG5_Hash      hash;
@@ -660,26 +731,35 @@ int PMMG_parbdyTria( PMMG_pParMesh parmesh ) {
   comm   = parmesh->comm;
   myrank = parmesh->myrank;
 
-  assert( parmesh->ngrp == 1 );
+
+  if ( !parmesh->ngrp ) {
+    return 1;
+  }
+
+  assert( parmesh->ngrp == 1 && "Not implemented for multiple groups");
+
   mesh = grp->mesh;
 
   /* intvalues will be used to store tetra ref */
   int_face_comm = parmesh->int_face_comm;
   PMMG_MALLOC(parmesh,int_face_comm->intvalues,int_face_comm->nitem,int,
-              "intvalues",return 0);
+              "intvalues",MPI_Abort(parmesh->comm,PMMG_TMPFAILURE));
   intvalues = parmesh->int_face_comm->intvalues;
 
   /* seenFace will be used to recognize already visited faces */
-  PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace",return 0);
+  PMMG_CALLOC(parmesh,seenFace,int_face_comm->nitem,int,"seenFace",
+              MPI_Abort(parmesh->comm,PMMG_TMPFAILURE));
 
   /* Hash triangles */
-  if ( ! MMG5_hashNew(mesh,&hash,0.51*mesh->nt,1.51*mesh->nt) ) return 0;
+  if ( ! MMG5_hashNew(mesh,&hash,0.51*mesh->nt,1.51*mesh->nt) ) {
+    MPI_Abort(parmesh->comm,PMMG_TMPFAILURE);
+  }
 
   for (kt=1; kt<=mesh->nt; kt++) {
     ptt = &mesh->tria[kt];
     if ( !MMG5_hashFace(mesh,&hash,ptt->v[0],ptt->v[1],ptt->v[2],kt) ) {
       MMG5_DEL_MEM(mesh,hash.item);
-      return 0;
+      MPI_Abort(parmesh->comm,PMMG_TMPFAILURE);
     }
   }
 
@@ -720,11 +800,11 @@ int PMMG_parbdyTria( PMMG_pParMesh parmesh ) {
     color = ext_face_comm->color_out;
 
     PMMG_CALLOC(parmesh,ext_face_comm->itosend,nitem,int,"itosend array",
-                return 0);
+                MPI_Abort(parmesh->comm,PMMG_TMPFAILURE));
     itosend = ext_face_comm->itosend;
 
     PMMG_CALLOC(parmesh,ext_face_comm->itorecv,nitem,int,"itorecv array",
-                return 0);
+                MPI_Abort(parmesh->comm,PMMG_TMPFAILURE));
     itorecv = ext_face_comm->itorecv;
 
     for ( i=0; icomm,PMMG_TMPFAILURE));
 
     /* Store the info in intvalues */
     for ( i=0; ipoint[ptt->v[j]].tag |= MG_PARBDYBDY;
 
       /* check orientation: set orientation of triangle outward w.r.t. the
        * highest tetra reference, remove MG_PARBDYBDY from the halo triangle */
+#warning Luca: no opnbdy yet
       if( pt->ref > intvalues[idx] ) {
         ptt->v[0] = ia;
         ptt->v[1] = ib;
         ptt->v[2] = ic;
       } else {
+        /* The boundary will belong to one partition only; for the other
+         * partition, the triangle will be considered as simply parallel. This
+         * means that the PARBDYBDY triangle tags may not be consistent from
+         * here on. */
         ptt->tag[0] &= ~MG_PARBDYBDY;
         ptt->tag[1] &= ~MG_PARBDYBDY;
         ptt->tag[2] &= ~MG_PARBDYBDY;
diff --git a/src/tools_pmmg.c b/src/tools_pmmg.c
index a136bc18..7c447a8c 100644
--- a/src/tools_pmmg.c
+++ b/src/tools_pmmg.c
@@ -111,6 +111,8 @@ const char* PMMG_Get_pmmgArgName(int typArg)
 int PMMG_copy_mmgInfo ( MMG5_Info *info, MMG5_Info *info_cpy ) {
   MMG5_pMat mat_tmp;
   MMG5_pPar par_tmp;
+  int       *lookup_tmp;
+  int       i;
 
   // assert to remove (we may authorize to have mat and par already allocated )
   assert ( (!info_cpy->mat) && (!info_cpy->par) );
@@ -123,6 +125,22 @@ int PMMG_copy_mmgInfo ( MMG5_Info *info, MMG5_Info *info_cpy ) {
   }
   if ( mat_tmp ) {
     *mat_tmp = *info->mat;
+    for ( i=0; i<info->nmat; ++i ) {
+      mat_tmp[i]=info->mat[i];
+    }
+  }
+
+  if ( info->nmat && (!info_cpy->invmat.lookup) ) {
+    MMG5_SAFE_CALLOC(lookup_tmp,info->invmat.size,int,return 0);
+  }
+  else {
+    lookup_tmp = info_cpy->invmat.lookup;
+  }
+  if ( lookup_tmp ) {
+    *lookup_tmp = *info->invmat.lookup;
+    for ( i=0; i<info->invmat.size; ++i ) {
+      lookup_tmp[i]=info->invmat.lookup[i];
+    }
   }
 
   /* local parameters */
@@ -134,12 +152,16 @@ int PMMG_copy_mmgInfo ( MMG5_Info *info, MMG5_Info *info_cpy ) {
   }
   if ( par_tmp ) {
     *par_tmp = *info->par;
+    for ( i=0; i<info->npar; ++i ) {
+      par_tmp[i]=info->par[i];
+    }
   }
 
   *info_cpy = *info;
 
   info_cpy->mat = mat_tmp;
   info_cpy->par = par_tmp;
+  info_cpy->invmat.lookup = lookup_tmp;
 
   return 1;
 }
diff --git a/src/variadic_pmmg.c b/src/variadic_pmmg.c
index fc00d993..49cb1f1d 100644
--- a/src/variadic_pmmg.c
+++ b/src/variadic_pmmg.c
@@ -73,6 +73,8 @@ int PMMG_Init_parMesh_var_internal(va_list argptr, int callFromC ) {
   PMMG_pGrp      grp;
   MMG5_pMesh     mesh;
   MMG5_pSol      met;
+  MMG5_pSol      ls;
+  size_t         memAv;
 
   int            typArg,dim,nsol,comm_f;
   int            parmeshCount,meshCount,metCount,dimCount,solCount,commCount;
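The tools_pmmg.c hunk above turns PMMG_copy_mmgInfo into a deeper copy: after the plain struct assignment of info into info_cpy, the mat, par and invmat.lookup arrays are duplicated element by element and the copied pointers are redirected to the new buffers, so the copy no longer aliases the original storage. The self-contained sketch below shows the same shallow-copy-then-fix-the-pointers pattern on a reduced structure; the Info type and its field names are placeholders, not the MMG5_Info layout.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* reduced stand-in for a structure holding scalars plus one owned array */
typedef struct {
  int     n;      /* number of entries in data */
  double *data;   /* owned buffer: must not be shared between copies */
} Info;

/* Copy src into dst: shallow copy first, then duplicate the owned buffer. */
static int info_copy(const Info *src, Info *dst)
{
  double *data_tmp = NULL;

  if ( src->n ) {
    data_tmp = malloc(src->n * sizeof(double));
    if ( !data_tmp ) return 0;
    memcpy(data_tmp, src->data, src->n * sizeof(double));
  }

  *dst = *src;           /* shallow copy: dst->data still aliases src->data */
  dst->data = data_tmp;  /* fix-up: point the copy to its own duplicated buffer */
  return 1;
}

int main(void)
{
  double buf[3] = { 1.0, 2.0, 3.0 };
  Info   src    = { 3, buf };
  Info   dst;

  if ( !info_copy(&src, &dst) ) return 1;
  dst.data[0] = 42.0;    /* modifying the copy must not touch the original */
  printf("src: %g, dst: %g\n", src.data[0], dst.data[0]);
  free(dst.data);
  return 0;
}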
diff --git a/src/zaldy_pmmg.c b/src/zaldy_pmmg.c
index 6a34dd44..5757fbdd 100644
--- a/src/zaldy_pmmg.c
+++ b/src/zaldy_pmmg.c
@@ -58,42 +58,61 @@ void PMMG_parmesh_SetMemGloMax( PMMG_pParMesh parmesh )
   assert ( (parmesh != NULL) && "trying to set glo max mem in empty parmesh" );
 
-  /** Step 1: Get the numper of processes per node */
+  /** Step 1: Get the number of processes per node */
   MPI_Initialized( &flag );
 
   if ( flag ) {
     MPI_Comm_split_type( parmesh->comm, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_shm );
     MPI_Comm_size( comm_shm, &parmesh->size_shm );
+    MPI_Comm_free( &comm_shm );
   }
   else {
     parmesh->size_shm = 1;
   }
 
-  /** Step 2: Set maximal memory per process depending on the -m option setting */
+  /** Step 2: Set maximal memory per process depending on the -m option setting:
+      - if the user doesn't provide a memory value, or provides an invalid one, the
+        memory is split evenly (equirepartition) over the MPI processes of the node.
+        Functions that consume different amounts of memory depending on the process
+        have to manage the memory repartition internally (for example the
+        \a PMMG_loadMesh_centralized function).
+      - if the user provides a valid memory value (lower than or equal to the physical
+        memory), it is used as is, assuming that the user knows what they are asking
+        for (during the parallel calls of Mmg it may be useful not to have an
+        equirepartition of the memory, as some processes may use less memory than
+        others, but we are not able to predict it).
+  */
   maxAvail = MMG5_memSize();
 
   if ( parmesh->info.mem <= 0 ) {
-    /* No user specification */
+    /* No user specification: equirepartition of the available memory */
     if ( !maxAvail ) {
       /* default value when not able to compute the available memory = 800 MB */
       printf("  Maximum memory per process set to default value: %d MB.\n",MMG5_MEMMAX);
-      parmesh->memGloMax = MMG5_MEMMAX << 20;
+      parmesh->memGloMax = (MMG5_MEMMAX/parmesh->size_shm) << 20;
     }
     else {
-      /* maximal memory = total physical memory */
-      parmesh->memGloMax = maxAvail;
+      /* maximal memory = equirepartition of the total physical memory over the
+       * MPI processes of the node. */
+      parmesh->memGloMax = maxAvail/parmesh->size_shm;
    }
  }
  else {
-    /* memory asked by user if possible, otherwise total physical memory */
+    int memOverflow = 0;
+    /* Memory asked by the user if possible (the user is authorized to ask for
+     * the entire memory of the node per process, independently of the number
+     * of processes per node).
+     */
    if ( maxAvail && (size_t)parmesh->info.mem*MMG5_MILLION > maxAvail ) {
-      fprintf(stderr,"\n  ## Warning: %s: asking for %d MB of memory per process ",
+      /* User asks for more than the memory of the node */
+      fprintf(stdout,"\n  ## Warning: %s: asking for %d MB of memory per process ",
              __func__,parmesh->info.mem);
-      fprintf(stderr,"when only %zu available.\n",maxAvail/MMG5_MILLION);
+      fprintf(stdout,"when only %zu available on the node.\n",maxAvail/MMG5_MILLION);
+      memOverflow = 1;
    }
    else {
-      parmesh->memGloMax= (size_t)parmesh->info.mem*MMG5_MILLION;
+      if ( (size_t)parmesh->info.mem*MMG5_MILLION > maxAvail/parmesh->size_shm ) {
+        /* User asks for more than the equirepartition of the node memory across
+         * the MPI processes */
+        fprintf(stdout,"\n  ## Warning: %s: asking for %d MB per MPI process with %d process per node and %zu MB available on the node.\n",
+                __func__,parmesh->info.mem,parmesh->size_shm,maxAvail/MMG5_MILLION);
+        memOverflow = 1;
+      }
+    }
+
+    /* In all cases, impose what the user asks */
+    parmesh->memGloMax= (size_t)parmesh->info.mem*MMG5_MILLION;
+
+    if ( memOverflow ) {
+      fprintf(stdout,"  The program may run out of memory and be killed (Signal 9 or SIGKILL error).\n\n");
+    }
  }
 
@@ -121,6 +140,10 @@ int PMMG_parmesh_SetMemMax( PMMG_pParMesh parmesh ) {
   for( i = 0; i < parmesh->ngrp; ++i ) {
     mesh = parmesh->listgrp[i].mesh;
     mesh->memMax = parmesh->memGloMax;
+
+    /* Hack to prevent Mmg from recomputing the available memory by itself (it
+     * has no knowledge that it is called in parallel) */
+    mesh->info.mem = mesh->memMax/MMG5_MILLION;
   }
 
   return 1;
@@ -291,22 +314,37 @@ int PMMG_setMeshSize_alloc( MMG5_pMesh mesh ) {
 int PMMG_setMeshSize_realloc( MMG5_pMesh mesh,int npmax_old,int xpmax_old,
                               int nemax_old,int xtmax_old ) {
 
-  PMMG_RECALLOC(mesh,mesh->point,mesh->npmax+1,npmax_old+1,MMG5_Point,
+  if ( !npmax_old )
+    PMMG_CALLOC(mesh, mesh->point, mesh->npmax+1, MMG5_Point,
                 "vertices array", return 0);
-
-  PMMG_RECALLOC(mesh,mesh->xpoint,mesh->xpmax+1,xpmax_old+1,MMG5_xPoint,
-                "boundary vertices array", return 0);
-
-  PMMG_RECALLOC(mesh,mesh->tetra,mesh->nemax+1,nemax_old+1,MMG5_Tetra,
-                "tetra array", return 0);
+  else
+    PMMG_RECALLOC(mesh,mesh->point,mesh->npmax+1,npmax_old+1,MMG5_Point,
+                  "vertices array", return 0);
 
+  if ( !xpmax_old )
+    PMMG_CALLOC(mesh, mesh->xpoint, mesh->xpmax+1, MMG5_xPoint,
+                "boundary vertices array", return 0);
+  else
+    PMMG_RECALLOC(mesh,mesh->xpoint,mesh->xpmax+1,xpmax_old+1,MMG5_xPoint,
+                  "boundary vertices array", return 0);
+
+  if ( !nemax_old )
+    PMMG_CALLOC(mesh, mesh->tetra, mesh->nemax+1, MMG5_Tetra,
+                "tetra array", return 0);
+  else
+    PMMG_RECALLOC(mesh,mesh->tetra,mesh->nemax+1,nemax_old+1,MMG5_Tetra,
+                  "tetra array", return 0);
 
   if ( mesh->adja ) {
     PMMG_RECALLOC(mesh,mesh->adja,4*mesh->nemax+5,4*nemax_old+5,int,
                   "adja array", return 0);
   }
 
-  PMMG_RECALLOC(mesh,mesh->xtetra,mesh->xtmax+1,xtmax_old+1,MMG5_xTetra,
-                "boundary tetra array", return 0);
+  if ( !xtmax_old )
+    PMMG_CALLOC(mesh, mesh->xtetra, mesh->xtmax+1, MMG5_xTetra,
+                "boundary tetra array", return 0);
+  else
+    PMMG_RECALLOC(mesh,mesh->xtetra,mesh->xtmax+1,xtmax_old+1,MMG5_xTetra,
+                  "boundary tetra array", return 0);
 
   return ( PMMG_link_mesh( mesh ) );
 }
@@ -643,7 +681,7 @@ int PMMG_resize_extCommArray ( PMMG_pParMesh parmesh,PMMG_pExt_comm *ext_comm,
       PMMG_DEL_MEM ( parmesh,(*ext_comm+k)->rtosend, double,"rtosend" );
 
     if ( (*ext_comm+k)->rtorecv )
-      PMMG_DEL_MEM ( parmesh,(*ext_comm+k)->rtorecv,int,"rtorecv" );
+      PMMG_DEL_MEM ( parmesh,(*ext_comm+k)->rtorecv,double,"rtorecv" );
 }
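As a closing illustration of the Step 2 policy added in PMMG_parmesh_SetMemGloMax, the standalone sketch below reproduces the decision tree described in the comments: with no -m value the detected node memory is split evenly between the MPI processes of the node, while a user-provided value is imposed as is and only flagged when it exceeds the per-process share of the node memory. The helper name, the constants and the example figures are invented for the illustration and are not part of the patch.

#include <stdio.h>
#include <stddef.h>

#define MILLION    1048576L   /* bytes per MB, stand-in for MMG5_MILLION */
#define DEFAULT_MB 800L       /* stand-in for the 800 MB default (MMG5_MEMMAX) */

/* Return the memory budget (in bytes) of one MPI process, mimicking the
 * policy of the patch: equirepartition by default, user value imposed as is. */
static size_t mem_budget(long user_mb, size_t node_bytes, int procs_per_node,
                         int *overflow)
{
  *overflow = 0;

  if ( user_mb <= 0 ) {
    /* no (or invalid) user value: split the node memory evenly */
    if ( !node_bytes )
      return (size_t)(DEFAULT_MB / procs_per_node) * MILLION;
    return node_bytes / procs_per_node;
  }

  /* user value imposed as is; flag it when it cannot be honoured safely */
  if ( node_bytes && (size_t)user_mb * MILLION > node_bytes / procs_per_node )
    *overflow = 1;

  return (size_t)user_mb * MILLION;
}

int main(void)
{
  size_t node = (size_t)64 * 1024 * MILLION;  /* a 64 GB node, for the example */
  int    overflow;

  /* no -m option, 8 ranks per node: each rank gets 64/8 = 8 GB */
  printf("auto : %zu MB\n", mem_budget(0, node, 8, &overflow) / MILLION);

  /* -m 16384 with 8 ranks per node: imposed, but flagged as risky */
  printf("user : %zu MB (overflow=%d)\n",
         mem_budget(16384, node, 8, &overflow) / MILLION, overflow);
  return 0;
}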