diff --git a/CMakeLists.txt b/CMakeLists.txt index 3011a98d..53ce1d1d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) +CMAKE_MINIMUM_REQUIRED(VERSION 3.5) INCLUDE(CMakeDependentOption) INCLUDE(cmake/modules/macros.cmake) diff --git a/cmake/testing/pmmg_tests.cmake b/cmake/testing/pmmg_tests.cmake index 75d71979..08e73f60 100644 --- a/cmake/testing/pmmg_tests.cmake +++ b/cmake/testing/pmmg_tests.cmake @@ -170,6 +170,31 @@ IF( BUILD_TESTING ) -out ${CI_DIR_RESULTS}/opnbdy-island.o.mesh ) + ### test -m option + #### The 2 tests are intentionally failing (inside an assert in debug mode, with an error + #### message otherwise) due to lack of memory when computing the hash table + add_test ( NAME memory-pmmg_sphere-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + -mmg-v 5 -v 5 -m 15 + ${CI_DIR}/Sphere/sphere + -out ${CI_DIR_RESULTS}/memory-sphere.o.mesh + ) + set_property(TEST memory-pmmg_sphere-2 + PROPERTY + PASS_REGULAR_EXPRESSION "MAXIMUM MEMORY AUTHORIZED PER PROCESS \\(MB\\) 15" + ) + + add_test ( NAME memory-mmg_sphere-2 + COMMAND ${MPIEXEC} ${MPI_ARGS} ${MPIEXEC_NUMPROC_FLAG} 2 $ + -mmg-v 5 -v 5 -m 15 + ${CI_DIR}/Sphere/sphere + -out ${CI_DIR_RESULTS}/memory-sphere.o.mesh + ) + set_property(TEST memory-mmg_sphere-2 + PROPERTY + PASS_REGULAR_EXPRESSION "MAXIMUM MEMORY AUTHORIZED \\(MB\\) 15" + ) + ############################################################################### ##### ##### Test centralized/distributed I/O (on multidomain and openbdy tests) diff --git a/src/mergemesh_pmmg.c b/src/mergemesh_pmmg.c index 252422f5..d2c8667d 100644 --- a/src/mergemesh_pmmg.c +++ b/src/mergemesh_pmmg.c @@ -1097,7 +1097,6 @@ int PMMG_gather_parmesh( PMMG_pParMesh parmesh, PMMG_pInt_comm *rcv_int_node_comm, int **rcv_next_node_comm, PMMG_pExt_comm **rcv_ext_node_comm ) { - size_t pack_size_tot,next_disp,*displs,buf_idx; int *rcv_pack_size,ier,ier_glob,k,ier_pack; int nprocs,root,pack_size; diff --git 
a/src/parmmg.c b/src/parmmg.c index 25114e95..e736fd31 100644 --- a/src/parmmg.c +++ b/src/parmmg.c @@ -24,7 +24,7 @@ /** * \file parmmg.c * \brief main file for the parmmg application - * \author Cécile Dobrzynski (Bx INP/Inria) + * \author Cecile Dobrzynski (Bx INP/Inria) * \author Algiane Froehly (Inria) * \version 5 * \copyright GNU Lesser General Public License. diff --git a/src/zaldy_pmmg.c b/src/zaldy_pmmg.c index bc3a7ac5..5757fbdd 100644 --- a/src/zaldy_pmmg.c +++ b/src/zaldy_pmmg.c @@ -58,7 +58,7 @@ void PMMG_parmesh_SetMemGloMax( PMMG_pParMesh parmesh ) assert ( (parmesh != NULL) && "trying to set glo max mem in empty parmesh" ); - /** Step 1: Get the numper of processes per node */ + /** Step 1: Get the number of processes per node */ MPI_Initialized( &flag ); if ( flag ) { @@ -71,30 +71,48 @@ void PMMG_parmesh_SetMemGloMax( PMMG_pParMesh parmesh ) parmesh->size_shm = 1; } - /** Step 2: Set maximal memory per process depending on the -m option setting */ + /** Step 2: Set maximal memory per process depending on the -m option setting: + - if the user doesn't provide a memory value or provides an invalid value: we equirepartite the memory over the MPI processes of the node. Functions that consume different amounts of memory depending on the process have to manage internally the memory repartition (for example the \a PMMG_loadMesh_centralized function). + - if the user provides a valid memory value (under or equal to the physical memory), it is used as is, assuming that the user knows what they are asking (it may be useful during the parallel calls of Mmg to not have a memory equirepartition as some processes may use a smaller amount of memory than others but we are not able to predict it). 
+ */ maxAvail = MMG5_memSize(); if ( parmesh->info.mem <= 0 ) { - /* Nos users specifications */ + /* No user specification: equirepartition of the memory over the processes of the node */ if ( !maxAvail ) { /* default value when not able to compute the available memory = 800 MB */ printf(" Maximum memory per process set to default value: %d MB.\n",MMG5_MEMMAX); - parmesh->memGloMax = MMG5_MEMMAX << 20; + parmesh->memGloMax = (MMG5_MEMMAX/parmesh->size_shm) << 20; } else { - /* maximal memory = total physical memory */ - parmesh->memGloMax = maxAvail; + /* maximal memory = equirepartition of total physical memory over the MPI processes of the node. */ + parmesh->memGloMax = maxAvail/parmesh->size_shm; } } else { - /* memory asked by user if possible, otherwise total physical memory */ + int memOverflow = 0; + /* Memory asked by user if possible (authorized to ask the entire node memory per process, independently of the number of processes per node). */ if ( maxAvail && (size_t)parmesh->info.mem*MMG5_MILLION > maxAvail ) { - fprintf(stderr,"\n ## Warning: %s: asking for %d MB of memory per process ", + /* User asks for more than the memory of the node */ + fprintf(stdout,"\n ## Warning: %s: asking for %d MB of memory per process ", __func__,parmesh->info.mem); - fprintf(stderr,"when only %zu available.\n",maxAvail/MMG5_MILLION); + fprintf(stdout,"when only %zu available on the node.\n",maxAvail/MMG5_MILLION); + memOverflow = 1; } else { - parmesh->memGloMax= (size_t)parmesh->info.mem*MMG5_MILLION; + if ( (size_t)parmesh->info.mem*MMG5_MILLION > maxAvail/parmesh->size_shm ) { + /* User asks for more than the equirepartition of the node memory across the MPI processes */ + fprintf(stdout,"\n ## Warning: %s: asking for %d MB per MPI process with %d process per node and %zu MB available on the node.\n", + __func__,parmesh->info.mem,parmesh->size_shm,maxAvail/MMG5_MILLION); + memOverflow = 1; + } + } + + /* In all cases, impose what the user asks */ + parmesh->memGloMax= (size_t)parmesh->info.mem*MMG5_MILLION; + 
+ if ( memOverflow ) { + fprintf(stdout," The program may run out of memory and be killed (Signal 9 or SIGKILL error).\n\n"); } } @@ -122,6 +140,10 @@ int PMMG_parmesh_SetMemMax( PMMG_pParMesh parmesh ) { for( i = 0; i < parmesh->ngrp; ++i ) { mesh = parmesh->listgrp[i].mesh; mesh->memMax = parmesh->memGloMax; + + /* Hack to not let Mmg recompute the available memory by itself (it has no + * knowledge that it is called in parallel) */ + mesh->info.mem = mesh->memMax/MMG5_MILLION; } return 1; @@ -317,7 +339,7 @@ int PMMG_setMeshSize_realloc( MMG5_pMesh mesh,int npmax_old,int xpmax_old, "adja array", return 0); } - if ( !nemax_old ) + if ( !xtmax_old ) PMMG_CALLOC(mesh, mesh->xtetra, mesh->xtmax+1, MMG5_xTetra, "boundary tetra array", return 0); else