From 96936a6f5ef32bba56e59f3abb10065e6b510dfd Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 3 Aug 2023 16:39:47 +0800 Subject: [PATCH 01/16] Removed DistributedGridapType --- src/MultilevelTools/GridapDistributedExtensions.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index 56fa3264..f3b8f346 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -65,7 +65,7 @@ function GridapDistributed.get_parts(x::VoidDistributedDiscreteModel) return x.parts end -struct VoidDistributedTriangulation{Dc,Dp,A} <: GridapDistributed.DistributedGridapType +struct VoidDistributedTriangulation{Dc,Dp,A} <: Gridap.GridapType parts::A function VoidDistributedTriangulation(Dc::Int,Dp::Int,parts) A = typeof(parts) @@ -85,7 +85,7 @@ function Gridap.Geometry.Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) end -struct VoidDistributedFESpace{A} <: GridapDistributed.DistributedGridapType +struct VoidDistributedFESpace{A} <: Gridap.GridapType parts::A end From 01df9454bd2a1a6cf6c47967fd1410eb6ea859c6 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 3 Aug 2023 16:40:33 +0800 Subject: [PATCH 02/16] Ignore LocalPreferences.toml --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f8509162..895712ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .vscode Manifest.toml +LocalPreferences.toml From d9eed7c0a5a4e65b0dbe5d79c8a0aa32f5e27b6f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 3 Aug 2023 17:51:36 +0800 Subject: [PATCH 03/16] Removed the map() spetialisation in GridapFixes.jl --- src/MultilevelTools/GridapFixes.jl | 33 ++---------------------------- test/seq/PatchLinearSolverTests.jl | 2 +- 2 files changed, 3 insertions(+), 32 deletions(-) diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl index e5ccac7d..a2e9ba32 100644 --- a/src/MultilevelTools/GridapFixes.jl +++ b/src/MultilevelTools/GridapFixes.jl @@ -1,35 +1,5 @@ -function Gridap.Adaptivity.change_domain_n2o(f_fine,ftrian::Gridap.Adaptivity.AdaptedTriangulation{Dc},ctrian::Gridap.Geometry.Triangulation,glue::Gridap.Adaptivity.AdaptivityGlue{<:Gridap.Adaptivity.RefinementGlue}) where Dc - fglue = Gridap.Geometry.get_glue(ftrian,Val(Dc)) - cglue = Gridap.Geometry.get_glue(ctrian,Val(Dc)) - - @notimplementedif Gridap.Geometry.num_point_dims(ftrian) != Dc - @notimplementedif isa(cglue,Nothing) - - if (num_cells(ctrian) != 0) - ### New Triangulation -> New Model - fine_tface_to_field = Gridap.CellData.get_data(f_fine) - fine_mface_to_field = Gridap.Geometry.extend(fine_tface_to_field,fglue.mface_to_tface) - - ### New Model -> Old Model - # f_c2f[i_coarse] = [f_fine[i_fine_1], ..., f_fine[i_fine_nChildren]] - f_c2f = Gridap.Adaptivity.f2c_reindex(fine_mface_to_field,glue) - - child_ids = Gridap.Adaptivity.f2c_reindex(glue.n2o_cell_to_child_id,glue) - rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue) - coarse_mface_to_field = lazy_map(Gridap.Adaptivity.FineToCoarseField,f_c2f,rrules,child_ids) - - ### Old Model -> Old Triangulation - coarse_tface_to_field = lazy_map(Reindex(coarse_mface_to_field),cglue.tface_to_mface) - f_coarse = lazy_map(Broadcasting(∘),coarse_tface_to_field,cglue.tface_to_mface_map) - - return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain()) - else 
- f_coarse = Fill(Gridap.Fields.ConstantField(0.0),num_cells(fcoarse)) - return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain()) - end -end - +""" function Base.map(::typeof(Gridap.Arrays.testitem), a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) a2=Gridap.Arrays.testitem(a[2]) @@ -37,6 +7,7 @@ function Base.map(::typeof(Gridap.Arrays.testitem), a1.=zero(Gridap.Arrays.testitem(a1)) (a1,a2) end +""" # This fix is required to be able to integrate in the overlapped mesh underlying patch smoothers function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) where Dt diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index 5df48824..e9c045ac 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -67,7 +67,7 @@ module PatchLinearSolverTests model = CartesianDiscreteModel(domain,partition) _,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) - parts = get_part_ids(sequential,(1,2)) + parts = get_part_ids(SequentialBackend(),(1,2)) dmodel = CartesianDiscreteModel(parts,domain,partition) _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); From 6b63a37ba2a651c0bb562d1b0dae70d3e71d6340 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 8 Aug 2023 19:17:56 +1000 Subject: [PATCH 04/16] Started making changes. GridapSolvers compiles --- Project.toml | 10 +- src/LinearSolvers/GMGLinearSolvers.jl | 12 +- src/LinearSolvers/JacobiLinearSolvers.jl | 6 +- .../DistributedGridTransferOperators.jl | 8 +- .../GridapDistributedExtensions.jl | 10 +- src/MultilevelTools/ModelHierarchies.jl | 14 +- src/MultilevelTools/MultilevelTools.jl | 10 +- src/MultilevelTools/RedistributeTools.jl | 251 ------------------ src/MultilevelTools/RefinementTools.jl | 10 +- src/MultilevelTools/SubpartitioningTools.jl | 6 +- .../mpi/PatchDecompositions.jl | 8 +- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 18 +- .../seq/PatchBasedLinearSolvers.jl | 2 +- .../DistributedGridTransferOperatorsTests.jl | 4 +- .../DistributedPatchFESpacesDebuggingTests.jl | 28 +- test/seq/DistributedPatchFESpacesTests.jl | 10 +- 16 files changed, 77 insertions(+), 330 deletions(-) delete mode 100644 src/MultilevelTools/RedistributeTools.jl diff --git a/Project.toml b/Project.toml index 5be24ce5..14ef43da 100644 --- a/Project.toml +++ b/Project.toml @@ -18,14 +18,12 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] ArgParse = "1" -FillArrays = "0.9, 0.10, 0.11, 0.12" -Gridap = "0.17.17" -GridapDistributed = "0.2.7" +FillArrays = "0.9, 0.10, 0.11, 0.12, 0.13, 1.0" +Gridap = "0.17" +GridapDistributed = "0.3" GridapP4est = "0.2" -GridapPETSc = "0.4" IterativeSolvers = "0.9" -MPI = "0.19" -PartitionedArrays = "0.2.15" +PartitionedArrays = "0.3" julia = "1.7" [extras] diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index 9cd424c8..6b88f424 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -116,7 +116,7 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial - cache = map_parts(mat.owned_owned_values) do Ah + cache = map(mat.owned_owned_values) do Ah ss = symbolic_setup(coarsest_solver, Ah) numerical_setup(ss, Ah) end @@ -136,7 +136,7 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # 
Serial - cache = map_parts(mat.owned_owned_values) do Ah + cache = map(mat.owned_owned_values) do Ah rh = convert(PETScVector,fill(0.0,size(A,2))) xh = convert(PETScVector,fill(0.0,size(A,2))) ss = symbolic_setup(coarsest_solver, Ah) @@ -183,9 +183,9 @@ function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMa return work_vectors end -function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,rh::PVector,caches) +function solve_coarsest_level!(parts::AbstractArray,::LinearSolver,xh::PVector,rh::PVector,caches) if (num_parts(parts) == 1) - map_parts(xh.owned_values,rh.owned_values) do xh, rh + map(xh.owned_values,rh.owned_values) do xh, rh solve!(xh,caches,rh) end else @@ -193,10 +193,10 @@ function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,r end end -function solve_coarsest_level!(parts::AbstractPData,::PETScLinearSolver,xh::PVector,rh::PVector,caches) +function solve_coarsest_level!(parts::AbstractArray,::PETScLinearSolver,xh::PVector,rh::PVector,caches) solver_ns, xh_petsc, rh_petsc = caches if (num_parts(parts) == 1) - map_parts(xh.owned_values,rh.owned_values) do xh, rh + map(xh.owned_values,rh.owned_values) do xh, rh copy!(rh_petsc,rh) solve!(xh_petsc,solver_ns,rh_petsc) copy!(xh,xh_petsc) diff --git a/src/LinearSolvers/JacobiLinearSolvers.jl b/src/LinearSolvers/JacobiLinearSolvers.jl index efe6ca63..abe8512b 100644 --- a/src/LinearSolvers/JacobiLinearSolvers.jl +++ b/src/LinearSolvers/JacobiLinearSolvers.jl @@ -22,14 +22,14 @@ function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMa end function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix) - inv_diag=map_parts(A.owned_owned_values) do a + inv_diag=map(A.owned_owned_values) do a 1.0 ./ diag(a) end return JacobiNumericalSetup(inv_diag) end function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::PSparseMatrix) - map_parts(ns.inv_diag,A.owned_owned_values) do inv_diag, a + map(ns.inv_diag,A.owned_owned_values) do inv_diag, a inv_diag .= 1.0 ./ diag(a) end return ns @@ -43,7 +43,7 @@ end function Gridap.Algebra.solve!(x::PVector, ns::JacobiNumericalSetup, b::PVector) inv_diag = ns.inv_diag - map_parts(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b + map(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b x .= inv_diag .* b end return x diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index d991bed6..f6070aa7 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -214,7 +214,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout - exchange!(fv_h) + consistent!(fv_h) restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout @@ -249,7 +249,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - exchange!(fv_h_red) + consistent!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition @@ -270,7 +270,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - 
exchange!(fv_h_red) + consistent!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection coarse partition @@ -295,7 +295,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - exchange!(fv_h_red) + consistent!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index f3b8f346..55efe81b 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -6,7 +6,7 @@ function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{ args...) where {Dc,Dp} itrians = change_parts(local_views(it),get_parts(tt);default=void(BodyFittedTriangulation{Dc,Dp})) - measures = map_parts(local_views(tt),itrians) do ttrian, itrian + measures = map(local_views(tt),itrians) do ttrian, itrian Measure(ttrian,itrian,args...) end return GridapDistributed.DistributedMeasure(measures) @@ -18,9 +18,9 @@ end # change_parts -function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) - x_new = map_parts(new_parts) do _p - if isa(x,AbstractPData) +function change_parts(x::Union{AbstractArray,Nothing}, new_parts; default=nothing) + x_new = map(new_parts) do _p + if isa(x,AbstractArray) x.part else default @@ -41,7 +41,7 @@ end # DistributedFESpaces function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) - spaces = map_parts(local_views(U)) do U + spaces = map(local_views(U)) do U U.space end return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 8c50802c..51aeae5f 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -18,7 +18,7 @@ end """ """ struct ModelHierarchy - level_parts :: Vector{PartitionedArrays.AbstractPData} + level_parts :: Vector{PartitionedArrays.AbstractArray} levels :: Vector{ModelHierarchyLevel} end @@ -50,7 +50,7 @@ has_refinement(a::ModelHierarchyLevel{A,Nothing}) where A = false each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. """ -function ModelHierarchy(root_parts ::AbstractPData, +function ModelHierarchy(root_parts ::AbstractArray, model ::GridapDistributed.DistributedDiscreteModel, num_procs_x_level ::Vector{<:Integer}; mesh_refinement = true, @@ -58,7 +58,7 @@ function ModelHierarchy(root_parts ::AbstractPData, # Request correct number of parts from MAIN model_parts = get_parts(model) - my_num_parts = map_parts(root_parts) do _p + my_num_parts = map(root_parts) do _p num_parts(model_parts) # == -1 if !i_am_in(my_parts) end main_num_parts = get_main_part(my_num_parts) @@ -80,7 +80,7 @@ function ModelHierarchy(root_parts ::AbstractPData, @error "Model parts do not correspond to coarsest or finest parts!" 
end -function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData, +function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractArray, bottom_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) @@ -106,7 +106,7 @@ function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData return convert_to_void_models(mh) end -function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, +function _model_hierarchy_without_refinement_top_down(root_parts::AbstractArray, top_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) @@ -132,7 +132,7 @@ function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, return convert_to_void_models(mh) end -function _model_hierarchy_by_refinement(root_parts::AbstractPData, +function _model_hierarchy_by_refinement(root_parts::AbstractArray, coarsest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; num_refs_x_level=nothing) @@ -164,7 +164,7 @@ function _model_hierarchy_by_refinement(root_parts::AbstractPData, return convert_to_adapted_models(mh) end -function _model_hierarchy_by_coarsening(root_parts::AbstractPData, +function _model_hierarchy_by_coarsening(root_parts::AbstractArray, finest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; num_refs_x_level=nothing) diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 447ae753..8501afad 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -11,7 +11,11 @@ using Gridap.Geometry using Gridap.FESpaces using Gridap.Adaptivity using PartitionedArrays + using GridapDistributed +using GridapDistributed: redistribute_cell_dofs, redistribute_cell_dofs! +using GridapDistributed: redistribute_free_values, redistribute_free_values! +using GridapDistributed: redistribute_fe_function import LinearAlgebra: mul! import GridapDistributed: local_views @@ -21,9 +25,6 @@ export change_parts export generate_level_parts export generate_subparts -export redistribute_fe_function -export redistribute_free_values! - export ModelHierarchy export num_levels, get_level, get_level_parts export get_model, get_model_before_redist, has_refinement, has_redistribution @@ -38,10 +39,9 @@ export setup_transfer_operators export mul! 
include("SubpartitioningTools.jl") -include("GridapDistributedExtensions.jl") +#include("GridapDistributedExtensions.jl") include("GridapFixes.jl") include("RefinementTools.jl") -include("RedistributeTools.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") include("DistributedGridTransferOperators.jl") diff --git a/src/MultilevelTools/RedistributeTools.jl b/src/MultilevelTools/RedistributeTools.jl deleted file mode 100644 index 54813a44..00000000 --- a/src/MultilevelTools/RedistributeTools.jl +++ /dev/null @@ -1,251 +0,0 @@ -function _allocate_cell_wise_dofs(cell_to_ldofs) - map_parts(cell_to_ldofs) do cell_to_ldofs - cache = array_cache(cell_to_ldofs) - ncells = length(cell_to_ldofs) - ptrs = Vector{Int32}(undef,ncells+1) - for cell in 1:ncells - ldofs = getindex!(cache,cell_to_ldofs,cell) - ptrs[cell+1] = length(ldofs) - end - PartitionedArrays.length_to_ptrs!(ptrs) - ndata = ptrs[end]-1 - data = Vector{Float64}(undef,ndata) - PartitionedArrays.Table(data,ptrs) - end -end - -function _update_cell_dof_values_with_local_info!(cell_dof_values_new, - cell_dof_values_old, - new2old) - map_parts(cell_dof_values_new, - cell_dof_values_old, - new2old) do cell_dof_values_new,cell_dof_values_old,new2old - ocache = array_cache(cell_dof_values_old) - for (ncell,ocell) in enumerate(new2old) - if ocell!=0 - # Copy ocell to ncell - oentry = getindex!(ocache,cell_dof_values_old,ocell) - range = cell_dof_values_new.ptrs[ncell]:cell_dof_values_new.ptrs[ncell+1]-1 - cell_dof_values_new.data[range] .= oentry - end - end - end -end - -function _allocate_comm_data(num_dofs_x_cell,lids) - map_parts(num_dofs_x_cell,lids) do num_dofs_x_cell,lids - n = length(lids) - ptrs = Vector{Int32}(undef,n+1) - ptrs.= 0 - for i = 1:n - for j = lids.ptrs[i]:lids.ptrs[i+1]-1 - ptrs[i+1] = ptrs[i+1] + num_dofs_x_cell.data[j] - end - end - PartitionedArrays.length_to_ptrs!(ptrs) - ndata = ptrs[end]-1 - data = Vector{Float64}(undef,ndata) - PartitionedArrays.Table(data,ptrs) - end -end - -function _pack_snd_data!(snd_data,cell_dof_values,snd_lids) - map_parts(snd_data,cell_dof_values,snd_lids) do snd_data,cell_dof_values,snd_lids - cache = array_cache(cell_dof_values) - s = 1 - for i = 1:length(snd_lids) - for j = snd_lids.ptrs[i]:snd_lids.ptrs[i+1]-1 - cell = snd_lids.data[j] - ldofs = getindex!(cache,cell_dof_values,cell) - - e = s+length(ldofs)-1 - range = s:e - snd_data.data[range] .= ldofs - s = e+1 - end - end - end -end - -function _unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids) - map_parts(cell_dof_values,rcv_data,rcv_lids) do cell_dof_values,rcv_data,rcv_lids - s = 1 - for i = 1:length(rcv_lids.ptrs)-1 - for j = rcv_lids.ptrs[i]:rcv_lids.ptrs[i+1]-1 - cell = rcv_lids.data[j] - range_cell_dof_values = cell_dof_values.ptrs[cell]:cell_dof_values.ptrs[cell+1]-1 - - e = s+length(range_cell_dof_values)-1 - range_rcv_data = s:e - cell_dof_values.data[range_cell_dof_values] .= rcv_data.data[range_rcv_data] - s = e+1 - end - end - end -end - -function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{false}) - return glue.lids_rcv, glue.lids_snd, glue.parts_rcv, glue.parts_snd, glue.new2old -end - -function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{true}) - return glue.lids_snd, glue.lids_rcv, glue.parts_snd, glue.parts_rcv, glue.old2new -end - -function _num_dofs_x_cell(cell_dofs_array,lids) - map_parts(cell_dofs_array,lids) do cell_dofs_array, lids - data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array) ] - 
PartitionedArrays.Table(data,lids.ptrs) - end -end - -function get_redistribute_cell_dofs_cache(cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) - - cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) - cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) - - num_dofs_x_cell_snd = _num_dofs_x_cell(cell_dof_values_old, lids_snd) - num_dofs_x_cell_rcv = _num_dofs_x_cell(cell_dof_ids_new, lids_rcv) - snd_data = _allocate_comm_data(num_dofs_x_cell_snd, lids_snd) - rcv_data = _allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv) - - cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) - - caches = snd_data, rcv_data, cell_dof_values_new - return caches -end - -function redistribute_cell_dofs(cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - return redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) -end - -function redistribute_cell_dofs!(caches, - cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - snd_data, rcv_data, cell_dof_values_new = caches - lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) - - cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) - cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) - - _pack_snd_data!(snd_data,cell_dof_values_old,lids_snd) - - tout = async_exchange!(rcv_data, - snd_data, - parts_rcv, - parts_snd, - PartitionedArrays._empty_tasks(parts_rcv)) - map_parts(schedule,tout) - - # We have to build the owned part of "cell_dof_values_new" out of - # 1. cell_dof_values_old (for those cells s.t. new2old[:]!=0) - # 2. cell_dof_values_new_rcv (for those cells s.t. new2old[:]=0) - _update_cell_dof_values_with_local_info!(cell_dof_values_new, - cell_dof_values_old, - new2old) - - map_parts(wait,tout) - _unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) - - # Now that every part knows it's new owned dofs, exchange ghosts - new_parts = get_parts(model_new) - cell_dof_values_new = change_parts(cell_dof_values_new,new_parts) - if i_am_in(new_parts) - fgids = get_cell_gids(model_new) - exchange!(cell_dof_values_new,fgids.exchanger) - end - - return cell_dof_values_new -end - -function get_redistribute_free_values_cache(fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing - cell_dof_ids_new = !isa(fv_new,Nothing) ? 
map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing - caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - return caches -end - -function redistribute_free_values!(fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - caches = get_redistribute_free_values_cache(fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) - return redistribute_free_values!(caches,fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) -end - -function redistribute_free_values!(caches, - fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing - cell_dof_ids_new = !isa(fv_new,Nothing) ? map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing - cell_dof_values_new = redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - # Gather the new free dofs - if !isa(fv_new,Nothing) - Gridap.FESpaces.gather_free_values!(fv_new,Uh_new,cell_dof_values_new) - end - return fv_new -end - -function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSingleFieldFEFunction,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - cell_dof_values_old = !isa(uh_old,Nothing) ? map_parts(get_cell_dof_values,local_views(uh_old)) : nothing - cell_dof_ids_new = !isa(Uh_new,VoidDistributedFESpace) ? 
map_parts(get_cell_dof_ids,local_views(Uh_new)) : nothing - cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - # Assemble the new FEFunction - if i_am_in(get_parts(Uh_new)) - free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) - free_values = PVector(free_values,Uh_new.gids) - uh_new = FEFunction(Uh_new,free_values,dirichlet_values) - return uh_new - else - return nothing - end -end - -function Gridap.FESpaces.gather_free_and_dirichlet_values(f::GridapDistributed.DistributedFESpace,cv) - free_values, dirichlet_values = map_parts(local_views(f),cv) do f, cv - Gridap.FESpaces.gather_free_and_dirichlet_values(f,cv) - end - return free_values, dirichlet_values -end \ No newline at end of file diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index e2930cc5..603d35e4 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -1,19 +1,19 @@ # DistributedAdaptedTriangulations -const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}} +const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractArray{<:AdaptedTriangulation{Dc,Dp}}} # Restriction of dofs function restrict_dofs!(fv_c::PVector, fv_f::PVector, - dv_f::AbstractPData, + dv_f::AbstractArray, U_f ::GridapDistributed.DistributedSingleFieldFESpace, U_c ::GridapDistributed.DistributedSingleFieldFESpace, - glue::AbstractPData{<:AdaptivityGlue}) + glue::AbstractArray{<:AdaptivityGlue}) - map_parts(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) - exchange!(fv_c) + map(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) + consistent!(fv_c) return fv_c end diff --git a/src/MultilevelTools/SubpartitioningTools.jl b/src/MultilevelTools/SubpartitioningTools.jl index 00d80187..f1ea2a84 100644 --- a/src/MultilevelTools/SubpartitioningTools.jl +++ b/src/MultilevelTools/SubpartitioningTools.jl @@ -1,5 +1,5 @@ -function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) +function generate_subparts(root_parts::AbstractArray,subpart_size::Integer) root_comm = root_parts.comm rank = MPI.Comm_rank(root_comm) size = MPI.Comm_size(root_comm) @@ -14,14 +14,14 @@ function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) return get_part_ids(comm) end -function generate_level_parts(root_parts::AbstractPData,last_level_parts::AbstractPData,level_parts_size::Integer) +function generate_level_parts(root_parts::AbstractArray,last_level_parts::AbstractArray,level_parts_size::Integer) if level_parts_size == num_parts(last_level_parts) return last_level_parts end return generate_subparts(root_parts,level_parts_size) end -function generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer}) +function generate_level_parts(root_parts::AbstractArray,num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(parts)}(undef,num_levels) level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1]) diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 36c5dcba..160890d8 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ 
b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -10,7 +10,7 @@ function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} mark_interface_facets!(model) - patch_decompositions = map_parts(local_views(model)) do lmodel + patch_decompositions = map(local_views(model)) do lmodel PatchDecomposition(lmodel; Dr=Dr, patch_boundary_style=patch_boundary_style, @@ -35,7 +35,7 @@ function PatchDecomposition(mh::ModelHierarchy;kwargs...) end function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) - trians = map_parts(a.patch_decompositions) do a + trians = map(a.patch_decompositions) do a Triangulation(a) end return GridapDistributed.DistributedTriangulation(trians,a.model) @@ -43,7 +43,7 @@ end function get_patch_root_dim(a::DistributedPatchDecomposition) patch_root_dim = -1 - map_parts(a.patch_decompositions) do patch_decomposition + map(a.patch_decompositions) do patch_decomposition patch_root_dim = patch_decomposition.Dr end return patch_root_dim @@ -53,7 +53,7 @@ function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteMode face_labeling = get_face_labeling(model) topo = get_grid_topology(model) - map_parts(local_views(face_labeling),local_views(topo)) do face_labeling, topo + map(local_views(face_labeling),local_views(topo)) do face_labeling, topo tag_to_name = face_labeling.tag_to_name tag_to_entities = face_labeling.tag_to_entities d_to_dface_to_entity = face_labeling.d_to_dface_to_entity diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 5ba023b3..43287f2e 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -14,7 +14,7 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, Vh::GridapDistributed.DistributedSingleFieldFESpace) root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) - spaces = map_parts(local_views(model), + spaces = map(local_views(model), local_views(patch_decomposition), local_views(Vh), root_gids.partition) do model, patch_decomposition, Vh, partition @@ -24,7 +24,7 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, end parts = get_parts(model) - local_ndofs = map_parts(num_free_dofs,spaces) + local_ndofs = map(num_free_dofs,spaces) global_ndofs = sum(local_ndofs) first_gdof, _ = xscan(+,reduce,local_ndofs,init=1) # This PRange has no ghost dofs @@ -57,10 +57,10 @@ end function prolongate!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector) - map_parts(x.values,Ph.spaces,y.values) do x,Ph,y + map(x.values,Ph.spaces,y.values) do x,Ph,y prolongate!(x,Ph,y) end - exchange!(x) + consistent!(x) end # x \in SingleFESpace @@ -71,14 +71,14 @@ function inject!(x::PVector, w::PVector, w_sums::PVector) - #exchange!(y) - map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums + #consistent!(y) + map(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums inject!(x,Ph,y,w,w_sums) end # Exchange local contributions assemble!(x) - exchange!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later? + consistent!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later? 
return x end @@ -86,13 +86,13 @@ function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFE # Local weights and partial sums w = PVector(0.0,Ph.gids) w_sums = PVector(0.0,Vh.gids) - map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph + map(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph compute_weight_operators!(Ph,Ph.Vh,w,w_sums) end # partial sums -> global sums assemble!(w_sums) # ghost -> owners - exchange!(w_sums) # repopulate ghosts with owner info + consistent!(w_sums) # repopulate ghosts with owner info return w, w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 171188dc..92cdfa79 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -112,7 +112,7 @@ function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSet rp, dxp, r, x = caches copy!(r,r_mat) - exchange!(r) + consistent!(r) prolongate!(rp,Ph,r) solve!(dxp,Ap_ns,rp) inject!(x,Ph,dxp,w,w_sums) diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 276cffdf..3dd19bd8 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -76,7 +76,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) if i_am_in(parts_H) y_ref = PVector(1.0,AH.cols) - tests = map_parts(y_ref.owned_values,yH1.owned_values,yH2.owned_values,yH3.owned_values) do y_ref,y1,y2,y3 + tests = map(y_ref.owned_values,yH1.owned_values,yH2.owned_values,yH3.owned_values) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end @test all(tests.part) @@ -94,7 +94,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) mul!(yh3,P3,xH) y_ref = PVector(1.0,Ah.cols) - tests = map_parts(y_ref.owned_values,yh1.owned_values,yh2.owned_values,yh2.owned_values) do y_ref,y1,y2,y3 + tests = map(y_ref.owned_values,yh1.owned_values,yh2.owned_values,yh2.owned_values) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end @test all(tests.part) diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index ecf2202a..16e6350b 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -98,7 +98,7 @@ function run(parts) x1_mat = PVector(0.5,Ah.cols) r1_mat = fh-Ah*x1_mat - exchange!(r1_mat) + consistent!(r1_mat) r1 = PVector(0.0,Vh.gids) x1 = PVector(0.0,Vh.gids) @@ -108,11 +108,11 @@ function run(parts) xp_mat = PVector(0.0,Ahp.cols) copy!(r1,r1_mat) - exchange!(r1) + consistent!(r1) PBS.prolongate!(rp,Ph,r1) # OK copy!(rp_mat,rp) - exchange!(rp_mat) + consistent!(rp_mat) solve!(xp_mat,LUns,rp_mat) copy!(xp,xp_mat) # Some big numbers appear here.... 
@@ -124,16 +124,16 @@ function run(parts) x2_mat = PVector(0.5,Ah.cols) r2_mat = fh-Ah*x2_mat - exchange!(r2_mat) + consistent!(r2_mat) solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson x3_mat = PVector(0.5,Ah.cols) r3_mat = fh-Ah*x3_mat - exchange!(r3_mat) + consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) - exchange!(x3_mat) + consistent!(x3_mat) # Outputs res = Dict{String,Any}() @@ -165,7 +165,7 @@ Mm,PDm,Phm,Vhm,res_multi = run(parts); println(repeat('#',80)) -map_parts(local_views(Ms)) do model +map(local_views(Ms)) do model cell_ids = get_cell_node_ids(model) cell_coords = get_cell_coordinates(model) display(reshape(cell_ids,length(cell_ids))) @@ -178,29 +178,29 @@ vertex_gids = get_face_gids(Mm,0) edge_gids = get_face_gids(Mm,1) println(">>> Cell gids") -map_parts(cell_gids.partition) do p +map(cell_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('-',80)) println(">>> Vertex gids") -map_parts(vertex_gids.partition) do p +map(vertex_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('-',80)) println(">>> Edge gids") -map_parts(edge_gids.partition) do p +map(edge_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('#',80)) -map_parts(local_views(Phs)) do Ph +map(local_views(Phs)) do Ph display(Ph.patch_cell_dofs_ids) end; -map_parts(local_views(Phm)) do Ph +map(local_views(Phm)) do Ph display(Ph.patch_cell_dofs_ids) end; @@ -211,10 +211,10 @@ for key in keys(res_single) val_m = res_multi[key] println(">>> ", key) - map_parts(val_s.values) do v + map(val_s.values) do v println(transpose(v)) end; - map_parts(val_m.owned_values) do v + map(val_m.owned_values) do v println(transpose(v)) end; println(repeat('-',80)) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 18dc37c3..b95c9edd 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -89,7 +89,7 @@ Rns = numerical_setup(Rss,Ah) x1_mat = PVector(0.5,Ah.cols) r1_mat = fh-Ah*x1_mat -exchange!(r1_mat) +consistent!(r1_mat) r1 = PVector(0.0,Vh.gids) x1 = PVector(0.0,Vh.gids) @@ -99,7 +99,7 @@ rp_mat = PVector(0.0,Ahp.cols) xp_mat = PVector(0.0,Ahp.cols) copy!(r1,r1_mat) -exchange!(r1) +consistent!(r1) PBS.prolongate!(rp,Ph,r1) copy!(rp_mat,rp) @@ -115,7 +115,7 @@ copy!(x1_mat,x1) x2_mat = PVector(0.5,Ah.cols) r2_mat = fh-Ah*x2_mat -exchange!(r2_mat) +consistent!(r2_mat) solve!(x2_mat,Mns,r2_mat) @@ -123,8 +123,8 @@ solve!(x2_mat,Mns,r2_mat) x3_mat = PVector(0.5,Ah.cols) r3_mat = fh-Ah*x3_mat -exchange!(r3_mat) +consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) -exchange!(x3_mat) +consistent!(x3_mat) end \ No newline at end of file From e8c0169dd7845e3837e7268373869f94acac157f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 9 Aug 2023 14:30:09 +1000 Subject: [PATCH 05/16] More fixes --- test/mpi/DistributedGridTransferOperatorsTests.jl | 7 ++++++- test/mpi/GMGLinearSolversHDivRTTests.jl | 8 ++++++-- test/mpi/GMGLinearSolversLaplacianTests.jl | 7 +++++-- test/mpi/GMGLinearSolversMUMPSTests.jl | 7 +++++-- test/mpi/GMGLinearSolversPoissonTests.jl | 7 +++++-- test/mpi/GMGLinearSolversVectorLaplacianTests.jl | 7 +++++-- test/mpi/MUMPSSolversTests.jl | 7 +++++-- test/mpi/ModelHierarchiesTests.jl | 7 +++++-- test/mpi/PRefinementGMGLinearSolversPoissonTests.jl | 7 +++++-- test/mpi/RedistributeToolsTests.jl | 7 +++++-- test/mpi/RefinementToolsTests.jl | 8 +++++--- test/mpi/RestrictDofsTests.jl | 7 +++++-- test/mpi/RichardsonSmoothersTests.jl | 7 +++++-- 
test/runtests.jl | 2 +- 14 files changed, 68 insertions(+), 27 deletions(-) diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 3dd19bd8..f4b521dd 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -109,7 +109,12 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements num_ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),num_ranks,num_parts_x_level,num_trees,num_refs_coarse) + +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +run(parts,num_parts_x_level,num_trees,num_refs_coarse) + println("AT THE END") MPI.Finalize() end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index a982d8da..fa090ef1 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -118,8 +118,12 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end + +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index b39377b9..a59df1d7 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -92,8 +92,11 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index 2d2cce68..1ee10d0e 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -112,8 +112,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 3 num_parts_x_level = [4,2] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 72dfc714..2f862ebd 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -89,8 +89,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl 
b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 86bd509d..5c70a20e 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -92,8 +92,11 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl index 5be1501a..c70ff407 100644 --- a/test/mpi/MUMPSSolversTests.jl +++ b/test/mpi/MUMPSSolversTests.jl @@ -73,8 +73,11 @@ function main(parts,partition) end partition = (32,32) -ranks = (2,2) -with_backend(main,MPIBackend(),ranks,partition) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index a7cc00e1..2e8beeca 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -41,8 +41,11 @@ end num_parts_x_level = [4,4,2,2] # Procs in each refinement level -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,num_parts_x_level) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,num_parts_x_level) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl index 9a8bd92c..79dd2442 100644 --- a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl +++ b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl @@ -93,8 +93,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,4,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,max_order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,max_order) MPI.Finalize() diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 20f130b9..a0539f48 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -82,7 +82,10 @@ num_parts_x_level = [4,2,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements -ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +run(parts,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 06a02165..6ebe86e6 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -79,12 +79,14 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end end - num_parts_x_level = [4,2,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # 
Number of initial refinements -ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +run(parts,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl index 47612a06..7378ddfc 100644 --- a/test/mpi/RestrictDofsTests.jl +++ b/test/mpi/RestrictDofsTests.jl @@ -89,8 +89,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index ca0d2835..6175ff9e 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -58,8 +58,11 @@ function main(parts,partition) end partition = (32,32) -ranks = (2,2) -with_backend(main,MPIBackend(),ranks,partition) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 0f62f474..8a02ea0a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -54,7 +54,7 @@ function run_tests(testdir) extra_args = "" end if ! image_file_exists - cmd = `$cmd -n $(np) --allow-run-as-root --oversubscribe $(Base.julia_cmd()) --project=. $(joinpath(testdir, f)) $(split(extra_args))` + cmd = `$cmd -n $(np) $(Base.julia_cmd()) --project=. $(joinpath(testdir, f)) $(split(extra_args))` else cmd = `$cmd -n $(np) --allow-run-as-root --oversubscribe $(Base.julia_cmd()) -J$(image_file_path) --project=. 
$(joinpath(testdir, f)) $(split(extra_args))` end From 82ed7892f1ff39700cd7ccdfc04095770791c50e Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 16 Aug 2023 15:50:51 +1000 Subject: [PATCH 06/16] More fixes --- src/LinearSolvers/GMGLinearSolvers.jl | 26 +++---- src/LinearSolvers/JacobiLinearSolvers.jl | 6 +- src/LinearSolvers/RichardsonSmoothers.jl | 4 +- .../DistributedGridTransferOperators.jl | 8 +- src/MultilevelTools/FESpaceHierarchies.jl | 30 +++++--- src/MultilevelTools/ModelHierarchies.jl | 76 +++++++------------ src/MultilevelTools/MultilevelTools.jl | 11 +-- src/MultilevelTools/RefinementTools.jl | 4 +- src/MultilevelTools/SubpartitioningTools.jl | 30 +++++--- .../seq/PatchBasedLinearSolvers.jl | 4 +- .../DistributedGridTransferOperatorsTests.jl | 28 +++---- test/mpi/GMGLinearSolversHDivRTTests.jl | 2 +- test/mpi/GMGLinearSolversLaplacianTests.jl | 2 +- test/mpi/GMGLinearSolversMUMPSTests.jl | 2 +- test/mpi/GMGLinearSolversPoissonTests.jl | 2 +- .../GMGLinearSolversVectorLaplacianTests.jl | 2 +- test/mpi/MUMPSSolversTests.jl | 2 +- ...PRefinementGMGLinearSolversPoissonTests.jl | 2 +- test/mpi/RefinementToolsTests.jl | 4 +- test/mpi/RestrictDofsTests.jl | 2 +- test/mpi/RichardsonSmoothersTests.jl | 2 +- test/seq/DegenerateTests.jl | 2 +- .../DistributedPatchFESpacesDebuggingTests.jl | 20 ++--- test/seq/DistributedPatchFESpacesTests.jl | 10 +-- test/seq/PatchLinearSolverTests.jl | 11 ++- 25 files changed, 148 insertions(+), 144 deletions(-) diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index 6b88f424..d03ed8d3 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -88,7 +88,7 @@ function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:Abstrac parts = get_level_parts(mh,1) if i_am_in(parts) Ah = smatrices[1] - rh = PVector(0.0, Ah.cols) + rh = pfill(0.0, partition(axes(Ah,2))) cache = rh end return cache @@ -116,11 +116,11 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial - cache = map(mat.owned_owned_values) do Ah + cache = map(own_values(mat)) do Ah ss = symbolic_setup(coarsest_solver, Ah) numerical_setup(ss, Ah) end - cache = cache.part + cache = PartitionedArrays.getany(cache) else # Parallel ss = symbolic_setup(coarsest_solver, mat) cache = numerical_setup(ss, mat) @@ -136,17 +136,17 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial - cache = map(mat.owned_owned_values) do Ah + cache = map(own_values(mat)) do Ah rh = convert(PETScVector,fill(0.0,size(A,2))) xh = convert(PETScVector,fill(0.0,size(A,2))) ss = symbolic_setup(coarsest_solver, Ah) ns = numerical_setup(ss, Ah) return ns, xh, rh end - cache = cache.part + cache = PartitionedArrays.getany(cache) else # Parallel - rh = convert(PETScVector,PVector(0.0,mat.cols)) - xh = convert(PETScVector,PVector(0.0,mat.cols)) + rh = convert(PETScVector,pfill(0.0,partition(axes(mat,2)))) + xh = convert(PETScVector,pfill(0.0,partition(axes(mat,2)))) ss = symbolic_setup(coarsest_solver, mat) ns = numerical_setup(ss, mat) cache = ns, xh, rh @@ -156,14 +156,14 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi end function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix},lev::Integer) - dxh = PVector(0.0, smatrices[lev].cols) - Adxh = PVector(0.0, 
smatrices[lev].rows) + dxh = pfill(0.0,partition(axes(smatrices[lev],2))) + Adxh = pfill(0.0,partition(axes(smatrices[lev],1))) cparts = get_level_parts(mh,lev+1) if i_am_in(cparts) AH = smatrices[lev+1] - rH = PVector(0.0,AH.cols) - dxH = PVector(0.0,AH.cols) + rH = pfill(0.0,partition(axes(AH,2))) + dxH = pfill(0.0,partition(axes(AH,2))) else rH = nothing dxH = nothing @@ -185,7 +185,7 @@ end function solve_coarsest_level!(parts::AbstractArray,::LinearSolver,xh::PVector,rh::PVector,caches) if (num_parts(parts) == 1) - map(xh.owned_values,rh.owned_values) do xh, rh + map(own_values(xh),own_values(rh)) do xh, rh solve!(xh,caches,rh) end else @@ -196,7 +196,7 @@ end function solve_coarsest_level!(parts::AbstractArray,::PETScLinearSolver,xh::PVector,rh::PVector,caches) solver_ns, xh_petsc, rh_petsc = caches if (num_parts(parts) == 1) - map(xh.owned_values,rh.owned_values) do xh, rh + map(own_values(xh),own_values(rh)) do xh, rh copy!(rh_petsc,rh) solve!(xh_petsc,solver_ns,rh_petsc) copy!(xh,xh_petsc) diff --git a/src/LinearSolvers/JacobiLinearSolvers.jl b/src/LinearSolvers/JacobiLinearSolvers.jl index abe8512b..29319cce 100644 --- a/src/LinearSolvers/JacobiLinearSolvers.jl +++ b/src/LinearSolvers/JacobiLinearSolvers.jl @@ -22,14 +22,14 @@ function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMa end function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix) - inv_diag=map(A.owned_owned_values) do a + inv_diag=map(own_values(A)) do a 1.0 ./ diag(a) end return JacobiNumericalSetup(inv_diag) end function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::PSparseMatrix) - map(ns.inv_diag,A.owned_owned_values) do inv_diag, a + map(ns.inv_diag,own_values(A)) do inv_diag, a inv_diag .= 1.0 ./ diag(a) end return ns @@ -43,7 +43,7 @@ end function Gridap.Algebra.solve!(x::PVector, ns::JacobiNumericalSetup, b::PVector) inv_diag = ns.inv_diag - map(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b + map(inv_diag,own_values(x),own_values(b)) do inv_diag, x, b x .= inv_diag .* b end return x diff --git a/src/LinearSolvers/RichardsonSmoothers.jl b/src/LinearSolvers/RichardsonSmoothers.jl index c36d086c..39b45fce 100644 --- a/src/LinearSolvers/RichardsonSmoothers.jl +++ b/src/LinearSolvers/RichardsonSmoothers.jl @@ -40,8 +40,8 @@ function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A:: end function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::PSparseMatrix) - Adx = PVector(0.0,A.rows) - dx = PVector(0.0,A.cols) + Adx = pfill(0.0,partition(axes(A,1))) + dx = pfill(0.0,partition(axes(A,2))) Mns = numerical_setup(ss.Mss,A) return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns) end diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index f6070aa7..8453d5ce 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -54,11 +54,11 @@ function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mod if i_am_in(cparts) model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) - fv_h = PVector(0.0,Uh.gids) + fv_h = pfill(0.0,partition(Uh.gids)) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) UH = get_fe_space(sh,lev+1) - fv_H = PVector(0.0,UH.gids) + fv_H = pfill(0.0,partition(UH.gids)) dv_H = (mode == :solution) ? 
get_dirichlet_dof_values(UH) : zero_dirichlet_values(UH) cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H @@ -102,7 +102,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: u,v = get_trial_fe_basis(UH), get_fe_basis(VH) data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir) AH,bH0 = assemble_matrix_and_vector(assem,data) - xH = PVector(0.0,AH.cols) + xH = pfill(0.0,partition(axes(AH,2))) bH = copy(bH0) cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem @@ -125,7 +125,7 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op Uh_red = get_fe_space(sh,lev) model_h_red = get_model(mh,lev) - fv_h_red = PVector(0.0,Uh_red.gids) + fv_h_red = pfill(0.0,partition(Uh_red.gids)) dv_h_red = (mode == :solution) ? get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red) glue = mh.levels[lev].red_glue diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index ef41ee1e..494d7f0f 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -42,30 +42,29 @@ function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int) get_fe_space_before_redist(fh[lev]) end +# Test/Trial FESpaces for ModelHierarchyLevels + function Gridap.FESpaces.TestFESpace( mh::ModelHierarchyLevel{A,B,C,Nothing},args...;kwargs...) where {A,B,C} Vh = TestFESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,nothing) end -function Gridap.FESpaces.TestFESpace( - mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} - Vh = TestFESpace(get_model_before_redist(mh),args...;kwargs...) +function Gridap.FESpaces.TestFESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} + cparts, _ = get_old_and_new_parts(mh.red_glue,Val(false)) + Vh = i_am_in(cparts) ? TestFESpace(get_model_before_redist(mh),args...;kwargs...) : nothing Vh_red = TestFESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end -function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,Nothing},u) where {A} - Uh = TrialFESpace(a.fe_space,u) - FESpaceHierarchyLevel(a.level,Uh,nothing) -end - -function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,B},u) where {A,B} - Uh = TrialFESpace(a.fe_space,u) - Uh_red = TrialFESpace(a.fe_space_red,u) +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel,u) + Uh = !isa(a.fe_space,Nothing) ? TrialFESpace(a.fe_space,u) : nothing + Uh_red = !isa(a.fe_space_red,Nothing) ? TrialFESpace(a.fe_space_red,u) : nothing FESpaceHierarchyLevel(a.level,Uh,Uh_red) end +# Test/Trial FESpaces for ModelHierarchies/FESpaceHierarchy + function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) 
test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i = 1:num_levels(mh) @@ -119,6 +118,8 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) FESpaceHierarchy(a.mh,trial_spaces) end +# Computing system matrices + function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Integer) return compute_hierarchy_matrices(trials,a,l,Fill(qdegree,num_levels(trials))) end @@ -153,3 +154,10 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Func end return mats, A, b end + +function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) + spaces = map(local_views(U)) do U + U.space + end + return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) +end \ No newline at end of file diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 51aeae5f..86709463 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -61,7 +61,7 @@ function ModelHierarchy(root_parts ::AbstractArray, my_num_parts = map(root_parts) do _p num_parts(model_parts) # == -1 if !i_am_in(my_parts) end - main_num_parts = get_main_part(my_num_parts) + main_num_parts = PartitionedArrays.getany(emit(my_num_parts)) if main_num_parts == num_procs_x_level[end] # Coarsest model if mesh_refinement @@ -80,12 +80,12 @@ function ModelHierarchy(root_parts ::AbstractArray, @error "Model parts do not correspond to coarsest or finest parts!" end -function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractArray, +function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractArray{T}, bottom_model::GridapDistributed.DistributedDiscreteModel, - num_procs_x_level::Vector{<:Integer}) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_procs_x_level::Vector{<:Integer}) where T + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[num_levels] = get_parts(bottom_model) meshes[num_levels] = ModelHierarchyLevel(num_levels,bottom_model,nothing,nothing,nothing) @@ -103,15 +103,15 @@ function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractArray end mh = ModelHierarchy(level_parts,meshes) - return convert_to_void_models(mh) + return mh end -function _model_hierarchy_without_refinement_top_down(root_parts::AbstractArray, +function _model_hierarchy_without_refinement_top_down(root_parts::AbstractArray{T}, top_model::GridapDistributed.DistributedDiscreteModel, - num_procs_x_level::Vector{<:Integer}) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_procs_x_level::Vector{<:Integer}) where T + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[1] = get_parts(top_model) model = top_model @@ -129,17 +129,17 @@ function _model_hierarchy_without_refinement_top_down(root_parts::AbstractArray, meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) mh = ModelHierarchy(level_parts,meshes) - return convert_to_void_models(mh) + return mh end -function 
_model_hierarchy_by_refinement(root_parts::AbstractArray, +function _model_hierarchy_by_refinement(root_parts::AbstractArray{T}, coarsest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; - num_refs_x_level=nothing) + num_refs_x_level=nothing) where T # TODO: Implement support for num_refs_x_level? (future work) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[num_levels] = get_parts(coarsest_model) meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) @@ -164,14 +164,14 @@ function _model_hierarchy_by_refinement(root_parts::AbstractArray, return convert_to_adapted_models(mh) end -function _model_hierarchy_by_coarsening(root_parts::AbstractArray, +function _model_hierarchy_by_coarsening(root_parts::AbstractArray{T}, finest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; - num_refs_x_level=nothing) + num_refs_x_level=nothing) where T # TODO: Implement support for num_refs_x_level? (future work) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[1] = get_parts(finest_model) model = finest_model @@ -202,34 +202,16 @@ function convert_to_adapted_models(mh::ModelHierarchy) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 cparts = get_level_parts(mh,lev+1) + mhlev = get_level(mh,lev) if i_am_in(cparts) - model = get_model_before_redist(mh,lev) - parent = get_model(mh,lev+1) - ref_glue = mh.levels[lev].ref_glue - model_ref = GridapDistributed.DistributedAdaptedDiscreteModel(model,parent,ref_glue) - else model = get_model_before_redist(mh,lev) - model_ref = VoidDistributedDiscreteModel(model) - end - levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) - end - levels[nlevs] = mh.levels[nlevs] - - return ModelHierarchy(mh.level_parts,levels) -end - -function convert_to_void_models(mh::ModelHierarchy) - nlevs = num_levels(mh) - levels = Vector{ModelHierarchyLevel}(undef,nlevs) - for lev in 1:nlevs-1 - cparts = get_level_parts(mh,lev+1) - if i_am_in(cparts) - model_ref = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) + ref_glue = mhlev.ref_glue + model_ref = GridapDistributed.DistributedAdaptedDiscreteModel(model,parent,ref_glue) else - model = get_model_before_redist(mh,lev) - model_ref = VoidDistributedDiscreteModel(model) + model_ref = nothing end - levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + levels[lev] = ModelHierarchyLevel(lev,model_ref,mhlev.ref_glue,mhlev.model_red,mhlev.red_glue) end levels[nlevs] = mh.levels[nlevs] diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 8501afad..9d27e510 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -13,17 +13,18 @@ using 
Gridap.Adaptivity using PartitionedArrays using GridapDistributed -using GridapDistributed: redistribute_cell_dofs, redistribute_cell_dofs! -using GridapDistributed: redistribute_free_values, redistribute_free_values! +using GridapDistributed: redistribute_cell_dofs, redistribute_cell_dofs!, get_redistribute_cell_dofs_cache +using GridapDistributed: redistribute_free_values, redistribute_free_values!, get_redistribute_free_values_cache using GridapDistributed: redistribute_fe_function +using GridapDistributed: get_old_and_new_parts +import GridapDistributed: generate_subparts import LinearAlgebra: mul! import GridapDistributed: local_views -export change_parts -export generate_level_parts -export generate_subparts +export change_parts, num_parts, i_am_in +export generate_level_parts, generate_subparts export ModelHierarchy export num_levels, get_level, get_level_parts diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 603d35e4..5dfad129 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -28,8 +28,8 @@ function restrict_dofs!(fv_c::AbstractVector, fine_cell_ids = get_cell_dof_ids(U_f) fine_cell_values = Gridap.Arrays.Table(lazy_map(Gridap.Arrays.PosNegReindex(fv_f,dv_f),fine_cell_ids.data),fine_cell_ids.ptrs) coarse_rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue) - f2c_cell_values = Gridap.Adaptivity.f2c_reindex(fine_cell_values,glue) - child_ids = Gridap.Adaptivity.f2c_reindex(glue.n2o_cell_to_child_id,glue) + f2c_cell_values = Gridap.Adaptivity.n2o_reindex(fine_cell_values,glue) + child_ids = Gridap.Adaptivity.n2o_reindex(glue.n2o_cell_to_child_id,glue) f2c_maps = lazy_map(FineToCoarseDofMap,coarse_rrules) caches = lazy_map(Gridap.Arrays.return_cache,f2c_maps,f2c_cell_values,child_ids) diff --git a/src/MultilevelTools/SubpartitioningTools.jl b/src/MultilevelTools/SubpartitioningTools.jl index f1ea2a84..e05bdee0 100644 --- a/src/MultilevelTools/SubpartitioningTools.jl +++ b/src/MultilevelTools/SubpartitioningTools.jl @@ -1,19 +1,29 @@ -function generate_subparts(root_parts::AbstractArray,subpart_size::Integer) - root_comm = root_parts.comm - rank = MPI.Comm_rank(root_comm) - size = MPI.Comm_size(root_comm) - Gridap.Helpers.@check all(subpart_size .<= size) - Gridap.Helpers.@check all(subpart_size .>= 1) +function num_parts(comm::MPI.Comm) + if comm != MPI.COMM_NULL + nparts = MPI.Comm_size(comm) + else + nparts = -1 + end + nparts +end - if rank < subpart_size - comm = MPI.Comm_split(root_comm, 0, 0) +num_parts(comm::MPIArray) = num_parts(comm.comm) +num_parts(comm::GridapDistributed.MPIVoidVector) = num_parts(comm.comm) + +function get_part_id(comm::MPI.Comm) + if comm != MPI.COMM_NULL + id = MPI.Comm_rank(comm)+1 else - comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + id = -1 end - return get_part_ids(comm) + id end +i_am_in(comm::MPI.Comm) = get_part_id(comm) >=0 +i_am_in(comm::MPIArray) = i_am_in(comm.comm) +i_am_in(comm::GridapDistributed.MPIVoidVector) = i_am_in(comm.comm) + function generate_level_parts(root_parts::AbstractArray,last_level_parts::AbstractArray,level_parts_size::Integer) if level_parts_size == num_parts(last_level_parts) return last_level_parts diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 92cdfa79..df48cd18 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -79,11 
+79,11 @@ function _allocate_row_vector(A::AbstractMatrix) end function _allocate_col_vector(A::PSparseMatrix) - PVector(0.0,A.cols) + pfill(0.0,partition(axes(A,2))) end function _allocate_row_vector(A::PSparseMatrix) - PVector(0.0,A.rows) + pfill(0.0,partition(axes(A,1))) end function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A::AbstractMatrix) diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index f4b521dd..aa4259d0 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -45,17 +45,17 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) if i_am_in(parts_h) i_am_main(parts_h) && println("Lev : ", lev) Ah = mats[lev] - xh = PVector(1.0,Ah.cols) - yh1 = PVector(0.0,Ah.cols) - yh2 = PVector(0.0,Ah.cols) - yh3 = PVector(0.0,Ah.cols) + xh = pfill(1.0,partition(axes(Ah,2))) + yh1 = pfill(0.0,partition(axes(Ah,2))) + yh2 = pfill(0.0,partition(axes(Ah,2))) + yh3 = pfill(0.0,partition(axes(Ah,2))) if i_am_in(parts_H) AH = mats[lev+1] - xH = PVector(1.0,AH.cols) - yH1 = PVector(0.0,AH.cols) - yH2 = PVector(0.0,AH.cols) - yH3 = PVector(0.0,AH.cols) + xH = pfill(1.0,partition(axes(AH,2))) + yH1 = pfill(0.0,partition(axes(AH,2))) + yH2 = pfill(0.0,partition(axes(AH,2))) + yH3 = pfill(0.0,partition(axes(AH,2))) else xH = nothing yH1 = nothing @@ -75,11 +75,11 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) mul!(yH3,R3,xh) if i_am_in(parts_H) - y_ref = PVector(1.0,AH.cols) - tests = map(y_ref.owned_values,yH1.owned_values,yH2.owned_values,yH3.owned_values) do y_ref,y1,y2,y3 + y_ref = pfill(1.0,partition(axes(AH,2))) + tests = map(own_values(y_ref),own_values(yH1),own_values(yH2),own_values(yH3)) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end - @test all(tests.part) + @test all(PartitionedArrays.getany(tests)) end # ---- Prolongation ---- @@ -93,11 +93,11 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) P3 = prolongations3[lev] mul!(yh3,P3,xH) - y_ref = PVector(1.0,Ah.cols) - tests = map(y_ref.owned_values,yh1.owned_values,yh2.owned_values,yh2.owned_values) do y_ref,y1,y2,y3 + y_ref = pfill(1.0,partition(axes(Ah))) + tests = map(own_values(y_ref),own_values(yh1),own_values(yh2),own_values(yh3)) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end - @test all(tests.part) + @test all(PartitionedArrays.getany(tests)) end end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index fa090ef1..9a07c5b8 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -79,7 +79,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-8, diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index a59df1d7..7a0a3046 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git 
a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index 1ee10d0e..de4d3ae8 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -76,7 +76,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 2f862ebd..4bf6a7d3 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 5c70a20e..58c3c6da 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl index c70ff407..67527175 100644 --- a/test/mpi/MUMPSSolversTests.jl +++ b/test/mpi/MUMPSSolversTests.jl @@ -57,7 +57,7 @@ function main(parts,partition) ss = symbolic_setup(P,A) ns = numerical_setup(ss,A) - x = PVector(1.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) solve!(x,ns,b) u = interpolate(sol,Uh) diff --git a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl index 79dd2442..0c27c4f0 100644 --- a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl +++ b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl @@ -58,7 +58,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 6ebe86e6..16eab53b 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -53,7 +53,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) Ah = assemble_matrix(ah,Uh,Vh) bh = assemble_vector(lh,Vh) - xh = PVector(0.0,Ah.cols) + xh = pfill(0.0,partition(axes(Ah,2))) IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) uH_projected = FEFunction(Uh,xh) @@ -67,7 +67,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) AH = assemble_matrix(aH,UH,VH) bH = assemble_vector(lH,VH) - xH = PVector(0.0,AH.cols) + xH = pfill(0.0,partition(axes(AH,2))) IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) uh_projected = FEFunction(UH,xH) diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl index 7378ddfc..c87c92ac 100644 --- a/test/mpi/RestrictDofsTests.jl +++ b/test/mpi/RestrictDofsTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, 
num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index 6175ff9e..2131c87b 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -38,7 +38,7 @@ function main(parts,partition) ss = symbolic_setup(P,A) ns = numerical_setup(ss,A) - x = PVector(1.0,A.cols) + x = pfill(1.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-8, diff --git a/test/seq/DegenerateTests.jl b/test/seq/DegenerateTests.jl index b64dcf2b..7d213ab6 100644 --- a/test/seq/DegenerateTests.jl +++ b/test/seq/DegenerateTests.jl @@ -16,7 +16,7 @@ include("../../src/GridapFixes.jl") function cg_solve(op) A = get_matrix(op) b = get_vector(op) - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) IterativeSolvers.cg!(x,A,b;verbose=true,reltol=1.0e-06) return x end diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index 16e6350b..69097719 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -96,16 +96,16 @@ function run(parts) # ---- Manual solve using LU ---- # - x1_mat = PVector(0.5,Ah.cols) + x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat consistent!(r1_mat) - r1 = PVector(0.0,Vh.gids) - x1 = PVector(0.0,Vh.gids) - rp = PVector(0.0,Ph.gids) - xp = PVector(0.0,Ph.gids) - rp_mat = PVector(0.0,Ahp.cols) - xp_mat = PVector(0.0,Ahp.cols) + r1 = pfill(0.0,partition(Vh.gids)) + x1 = pfill(0.0,partition(Vh.gids)) + rp = pfill(0.0,partition(Ph.gids)) + xp = pfill(0.0,partition(Ph.gids)) + rp_mat = pfill(0.0,partition(axes(Ahp,2))) + xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) consistent!(r1) @@ -122,14 +122,14 @@ function run(parts) # ---- Same using the PatchBasedSmoother ---- # - x2_mat = PVector(0.5,Ah.cols) + x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat consistent!(r2_mat) solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson - x3_mat = PVector(0.5,Ah.cols) + x3_mat = pfill(0.5,partition(axes(Ah,2))) r3_mat = fh-Ah*x3_mat consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) @@ -214,7 +214,7 @@ for key in keys(res_single) map(val_s.values) do v println(transpose(v)) end; - map(val_m.owned_values) do v + map(own_values(val_m)) do v println(transpose(v)) end; println(repeat('-',80)) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index b95c9edd..9c0a7638 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -87,7 +87,7 @@ Rns = numerical_setup(Rss,Ah) # ---- Manual solve using LU ---- # -x1_mat = PVector(0.5,Ah.cols) +x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat consistent!(r1_mat) @@ -95,8 +95,8 @@ r1 = PVector(0.0,Vh.gids) x1 = PVector(0.0,Vh.gids) rp = PVector(0.0,Ph.gids) xp = PVector(0.0,Ph.gids) -rp_mat = PVector(0.0,Ahp.cols) -xp_mat = PVector(0.0,Ahp.cols) +rp_mat = pfill(0.0,partition(axes(Ahp,2))) +xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) consistent!(r1) @@ -113,7 +113,7 @@ copy!(x1_mat,x1) # ---- Same using the PatchBasedSmoother ---- # -x2_mat = PVector(0.5,Ah.cols) +x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat consistent!(r2_mat) 
solve!(x2_mat,Mns,r2_mat) @@ -121,7 +121,7 @@ solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson -x3_mat = PVector(0.5,Ah.cols) +x3_mat = pfill(0.5,partition(axes(Ah,2))) r3_mat = fh-Ah*x3_mat consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index e9c045ac..afbbf2b6 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -61,19 +61,22 @@ module PatchLinearSolverTests ################################################## + parts = with_debug() do distribute + distribute(LinearIndices((2,))) + end + domain = (0.0,1.0,0.0,1.0) partition = (2,3) model = CartesianDiscreteModel(domain,partition) _,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) - parts = get_part_ids(SequentialBackend(),(1,2)) dmodel = CartesianDiscreteModel(parts,domain,partition) _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); @test num_free_dofs(Ph) == num_free_dofs(dPh) - @test all(dxh.owned_values.parts[1] .≈ xh[1:3]) - @test all(dxh.owned_values.parts[2] .≈ xh[4:end]) + @test all(own_values(dxh).items[1] .≈ xh[1:3]) + @test all(own_values(dxh).items[2] .≈ xh[4:end]) ################################################# @@ -88,5 +91,5 @@ module PatchLinearSolverTests dA,db = compute_matrix_vector(dmodel,dVh); dx = test_smoother(dPD,dPh,dVh,dA,db) - @test all(dx.owned_values.parts[1] .≈ x) + @test all(own_values(dx).items[1] .≈ x) end From 2c3fc0565104a244e80b26c1bf7d83c29abf46a2 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 17 Aug 2023 12:55:21 +1000 Subject: [PATCH 07/16] Bugfix: add fetch after ghost exchange --- .../DistributedGridTransferOperators.jl | 18 ++- src/MultilevelTools/FESpaceHierarchies.jl | 12 +- .../GridapDistributedExtensions.jl | 142 ------------------ src/MultilevelTools/GridapFixes.jl | 2 - src/MultilevelTools/MultilevelTools.jl | 1 - src/MultilevelTools/RefinementTools.jl | 2 +- src/MultilevelTools/SubpartitioningTools.jl | 12 ++ src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 6 +- .../seq/PatchBasedLinearSolvers.jl | 2 +- .../DistributedGridTransferOperatorsTests.jl | 4 +- test/mpi/RedistributeToolsTests.jl | 29 ++-- test/mpi/RefinementToolsTests.jl | 26 +++- .../DistributedPatchFESpacesDebuggingTests.jl | 10 +- test/seq/DistributedPatchFESpacesTests.jl | 10 +- 14 files changed, 80 insertions(+), 196 deletions(-) delete mode 100644 src/MultilevelTools/GridapDistributedExtensions.jl diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 8453d5ce..9e882c4e 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -53,11 +53,11 @@ function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mod if i_am_in(cparts) model_h = get_model_before_redist(mh,lev) - Uh = get_fe_space_before_redist(sh,lev) + Uh = get_fe_space_before_redist(sh,lev) fv_h = pfill(0.0,partition(Uh.gids)) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) - UH = get_fe_space(sh,lev+1) + UH = get_fe_space(sh,lev+1) fv_H = pfill(0.0,partition(UH.gids)) dv_H = (mode == :solution) ? 
get_dirichlet_dof_values(UH) : zero_dirichlet_values(UH) @@ -214,7 +214,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout - consistent!(fv_h) + consistent!(fv_h) |> fetch restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout @@ -249,7 +249,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - consistent!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition @@ -259,7 +259,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer copy!(y,fv_H) # FE layout -> Matrix layout end - return y + return y end # D.2) Restriction, with redistribution, by projection @@ -270,11 +270,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - consistent!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) + consistent!(fv_h) |> fetch uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) @@ -284,7 +285,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer copy!(y,xH) end - return y + return y end # D.3) Restriction, with redistribution, by dof selection (only nodal dofs) @@ -295,11 +296,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - consistent!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition if !isa(y,Nothing) + consistent!(fv_h) |> fetch restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout end diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index 494d7f0f..ff51728e 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -57,9 +57,9 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kw FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end -function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel,u) - Uh = !isa(a.fe_space,Nothing) ? TrialFESpace(a.fe_space,u) : nothing - Uh_red = !isa(a.fe_space_red,Nothing) ? TrialFESpace(a.fe_space_red,u) : nothing +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel,args...;kwargs...) + Uh = !isa(a.fe_space,Nothing) ? TrialFESpace(a.fe_space,args...;kwargs...) : nothing + Uh_red = !isa(a.fe_space_red,Nothing) ? TrialFESpace(a.fe_space_red,args...;kwargs...) 
: nothing FESpaceHierarchyLevel(a.level,Uh,Uh_red) end @@ -157,7 +157,11 @@ end function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) spaces = map(local_views(U)) do U - U.space + if isa(U,Gridap.FESpaces.UnconstrainedFESpace) + U + else + U.space + end end return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) end \ No newline at end of file diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl deleted file mode 100644 index 55efe81b..00000000 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ /dev/null @@ -1,142 +0,0 @@ - -# DistributedCompositeMeasure - -function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{Dc,Dp}, - it::GridapDistributed.DistributedTriangulation{Dc,Dp}, - args...) where {Dc,Dp} - itrians = change_parts(local_views(it),get_parts(tt);default=void(BodyFittedTriangulation{Dc,Dp})) - - measures = map(local_views(tt),itrians) do ttrian, itrian - Measure(ttrian,itrian,args...) - end - return GridapDistributed.DistributedMeasure(measures) -end - -function GridapDistributed.get_parts(x::GridapDistributed.DistributedFESpace) - PartitionedArrays.get_part_ids(local_views(x)) -end - -# change_parts - -function change_parts(x::Union{AbstractArray,Nothing}, new_parts; default=nothing) - x_new = map(new_parts) do _p - if isa(x,AbstractArray) - x.part - else - default - end - end - return x_new -end - -function change_parts(::Type{<:GridapDistributed.DistributedCellField},x,new_parts) - if isa(x,GridapDistributed.DistributedCellField) - fields = change_parts(local_views(x),new_parts) - else - fields = change_parts(nothing,new_parts;default=void(CellField)) - end - return GridapDistributed.DistributedCellField(fields) -end - -# DistributedFESpaces - -function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) - spaces = map(local_views(U)) do U - U.space - end - return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) -end - -# Void GridapDistributed structures - -struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.DistributedDiscreteModel{Dc,Dp} - parts::A - function VoidDistributedDiscreteModel(Dc::Int,Dp::Int,parts) - A = typeof(parts) - return new{Dc,Dp,A}(parts) - end -end - -function VoidDistributedDiscreteModel(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} - return VoidDistributedDiscreteModel(Dc,Dp,get_parts(model)) -end - -function GridapDistributed.get_parts(x::VoidDistributedDiscreteModel) - return x.parts -end - -struct VoidDistributedTriangulation{Dc,Dp,A} <: Gridap.GridapType - parts::A - function VoidDistributedTriangulation(Dc::Int,Dp::Int,parts) - A = typeof(parts) - return new{Dc,Dp,A}(parts) - end -end - -function GridapDistributed.get_parts(x::VoidDistributedTriangulation) - return x.parts -end - -function VoidDistributedTriangulation(trian::GridapDistributed.DistributedTriangulation{Dc,Dp}) where {Dc,Dp} - return VoidDistributedTriangulation(Dc,Dp,get_parts(trian)) -end - -function Gridap.Geometry.Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} - return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) -end - -struct VoidDistributedFESpace{A} <: Gridap.GridapType - parts::A -end - -function GridapDistributed.get_parts(x::VoidDistributedFESpace) - return x.parts -end - -function Gridap.FESpaces.TestFESpace(model::VoidDistributedDiscreteModel,args...;kwargs...) 
- return VoidDistributedFESpace(get_parts(model)) -end - -function Gridap.FESpaces.TrialFESpace(space::VoidDistributedFESpace,args...;kwargs...) - return VoidDistributedFESpace(get_parts(space)) -end - -function FESpaces.get_triangulation(f::VoidDistributedFESpace,model::VoidDistributedDiscreteModel) - return VoidDistributedTriangulation(model) -end - -# Void Gridap structures - -function void(::Type{<:CartesianDiscreteModel{Dc,Dp}}) where {Dc,Dp} - #domain = Tuple(fill(0.0,2*Dc)) - domain = Tuple(repeat([0,1],Dc)) - partition = Tuple(fill(0,Dc)) - return CartesianDiscreteModel(domain,partition) -end - -function void(::Type{<:UnstructuredDiscreteModel{Dc,Dp}}) where {Dc,Dp} - cmodel = void(CartesianDiscreteModel{Dc,Dp}) - return UnstructuredDiscreteModel(cmodel) -end - -function void(::Type{<:AdaptivityGlue}) - f2c_faces_map = [Int32[],Int32[],Int32[]] - fcell_to_child_id = Int32[] - rrules = Fill(void(RefinementRule),0) - return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,rrules) -end - -function void(::Type{<:RefinementRule}) - reffe = Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,1) - return RefinementRule(reffe,1) -end - -function void(::Type{<:BodyFittedTriangulation{Dc,Dp}}) where {Dc,Dp} - model = void(UnstructuredDiscreteModel{Dc,Dp}) - return Gridap.Geometry.Triangulation(model) -end - -function void(::Type{<:CellField}) - trian = void(BodyFittedTriangulation{2,2}) - return Gridap.CellData.CellField(0.0,trian,ReferenceDomain()) -end diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl index a2e9ba32..ae2f8efb 100644 --- a/src/MultilevelTools/GridapFixes.jl +++ b/src/MultilevelTools/GridapFixes.jl @@ -1,5 +1,4 @@ -""" function Base.map(::typeof(Gridap.Arrays.testitem), a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) a2=Gridap.Arrays.testitem(a[2]) @@ -7,7 +6,6 @@ function Base.map(::typeof(Gridap.Arrays.testitem), a1.=zero(Gridap.Arrays.testitem(a1)) (a1,a2) end -""" # This fix is required to be able to integrate in the overlapped mesh underlying patch smoothers function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) where Dt diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 9d27e510..6072714b 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -40,7 +40,6 @@ export setup_transfer_operators export mul! 
include("SubpartitioningTools.jl") -#include("GridapDistributedExtensions.jl") include("GridapFixes.jl") include("RefinementTools.jl") include("ModelHierarchies.jl") diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 5dfad129..a9449174 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -13,7 +13,7 @@ function restrict_dofs!(fv_c::PVector, glue::AbstractArray{<:AdaptivityGlue}) map(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) - consistent!(fv_c) + consistent!(fv_c) |> fetch return fv_c end diff --git a/src/MultilevelTools/SubpartitioningTools.jl b/src/MultilevelTools/SubpartitioningTools.jl index e05bdee0..b4cc80c2 100644 --- a/src/MultilevelTools/SubpartitioningTools.jl +++ b/src/MultilevelTools/SubpartitioningTools.jl @@ -39,4 +39,16 @@ function generate_level_parts(root_parts::AbstractArray,num_procs_x_level::Vecto level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l]) end return level_parts +end + +my_print(x::PVector,s) = my_print(partition(x),s) + +function my_print(x::MPIArray,s) + parts = linear_indices(x) + i_am_main(parts) && println(s) + map(parts,x) do p,xi + sleep(p*0.2) + println(" > $p: ", xi) + end + sleep(2) end \ No newline at end of file diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 43287f2e..344d7c37 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -60,7 +60,7 @@ function prolongate!(x::PVector, map(x.values,Ph.spaces,y.values) do x,Ph,y prolongate!(x,Ph,y) end - consistent!(x) + consistent!(x) |> fetch end # x \in SingleFESpace @@ -78,7 +78,7 @@ function inject!(x::PVector, # Exchange local contributions assemble!(x) - consistent!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later? + consistent!(x) |> fetch # TO CONSIDER: Is this necessary? Do we need ghosts for later? 
return x end @@ -92,7 +92,7 @@ function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFE # partial sums -> global sums assemble!(w_sums) # ghost -> owners - consistent!(w_sums) # repopulate ghosts with owner info + consistent!(w_sums) |> fetch # repopulate ghosts with owner info return w, w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index df48cd18..f4fe526b 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -112,7 +112,7 @@ function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSet rp, dxp, r, x = caches copy!(r,r_mat) - consistent!(r) + consistent!(r) |> fetch prolongate!(rp,Ph,r) solve!(dxp,Ap_ns,rp) inject!(x,Ph,dxp,w,w_sums) diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index aa4259d0..d18af0e7 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -34,7 +34,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask, mode=:solution) restrictions3, prolongations3 = ops3 - a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ + a(u,v,dΩ) = ∫(v⋅u)*dΩ l(v,dΩ) = ∫(v⋅u)*dΩ mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) @@ -93,7 +93,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) P3 = prolongations3[lev] mul!(yh3,P3,xH) - y_ref = pfill(1.0,partition(axes(Ah))) + y_ref = pfill(1.0,partition(axes(Ah,2))) tests = map(own_values(y_ref),own_values(yh1),own_values(yh2),own_values(yh3)) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index a0539f48..3a57e801 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -8,6 +8,7 @@ using Test using GridapSolvers using GridapSolvers.MultilevelTools +using GridapDistributed: redistribute_fe_function function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) GridapP4est.with(parts) do @@ -23,14 +24,19 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) new_parts = level_parts[1] # FE Spaces - order = 1 - u(x) = x[1] + x[2] + order = 2 + u(x) = x[1]^2 + x[2]^2 - 3.0*x[1]*x[2] reffe = ReferenceFE(lagrangian,Float64,order) glue = mh.levels[1].red_glue model_old = get_model_before_redist(mh.levels[1]) - VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") - UOLD = TrialFESpace(VOLD,u) + if i_am_in(old_parts) + VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") + UOLD = TrialFESpace(VOLD,u) + else + VOLD = nothing + UOLD = nothing + end model_new = get_model(mh.levels[1]) VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") @@ -53,10 +59,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end # Old -> New - uh_old_red = redistribute_fe_function(uh_old, - UNEW, - model_new, - glue) + uh_old_red = redistribute_fe_function(uh_old,UNEW,model_new,glue) n = sum(∫(uh_old_red)*dΩ_new) if i_am_in(old_parts) o = sum(∫(uh_old)*dΩ_old) @@ -64,11 +67,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end # New -> Old - uh_new_red = redistribute_fe_function(uh_new, - UOLD, - model_old, - glue; - reverse=true) + uh_new_red = 
redistribute_fe_function(uh_new,UOLD,model_old,glue;reverse=true) n = sum(∫(uh_new)*dΩ_new) if i_am_in(old_parts) o = sum(∫(uh_new_red)*dΩ_old) @@ -78,9 +77,9 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end -num_parts_x_level = [4,2,2] # Procs in each refinement level +num_parts_x_level = [4,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees -num_refs_coarse = 2 # Number of initial refinements +num_refs_coarse = 3 # Number of initial refinements num_ranks = num_parts_x_level[1] parts = with_mpi() do distribute diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 16eab53b..4fb128fb 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -20,13 +20,13 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) # FE Spaces - order = 1 - sol(x) = x[1] + x[2] + order = 2 + sol(x) = x[1]^2 + x[2]^2 - 3.0*x[1]*x[2] reffe = ReferenceFE(lagrangian,Float64,order) tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,sol) - quad_order = 3*order+1 + quad_order = 2*order+1 for lev in 1:num_levels-1 fparts = get_level_parts(mh,lev) cparts = get_level_parts(mh,lev+1) @@ -50,8 +50,9 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) # Coarse FEFunction -> Fine FEFunction, by projection ah(u,v) = ∫(v⋅u)*dΩh lh(v) = ∫(v⋅uH)*dΩh - Ah = assemble_matrix(ah,Uh,Vh) - bh = assemble_vector(lh,Vh) + oph = AffineFEOperator(ah,lh,Uh,Vh) + Ah = get_matrix(oph) + bh = get_vector(oph) xh = pfill(0.0,partition(axes(Ah,2))) IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) @@ -60,12 +61,14 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) _eh = uh-uH_projected eh = sum(∫(_eh⋅_eh)*dΩh) i_am_main(parts) && println("Error H2h: ", eh) + @test eh < 1.0e-10 # Fine FEFunction -> Coarse FEFunction, by projection aH(u,v) = ∫(v⋅u)*dΩH lH(v) = ∫(v⋅uH_projected)*dΩhH - AH = assemble_matrix(aH,UH,VH) - bH = assemble_vector(lH,VH) + opH = AffineFEOperator(aH,lH,UH,VH) + AH = get_matrix(opH) + bH = get_vector(opH) xH = pfill(0.0,partition(axes(AH,2))) IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) @@ -74,6 +77,15 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) _eH = uH-uh_projected eH = sum(∫(_eH⋅_eH)*dΩH) i_am_main(parts) && println("Error h2H: ", eH) + @test eh < 1.0e-10 + + # Coarse FEFunction -> Fine FEFunction, by interpolation + uH_i = interpolate(uH,Uh) + + _eh = uH_i-uh + eh = sum(∫(_eh⋅_eh)*dΩh) + i_am_main(parts) && println("Error h2H: ", eh) + @test eh < 1.0e-10 end end end diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index 69097719..c2110a92 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -98,7 +98,7 @@ function run(parts) x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat - consistent!(r1_mat) + consistent!(r1_mat) |> fetch r1 = pfill(0.0,partition(Vh.gids)) x1 = pfill(0.0,partition(Vh.gids)) @@ -108,11 +108,11 @@ function run(parts) xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) - consistent!(r1) + consistent!(r1) |> fetch PBS.prolongate!(rp,Ph,r1) # OK copy!(rp_mat,rp) - consistent!(rp_mat) + consistent!(rp_mat) |> fetch solve!(xp_mat,LUns,rp_mat) copy!(xp,xp_mat) # 
Some big numbers appear here.... @@ -124,7 +124,7 @@ function run(parts) x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat - consistent!(r2_mat) + consistent!(r2_mat) |> fetch solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson @@ -133,7 +133,7 @@ function run(parts) r3_mat = fh-Ah*x3_mat consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) - consistent!(x3_mat) + consistent!(x3_mat) |> fetch # Outputs res = Dict{String,Any}() diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 9c0a7638..62dc427e 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -89,7 +89,7 @@ Rns = numerical_setup(Rss,Ah) x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat -consistent!(r1_mat) +consistent!(r1_mat) |> fetch r1 = PVector(0.0,Vh.gids) x1 = PVector(0.0,Vh.gids) @@ -99,7 +99,7 @@ rp_mat = pfill(0.0,partition(axes(Ahp,2))) xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) -consistent!(r1) +consistent!(r1) |> fetch PBS.prolongate!(rp,Ph,r1) copy!(rp_mat,rp) @@ -115,7 +115,7 @@ copy!(x1_mat,x1) x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat -consistent!(r2_mat) +consistent!(r2_mat) |> fetch solve!(x2_mat,Mns,r2_mat) @@ -123,8 +123,8 @@ solve!(x2_mat,Mns,r2_mat) x3_mat = pfill(0.5,partition(axes(Ah,2))) r3_mat = fh-Ah*x3_mat -consistent!(r3_mat) +consistent!(r3_mat) |> fetch solve!(x3_mat,Rns,r3_mat) -consistent!(x3_mat) +consistent!(x3_mat) |> fetch end \ No newline at end of file From bf5c44d5c4a56e7fc740c4510f8863441193597f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 12:33:45 +1000 Subject: [PATCH 08/16] Fixes after merge --- .github/workflows/CI.yml | 2 +- Project.toml | 2 +- src/MultilevelTools/FESpaceHierarchies.jl | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 41bd4b39..fba28f3f 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -22,7 +22,7 @@ jobs: fail-fast: false matrix: version: - - '1.7' + - '1.9' os: - ubuntu-latest arch: diff --git a/Project.toml b/Project.toml index 02df4231..67db3237 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "GridapSolvers" uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] -version = "0.1.1" +version = "0.2.0" [deps] ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index ff51728e..4c337b86 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -44,16 +44,16 @@ end # Test/Trial FESpaces for ModelHierarchyLevels -function Gridap.FESpaces.TestFESpace( +function Gridap.FESpaces.FESpace( mh::ModelHierarchyLevel{A,B,C,Nothing},args...;kwargs...) where {A,B,C} - Vh = TestFESpace(get_model(mh),args...;kwargs...) + Vh = FESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,nothing) end -function Gridap.FESpaces.TestFESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} +function Gridap.FESpaces.FESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} cparts, _ = get_old_and_new_parts(mh.red_glue,Val(false)) - Vh = i_am_in(cparts) ? TestFESpace(get_model_before_redist(mh),args...;kwargs...) : nothing - Vh_red = TestFESpace(get_model(mh),args...;kwargs...) + Vh = i_am_in(cparts) ? 
FESpace(get_model_before_redist(mh),args...;kwargs...) : nothing + Vh_red = FESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end @@ -65,7 +65,7 @@ end # Test/Trial FESpaces for ModelHierarchies/FESpaceHierarchy -function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) +function Gridap.FESpaces.FESpace(mh::ModelHierarchy,args...;kwargs...) test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i = 1:num_levels(mh) parts = get_level_parts(mh,i) @@ -77,7 +77,7 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) FESpaceHierarchy(mh,test_spaces) end -function Gridap.FESpaces.TestFESpace( +function Gridap.FESpaces.FESpace( mh::ModelHierarchy, arg_vector::AbstractVector{<:Union{ReferenceFE,Tuple{<:Gridap.ReferenceFEs.ReferenceFEName,Any,Any}}}; kwargs...) From d581cad8721124737b17cf54b9340f5ef76fc44a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 14:03:06 +1000 Subject: [PATCH 09/16] Fixes for Patch based solvers --- .../mpi/PatchDecompositions.jl | 4 +-- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 31 +++++++++---------- .../seq/PatchBasedLinearSolvers.jl | 8 ++--- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 160890d8..85387f2c 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -58,8 +58,8 @@ function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteMode tag_to_entities = face_labeling.tag_to_entities d_to_dface_to_entity = face_labeling.d_to_dface_to_entity - # Create new tag & entity - interface_entity = maximum(map(maximum,tag_to_entities)) + 1 + # Create new tag & entity + interface_entity = maximum(map(x -> maximum(x;init=0),tag_to_entities)) + 1 push!(tag_to_entities,[interface_entity]) push!(tag_to_name,"interface") diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 344d7c37..c2ab79aa 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -15,20 +15,19 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) spaces = map(local_views(model), - local_views(patch_decomposition), - local_views(Vh), - root_gids.partition) do model, patch_decomposition, Vh, partition - patches_mask = fill(false,length(partition.lid_to_gid)) - patches_mask[partition.hid_to_lid] .= true # Mask ghost patch roots + local_views(patch_decomposition), + local_views(Vh), + partition(root_gids)) do model, patch_decomposition, Vh, partition + patches_mask = fill(false,local_length(partition)) + patches_mask[ghost_to_local(partition)] .= true # Mask ghost patch roots PatchFESpace(model,reffe,conformity,patch_decomposition,Vh;patches_mask=patches_mask) end - parts = get_parts(model) + # This PRange has no ghost dofs local_ndofs = map(num_free_dofs,spaces) global_ndofs = sum(local_ndofs) - first_gdof, _ = xscan(+,reduce,local_ndofs,init=1) - # This PRange has no ghost dofs - gids = PRange(parts,global_ndofs,local_ndofs,first_gdof) + patch_partition = variable_partition(local_ndofs,global_ndofs,false) + gids = PRange(patch_partition) return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end @@ -57,7 +56,7 @@ end function prolongate!(x::PVector, 
Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector) - map(x.values,Ph.spaces,y.values) do x,Ph,y + map(partition(x),local_views(Ph),partition(y)) do x,Ph,y prolongate!(x,Ph,y) end consistent!(x) |> fetch @@ -72,26 +71,26 @@ function inject!(x::PVector, w_sums::PVector) #consistent!(y) - map(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums + map(partition(x),local_views(Ph),partition(y),partition(w),partition(w_sums)) do x,Ph,y,w,w_sums inject!(x,Ph,y,w,w_sums) end # Exchange local contributions - assemble!(x) + assemble!(x) |> fetch consistent!(x) |> fetch # TO CONSIDER: Is this necessary? Do we need ghosts for later? return x end function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace,Vh) # Local weights and partial sums - w = PVector(0.0,Ph.gids) - w_sums = PVector(0.0,Vh.gids) - map(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph + w = pfill(0.0,partition(Ph.gids)) + w_sums = pfill(0.0,partition(Vh.gids)) + map(partition(w),partition(w_sums),local_views(Ph)) do w, w_sums, Ph compute_weight_operators!(Ph,Ph.Vh,w,w_sums) end # partial sums -> global sums - assemble!(w_sums) # ghost -> owners + assemble!(w_sums) |> fetch# ghost -> owners consistent!(w_sums) |> fetch # repopulate ghosts with owner info return w, w_sums diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index f4fe526b..1adba372 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -63,10 +63,10 @@ end function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace, Vh::GridapDistributed.DistributedSingleFieldFESpace, Ap::PSparseMatrix) - rp = PVector(0.0,Ph.gids) - dxp = PVector(0.0,Ph.gids) - r = PVector(0.0,Vh.gids) - x = PVector(0.0,Vh.gids) + rp = pfill(0.0,partition(Ph.gids)) + dxp = pfill(0.0,partition(Ph.gids)) + r = pfill(0.0,partition(Vh.gids)) + x = pfill(0.0,partition(Vh.gids)) return rp, dxp, r, x end From b87705e49899df51320ba18eb89d20e78536e97f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 17:14:18 +1000 Subject: [PATCH 10/16] More test fixes --- test/mpi/MUMPSSolversTests.jl | 10 +-- test/mpi/RichardsonSmoothersTests.jl | 8 +-- test/seq/DegenerateTests.jl | 61 ------------------- .../DistributedPatchFESpacesDebuggingTests.jl | 30 ++++----- test/seq/DistributedPatchFESpacesTests.jl | 23 +++---- 5 files changed, 34 insertions(+), 98 deletions(-) delete mode 100644 test/seq/DegenerateTests.jl diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl index 67527175..beaa8905 100644 --- a/test/mpi/MUMPSSolversTests.jl +++ b/test/mpi/MUMPSSolversTests.jl @@ -1,4 +1,4 @@ -module RichardsonSmoothersTests +module MUMPSSolversTests using Test using MPI @@ -30,10 +30,10 @@ function set_ksp_options(ksp) @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) end -function main(parts,partition) +function main(parts,nranks,domain_partition) GridapPETSc.with() do domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) @@ -72,12 +72,12 @@ function main(parts,partition) end end -partition = (32,32) +domain_partition = (32,32) num_ranks = (2,2) parts = with_mpi() do distribute distribute(LinearIndices((prod(num_ranks),))) end -main(parts,partition) +main(parts,num_ranks,domain_partition) MPI.Finalize() end 
\ No newline at end of file diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index 2131c87b..d13e62e1 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -11,10 +11,10 @@ using IterativeSolvers using GridapSolvers using GridapSolvers.LinearSolvers -function main(parts,partition) +function main(parts,nranks,domain_partition) GridapP4est.with(parts) do domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) @@ -57,12 +57,12 @@ function main(parts,partition) end end -partition = (32,32) +domain_partition = (32,32) num_ranks = (2,2) parts = with_mpi() do distribute distribute(LinearIndices((prod(num_ranks),))) end -main(parts,partition) +main(parts,num_ranks,domain_partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/seq/DegenerateTests.jl b/test/seq/DegenerateTests.jl deleted file mode 100644 index 7d213ab6..00000000 --- a/test/seq/DegenerateTests.jl +++ /dev/null @@ -1,61 +0,0 @@ -#module DegenerateTests - -using LinearAlgebra -using FillArrays -using Gridap -using Gridap.Helpers -using Gridap.Algebra -using Gridap.Geometry -using Gridap.FESpaces -using Gridap.Adaptivity -using Test -using IterativeSolvers - -include("../../src/GridapFixes.jl") - -function cg_solve(op) - A = get_matrix(op) - b = get_vector(op) - x = pfill(0.0,partition(axes(A,2))) - IterativeSolvers.cg!(x,A,b;verbose=true,reltol=1.0e-06) - return x -end - -#function main() - domain = (0,1,0,1) - partition = (0,0) - cmodel = CartesianDiscreteModel(domain,partition) - model = UnstructuredDiscreteModel(cmodel) - - order = 1 - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") - Uh = TrialFESpace(sol,Vh) - Ω = Triangulation(model) - dΩ = Measure(Ω,2*order+1) - - u_sol = interpolate(sol,Vh) - a(u,v) = ∫(v⋅u)*dΩ - l(v) = ∫(v⋅u_sol)*dΩ - op = AffineFEOperator(a,l,Uh,Vh) - - assemble_matrix(a,Uh,Vh) - - c = a(get_trial_fe_basis(Uh),get_fe_basis(Vh)) - first(c.dict) - - dofs = get_fe_dof_basis(Uh) - dofs(u_sol) - - x = cg_solve(op) - uh = FEFunction(x,Uh) - - eh = ∫(uh-u_sol)*dΩ - e = sum(eh) - println(e) -#end - -main() - -#end \ No newline at end of file diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index c2110a92..bea13993 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -14,16 +14,14 @@ using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS -#= -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) -=# +function run(ranks) + parts = with_debug() do distribute + distribute(LinearIndices((prod(ranks),))) + end -function run(parts) domain = (0.0,1.0,0.0,1.0) partition = (2,4) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,ranks,domain,partition) order = 0 reffe = ReferenceFE(raviart_thomas,Float64,order) @@ -36,10 +34,10 @@ function run(parts) w, w_sums = PBS.compute_weight_operators(Ph,Vh); - xP = PVector(1.0,Ph.gids) - yP = PVector(0.0,Ph.gids) - x = PVector(1.0,Vh.gids) - y = PVector(0.0,Vh.gids) + xP = pfill(1.0,partition(Ph.gids)) + yP = pfill(0.0,partition(Ph.gids)) + x = pfill(1.0,partition(Vh.gids)) + y = pfill(0.0,partition(Vh.gids)) 
PBS.prolongate!(yP,Ph,x) PBS.inject!(y,Ph,yP,w,w_sums) @@ -155,13 +153,11 @@ function run(parts) return model,PD,Ph,Vh,res end -backend = SequentialBackend() - -parts = get_part_ids(backend,(1,1)) -Ms,PDs,Phs,Vhs,res_single = run(parts); +ranks = (1,1) +Ms,PDs,Phs,Vhs,res_single = run(ranks); -parts = get_part_ids(backend,(1,2)) -Mm,PDm,Phm,Vhm,res_multi = run(parts); +ranks = (1,2) +Mm,PDm,Phm,Vhm,res_multi = run(ranks); println(repeat('#',80)) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 62dc427e..df3334fb 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -13,13 +13,14 @@ using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS -backend = SequentialBackend() ranks = (1,2) -parts = get_part_ids(backend,ranks) +parts = with_debug() do distribute + distribute(LinearIndices((prod(ranks),))) +end domain = (0.0,1.0,0.0,1.0) partition = (2,4) -model = CartesianDiscreteModel(parts,domain,partition) +model = CartesianDiscreteModel(parts,ranks,domain,partition) # order = 1 # reffe = ReferenceFE(lagrangian,Float64,order) @@ -33,10 +34,10 @@ Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) w, w_sums = PBS.compute_weight_operators(Ph,Vh); -xP = PVector(1.0,Ph.gids) -yP = PVector(0.0,Ph.gids) -x = PVector(1.0,Vh.gids) -y = PVector(0.0,Vh.gids) +xP = pfill(1.0,partition(Ph.gids)) +yP = pfill(0.0,partition(Ph.gids)) +x = pfill(1.0,partition(Vh.gids)) +y = pfill(0.0,partition(Vh.gids)) PBS.prolongate!(yP,Ph,x) PBS.inject!(y,Ph,yP,w,w_sums) @@ -91,10 +92,10 @@ x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat consistent!(r1_mat) |> fetch -r1 = PVector(0.0,Vh.gids) -x1 = PVector(0.0,Vh.gids) -rp = PVector(0.0,Ph.gids) -xp = PVector(0.0,Ph.gids) +r1 = pfill(0.0,partition(Vh.gids)) +x1 = pfill(0.0,partition(Vh.gids)) +rp = pfill(0.0,partition(Ph.gids)) +xp = pfill(0.0,partition(Ph.gids)) rp_mat = pfill(0.0,partition(axes(Ahp,2))) xp_mat = pfill(0.0,partition(axes(Ahp,2))) From 8e469c87535044919d6f8a63569c0498914bc627 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 17:34:28 +1000 Subject: [PATCH 11/16] More fixes, all tests working --- test/seq/DistributedPatchFESpacesTests.jl | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index df3334fb..03d5e5bb 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -19,16 +19,17 @@ parts = with_debug() do distribute end domain = (0.0,1.0,0.0,1.0) -partition = (2,4) -model = CartesianDiscreteModel(parts,ranks,domain,partition) +domain_partition = (2,4) +model = CartesianDiscreteModel(parts,ranks,domain,domain_partition) -# order = 1 -# reffe = ReferenceFE(lagrangian,Float64,order) -order = 0 -reffe = ReferenceFE(raviart_thomas,Float64,order) +order = 1 +reffe = ReferenceFE(lagrangian,Float64,order) +#order = 0 +#reffe = ReferenceFE(raviart_thomas,Float64,order) Vh = TestFESpace(model,reffe) PD = PBS.PatchDecomposition(model) -Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) +Ph = PBS.PatchFESpace(model,reffe,H1Conformity(),PD,Vh) +# Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) # ---- Testing Prolongation and Injection ---- # From 305fe184957864bd417bd30975ba81cd3baf0206 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 21:08:49 +1000 Subject: [PATCH 12/16] Update compats --- Project.toml | 
6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml index 67db3237..474c235e 100644 --- a/Project.toml +++ b/Project.toml @@ -19,10 +19,12 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] ArgParse = "1" FillArrays = "0.9, 0.10, 0.11, 0.12, 0.13, 1.0" -Gridap = "0.17" +Gridap = "0.17.18" GridapDistributed = "0.3" -GridapP4est = "0.2" +GridapP4est = "0.3" +GridapPETSc = "0.5" IterativeSolvers = "0.9" +MPI = "0.20" PartitionedArrays = "0.3" julia = "1.7" From b63a3d483f05a79a4c7a6bfa68347de2d6efcf12 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 21 Aug 2023 23:26:40 +1000 Subject: [PATCH 13/16] Added MPIPreferences to CI --- .github/workflows/CI.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index fba28f3f..65ee570d 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -15,7 +15,6 @@ jobs: name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} env: - JULIA_MPI_BINARY: "system" P4EST_ROOT_DIR: "/opt/p4est/2.2/" JULIA_PETSC_LIBRARY: "/opt/petsc/3.18/lib/libpetsc" strategy: @@ -93,6 +92,16 @@ jobs: --with-mpi=1 --with-64-bit-indices make make install + - name: add MPIPreferences + shell: julia --color=yes --project=. {0} + run: | + using Pkg + Pkg.add("MPIPreferences") + - name: use MPI system binary + shell: julia --color=yes --project=. {0} + run: | + using MPIPreferences + MPIPreferences.use_system_binary() - uses: julia-actions/julia-buildpkg@latest - run: echo $PWD - run: julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build(); Pkg.precompile()' From d21ed35ba46f2ed79430f360e2e73571396120d0 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 00:22:20 +1000 Subject: [PATCH 14/16] Small fix in tests --- test/runtests.jl | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 50b26a40..ae14d3e1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -32,6 +32,7 @@ function run_tests(testdir) "RedistributeToolsTests.jl", "RefinementToolsTests.jl", "RichardsonSmoothersTests.jl", + "ModelHierarchiesTests.jl", "GMGLinearSolversPoissonTests.jl", "GMGLinearSolversLaplacianTests.jl", "GMGLinearSolversVectorLaplacianTests.jl", @@ -41,13 +42,6 @@ function run_tests(testdir) "RestrictDofsTests.jl", "PRefinementGMGLinearSolversPoissonTests.jl"] np = 4 - #extra_args = "-s 2 2 -r 2" - extra_args = "" - elseif f in ["ModelHierarchiesTests.jl"] - np = 6 - extra_args = "" - elseif f in [""] - np = 1 extra_args = "" else np = nprocs From 1870d8aa8e5379c70a8d837415340381311de5d4 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 09:00:35 +1000 Subject: [PATCH 15/16] Small test fix --- test/seq/PatchLinearSolverTests.jl | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index afbbf2b6..c78beaca 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -10,8 +10,6 @@ module PatchLinearSolverTests using GridapSolvers using GridapSolvers.PatchBasedSmoothers - order=1 - function returns_PD_Ph_xh_Vh(model;style=GridapSolvers.PatchBasedSmoothers.PatchBoundaryExclude()) reffe = ReferenceFE(lagrangian,Float64,order) # reffe=ReferenceFE(lagrangian,VectorValue{2,Float64},order) @santiagobadia: For Vector Laplacian @@ -60,33 +58,39 @@ module 
PatchLinearSolverTests end ################################################## + order = 1 + rank_partition = (2,1) parts = with_debug() do distribute - distribute(LinearIndices((2,))) + distribute(LinearIndices((prod(rank_partition),))) end domain = (0.0,1.0,0.0,1.0) - partition = (2,3) + mesh_partition = (2,3) - model = CartesianDiscreteModel(domain,partition) + model = CartesianDiscreteModel(domain,mesh_partition) _,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) - dmodel = CartesianDiscreteModel(parts,domain,partition) + dmodel = CartesianDiscreteModel(parts,rank_partition,domain,mesh_partition) _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); @test num_free_dofs(Ph) == num_free_dofs(dPh) - @test all(own_values(dxh).items[1] .≈ xh[1:3]) - @test all(own_values(dxh).items[2] .≈ xh[4:end]) + @test all(partition(dxh).items[1] .≈ xh[1:3]) + @test all(partition(dxh).items[2] .≈ xh[4:end]) ################################################# - model = CartesianDiscreteModel(domain,partition) + model = CartesianDiscreteModel(domain,mesh_partition) PD,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) A,b = compute_matrix_vector(model,Vh) x = test_smoother(PD,Ph,Vh,A,b) - parts = get_part_ids(SequentialBackend(),(1,1)) - dmodel = CartesianDiscreteModel(parts,domain,partition) + rank_partition = (1,1) + parts = with_debug() do distribute + distribute(LinearIndices((prod(rank_partition),))) + end + + dmodel = CartesianDiscreteModel(parts,rank_partition,domain,mesh_partition) dPD,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); dA,db = compute_matrix_vector(dmodel,dVh); dx = test_smoother(dPD,dPh,dVh,dA,db) From 512a5cf8a89e62960e011dab46d5034a8660f64b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 22 Aug 2023 09:01:52 +1000 Subject: [PATCH 16/16] Small test fix --- test/seq/PatchLinearSolverTests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index c78beaca..01393c5e 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -75,8 +75,8 @@ module PatchLinearSolverTests _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); @test num_free_dofs(Ph) == num_free_dofs(dPh) - @test all(partition(dxh).items[1] .≈ xh[1:3]) - @test all(partition(dxh).items[2] .≈ xh[4:end]) + @test all(own_values(dxh).items[1] .≈ xh[1:4]) + @test all(own_values(dxh).items[2] .≈ xh[5:end]) #################################################
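
Note on the PartitionedArrays v0.3 migration applied throughout patches 10-16. A minimal
sketch of the new test setup, assuming the same packages already used in the tests above;
the rank layout, mesh size and FE space below are illustrative values, not taken from any
particular test file:

    using PartitionedArrays
    using Gridap, GridapDistributed

    # Ranks are now created with with_debug()/with_mpi() instead of
    # get_part_ids(SequentialBackend(),...) / get_part_ids(MPIBackend(),...).
    rank_partition = (2,1)                      # illustrative rank layout
    parts = with_debug() do distribute
      distribute(LinearIndices((prod(rank_partition),)))
    end

    # Distributed models take both the rank array and the rank layout.
    domain         = (0.0,1.0,0.0,1.0)
    mesh_partition = (4,4)                      # illustrative mesh size
    model = CartesianDiscreteModel(parts,rank_partition,domain,mesh_partition)

    # Vectors: PVector(v,gids) becomes pfill(v,partition(gids)), and local
    # pieces are accessed with partition(x)/own_values(x) instead of x.values.
    reffe = ReferenceFE(lagrangian,Float64,1)
    Vh = TestFESpace(model,reffe)
    x  = pfill(0.0,partition(Vh.gids))

    # Communication primitives now return tasks and must be fetched.
    assemble!(x)   |> fetch   # ghost contributions -> owners
    consistent!(x) |> fetch   # owner values -> ghosts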