diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 41bd4b39..65ee570d 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -15,14 +15,13 @@ jobs: name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} env: - JULIA_MPI_BINARY: "system" P4EST_ROOT_DIR: "/opt/p4est/2.2/" JULIA_PETSC_LIBRARY: "/opt/petsc/3.18/lib/libpetsc" strategy: fail-fast: false matrix: version: - - '1.7' + - '1.9' os: - ubuntu-latest arch: @@ -93,6 +92,16 @@ jobs: --with-mpi=1 --with-64-bit-indices make make install + - name: add MPIPreferences + shell: julia --color=yes --project=. {0} + run: | + using Pkg + Pkg.add("MPIPreferences") + - name: use MPI system binary + shell: julia --color=yes --project=. {0} + run: | + using MPIPreferences + MPIPreferences.use_system_binary() - uses: julia-actions/julia-buildpkg@latest - run: echo $PWD - run: julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build(); Pkg.precompile()' diff --git a/.gitignore b/.gitignore index f8509162..895712ba 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ .vscode Manifest.toml +LocalPreferences.toml diff --git a/Project.toml b/Project.toml index bace7095..474c235e 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "GridapSolvers" uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] -version = "0.1.1" +version = "0.2.0" [deps] ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" @@ -18,14 +18,14 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] ArgParse = "1" -FillArrays = "0.9, 0.10, 0.11, 0.12" -Gridap = "0.17.17" -GridapDistributed = "0.2.7" -GridapP4est = "0.2" -GridapPETSc = "0.4" +FillArrays = "0.9, 0.10, 0.11, 0.12, 0.13, 1.0" +Gridap = "0.17.18" +GridapDistributed = "0.3" +GridapP4est = "0.3" +GridapPETSc = "0.5" IterativeSolvers = "0.9" -MPI = "0.19" -PartitionedArrays = "0.2.15" +MPI = "0.20" +PartitionedArrays = "0.3" julia = "1.7" [extras] diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index 9cd424c8..d03ed8d3 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -88,7 +88,7 @@ function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:Abstrac parts = get_level_parts(mh,1) if i_am_in(parts) Ah = smatrices[1] - rh = PVector(0.0, Ah.cols) + rh = pfill(0.0, partition(axes(Ah,2))) cache = rh end return cache @@ -116,11 +116,11 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial - cache = map_parts(mat.owned_owned_values) do Ah + cache = map(own_values(mat)) do Ah ss = symbolic_setup(coarsest_solver, Ah) numerical_setup(ss, Ah) end - cache = cache.part + cache = PartitionedArrays.getany(cache) else # Parallel ss = symbolic_setup(coarsest_solver, mat) cache = numerical_setup(ss, mat) @@ -136,17 +136,17 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial - cache = map_parts(mat.owned_owned_values) do Ah + cache = map(own_values(mat)) do Ah rh = convert(PETScVector,fill(0.0,size(A,2))) xh = convert(PETScVector,fill(0.0,size(A,2))) ss = symbolic_setup(coarsest_solver, Ah) ns = numerical_setup(ss, Ah) return ns, xh, rh end - cache = cache.part + cache = PartitionedArrays.getany(cache) else # Parallel - rh = 
convert(PETScVector,PVector(0.0,mat.cols)) - xh = convert(PETScVector,PVector(0.0,mat.cols)) + rh = convert(PETScVector,pfill(0.0,partition(axes(mat,2)))) + xh = convert(PETScVector,pfill(0.0,partition(axes(mat,2)))) ss = symbolic_setup(coarsest_solver, mat) ns = numerical_setup(ss, mat) cache = ns, xh, rh @@ -156,14 +156,14 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi end function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix},lev::Integer) - dxh = PVector(0.0, smatrices[lev].cols) - Adxh = PVector(0.0, smatrices[lev].rows) + dxh = pfill(0.0,partition(axes(smatrices[lev],2))) + Adxh = pfill(0.0,partition(axes(smatrices[lev],1))) cparts = get_level_parts(mh,lev+1) if i_am_in(cparts) AH = smatrices[lev+1] - rH = PVector(0.0,AH.cols) - dxH = PVector(0.0,AH.cols) + rH = pfill(0.0,partition(axes(AH,2))) + dxH = pfill(0.0,partition(axes(AH,2))) else rH = nothing dxH = nothing @@ -183,9 +183,9 @@ function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMa return work_vectors end -function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,rh::PVector,caches) +function solve_coarsest_level!(parts::AbstractArray,::LinearSolver,xh::PVector,rh::PVector,caches) if (num_parts(parts) == 1) - map_parts(xh.owned_values,rh.owned_values) do xh, rh + map(own_values(xh),own_values(rh)) do xh, rh solve!(xh,caches,rh) end else @@ -193,10 +193,10 @@ function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,r end end -function solve_coarsest_level!(parts::AbstractPData,::PETScLinearSolver,xh::PVector,rh::PVector,caches) +function solve_coarsest_level!(parts::AbstractArray,::PETScLinearSolver,xh::PVector,rh::PVector,caches) solver_ns, xh_petsc, rh_petsc = caches if (num_parts(parts) == 1) - map_parts(xh.owned_values,rh.owned_values) do xh, rh + map(own_values(xh),own_values(rh)) do xh, rh copy!(rh_petsc,rh) solve!(xh_petsc,solver_ns,rh_petsc) copy!(xh,xh_petsc) diff --git a/src/LinearSolvers/JacobiLinearSolvers.jl b/src/LinearSolvers/JacobiLinearSolvers.jl index efe6ca63..29319cce 100644 --- a/src/LinearSolvers/JacobiLinearSolvers.jl +++ b/src/LinearSolvers/JacobiLinearSolvers.jl @@ -22,14 +22,14 @@ function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMa end function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix) - inv_diag=map_parts(A.owned_owned_values) do a + inv_diag=map(own_values(A)) do a 1.0 ./ diag(a) end return JacobiNumericalSetup(inv_diag) end function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::PSparseMatrix) - map_parts(ns.inv_diag,A.owned_owned_values) do inv_diag, a + map(ns.inv_diag,own_values(A)) do inv_diag, a inv_diag .= 1.0 ./ diag(a) end return ns @@ -43,7 +43,7 @@ end function Gridap.Algebra.solve!(x::PVector, ns::JacobiNumericalSetup, b::PVector) inv_diag = ns.inv_diag - map_parts(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b + map(inv_diag,own_values(x),own_values(b)) do inv_diag, x, b x .= inv_diag .* b end return x diff --git a/src/LinearSolvers/RichardsonSmoothers.jl b/src/LinearSolvers/RichardsonSmoothers.jl index c36d086c..39b45fce 100644 --- a/src/LinearSolvers/RichardsonSmoothers.jl +++ b/src/LinearSolvers/RichardsonSmoothers.jl @@ -40,8 +40,8 @@ function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A:: end function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::PSparseMatrix) - Adx = PVector(0.0,A.rows) - dx = 
PVector(0.0,A.cols) + Adx = pfill(0.0,partition(axes(A,1))) + dx = pfill(0.0,partition(axes(A,2))) Mns = numerical_setup(ss.Mss,A) return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns) end diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index d991bed6..9e882c4e 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -53,12 +53,12 @@ function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mod if i_am_in(cparts) model_h = get_model_before_redist(mh,lev) - Uh = get_fe_space_before_redist(sh,lev) - fv_h = PVector(0.0,Uh.gids) + Uh = get_fe_space_before_redist(sh,lev) + fv_h = pfill(0.0,partition(Uh.gids)) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) - UH = get_fe_space(sh,lev+1) - fv_H = PVector(0.0,UH.gids) + UH = get_fe_space(sh,lev+1) + fv_H = pfill(0.0,partition(UH.gids)) dv_H = (mode == :solution) ? get_dirichlet_dof_values(UH) : zero_dirichlet_values(UH) cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H @@ -102,7 +102,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: u,v = get_trial_fe_basis(UH), get_fe_basis(VH) data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir) AH,bH0 = assemble_matrix_and_vector(assem,data) - xH = PVector(0.0,AH.cols) + xH = pfill(0.0,partition(axes(AH,2))) bH = copy(bH0) cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem @@ -125,7 +125,7 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op Uh_red = get_fe_space(sh,lev) model_h_red = get_model(mh,lev) - fv_h_red = PVector(0.0,Uh_red.gids) + fv_h_red = pfill(0.0,partition(Uh_red.gids)) dv_h_red = (mode == :solution) ? 
get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red) glue = mh.levels[lev].red_glue @@ -214,7 +214,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout - exchange!(fv_h) + consistent!(fv_h) |> fetch restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout @@ -249,7 +249,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - exchange!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition @@ -259,7 +259,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer copy!(y,fv_H) # FE layout -> Matrix layout end - return y + return y end # D.2) Restriction, with redistribution, by projection @@ -270,11 +270,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - exchange!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) + consistent!(fv_h) |> fetch uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) @@ -284,7 +285,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer copy!(y,xH) end - return y + return y end # D.3) Restriction, with redistribution, by dof selection (only nodal dofs) @@ -295,11 +296,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - exchange!(fv_h_red) + consistent!(fv_h_red) |> fetch redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition if !isa(y,Nothing) + consistent!(fv_h) |> fetch restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout end diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index ef41ee1e..4c337b86 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -42,31 +42,30 @@ function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int) get_fe_space_before_redist(fh[lev]) end -function Gridap.FESpaces.TestFESpace( +# Test/Trial FESpaces for ModelHierarchyLevels + +function Gridap.FESpaces.FESpace( mh::ModelHierarchyLevel{A,B,C,Nothing},args...;kwargs...) where {A,B,C} - Vh = TestFESpace(get_model(mh),args...;kwargs...) + Vh = FESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,nothing) end -function Gridap.FESpaces.TestFESpace( - mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} - Vh = TestFESpace(get_model_before_redist(mh),args...;kwargs...) - Vh_red = TestFESpace(get_model(mh),args...;kwargs...) +function Gridap.FESpaces.FESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} + cparts, _ = get_old_and_new_parts(mh.red_glue,Val(false)) + Vh = i_am_in(cparts) ? FESpace(get_model_before_redist(mh),args...;kwargs...) 
: nothing + Vh_red = FESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end -function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,Nothing},u) where {A} - Uh = TrialFESpace(a.fe_space,u) - FESpaceHierarchyLevel(a.level,Uh,nothing) -end - -function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,B},u) where {A,B} - Uh = TrialFESpace(a.fe_space,u) - Uh_red = TrialFESpace(a.fe_space_red,u) +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel,args...;kwargs...) + Uh = !isa(a.fe_space,Nothing) ? TrialFESpace(a.fe_space,args...;kwargs...) : nothing + Uh_red = !isa(a.fe_space_red,Nothing) ? TrialFESpace(a.fe_space_red,args...;kwargs...) : nothing FESpaceHierarchyLevel(a.level,Uh,Uh_red) end -function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) +# Test/Trial FESpaces for ModelHierarchies/FESpaceHierarchy + +function Gridap.FESpaces.FESpace(mh::ModelHierarchy,args...;kwargs...) test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i = 1:num_levels(mh) parts = get_level_parts(mh,i) @@ -78,7 +77,7 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) FESpaceHierarchy(mh,test_spaces) end -function Gridap.FESpaces.TestFESpace( +function Gridap.FESpaces.FESpace( mh::ModelHierarchy, arg_vector::AbstractVector{<:Union{ReferenceFE,Tuple{<:Gridap.ReferenceFEs.ReferenceFEName,Any,Any}}}; kwargs...) @@ -119,6 +118,8 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) FESpaceHierarchy(a.mh,trial_spaces) end +# Computing system matrices + function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Integer) return compute_hierarchy_matrices(trials,a,l,Fill(qdegree,num_levels(trials))) end @@ -153,3 +154,14 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Func end return mats, A, b end + +function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) + spaces = map(local_views(U)) do U + if isa(U,Gridap.FESpaces.UnconstrainedFESpace) + U + else + U.space + end + end + return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) +end \ No newline at end of file diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl deleted file mode 100644 index be9460c3..00000000 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ /dev/null @@ -1,142 +0,0 @@ - -# DistributedCompositeMeasure - -function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{Dc,Dp}, - it::GridapDistributed.DistributedTriangulation{Dc,Dp}, - args...) where {Dc,Dp} - itrians = change_parts(local_views(it),get_parts(tt);default=void(BodyFittedTriangulation{Dc,Dp})) - - measures = map_parts(local_views(tt),itrians) do ttrian, itrian - Measure(ttrian,itrian,args...) 
- end - return GridapDistributed.DistributedMeasure(measures) -end - -function GridapDistributed.get_parts(x::GridapDistributed.DistributedFESpace) - PartitionedArrays.get_part_ids(local_views(x)) -end - -# change_parts - -function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) - x_new = map_parts(new_parts) do _p - if isa(x,AbstractPData) - x.part - else - default - end - end - return x_new -end - -function change_parts(::Type{<:GridapDistributed.DistributedCellField},x,new_parts) - if isa(x,GridapDistributed.DistributedCellField) - fields = change_parts(local_views(x),new_parts) - else - fields = change_parts(nothing,new_parts;default=void(CellField)) - end - return GridapDistributed.DistributedCellField(fields) -end - -# DistributedFESpaces - -function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) - spaces = map_parts(local_views(U)) do U - U.space - end - return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) -end - -# Void GridapDistributed structures - -struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.DistributedDiscreteModel{Dc,Dp} - parts::A - function VoidDistributedDiscreteModel(Dc::Int,Dp::Int,parts) - A = typeof(parts) - return new{Dc,Dp,A}(parts) - end -end - -function VoidDistributedDiscreteModel(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} - return VoidDistributedDiscreteModel(Dc,Dp,get_parts(model)) -end - -function GridapDistributed.get_parts(x::VoidDistributedDiscreteModel) - return x.parts -end - -struct VoidDistributedTriangulation{Dc,Dp,A} <: GridapDistributed.GridapType - parts::A - function VoidDistributedTriangulation(Dc::Int,Dp::Int,parts) - A = typeof(parts) - return new{Dc,Dp,A}(parts) - end -end - -function GridapDistributed.get_parts(x::VoidDistributedTriangulation) - return x.parts -end - -function VoidDistributedTriangulation(trian::GridapDistributed.DistributedTriangulation{Dc,Dp}) where {Dc,Dp} - return VoidDistributedTriangulation(Dc,Dp,get_parts(trian)) -end - -function Gridap.Geometry.Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} - return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) -end - -struct VoidDistributedFESpace{A} <: GridapDistributed.GridapType - parts::A -end - -function GridapDistributed.get_parts(x::VoidDistributedFESpace) - return x.parts -end - -function Gridap.FESpaces.TestFESpace(model::VoidDistributedDiscreteModel,args...;kwargs...) - return VoidDistributedFESpace(get_parts(model)) -end - -function Gridap.FESpaces.TrialFESpace(space::VoidDistributedFESpace,args...;kwargs...) 
- return VoidDistributedFESpace(get_parts(space)) -end - -function FESpaces.get_triangulation(f::VoidDistributedFESpace,model::VoidDistributedDiscreteModel) - return VoidDistributedTriangulation(model) -end - -# Void Gridap structures - -function void(::Type{<:CartesianDiscreteModel{Dc,Dp}}) where {Dc,Dp} - #domain = Tuple(fill(0.0,2*Dc)) - domain = Tuple(repeat([0,1],Dc)) - partition = Tuple(fill(0,Dc)) - return CartesianDiscreteModel(domain,partition) -end - -function void(::Type{<:UnstructuredDiscreteModel{Dc,Dp}}) where {Dc,Dp} - cmodel = void(CartesianDiscreteModel{Dc,Dp}) - return UnstructuredDiscreteModel(cmodel) -end - -function void(::Type{<:AdaptivityGlue}) - f2c_faces_map = [Int32[],Int32[],Int32[]] - fcell_to_child_id = Int32[] - rrules = Fill(void(RefinementRule),0) - return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,rrules) -end - -function void(::Type{<:RefinementRule}) - reffe = Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,1) - return RefinementRule(reffe,1) -end - -function void(::Type{<:BodyFittedTriangulation{Dc,Dp}}) where {Dc,Dp} - model = void(UnstructuredDiscreteModel{Dc,Dp}) - return Gridap.Geometry.Triangulation(model) -end - -function void(::Type{<:CellField}) - trian = void(BodyFittedTriangulation{2,2}) - return Gridap.CellData.CellField(0.0,trian,ReferenceDomain()) -end diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl index 2c41cc5c..ae2f8efb 100644 --- a/src/MultilevelTools/GridapFixes.jl +++ b/src/MultilevelTools/GridapFixes.jl @@ -1,35 +1,4 @@ -function Gridap.Adaptivity.change_domain_n2o(f_fine,ftrian::Gridap.Adaptivity.AdaptedTriangulation{Dc},ctrian::Gridap.Geometry.Triangulation,glue::Gridap.Adaptivity.AdaptivityGlue{<:Gridap.Adaptivity.RefinementGlue}) where Dc - fglue = Gridap.Geometry.get_glue(ftrian,Val(Dc)) - cglue = Gridap.Geometry.get_glue(ctrian,Val(Dc)) - - @notimplementedif Gridap.Geometry.num_point_dims(ftrian) != Dc - @notimplementedif isa(cglue,Nothing) - - if (num_cells(ctrian) != 0) - ### New Triangulation -> New Model - fine_tface_to_field = Gridap.CellData.get_data(f_fine) - fine_mface_to_field = Gridap.Geometry.extend(fine_tface_to_field,fglue.mface_to_tface) - - ### New Model -> Old Model - # f_c2f[i_coarse] = [f_fine[i_fine_1], ..., f_fine[i_fine_nChildren]] - f_c2f = Gridap.Adaptivity.n2o_reindex(fine_mface_to_field,glue) - - child_ids = Gridap.Adaptivity.n2o_reindex(glue.n2o_cell_to_child_id,glue) - rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue) - coarse_mface_to_field = lazy_map(Gridap.Adaptivity.FineToCoarseField,f_c2f,rrules,child_ids) - - ### Old Model -> Old Triangulation - coarse_tface_to_field = lazy_map(Reindex(coarse_mface_to_field),cglue.tface_to_mface) - f_coarse = lazy_map(Broadcasting(∘),coarse_tface_to_field,cglue.tface_to_mface_map) - - return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain()) - else - f_coarse = Fill(Gridap.Fields.ConstantField(0.0),num_cells(fcoarse)) - return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain()) - end -end - function Base.map(::typeof(Gridap.Arrays.testitem), a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) a2=Gridap.Arrays.testitem(a[2]) diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 8c50802c..86709463 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -18,7 +18,7 @@ end """ """ struct 
ModelHierarchy - level_parts :: Vector{PartitionedArrays.AbstractPData} + level_parts :: Vector{PartitionedArrays.AbstractArray} levels :: Vector{ModelHierarchyLevel} end @@ -50,7 +50,7 @@ has_refinement(a::ModelHierarchyLevel{A,Nothing}) where A = false each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. """ -function ModelHierarchy(root_parts ::AbstractPData, +function ModelHierarchy(root_parts ::AbstractArray, model ::GridapDistributed.DistributedDiscreteModel, num_procs_x_level ::Vector{<:Integer}; mesh_refinement = true, @@ -58,10 +58,10 @@ function ModelHierarchy(root_parts ::AbstractPData, # Request correct number of parts from MAIN model_parts = get_parts(model) - my_num_parts = map_parts(root_parts) do _p + my_num_parts = map(root_parts) do _p num_parts(model_parts) # == -1 if !i_am_in(my_parts) end - main_num_parts = get_main_part(my_num_parts) + main_num_parts = PartitionedArrays.getany(emit(my_num_parts)) if main_num_parts == num_procs_x_level[end] # Coarsest model if mesh_refinement @@ -80,12 +80,12 @@ function ModelHierarchy(root_parts ::AbstractPData, @error "Model parts do not correspond to coarsest or finest parts!" end -function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData, +function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractArray{T}, bottom_model::GridapDistributed.DistributedDiscreteModel, - num_procs_x_level::Vector{<:Integer}) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_procs_x_level::Vector{<:Integer}) where T + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[num_levels] = get_parts(bottom_model) meshes[num_levels] = ModelHierarchyLevel(num_levels,bottom_model,nothing,nothing,nothing) @@ -103,15 +103,15 @@ function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData end mh = ModelHierarchy(level_parts,meshes) - return convert_to_void_models(mh) + return mh end -function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, +function _model_hierarchy_without_refinement_top_down(root_parts::AbstractArray{T}, top_model::GridapDistributed.DistributedDiscreteModel, - num_procs_x_level::Vector{<:Integer}) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_procs_x_level::Vector{<:Integer}) where T + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[1] = get_parts(top_model) model = top_model @@ -129,17 +129,17 @@ function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) mh = ModelHierarchy(level_parts,meshes) - return convert_to_void_models(mh) + return mh end -function _model_hierarchy_by_refinement(root_parts::AbstractPData, +function _model_hierarchy_by_refinement(root_parts::AbstractArray{T}, coarsest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; - num_refs_x_level=nothing) + num_refs_x_level=nothing) where T # TODO: Implement support for 
num_refs_x_level? (future work) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[num_levels] = get_parts(coarsest_model) meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) @@ -164,14 +164,14 @@ function _model_hierarchy_by_refinement(root_parts::AbstractPData, return convert_to_adapted_models(mh) end -function _model_hierarchy_by_coarsening(root_parts::AbstractPData, +function _model_hierarchy_by_coarsening(root_parts::AbstractArray{T}, finest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; - num_refs_x_level=nothing) + num_refs_x_level=nothing) where T # TODO: Implement support for num_refs_x_level? (future work) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(root_parts)}(undef,num_levels) - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_levels = length(num_procs_x_level) + level_parts = Vector{Union{typeof(root_parts),GridapDistributed.MPIVoidVector{T}}}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) level_parts[1] = get_parts(finest_model) model = finest_model @@ -202,34 +202,16 @@ function convert_to_adapted_models(mh::ModelHierarchy) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 cparts = get_level_parts(mh,lev+1) + mhlev = get_level(mh,lev) if i_am_in(cparts) - model = get_model_before_redist(mh,lev) - parent = get_model(mh,lev+1) - ref_glue = mh.levels[lev].ref_glue - model_ref = GridapDistributed.DistributedAdaptedDiscreteModel(model,parent,ref_glue) - else model = get_model_before_redist(mh,lev) - model_ref = VoidDistributedDiscreteModel(model) - end - levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) - end - levels[nlevs] = mh.levels[nlevs] - - return ModelHierarchy(mh.level_parts,levels) -end - -function convert_to_void_models(mh::ModelHierarchy) - nlevs = num_levels(mh) - levels = Vector{ModelHierarchyLevel}(undef,nlevs) - for lev in 1:nlevs-1 - cparts = get_level_parts(mh,lev+1) - if i_am_in(cparts) - model_ref = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) + ref_glue = mhlev.ref_glue + model_ref = GridapDistributed.DistributedAdaptedDiscreteModel(model,parent,ref_glue) else - model = get_model_before_redist(mh,lev) - model_ref = VoidDistributedDiscreteModel(model) + model_ref = nothing end - levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + levels[lev] = ModelHierarchyLevel(lev,model_ref,mhlev.ref_glue,mhlev.model_red,mhlev.red_glue) end levels[nlevs] = mh.levels[nlevs] diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 447ae753..6072714b 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -11,18 +11,20 @@ using Gridap.Geometry using Gridap.FESpaces using Gridap.Adaptivity using PartitionedArrays + using GridapDistributed +using GridapDistributed: redistribute_cell_dofs, redistribute_cell_dofs!, get_redistribute_cell_dofs_cache +using GridapDistributed: redistribute_free_values, redistribute_free_values!, 
get_redistribute_free_values_cache +using GridapDistributed: redistribute_fe_function +using GridapDistributed: get_old_and_new_parts +import GridapDistributed: generate_subparts import LinearAlgebra: mul! import GridapDistributed: local_views -export change_parts -export generate_level_parts -export generate_subparts - -export redistribute_fe_function -export redistribute_free_values! +export change_parts, num_parts, i_am_in +export generate_level_parts, generate_subparts export ModelHierarchy export num_levels, get_level, get_level_parts @@ -38,10 +40,8 @@ export setup_transfer_operators export mul! include("SubpartitioningTools.jl") -include("GridapDistributedExtensions.jl") include("GridapFixes.jl") include("RefinementTools.jl") -include("RedistributeTools.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") include("DistributedGridTransferOperators.jl") diff --git a/src/MultilevelTools/RedistributeTools.jl b/src/MultilevelTools/RedistributeTools.jl deleted file mode 100644 index 54813a44..00000000 --- a/src/MultilevelTools/RedistributeTools.jl +++ /dev/null @@ -1,251 +0,0 @@ -function _allocate_cell_wise_dofs(cell_to_ldofs) - map_parts(cell_to_ldofs) do cell_to_ldofs - cache = array_cache(cell_to_ldofs) - ncells = length(cell_to_ldofs) - ptrs = Vector{Int32}(undef,ncells+1) - for cell in 1:ncells - ldofs = getindex!(cache,cell_to_ldofs,cell) - ptrs[cell+1] = length(ldofs) - end - PartitionedArrays.length_to_ptrs!(ptrs) - ndata = ptrs[end]-1 - data = Vector{Float64}(undef,ndata) - PartitionedArrays.Table(data,ptrs) - end -end - -function _update_cell_dof_values_with_local_info!(cell_dof_values_new, - cell_dof_values_old, - new2old) - map_parts(cell_dof_values_new, - cell_dof_values_old, - new2old) do cell_dof_values_new,cell_dof_values_old,new2old - ocache = array_cache(cell_dof_values_old) - for (ncell,ocell) in enumerate(new2old) - if ocell!=0 - # Copy ocell to ncell - oentry = getindex!(ocache,cell_dof_values_old,ocell) - range = cell_dof_values_new.ptrs[ncell]:cell_dof_values_new.ptrs[ncell+1]-1 - cell_dof_values_new.data[range] .= oentry - end - end - end -end - -function _allocate_comm_data(num_dofs_x_cell,lids) - map_parts(num_dofs_x_cell,lids) do num_dofs_x_cell,lids - n = length(lids) - ptrs = Vector{Int32}(undef,n+1) - ptrs.= 0 - for i = 1:n - for j = lids.ptrs[i]:lids.ptrs[i+1]-1 - ptrs[i+1] = ptrs[i+1] + num_dofs_x_cell.data[j] - end - end - PartitionedArrays.length_to_ptrs!(ptrs) - ndata = ptrs[end]-1 - data = Vector{Float64}(undef,ndata) - PartitionedArrays.Table(data,ptrs) - end -end - -function _pack_snd_data!(snd_data,cell_dof_values,snd_lids) - map_parts(snd_data,cell_dof_values,snd_lids) do snd_data,cell_dof_values,snd_lids - cache = array_cache(cell_dof_values) - s = 1 - for i = 1:length(snd_lids) - for j = snd_lids.ptrs[i]:snd_lids.ptrs[i+1]-1 - cell = snd_lids.data[j] - ldofs = getindex!(cache,cell_dof_values,cell) - - e = s+length(ldofs)-1 - range = s:e - snd_data.data[range] .= ldofs - s = e+1 - end - end - end -end - -function _unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids) - map_parts(cell_dof_values,rcv_data,rcv_lids) do cell_dof_values,rcv_data,rcv_lids - s = 1 - for i = 1:length(rcv_lids.ptrs)-1 - for j = rcv_lids.ptrs[i]:rcv_lids.ptrs[i+1]-1 - cell = rcv_lids.data[j] - range_cell_dof_values = cell_dof_values.ptrs[cell]:cell_dof_values.ptrs[cell+1]-1 - - e = s+length(range_cell_dof_values)-1 - range_rcv_data = s:e - cell_dof_values.data[range_cell_dof_values] .= rcv_data.data[range_rcv_data] - s = e+1 - end - end - end -end - -function 
get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{false}) - return glue.lids_rcv, glue.lids_snd, glue.parts_rcv, glue.parts_snd, glue.new2old -end - -function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{true}) - return glue.lids_snd, glue.lids_rcv, glue.parts_snd, glue.parts_rcv, glue.old2new -end - -function _num_dofs_x_cell(cell_dofs_array,lids) - map_parts(cell_dofs_array,lids) do cell_dofs_array, lids - data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array) ] - PartitionedArrays.Table(data,lids.ptrs) - end -end - -function get_redistribute_cell_dofs_cache(cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) - - cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) - cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) - - num_dofs_x_cell_snd = _num_dofs_x_cell(cell_dof_values_old, lids_snd) - num_dofs_x_cell_rcv = _num_dofs_x_cell(cell_dof_ids_new, lids_rcv) - snd_data = _allocate_comm_data(num_dofs_x_cell_snd, lids_snd) - rcv_data = _allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv) - - cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) - - caches = snd_data, rcv_data, cell_dof_values_new - return caches -end - -function redistribute_cell_dofs(cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - return redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) -end - -function redistribute_cell_dofs!(caches, - cell_dof_values_old, - cell_dof_ids_new, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - snd_data, rcv_data, cell_dof_values_new = caches - lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) - - cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) - cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) - - _pack_snd_data!(snd_data,cell_dof_values_old,lids_snd) - - tout = async_exchange!(rcv_data, - snd_data, - parts_rcv, - parts_snd, - PartitionedArrays._empty_tasks(parts_rcv)) - map_parts(schedule,tout) - - # We have to build the owned part of "cell_dof_values_new" out of - # 1. cell_dof_values_old (for those cells s.t. new2old[:]!=0) - # 2. cell_dof_values_new_rcv (for those cells s.t. 
new2old[:]=0) - _update_cell_dof_values_with_local_info!(cell_dof_values_new, - cell_dof_values_old, - new2old) - - map_parts(wait,tout) - _unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) - - # Now that every part knows it's new owned dofs, exchange ghosts - new_parts = get_parts(model_new) - cell_dof_values_new = change_parts(cell_dof_values_new,new_parts) - if i_am_in(new_parts) - fgids = get_cell_gids(model_new) - exchange!(cell_dof_values_new,fgids.exchanger) - end - - return cell_dof_values_new -end - -function get_redistribute_free_values_cache(fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing - cell_dof_ids_new = !isa(fv_new,Nothing) ? map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing - caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - return caches -end - -function redistribute_free_values!(fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - caches = get_redistribute_free_values_cache(fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) - return redistribute_free_values!(caches,fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) -end - -function redistribute_free_values!(caches, - fv_new::Union{PVector,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - fv_old::Union{PVector,Nothing}, - dv_old::Union{AbstractPData,Nothing}, - Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing - cell_dof_ids_new = !isa(fv_new,Nothing) ? map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing - cell_dof_values_new = redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - # Gather the new free dofs - if !isa(fv_new,Nothing) - Gridap.FESpaces.gather_free_values!(fv_new,Uh_new,cell_dof_values_new) - end - return fv_new -end - -function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSingleFieldFEFunction,Nothing}, - Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, - model_new, - glue::GridapDistributed.RedistributeGlue; - reverse=false) - - cell_dof_values_old = !isa(uh_old,Nothing) ? map_parts(get_cell_dof_values,local_views(uh_old)) : nothing - cell_dof_ids_new = !isa(Uh_new,VoidDistributedFESpace) ? 
map_parts(get_cell_dof_ids,local_views(Uh_new)) : nothing - cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - - # Assemble the new FEFunction - if i_am_in(get_parts(Uh_new)) - free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) - free_values = PVector(free_values,Uh_new.gids) - uh_new = FEFunction(Uh_new,free_values,dirichlet_values) - return uh_new - else - return nothing - end -end - -function Gridap.FESpaces.gather_free_and_dirichlet_values(f::GridapDistributed.DistributedFESpace,cv) - free_values, dirichlet_values = map_parts(local_views(f),cv) do f, cv - Gridap.FESpaces.gather_free_and_dirichlet_values(f,cv) - end - return free_values, dirichlet_values -end \ No newline at end of file diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 6eca822c..a9449174 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -1,19 +1,19 @@ # DistributedAdaptedTriangulations -const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}} +const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractArray{<:AdaptedTriangulation{Dc,Dp}}} # Restriction of dofs function restrict_dofs!(fv_c::PVector, fv_f::PVector, - dv_f::AbstractPData, + dv_f::AbstractArray, U_f ::GridapDistributed.DistributedSingleFieldFESpace, U_c ::GridapDistributed.DistributedSingleFieldFESpace, - glue::AbstractPData{<:AdaptivityGlue}) + glue::AbstractArray{<:AdaptivityGlue}) - map_parts(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) - exchange!(fv_c) + map(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) + consistent!(fv_c) |> fetch return fv_c end diff --git a/src/MultilevelTools/SubpartitioningTools.jl b/src/MultilevelTools/SubpartitioningTools.jl index 00d80187..b4cc80c2 100644 --- a/src/MultilevelTools/SubpartitioningTools.jl +++ b/src/MultilevelTools/SubpartitioningTools.jl @@ -1,27 +1,37 @@ -function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) - root_comm = root_parts.comm - rank = MPI.Comm_rank(root_comm) - size = MPI.Comm_size(root_comm) - Gridap.Helpers.@check all(subpart_size .<= size) - Gridap.Helpers.@check all(subpart_size .>= 1) - - if rank < subpart_size - comm = MPI.Comm_split(root_comm, 0, 0) +function num_parts(comm::MPI.Comm) + if comm != MPI.COMM_NULL + nparts = MPI.Comm_size(comm) else - comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + nparts = -1 end - return get_part_ids(comm) + nparts end -function generate_level_parts(root_parts::AbstractPData,last_level_parts::AbstractPData,level_parts_size::Integer) +num_parts(comm::MPIArray) = num_parts(comm.comm) +num_parts(comm::GridapDistributed.MPIVoidVector) = num_parts(comm.comm) + +function get_part_id(comm::MPI.Comm) + if comm != MPI.COMM_NULL + id = MPI.Comm_rank(comm)+1 + else + id = -1 + end + id +end + +i_am_in(comm::MPI.Comm) = get_part_id(comm) >=0 +i_am_in(comm::MPIArray) = i_am_in(comm.comm) +i_am_in(comm::GridapDistributed.MPIVoidVector) = i_am_in(comm.comm) + +function generate_level_parts(root_parts::AbstractArray,last_level_parts::AbstractArray,level_parts_size::Integer) if level_parts_size == num_parts(last_level_parts) return last_level_parts end return 
generate_subparts(root_parts,level_parts_size) end -function generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer}) +function generate_level_parts(root_parts::AbstractArray,num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(parts)}(undef,num_levels) level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1]) @@ -29,4 +39,16 @@ function generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vecto level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l]) end return level_parts +end + +my_print(x::PVector,s) = my_print(partition(x),s) + +function my_print(x::MPIArray,s) + parts = linear_indices(x) + i_am_main(parts) && println(s) + map(parts,x) do p,xi + sleep(p*0.2) + println(" > $p: ", xi) + end + sleep(2) end \ No newline at end of file diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 36c5dcba..85387f2c 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -10,7 +10,7 @@ function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} mark_interface_facets!(model) - patch_decompositions = map_parts(local_views(model)) do lmodel + patch_decompositions = map(local_views(model)) do lmodel PatchDecomposition(lmodel; Dr=Dr, patch_boundary_style=patch_boundary_style, @@ -35,7 +35,7 @@ function PatchDecomposition(mh::ModelHierarchy;kwargs...) end function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) - trians = map_parts(a.patch_decompositions) do a + trians = map(a.patch_decompositions) do a Triangulation(a) end return GridapDistributed.DistributedTriangulation(trians,a.model) @@ -43,7 +43,7 @@ end function get_patch_root_dim(a::DistributedPatchDecomposition) patch_root_dim = -1 - map_parts(a.patch_decompositions) do patch_decomposition + map(a.patch_decompositions) do patch_decomposition patch_root_dim = patch_decomposition.Dr end return patch_root_dim @@ -53,13 +53,13 @@ function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteMode face_labeling = get_face_labeling(model) topo = get_grid_topology(model) - map_parts(local_views(face_labeling),local_views(topo)) do face_labeling, topo + map(local_views(face_labeling),local_views(topo)) do face_labeling, topo tag_to_name = face_labeling.tag_to_name tag_to_entities = face_labeling.tag_to_entities d_to_dface_to_entity = face_labeling.d_to_dface_to_entity - # Create new tag & entity - interface_entity = maximum(map(maximum,tag_to_entities)) + 1 + # Create new tag & entity + interface_entity = maximum(map(x -> maximum(x;init=0),tag_to_entities)) + 1 push!(tag_to_entities,[interface_entity]) push!(tag_to_name,"interface") diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 5ba023b3..c2ab79aa 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -14,21 +14,20 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, Vh::GridapDistributed.DistributedSingleFieldFESpace) root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) - spaces = map_parts(local_views(model), - local_views(patch_decomposition), - local_views(Vh), - root_gids.partition) do model, patch_decomposition, Vh, partition - 
patches_mask = fill(false,length(partition.lid_to_gid)) - patches_mask[partition.hid_to_lid] .= true # Mask ghost patch roots + spaces = map(local_views(model), + local_views(patch_decomposition), + local_views(Vh), + partition(root_gids)) do model, patch_decomposition, Vh, partition + patches_mask = fill(false,local_length(partition)) + patches_mask[ghost_to_local(partition)] .= true # Mask ghost patch roots PatchFESpace(model,reffe,conformity,patch_decomposition,Vh;patches_mask=patches_mask) end - parts = get_parts(model) - local_ndofs = map_parts(num_free_dofs,spaces) - global_ndofs = sum(local_ndofs) - first_gdof, _ = xscan(+,reduce,local_ndofs,init=1) # This PRange has no ghost dofs - gids = PRange(parts,global_ndofs,local_ndofs,first_gdof) + local_ndofs = map(num_free_dofs,spaces) + global_ndofs = sum(local_ndofs) + patch_partition = variable_partition(local_ndofs,global_ndofs,false) + gids = PRange(patch_partition) return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end @@ -57,10 +56,10 @@ end function prolongate!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector) - map_parts(x.values,Ph.spaces,y.values) do x,Ph,y + map(partition(x),local_views(Ph),partition(y)) do x,Ph,y prolongate!(x,Ph,y) end - exchange!(x) + consistent!(x) |> fetch end # x \in SingleFESpace @@ -71,28 +70,28 @@ function inject!(x::PVector, w::PVector, w_sums::PVector) - #exchange!(y) - map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums + #consistent!(y) + map(partition(x),local_views(Ph),partition(y),partition(w),partition(w_sums)) do x,Ph,y,w,w_sums inject!(x,Ph,y,w,w_sums) end # Exchange local contributions - assemble!(x) - exchange!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later? + assemble!(x) |> fetch + consistent!(x) |> fetch # TO CONSIDER: Is this necessary? Do we need ghosts for later? 
return x end function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace,Vh) # Local weights and partial sums - w = PVector(0.0,Ph.gids) - w_sums = PVector(0.0,Vh.gids) - map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph + w = pfill(0.0,partition(Ph.gids)) + w_sums = pfill(0.0,partition(Vh.gids)) + map(partition(w),partition(w_sums),local_views(Ph)) do w, w_sums, Ph compute_weight_operators!(Ph,Ph.Vh,w,w_sums) end # partial sums -> global sums - assemble!(w_sums) # ghost -> owners - exchange!(w_sums) # repopulate ghosts with owner info + assemble!(w_sums) |> fetch# ghost -> owners + consistent!(w_sums) |> fetch # repopulate ghosts with owner info return w, w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 171188dc..1adba372 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -63,10 +63,10 @@ end function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace, Vh::GridapDistributed.DistributedSingleFieldFESpace, Ap::PSparseMatrix) - rp = PVector(0.0,Ph.gids) - dxp = PVector(0.0,Ph.gids) - r = PVector(0.0,Vh.gids) - x = PVector(0.0,Vh.gids) + rp = pfill(0.0,partition(Ph.gids)) + dxp = pfill(0.0,partition(Ph.gids)) + r = pfill(0.0,partition(Vh.gids)) + x = pfill(0.0,partition(Vh.gids)) return rp, dxp, r, x end @@ -79,11 +79,11 @@ function _allocate_row_vector(A::AbstractMatrix) end function _allocate_col_vector(A::PSparseMatrix) - PVector(0.0,A.cols) + pfill(0.0,partition(axes(A,2))) end function _allocate_row_vector(A::PSparseMatrix) - PVector(0.0,A.rows) + pfill(0.0,partition(axes(A,1))) end function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A::AbstractMatrix) @@ -112,7 +112,7 @@ function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSet rp, dxp, r, x = caches copy!(r,r_mat) - exchange!(r) + consistent!(r) |> fetch prolongate!(rp,Ph,r) solve!(dxp,Ap_ns,rp) inject!(x,Ph,dxp,w,w_sums) diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 276cffdf..d18af0e7 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -34,7 +34,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask, mode=:solution) restrictions3, prolongations3 = ops3 - a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ + a(u,v,dΩ) = ∫(v⋅u)*dΩ l(v,dΩ) = ∫(v⋅u)*dΩ mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) @@ -45,17 +45,17 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) if i_am_in(parts_h) i_am_main(parts_h) && println("Lev : ", lev) Ah = mats[lev] - xh = PVector(1.0,Ah.cols) - yh1 = PVector(0.0,Ah.cols) - yh2 = PVector(0.0,Ah.cols) - yh3 = PVector(0.0,Ah.cols) + xh = pfill(1.0,partition(axes(Ah,2))) + yh1 = pfill(0.0,partition(axes(Ah,2))) + yh2 = pfill(0.0,partition(axes(Ah,2))) + yh3 = pfill(0.0,partition(axes(Ah,2))) if i_am_in(parts_H) AH = mats[lev+1] - xH = PVector(1.0,AH.cols) - yH1 = PVector(0.0,AH.cols) - yH2 = PVector(0.0,AH.cols) - yH3 = PVector(0.0,AH.cols) + xH = pfill(1.0,partition(axes(AH,2))) + yH1 = pfill(0.0,partition(axes(AH,2))) + yH2 = pfill(0.0,partition(axes(AH,2))) + yH3 = pfill(0.0,partition(axes(AH,2))) else xH = nothing yH1 = nothing @@ -75,11 +75,11 @@ function 
run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) mul!(yH3,R3,xh) if i_am_in(parts_H) - y_ref = PVector(1.0,AH.cols) - tests = map_parts(y_ref.owned_values,yH1.owned_values,yH2.owned_values,yH3.owned_values) do y_ref,y1,y2,y3 + y_ref = pfill(1.0,partition(axes(AH,2))) + tests = map(own_values(y_ref),own_values(yH1),own_values(yH2),own_values(yH3)) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end - @test all(tests.part) + @test all(PartitionedArrays.getany(tests)) end # ---- Prolongation ---- @@ -93,11 +93,11 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) P3 = prolongations3[lev] mul!(yh3,P3,xH) - y_ref = PVector(1.0,Ah.cols) - tests = map_parts(y_ref.owned_values,yh1.owned_values,yh2.owned_values,yh2.owned_values) do y_ref,y1,y2,y3 + y_ref = pfill(1.0,partition(axes(Ah,2))) + tests = map(own_values(y_ref),own_values(yh1),own_values(yh2),own_values(yh3)) do y_ref,y1,y2,y3 map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3]) end - @test all(tests.part) + @test all(PartitionedArrays.getany(tests)) end end @@ -109,7 +109,12 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements num_ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),num_ranks,num_parts_x_level,num_trees,num_refs_coarse) + +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +run(parts,num_parts_x_level,num_trees,num_refs_coarse) + println("AT THE END") MPI.Finalize() end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index a982d8da..9a07c5b8 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -79,7 +79,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-8, @@ -118,8 +118,12 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end + +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index b39377b9..7a0a3046 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -92,8 +92,11 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index 
2d2cce68..de4d3ae8 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -76,7 +76,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -112,8 +112,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 3 num_parts_x_level = [4,2] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 72dfc714..4bf6a7d3 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -89,8 +89,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 86bd509d..58c3c6da 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -92,8 +92,11 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +num_iters, num_free_dofs2 = main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl index 5be1501a..beaa8905 100644 --- a/test/mpi/MUMPSSolversTests.jl +++ b/test/mpi/MUMPSSolversTests.jl @@ -1,4 +1,4 @@ -module RichardsonSmoothersTests +module MUMPSSolversTests using Test using MPI @@ -30,10 +30,10 @@ function set_ksp_options(ksp) @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) end -function main(parts,partition) +function main(parts,nranks,domain_partition) GridapPETSc.with() do domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) @@ -57,7 +57,7 @@ function main(parts,partition) ss = symbolic_setup(P,A) ns = 
numerical_setup(ss,A) - x = PVector(1.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) solve!(x,ns,b) u = interpolate(sol,Uh) @@ -72,9 +72,12 @@ function main(parts,partition) end end -partition = (32,32) -ranks = (2,2) -with_backend(main,MPIBackend(),ranks,partition) +domain_partition = (32,32) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,num_ranks,domain_partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index a7cc00e1..2e8beeca 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -41,8 +41,11 @@ end num_parts_x_level = [4,4,2,2] # Procs in each refinement level -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,num_parts_x_level) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,num_parts_x_level) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl index 9a8bd92c..0c27c4f0 100644 --- a/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl +++ b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl @@ -58,7 +58,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -93,8 +93,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,4,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,max_order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,max_order) MPI.Finalize() diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 20f130b9..3a57e801 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -8,6 +8,7 @@ using Test using GridapSolvers using GridapSolvers.MultilevelTools +using GridapDistributed: redistribute_fe_function function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) GridapP4est.with(parts) do @@ -23,14 +24,19 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) new_parts = level_parts[1] # FE Spaces - order = 1 - u(x) = x[1] + x[2] + order = 2 + u(x) = x[1]^2 + x[2]^2 - 3.0*x[1]*x[2] reffe = ReferenceFE(lagrangian,Float64,order) glue = mh.levels[1].red_glue model_old = get_model_before_redist(mh.levels[1]) - VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") - UOLD = TrialFESpace(VOLD,u) + if i_am_in(old_parts) + VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") + UOLD = TrialFESpace(VOLD,u) + else + VOLD = nothing + UOLD = nothing + end model_new = get_model(mh.levels[1]) VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") @@ -53,10 +59,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end # Old -> New - uh_old_red = redistribute_fe_function(uh_old, - UNEW, - model_new, - glue) + uh_old_red = redistribute_fe_function(uh_old,UNEW,model_new,glue) n = sum(∫(uh_old_red)*dΩ_new) if i_am_in(old_parts) o = sum(∫(uh_old)*dΩ_old) @@ -64,11 +67,7 @@ function 
run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end # New -> Old - uh_new_red = redistribute_fe_function(uh_new, - UOLD, - model_old, - glue; - reverse=true) + uh_new_red = redistribute_fe_function(uh_new,UOLD,model_old,glue;reverse=true) n = sum(∫(uh_new)*dΩ_new) if i_am_in(old_parts) o = sum(∫(uh_new_red)*dΩ_old) @@ -78,11 +77,14 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) end -num_parts_x_level = [4,2,2] # Procs in each refinement level +num_parts_x_level = [4,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees -num_refs_coarse = 2 # Number of initial refinements +num_refs_coarse = 3 # Number of initial refinements -ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +run(parts,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 06a02165..4fb128fb 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -20,13 +20,13 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) # FE Spaces - order = 1 - sol(x) = x[1] + x[2] + order = 2 + sol(x) = x[1]^2 + x[2]^2 - 3.0*x[1]*x[2] reffe = ReferenceFE(lagrangian,Float64,order) tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,sol) - quad_order = 3*order+1 + quad_order = 2*order+1 for lev in 1:num_levels-1 fparts = get_level_parts(mh,lev) cparts = get_level_parts(mh,lev+1) @@ -50,41 +50,55 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) # Coarse FEFunction -> Fine FEFunction, by projection ah(u,v) = ∫(v⋅u)*dΩh lh(v) = ∫(v⋅uH)*dΩh - Ah = assemble_matrix(ah,Uh,Vh) - bh = assemble_vector(lh,Vh) + oph = AffineFEOperator(ah,lh,Uh,Vh) + Ah = get_matrix(oph) + bh = get_vector(oph) - xh = PVector(0.0,Ah.cols) + xh = pfill(0.0,partition(axes(Ah,2))) IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) uH_projected = FEFunction(Uh,xh) _eh = uh-uH_projected eh = sum(∫(_eh⋅_eh)*dΩh) i_am_main(parts) && println("Error H2h: ", eh) + @test eh < 1.0e-10 # Fine FEFunction -> Coarse FEFunction, by projection aH(u,v) = ∫(v⋅u)*dΩH lH(v) = ∫(v⋅uH_projected)*dΩhH - AH = assemble_matrix(aH,UH,VH) - bH = assemble_vector(lH,VH) + opH = AffineFEOperator(aH,lH,UH,VH) + AH = get_matrix(opH) + bH = get_vector(opH) - xH = PVector(0.0,AH.cols) + xH = pfill(0.0,partition(axes(AH,2))) IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) uh_projected = FEFunction(UH,xH) _eH = uH-uh_projected eH = sum(∫(_eH⋅_eH)*dΩH) i_am_main(parts) && println("Error h2H: ", eH) + @test eH < 1.0e-10 + + # Coarse FEFunction -> Fine FEFunction, by interpolation + uH_i = interpolate(uH,Uh) + + _eh = uH_i-uh + eh = sum(∫(_eh⋅_eh)*dΩh) + i_am_main(parts) && println("Error H2h: ", eh) + @test eh < 1.0e-10 end end end end - num_parts_x_level = [4,2,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements -ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end
+run(parts,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl index 47612a06..c87c92ac 100644 --- a/test/mpi/RestrictDofsTests.jl +++ b/test/mpi/RestrictDofsTests.jl @@ -54,7 +54,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, ns = numerical_setup(ss,A) # Solve - x = PVector(0.0,A.cols) + x = pfill(0.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-12, @@ -89,8 +89,11 @@ coarse_grid_partition = (2,2) num_refs_coarse = 2 num_parts_x_level = [4,2,1] -ranks = num_parts_x_level[1] -with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +num_ranks = num_parts_x_level[1] +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index ca0d2835..d13e62e1 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -11,10 +11,10 @@ using IterativeSolvers using GridapSolvers using GridapSolvers.LinearSolvers -function main(parts,partition) +function main(parts,nranks,domain_partition) GridapP4est.with(parts) do domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,nranks,domain,domain_partition) sol(x) = x[1] + x[2] f(x) = -Δ(sol)(x) @@ -38,7 +38,7 @@ function main(parts,partition) ss = symbolic_setup(P,A) ns = numerical_setup(ss,A) - x = PVector(1.0,A.cols) + x = pfill(1.0,partition(axes(A,2))) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), reltol=1.0e-8, @@ -57,9 +57,12 @@ function main(parts,partition) end end -partition = (32,32) -ranks = (2,2) -with_backend(main,MPIBackend(),ranks,partition) +domain_partition = (32,32) +num_ranks = (2,2) +parts = with_mpi() do distribute + distribute(LinearIndices((prod(num_ranks),))) +end +main(parts,num_ranks,domain_partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 50b26a40..ae14d3e1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -32,6 +32,7 @@ function run_tests(testdir) "RedistributeToolsTests.jl", "RefinementToolsTests.jl", "RichardsonSmoothersTests.jl", + "ModelHierarchiesTests.jl", "GMGLinearSolversPoissonTests.jl", "GMGLinearSolversLaplacianTests.jl", "GMGLinearSolversVectorLaplacianTests.jl", @@ -41,13 +42,6 @@ function run_tests(testdir) "RestrictDofsTests.jl", "PRefinementGMGLinearSolversPoissonTests.jl"] np = 4 - #extra_args = "-s 2 2 -r 2" - extra_args = "" - elseif f in ["ModelHierarchiesTests.jl"] - np = 6 - extra_args = "" - elseif f in [""] - np = 1 extra_args = "" else np = nprocs diff --git a/test/seq/DegenerateTests.jl b/test/seq/DegenerateTests.jl deleted file mode 100644 index b64dcf2b..00000000 --- a/test/seq/DegenerateTests.jl +++ /dev/null @@ -1,61 +0,0 @@ -#module DegenerateTests - -using LinearAlgebra -using FillArrays -using Gridap -using Gridap.Helpers -using Gridap.Algebra -using Gridap.Geometry -using Gridap.FESpaces -using Gridap.Adaptivity -using Test -using IterativeSolvers - -include("../../src/GridapFixes.jl") - -function cg_solve(op) - A = get_matrix(op) - b = get_vector(op) - x = PVector(0.0,A.cols) - IterativeSolvers.cg!(x,A,b;verbose=true,reltol=1.0e-06) - return x -end - -#function main() - domain = (0,1,0,1) - 
partition = (0,0) - cmodel = CartesianDiscreteModel(domain,partition) - model = UnstructuredDiscreteModel(cmodel) - - order = 1 - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") - Uh = TrialFESpace(sol,Vh) - Ω = Triangulation(model) - dΩ = Measure(Ω,2*order+1) - - u_sol = interpolate(sol,Vh) - a(u,v) = ∫(v⋅u)*dΩ - l(v) = ∫(v⋅u_sol)*dΩ - op = AffineFEOperator(a,l,Uh,Vh) - - assemble_matrix(a,Uh,Vh) - - c = a(get_trial_fe_basis(Uh),get_fe_basis(Vh)) - first(c.dict) - - dofs = get_fe_dof_basis(Uh) - dofs(u_sol) - - x = cg_solve(op) - uh = FEFunction(x,Uh) - - eh = ∫(uh-u_sol)*dΩ - e = sum(eh) - println(e) -#end - -main() - -#end \ No newline at end of file diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl index ecf2202a..bea13993 100644 --- a/test/seq/DistributedPatchFESpacesDebuggingTests.jl +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -14,16 +14,14 @@ using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS -#= -backend = SequentialBackend() -ranks = (1,2) -parts = get_part_ids(backend,ranks) -=# +function run(ranks) + parts = with_debug() do distribute + distribute(LinearIndices((prod(ranks),))) + end -function run(parts) domain = (0.0,1.0,0.0,1.0) partition = (2,4) - model = CartesianDiscreteModel(parts,domain,partition) + model = CartesianDiscreteModel(parts,ranks,domain,partition) order = 0 reffe = ReferenceFE(raviart_thomas,Float64,order) @@ -36,10 +34,10 @@ function run(parts) w, w_sums = PBS.compute_weight_operators(Ph,Vh); - xP = PVector(1.0,Ph.gids) - yP = PVector(0.0,Ph.gids) - x = PVector(1.0,Vh.gids) - y = PVector(0.0,Vh.gids) + xP = pfill(1.0,partition(Ph.gids)) + yP = pfill(0.0,partition(Ph.gids)) + x = pfill(1.0,partition(Vh.gids)) + y = pfill(0.0,partition(Vh.gids)) PBS.prolongate!(yP,Ph,x) PBS.inject!(y,Ph,yP,w,w_sums) @@ -96,23 +94,23 @@ function run(parts) # ---- Manual solve using LU ---- # - x1_mat = PVector(0.5,Ah.cols) + x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat - exchange!(r1_mat) + consistent!(r1_mat) |> fetch - r1 = PVector(0.0,Vh.gids) - x1 = PVector(0.0,Vh.gids) - rp = PVector(0.0,Ph.gids) - xp = PVector(0.0,Ph.gids) - rp_mat = PVector(0.0,Ahp.cols) - xp_mat = PVector(0.0,Ahp.cols) + r1 = pfill(0.0,partition(Vh.gids)) + x1 = pfill(0.0,partition(Vh.gids)) + rp = pfill(0.0,partition(Ph.gids)) + xp = pfill(0.0,partition(Ph.gids)) + rp_mat = pfill(0.0,partition(axes(Ahp,2))) + xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) - exchange!(r1) + consistent!(r1) |> fetch PBS.prolongate!(rp,Ph,r1) # OK copy!(rp_mat,rp) - exchange!(rp_mat) + consistent!(rp_mat) |> fetch solve!(xp_mat,LUns,rp_mat) copy!(xp,xp_mat) # Some big numbers appear here.... 
@@ -122,18 +120,18 @@ function run(parts) # ---- Same using the PatchBasedSmoother ---- # - x2_mat = PVector(0.5,Ah.cols) + x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat - exchange!(r2_mat) + consistent!(r2_mat) |> fetch solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson - x3_mat = PVector(0.5,Ah.cols) + x3_mat = pfill(0.5,partition(axes(Ah,2))) r3_mat = fh-Ah*x3_mat - exchange!(r3_mat) + consistent!(r3_mat) solve!(x3_mat,Rns,r3_mat) - exchange!(x3_mat) + consistent!(x3_mat) |> fetch # Outputs res = Dict{String,Any}() @@ -155,17 +153,15 @@ function run(parts) return model,PD,Ph,Vh,res end -backend = SequentialBackend() - -parts = get_part_ids(backend,(1,1)) -Ms,PDs,Phs,Vhs,res_single = run(parts); +ranks = (1,1) +Ms,PDs,Phs,Vhs,res_single = run(ranks); -parts = get_part_ids(backend,(1,2)) -Mm,PDm,Phm,Vhm,res_multi = run(parts); +ranks = (1,2) +Mm,PDm,Phm,Vhm,res_multi = run(ranks); println(repeat('#',80)) -map_parts(local_views(Ms)) do model +map(local_views(Ms)) do model cell_ids = get_cell_node_ids(model) cell_coords = get_cell_coordinates(model) display(reshape(cell_ids,length(cell_ids))) @@ -178,29 +174,29 @@ vertex_gids = get_face_gids(Mm,0) edge_gids = get_face_gids(Mm,1) println(">>> Cell gids") -map_parts(cell_gids.partition) do p +map(cell_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('-',80)) println(">>> Vertex gids") -map_parts(vertex_gids.partition) do p +map(vertex_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('-',80)) println(">>> Edge gids") -map_parts(edge_gids.partition) do p +map(edge_gids.partition) do p println(transpose(p.lid_to_ohid)) end; println(repeat('#',80)) -map_parts(local_views(Phs)) do Ph +map(local_views(Phs)) do Ph display(Ph.patch_cell_dofs_ids) end; -map_parts(local_views(Phm)) do Ph +map(local_views(Phm)) do Ph display(Ph.patch_cell_dofs_ids) end; @@ -211,10 +207,10 @@ for key in keys(res_single) val_m = res_multi[key] println(">>> ", key) - map_parts(val_s.values) do v + map(val_s.values) do v println(transpose(v)) end; - map_parts(val_m.owned_values) do v + map(own_values(val_m)) do v println(transpose(v)) end; println(repeat('-',80)) diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 18dc37c3..03d5e5bb 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -13,30 +13,32 @@ using FillArrays using GridapSolvers import GridapSolvers.PatchBasedSmoothers as PBS -backend = SequentialBackend() ranks = (1,2) -parts = get_part_ids(backend,ranks) +parts = with_debug() do distribute + distribute(LinearIndices((prod(ranks),))) +end domain = (0.0,1.0,0.0,1.0) -partition = (2,4) -model = CartesianDiscreteModel(parts,domain,partition) +domain_partition = (2,4) +model = CartesianDiscreteModel(parts,ranks,domain,domain_partition) -# order = 1 -# reffe = ReferenceFE(lagrangian,Float64,order) -order = 0 -reffe = ReferenceFE(raviart_thomas,Float64,order) +order = 1 +reffe = ReferenceFE(lagrangian,Float64,order) +#order = 0 +#reffe = ReferenceFE(raviart_thomas,Float64,order) Vh = TestFESpace(model,reffe) PD = PBS.PatchDecomposition(model) -Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) +Ph = PBS.PatchFESpace(model,reffe,H1Conformity(),PD,Vh) +# Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) # ---- Testing Prolongation and Injection ---- # w, w_sums = PBS.compute_weight_operators(Ph,Vh); -xP = PVector(1.0,Ph.gids) -yP = PVector(0.0,Ph.gids) -x = PVector(1.0,Vh.gids) 
-y = PVector(0.0,Vh.gids) +xP = pfill(1.0,partition(Ph.gids)) +yP = pfill(0.0,partition(Ph.gids)) +x = pfill(1.0,partition(Vh.gids)) +y = pfill(0.0,partition(Vh.gids)) PBS.prolongate!(yP,Ph,x) PBS.inject!(y,Ph,yP,w,w_sums) @@ -87,19 +89,19 @@ Rns = numerical_setup(Rss,Ah) # ---- Manual solve using LU ---- # -x1_mat = PVector(0.5,Ah.cols) +x1_mat = pfill(0.5,partition(axes(Ah,2))) r1_mat = fh-Ah*x1_mat -exchange!(r1_mat) +consistent!(r1_mat) |> fetch -r1 = PVector(0.0,Vh.gids) -x1 = PVector(0.0,Vh.gids) -rp = PVector(0.0,Ph.gids) -xp = PVector(0.0,Ph.gids) -rp_mat = PVector(0.0,Ahp.cols) -xp_mat = PVector(0.0,Ahp.cols) +r1 = pfill(0.0,partition(Vh.gids)) +x1 = pfill(0.0,partition(Vh.gids)) +rp = pfill(0.0,partition(Ph.gids)) +xp = pfill(0.0,partition(Ph.gids)) +rp_mat = pfill(0.0,partition(axes(Ahp,2))) +xp_mat = pfill(0.0,partition(axes(Ahp,2))) copy!(r1,r1_mat) -exchange!(r1) +consistent!(r1) |> fetch PBS.prolongate!(rp,Ph,r1) copy!(rp_mat,rp) @@ -113,18 +115,18 @@ copy!(x1_mat,x1) # ---- Same using the PatchBasedSmoother ---- # -x2_mat = PVector(0.5,Ah.cols) +x2_mat = pfill(0.5,partition(axes(Ah,2))) r2_mat = fh-Ah*x2_mat -exchange!(r2_mat) +consistent!(r2_mat) |> fetch solve!(x2_mat,Mns,r2_mat) # ---- Smoother inside Richardson -x3_mat = PVector(0.5,Ah.cols) +x3_mat = pfill(0.5,partition(axes(Ah,2))) r3_mat = fh-Ah*x3_mat -exchange!(r3_mat) +consistent!(r3_mat) |> fetch solve!(x3_mat,Rns,r3_mat) -exchange!(x3_mat) +consistent!(x3_mat) |> fetch end \ No newline at end of file diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index 5df48824..01393c5e 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -10,8 +10,6 @@ module PatchLinearSolverTests using GridapSolvers using GridapSolvers.PatchBasedSmoothers - order=1 - function returns_PD_Ph_xh_Vh(model;style=GridapSolvers.PatchBasedSmoothers.PatchBoundaryExclude()) reffe = ReferenceFE(lagrangian,Float64,order) # reffe=ReferenceFE(lagrangian,VectorValue{2,Float64},order) @santiagobadia: For Vector Laplacian @@ -60,33 +58,42 @@ module PatchLinearSolverTests end ################################################## + order = 1 + + rank_partition = (2,1) + parts = with_debug() do distribute + distribute(LinearIndices((prod(rank_partition),))) + end domain = (0.0,1.0,0.0,1.0) - partition = (2,3) + mesh_partition = (2,3) - model = CartesianDiscreteModel(domain,partition) + model = CartesianDiscreteModel(domain,mesh_partition) _,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) - parts = get_part_ids(sequential,(1,2)) - dmodel = CartesianDiscreteModel(parts,domain,partition) + dmodel = CartesianDiscreteModel(parts,rank_partition,domain,mesh_partition) _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); @test num_free_dofs(Ph) == num_free_dofs(dPh) - @test all(dxh.owned_values.parts[1] .≈ xh[1:3]) - @test all(dxh.owned_values.parts[2] .≈ xh[4:end]) + @test all(own_values(dxh).items[1] .≈ xh[1:4]) + @test all(own_values(dxh).items[2] .≈ xh[5:end]) ################################################# - model = CartesianDiscreteModel(domain,partition) + model = CartesianDiscreteModel(domain,mesh_partition) PD,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model) A,b = compute_matrix_vector(model,Vh) x = test_smoother(PD,Ph,Vh,A,b) - parts = get_part_ids(SequentialBackend(),(1,1)) - dmodel = CartesianDiscreteModel(parts,domain,partition) + rank_partition = (1,1) + parts = with_debug() do distribute + distribute(LinearIndices((prod(rank_partition),))) + end + + dmodel = 
CartesianDiscreteModel(parts,rank_partition,domain,mesh_partition) dPD,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); dA,db = compute_matrix_vector(dmodel,dVh); dx = test_smoother(dPD,dPh,dVh,dA,db) - @test all(dx.owned_values.parts[1] .≈ x) + @test all(own_values(dx).items[1] .≈ x) end
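
For reference, the hunks above repeat a single PartitionedArrays 0.2 -> 0.3 migration pattern: PVector(v,A.cols) becomes pfill(v,partition(axes(A,2))), map_parts over owned_values becomes map over own_values, exchange! becomes consistent! followed by fetch, and the with_backend drivers become an explicit distribute over LinearIndices via with_mpi or with_debug. Below is a minimal self-contained sketch of these idioms, assuming PartitionedArrays 0.3 with the debug backend; the names nranks, ngdofs and row_partition are illustrative only and do not come from the repository.

using PartitionedArrays

nranks = 4   # illustrative number of parts (the debug backend runs them in one process)
ngdofs = 20  # illustrative number of global entries

# Driver pattern: obtain the rank ids through a distribute closure.
# The MPI tests use with_mpi() in place of with_debug().
parts = with_debug() do distribute
  distribute(LinearIndices((nranks,)))
end

# Vector construction: pfill over an index partition. In the tests the
# partition is taken from a PSparseMatrix via partition(axes(A,2));
# here a uniform partition of ngdofs entries over nranks parts is used instead.
row_partition = uniform_partition(parts,nranks,ngdofs)
x = pfill(0.0,row_partition)
b = pfill(1.0,row_partition)

# map over own_values replaces map_parts over owned_values.
map(own_values(x),own_values(b)) do xo,bo
  xo .= 2.0 .* bo
end

# consistent! replaces exchange!; it returns a task that must be fetched
# before the ghost entries are guaranteed to be up to date.
consistent!(x) |> fetch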