Merge pull request #31 from gridap/partitioned_arrays_v0.3
Updating to PartitionedArrays v0.3
JordiManyer authored Aug 22, 2023
2 parents 8186314 + 512a5cf commit acea3e2
Showing 36 changed files with 414 additions and 821 deletions.
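Most of the diff below is a mechanical migration from the PartitionedArrays v0.2 API to v0.3: PVector(0.0, A.rows) / PVector(0.0, A.cols) becomes pfill over an index partition, map_parts over owned_values / owned_owned_values becomes map over own_values, exchange! becomes consistent! followed by fetch, and AbstractPData arguments become plain AbstractArrays. A minimal sketch of the new idioms, not taken from this repository (the two-part ranks setup and the vector size are illustrative):

```julia
using PartitionedArrays

ranks = LinearIndices((2,))                   # two parts, run sequentially in one process
row_partition = uniform_partition(ranks, 10)  # 10 global entries split across the parts

# v0.2: PVector(0.0, A.cols)        ->  v0.3: pfill over an index partition
b = pfill(0.0, row_partition)

# v0.2: map_parts(b.owned_values)   ->  v0.3: map over own_values(b)
map(own_values(b)) do bo
    bo .= 1.0                                 # update locally owned entries only
end

# v0.2: exchange!(b)                ->  v0.3: consistent!(b) |> fetch
consistent!(b) |> fetch
```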
13 changes: 11 additions & 2 deletions .github/workflows/CI.yml
@@ -15,14 +15,13 @@ jobs:
name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
runs-on: ${{ matrix.os }}
env:
JULIA_MPI_BINARY: "system"
P4EST_ROOT_DIR: "/opt/p4est/2.2/"
JULIA_PETSC_LIBRARY: "/opt/petsc/3.18/lib/libpetsc"
strategy:
fail-fast: false
matrix:
version:
- '1.7'
- '1.9'
os:
- ubuntu-latest
arch:
@@ -93,6 +92,16 @@ jobs:
--with-mpi=1 --with-64-bit-indices
make
make install
- name: add MPIPreferences
shell: julia --color=yes --project=. {0}
run: |
using Pkg
Pkg.add("MPIPreferences")
- name: use MPI system binary
shell: julia --color=yes --project=. {0}
run: |
using MPIPreferences
MPIPreferences.use_system_binary()
- uses: julia-actions/julia-buildpkg@latest
- run: echo $PWD
- run: julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build(); Pkg.precompile()'
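The two new workflow steps above replace the removed JULIA_MPI_BINARY environment variable with MPIPreferences. The same configuration can be reproduced locally from the project root; a sketch, assuming a system MPI installation is available on the machine (it writes a LocalPreferences.toml, which is why that file is ignored below):

```julia
using Pkg
Pkg.add("MPIPreferences")

using MPIPreferences
MPIPreferences.use_system_binary()  # point MPI.jl at the system MPI, as the CI job now does
```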
1 change: 1 addition & 0 deletions .gitignore
@@ -1,2 +1,3 @@
.vscode
Manifest.toml
LocalPreferences.toml
16 changes: 8 additions & 8 deletions Project.toml
@@ -1,7 +1,7 @@
name = "GridapSolvers"
uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534"
authors = ["Santiago Badia <[email protected]>", "Jordi Manyer <[email protected]>", "Alberto F. Martin <[email protected]>", "Javier Principe <[email protected]>"]
version = "0.1.1"
version = "0.2.0"

[deps]
ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
@@ -18,14 +18,14 @@ Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"

[compat]
ArgParse = "1"
FillArrays = "0.9, 0.10, 0.11, 0.12"
Gridap = "0.17.17"
GridapDistributed = "0.2.7"
GridapP4est = "0.2"
GridapPETSc = "0.4"
FillArrays = "0.9, 0.10, 0.11, 0.12, 0.13, 1.0"
Gridap = "0.17.18"
GridapDistributed = "0.3"
GridapP4est = "0.3"
GridapPETSc = "0.5"
IterativeSolvers = "0.9"
MPI = "0.19"
PartitionedArrays = "0.2.15"
MPI = "0.20"
PartitionedArrays = "0.3"
julia = "1.7"

[extras]
30 changes: 15 additions & 15 deletions src/LinearSolvers/GMGLinearSolvers.jl
@@ -88,7 +88,7 @@ function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:Abstrac
parts = get_level_parts(mh,1)
if i_am_in(parts)
Ah = smatrices[1]
rh = PVector(0.0, Ah.cols)
rh = pfill(0.0, partition(axes(Ah,2)))
cache = rh
end
return cache
@@ -116,11 +116,11 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS
if i_am_in(parts)
mat = smatrices[nlevs]
if (num_parts(parts) == 1) # Serial
cache = map_parts(mat.owned_owned_values) do Ah
cache = map(own_values(mat)) do Ah
ss = symbolic_setup(coarsest_solver, Ah)
numerical_setup(ss, Ah)
end
cache = cache.part
cache = PartitionedArrays.getany(cache)
else # Parallel
ss = symbolic_setup(coarsest_solver, mat)
cache = numerical_setup(ss, mat)
@@ -136,17 +136,17 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi
if i_am_in(parts)
mat = smatrices[nlevs]
if (num_parts(parts) == 1) # Serial
cache = map_parts(mat.owned_owned_values) do Ah
cache = map(own_values(mat)) do Ah
rh = convert(PETScVector,fill(0.0,size(A,2)))
xh = convert(PETScVector,fill(0.0,size(A,2)))
ss = symbolic_setup(coarsest_solver, Ah)
ns = numerical_setup(ss, Ah)
return ns, xh, rh
end
cache = cache.part
cache = PartitionedArrays.getany(cache)
else # Parallel
rh = convert(PETScVector,PVector(0.0,mat.cols))
xh = convert(PETScVector,PVector(0.0,mat.cols))
rh = convert(PETScVector,pfill(0.0,partition(axes(mat,2))))
xh = convert(PETScVector,pfill(0.0,partition(axes(mat,2))))
ss = symbolic_setup(coarsest_solver, mat)
ns = numerical_setup(ss, mat)
cache = ns, xh, rh
@@ -156,14 +156,14 @@ end
end

function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix},lev::Integer)
dxh = PVector(0.0, smatrices[lev].cols)
Adxh = PVector(0.0, smatrices[lev].rows)
dxh = pfill(0.0,partition(axes(smatrices[lev],2)))
Adxh = pfill(0.0,partition(axes(smatrices[lev],1)))

cparts = get_level_parts(mh,lev+1)
if i_am_in(cparts)
AH = smatrices[lev+1]
rH = PVector(0.0,AH.cols)
dxH = PVector(0.0,AH.cols)
rH = pfill(0.0,partition(axes(AH,2)))
dxH = pfill(0.0,partition(axes(AH,2)))
else
rH = nothing
dxH = nothing
@@ -183,20 +183,20 @@ function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMa
return work_vectors
end

function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,rh::PVector,caches)
function solve_coarsest_level!(parts::AbstractArray,::LinearSolver,xh::PVector,rh::PVector,caches)
if (num_parts(parts) == 1)
map_parts(xh.owned_values,rh.owned_values) do xh, rh
map(own_values(xh),own_values(rh)) do xh, rh
solve!(xh,caches,rh)
end
else
solve!(xh,caches,rh)
end
end

function solve_coarsest_level!(parts::AbstractPData,::PETScLinearSolver,xh::PVector,rh::PVector,caches)
function solve_coarsest_level!(parts::AbstractArray,::PETScLinearSolver,xh::PVector,rh::PVector,caches)
solver_ns, xh_petsc, rh_petsc = caches
if (num_parts(parts) == 1)
map_parts(xh.owned_values,rh.owned_values) do xh, rh
map(own_values(xh),own_values(rh)) do xh, rh
copy!(rh_petsc,rh)
solve!(xh_petsc,solver_ns,rh_petsc)
copy!(xh,xh_petsc)
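Throughout the file above, pfill(0.0, partition(axes(A,1))) and pfill(0.0, partition(axes(A,2))) replace v0.2's PVector(0.0, A.rows) and PVector(0.0, A.cols): the row or column index partition is now recovered from the object's axes, and the per-part serial caches are unwrapped with PartitionedArrays.getany instead of the old .part field. A small sketch of the axes-based allocation, shown on a PVector with an illustrative setup:

```julia
using PartitionedArrays

ranks = LinearIndices((2,))
x = pfill(1.0, uniform_partition(ranks, 8))   # stand-in for an existing distributed object

rows = partition(axes(x, 1))                  # recover the index partition from the object
r = pfill(0.0, rows)                          # compatible work vector, as in the GMG caches
@assert length(r) == length(x)
```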
6 changes: 3 additions & 3 deletions src/LinearSolvers/JacobiLinearSolvers.jl
@@ -22,14 +22,14 @@ function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMa
end

function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix)
inv_diag=map_parts(A.owned_owned_values) do a
inv_diag=map(own_values(A)) do a
1.0 ./ diag(a)
end
return JacobiNumericalSetup(inv_diag)
end

function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::PSparseMatrix)
map_parts(ns.inv_diag,A.owned_owned_values) do inv_diag, a
map(ns.inv_diag,own_values(A)) do inv_diag, a
inv_diag .= 1.0 ./ diag(a)
end
return ns
@@ -43,7 +43,7 @@ end

function Gridap.Algebra.solve!(x::PVector, ns::JacobiNumericalSetup, b::PVector)
inv_diag = ns.inv_diag
map_parts(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b
map(inv_diag,own_values(x),own_values(b)) do inv_diag, x, b
x .= inv_diag .* b
end
return x
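The Jacobi kernels above now loop over owned blocks with map and own_values instead of map_parts and the old owned_values fields. A standalone sketch of that update pattern; the data is made up for illustration (in the actual setup, inv_diag holds 1 ./ diag(A) per part):

```julia
using PartitionedArrays

ranks = LinearIndices((2,))
part  = uniform_partition(ranks, 8)

inv_diag = pfill(0.5, part)   # stand-in for the per-part inverse diagonal of A
b = pfill(2.0, part)
x = pfill(0.0, part)

map(own_values(inv_diag), own_values(x), own_values(b)) do d, xo, bo
    xo .= d .* bo             # owned entries of x get D^-1 * b
end
```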
4 changes: 2 additions & 2 deletions src/LinearSolvers/RichardsonSmoothers.jl
@@ -40,8 +40,8 @@ function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::
end

function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::PSparseMatrix)
Adx = PVector(0.0,A.rows)
dx = PVector(0.0,A.cols)
Adx = pfill(0.0,partition(axes(A,1)))
dx = pfill(0.0,partition(axes(A,2)))
Mns = numerical_setup(ss.Mss,A)
return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns)
end
26 changes: 14 additions & 12 deletions src/MultilevelTools/DistributedGridTransferOperators.jl
@@ -53,12 +53,12 @@ function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mod

if i_am_in(cparts)
model_h = get_model_before_redist(mh,lev)
Uh = get_fe_space_before_redist(sh,lev)
fv_h = PVector(0.0,Uh.gids)
Uh = get_fe_space_before_redist(sh,lev)
fv_h = pfill(0.0,partition(Uh.gids))
dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh)

UH = get_fe_space(sh,lev+1)
fv_H = PVector(0.0,UH.gids)
UH = get_fe_space(sh,lev+1)
fv_H = pfill(0.0,partition(UH.gids))
dv_H = (mode == :solution) ? get_dirichlet_dof_values(UH) : zero_dirichlet_values(UH)

cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H
@@ -102,7 +102,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::
u,v = get_trial_fe_basis(UH), get_fe_basis(VH)
data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir)
AH,bH0 = assemble_matrix_and_vector(assem,data)
xH = PVector(0.0,AH.cols)
xH = pfill(0.0,partition(axes(AH,2)))
bH = copy(bH0)

cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem
@@ -125,7 +125,7 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op

Uh_red = get_fe_space(sh,lev)
model_h_red = get_model(mh,lev)
fv_h_red = PVector(0.0,Uh_red.gids)
fv_h_red = pfill(0.0,partition(Uh_red.gids))
dv_h_red = (mode == :solution) ? get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red)
glue = mh.levels[lev].red_glue

@@ -214,7 +214,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r
model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine

copy!(fv_h,x) # Matrix layout -> FE layout
exchange!(fv_h)
consistent!(fv_h) |> fetch
restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h))
copy!(y,fv_H) # FE layout -> Matrix layout

@@ -249,7 +249,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer

# 1 - Redistribute from fine partition to coarse partition
copy!(fv_h_red,x)
exchange!(fv_h_red)
consistent!(fv_h_red) |> fetch
redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)

# 2 - Interpolate in coarse partition
@@ -259,7 +259,7 @@
copy!(y,fv_H) # FE layout -> Matrix layout
end

return y
return y
end

# D.2) Restriction, with redistribution, by projection
@@ -270,11 +270,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer

# 1 - Redistribute from fine partition to coarse partition
copy!(fv_h_red,x)
exchange!(fv_h_red)
consistent!(fv_h_red) |> fetch
redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)

# 2 - Solve f2c projection coarse partition
if !isa(y,Nothing)
consistent!(fv_h) |> fetch
uh = FEFunction(Uh,fv_h,dv_h)
v = get_fe_basis(VH)
vec_data = collect_cell_vector(VH,lH(v,uh))
Expand All @@ -284,7 +285,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer
copy!(y,xH)
end

return y
return y
end

# D.3) Restriction, with redistribution, by dof selection (only nodal dofs)
@@ -295,11 +296,12 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer

# 1 - Redistribute from fine partition to coarse partition
copy!(fv_h_red,x)
exchange!(fv_h_red)
consistent!(fv_h_red) |> fetch
redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)

# 2 - Interpolate in coarse partition
if !isa(y,Nothing)
consistent!(fv_h) |> fetch
restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h))
copy!(y,fv_H) # FE layout -> Matrix layout
end
44 changes: 28 additions & 16 deletions src/MultilevelTools/FESpaceHierarchies.jl
@@ -42,31 +42,30 @@ function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int)
get_fe_space_before_redist(fh[lev])
end

function Gridap.FESpaces.TestFESpace(
# Test/Trial FESpaces for ModelHierarchyLevels

function Gridap.FESpaces.FESpace(
mh::ModelHierarchyLevel{A,B,C,Nothing},args...;kwargs...) where {A,B,C}
Vh = TestFESpace(get_model(mh),args...;kwargs...)
Vh = FESpace(get_model(mh),args...;kwargs...)
FESpaceHierarchyLevel(mh.level,Vh,nothing)
end

function Gridap.FESpaces.TestFESpace(
mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D}
Vh = TestFESpace(get_model_before_redist(mh),args...;kwargs...)
Vh_red = TestFESpace(get_model(mh),args...;kwargs...)
function Gridap.FESpaces.FESpace(mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D}
cparts, _ = get_old_and_new_parts(mh.red_glue,Val(false))
Vh = i_am_in(cparts) ? FESpace(get_model_before_redist(mh),args...;kwargs...) : nothing
Vh_red = FESpace(get_model(mh),args...;kwargs...)
FESpaceHierarchyLevel(mh.level,Vh,Vh_red)
end

function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,Nothing},u) where {A}
Uh = TrialFESpace(a.fe_space,u)
FESpaceHierarchyLevel(a.level,Uh,nothing)
end

function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,B},u) where {A,B}
Uh = TrialFESpace(a.fe_space,u)
Uh_red = TrialFESpace(a.fe_space_red,u)
function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel,args...;kwargs...)
Uh = !isa(a.fe_space,Nothing) ? TrialFESpace(a.fe_space,args...;kwargs...) : nothing
Uh_red = !isa(a.fe_space_red,Nothing) ? TrialFESpace(a.fe_space_red,args...;kwargs...) : nothing
FESpaceHierarchyLevel(a.level,Uh,Uh_red)
end

function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...)
# Test/Trial FESpaces for ModelHierarchies/FESpaceHierarchy

function Gridap.FESpaces.FESpace(mh::ModelHierarchy,args...;kwargs...)
test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh))
for i = 1:num_levels(mh)
parts = get_level_parts(mh,i)
Expand All @@ -78,7 +77,7 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...)
FESpaceHierarchy(mh,test_spaces)
end

function Gridap.FESpaces.TestFESpace(
function Gridap.FESpaces.FESpace(
mh::ModelHierarchy,
arg_vector::AbstractVector{<:Union{ReferenceFE,Tuple{<:Gridap.ReferenceFEs.ReferenceFEName,Any,Any}}};
kwargs...)
@@ -119,6 +118,8 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy)
FESpaceHierarchy(a.mh,trial_spaces)
end

# Computing system matrices

function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Integer)
return compute_hierarchy_matrices(trials,a,l,Fill(qdegree,num_levels(trials)))
end
@@ -153,3 +154,14 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Func
end
return mats, A, b
end

function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace)
spaces = map(local_views(U)) do U
if isa(U,Gridap.FESpaces.UnconstrainedFESpace)
U
else
U.space
end
end
return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type)
end

2 comments on commit acea3e2

@JordiManyer
Member Author

@JuliaRegistrator register()

@JuliaRegistrator

Registration pull request created: JuliaRegistries/General/90061

After the above pull request is merged, it is recommended that a tag is created on this repository for the registered package version.

This will be done automatically if the Julia TagBot GitHub Action is installed, or can be done manually through the GitHub interface, or via:

git tag -a v0.2.0 -m "<description of version>" acea3e200d6892357ac46b28c59e6f63ecc572da
git push origin v0.2.0
