Skip to content

Commit

Permalink
Constant (scalar) variance in ConditionalMvNormal mapping (#42)
Browse files Browse the repository at this point in the history
  • Loading branch information
nmheim authored Oct 26, 2020
1 parent 0e64e9c commit e0cd6bc
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 2 deletions.
9 changes: 9 additions & 0 deletions src/cond_mvnormal.jl
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,18 @@ function condition(p::ConditionalMvNormal, z::AbstractMatrix)
BatchMvNormal(μ,σ)
end

# Dispatches that normalize the output of a mapping to a (mean, variance) pair.
# general case: the mapping already produced a (μ, σ) tuple
mean_var(x::Tuple) = x
# a lone mean vector implies unit variance (σ = 1)
mean_var(x::AbstractVector) = (x, 1)
# a mean matrix (one column per sample) gets a unit variance for each column
mean_var(x::AbstractMatrix) = (x, fillsimilar(x, size(x, 2), 1))
# fixed scalar variance shared by every sample; the expanded σ vector has one
# entry per column of μ.
# (the Tuple{<:AbstractVector,<:Real} case is already covered by the general
# Tuple dispatch above)
function mean_var(x::Tuple{<:AbstractMatrix,<:Real})
    μ = first(x)
    return (μ, fillsimilar(μ, size(μ, 2), last(x)))
end

# TODO: this should be moved to DistributionsAD
# NOTE(review): this is type piracy — it adds a `Distributions` method for a
# `DistributionsAD` type, neither of which is owned by this package. Keeping
# it here until the upstream package provides the accessor.
Distributions.mean(p::TuringDiagMvNormal) = p.m
Expand Down
28 changes: 26 additions & 2 deletions test/cond_mvnormal.jl
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@

# BatchScalMvNormal
m = SplitLayer(zlength, [xlength,1])
d = MvNormal(zeros(Float32,xlength), 1f0)
p = ConditionalMvNormal(m) |> gpu

res = condition(p, rand(zlength,batchsize)|>gpu)
Expand All @@ -70,7 +69,6 @@

# Unit variance
m = Dense(zlength,xlength)
d = MvNormal(zeros(Float32,xlength), 1f0)
p = ConditionalMvNormal(m) |> gpu

res = condition(p, rand(zlength,batchsize)|>gpu)
Expand All @@ -90,4 +88,30 @@

f() = sum(rand(p,z))
@test_nowarn Flux.gradient(f, ps)


# Fixed scalar variance: the σ branch of the mapping returns one scalar
# per sample instead of a full variance vector.
m = Dense(zlength, xlength)
scalarσ(x::AbstractVector) = 2
scalarσ(x::AbstractMatrix) = ones(Float32, size(x, 2)) .* 2
p = gpu(ConditionalMvNormal(SplitLayer(m, scalarσ)))

res = condition(p, gpu(rand(zlength, batchsize)))
μ = mean(res)
σ2 = var(res)
@test res isa ConditionalDists.BatchScalMvNormal
@test size(μ) == (xlength, batchsize)
@test size(σ2) == (xlength, batchsize)

x = gpu(rand(Float32, xlength, batchsize))
z = gpu(rand(Float32, zlength, batchsize))
loss() = sum(logpdf(p, x, z))
ps = Flux.params(p)
# only the Dense layer contributes parameters (weight + bias); scalarσ has none
@test length(ps) == 2
@test loss() isa Float32
@test_nowarn gs = Flux.gradient(loss, ps)

f() = sum(rand(p, z))
@test_nowarn Flux.gradient(f, ps)

end

0 comments on commit e0cd6bc

Please sign in to comment.