diff --git a/src/mcmc/Inference.jl b/src/mcmc/Inference.jl
index f38220201..7a12f4877 100644
--- a/src/mcmc/Inference.jl
+++ b/src/mcmc/Inference.jl
@@ -767,13 +767,13 @@ julia> [first(t.θ.x) for t in transitions] # extract samples for `x`
 [-1.704630494695469]
 ```
 """
-function transitions_from_chain(model::Turing.Model, chain::MCMCChains.Chains; kwargs...)
+function transitions_from_chain(model::DynamicPPL.Model, chain::MCMCChains.Chains; kwargs...)
     return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
 end
 
 function transitions_from_chain(
     rng::Random.AbstractRNG,
-    model::Turing.Model,
+    model::DynamicPPL.Model,
     chain::MCMCChains.Chains;
     sampler=DynamicPPL.SampleFromPrior(),
 )
diff --git a/test/mcmc/abstractmcmc.jl b/test/mcmc/abstractmcmc.jl
index 4de63aa94..50334fc51 100644
--- a/test/mcmc/abstractmcmc.jl
+++ b/test/mcmc/abstractmcmc.jl
@@ -18,7 +18,7 @@ using Test: @test, @test_throws, @testset
 using Turing
 using Turing.Inference: AdvancedHMC
 
-function initialize_nuts(model::Turing.Model)
+function initialize_nuts(model::DynamicPPL.Model)
     # Create a log-density function with an implementation of the
     # gradient so we ensure that we're using the same AD backend as in Turing.
     f = LogDensityProblemsAD.ADgradient(DynamicPPL.LogDensityFunction(model))
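
For context: `Turing.Model` is the `DynamicPPL.Model` type made available through Turing, so switching the annotations to `DynamicPPL.Model` should be a naming cleanup rather than a behavioral change. Below is a minimal sketch of how one might sanity-check that equivalence, assuming a standard Turing/DynamicPPL setup; the `demo` model is purely illustrative and not part of this diff:

```julia
using Turing
using DynamicPPL

# Both names should resolve to the same type, so methods annotated with
# `DynamicPPL.Model` still accept models built with Turing's `@model`.
@assert Turing.Model === DynamicPPL.Model

# Hypothetical model, for illustration only.
@model function demo()
    x ~ Normal()
    return x
end

m = demo()
@assert m isa DynamicPPL.Model  # dispatches to the re-annotated methods above
```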