Skip to content
Snippets Groups Projects
Commit b277b235 authored by Gregory Ashton's avatar Gregory Ashton
Browse files

Move from emcee to ptemcee

Previously, we used the emcee.PTSampler, but this has been removed from
the master branch of emcee (see discussion here
https://github.com/dfm/emcee/issues/236) and a fork, ptemcee has been
developed. Testing shows this is equivalent (after some changes in the
interface) and perhaps better as it contains the adaptive temperature
ladders developed by Will Vousden (the maintainer of ptemcee)
parent 0428e2aa
Branches
Tags
No related merge requests found
...@@ -46,7 +46,7 @@ $ git clone https://gitlab.aei.uni-hannover.de/GregAshton/PyFstat.git ...@@ -46,7 +46,7 @@ $ git clone https://gitlab.aei.uni-hannover.de/GregAshton/PyFstat.git
* [numpy](http://www.numpy.org/) * [numpy](http://www.numpy.org/)
* [matplotlib](http://matplotlib.org/) >= 1.4 * [matplotlib](http://matplotlib.org/) >= 1.4
* [scipy](https://www.scipy.org/) * [scipy](https://www.scipy.org/)
* [emcee](http://dan.iel.fm/emcee/current/) * [ptemcee](https://github.com/willvousden/ptemcee)
* [corner](https://pypi.python.org/pypi/corner/) * [corner](https://pypi.python.org/pypi/corner/)
* [dill](https://pypi.python.org/pypi/dill) * [dill](https://pypi.python.org/pypi/dill)
* [peakutils](https://pypi.python.org/pypi/PeakUtils) * [peakutils](https://pypi.python.org/pypi/PeakUtils)
......
...@@ -11,7 +11,7 @@ import subprocess ...@@ -11,7 +11,7 @@ import subprocess
import numpy as np import numpy as np
import matplotlib import matplotlib
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import emcee from ptemcee import Sampler as PTSampler
import corner import corner
import dill as pickle import dill as pickle
...@@ -54,7 +54,7 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -54,7 +54,7 @@ class MCMCSearch(core.BaseSearchClass):
log10beta_min float < 0, optional log10beta_min float < 0, optional
The log_10(beta) value, if given the set of betas passed to PTSampler The log_10(beta) value, if given the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to emcee). in descending order to ptemcee).
theta_initial: dict, array, optional theta_initial: dict, array, optional
A dictionary of distributions from which to draw the A dictionary of distributions from which to draw the
initial walker positions initial walker positions
...@@ -396,14 +396,9 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -396,14 +396,9 @@ class MCMCSearch(core.BaseSearchClass):
self.tswap_acceptance_fraction = sampler.tswap_acceptance_fraction self.tswap_acceptance_fraction = sampler.tswap_acceptance_fraction
logging.info("Tswap acceptance fraction: {}" logging.info("Tswap acceptance fraction: {}"
.format(sampler.tswap_acceptance_fraction)) .format(sampler.tswap_acceptance_fraction))
try: self.autocorr_time = sampler.get_autocorr_time(window=window)
self.autocorr_time = sampler.get_autocorr_time(c=4)
logging.info("Autocorrelation length: {}".format( logging.info("Autocorrelation length: {}".format(
self.autocorr_time)) self.autocorr_time))
except emcee.autocorr.AutocorrError as e:
self.autocorr_time = np.nan
logging.warning(
'Autocorrelation calculation failed with message {}'.format(e))
return sampler return sampler
...@@ -430,7 +425,8 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -430,7 +425,8 @@ class MCMCSearch(core.BaseSearchClass):
logging.info('Estimated run-time = {} s = {:1.0f}:{:1.0f} m'.format( logging.info('Estimated run-time = {} s = {:1.0f}:{:1.0f} m'.format(
a+b, *divmod(a+b, 60))) a+b, *divmod(a+b, 60)))
def run(self, proposal_scale_factor=2, create_plots=True, c=5, **kwargs): def run(self, proposal_scale_factor=2, create_plots=True, window=50,
**kwargs):
""" Run the MCMC simulatation """ Run the MCMC simulatation
Parameters Parameters
...@@ -442,17 +438,17 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -442,17 +438,17 @@ class MCMCSearch(core.BaseSearchClass):
it by increasing the a parameter [Foreman-Mackey (2013)]. it by increasing the a parameter [Foreman-Mackey (2013)].
create_plots: bool create_plots: bool
If true, save trace plots of the walkers If true, save trace plots of the walkers
c: int window: int
The minimum number of autocorrelation times needed to trust the The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see result when estimating the autocorrelation time (see
emcee.autocorr.integrated_time for further details). Default is 5 ptemcee.Sampler.get_autocorr_time for further details).
**kwargs: **kwargs:
Passed to _plot_walkers to control the figures Passed to _plot_walkers to control the figures
Returns Returns
------- -------
sampler: emcee.ptsampler.PTSampler sampler: ptemcee.Sampler
The emcee ptsampler object The ptemcee ptsampler object
""" """
...@@ -470,8 +466,9 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -470,8 +466,9 @@ class MCMCSearch(core.BaseSearchClass):
self._initiate_search_object() self._initiate_search_object()
self._estimate_run_time() self._estimate_run_time()
sampler = emcee.PTSampler( sampler = PTSampler(
self.ntemps, self.nwalkers, self.ndim, self.logl, self.logp, ntemps=self.ntemps, nwalkers=self.nwalkers, dim=self.ndim,
logl=self.logl, logp=self.logp,
logpargs=(self.theta_prior, self.theta_keys, self.search), logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,), betas=self.betas, a=proposal_scale_factor) loglargs=(self.search,), betas=self.betas, a=proposal_scale_factor)
...@@ -484,7 +481,7 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -484,7 +481,7 @@ class MCMCSearch(core.BaseSearchClass):
for j, n in enumerate(self.nsteps[:-2]): for j, n in enumerate(self.nsteps[:-2]):
logging.info('Running {}/{} initialisation with {} steps'.format( logging.info('Running {}/{} initialisation with {} steps'.format(
j, ninit_steps, n)) j, ninit_steps, n))
sampler = self._run_sampler(sampler, p0, nburn=n) sampler = self._run_sampler(sampler, p0, nburn=n, window=window)
if create_plots: if create_plots:
fig, axes = self._plot_walkers(sampler, fig, axes = self._plot_walkers(sampler,
symbols=self.theta_symbols, symbols=self.theta_symbols,
...@@ -514,9 +511,9 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -514,9 +511,9 @@ class MCMCSearch(core.BaseSearchClass):
) )
samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim)) samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = sampler.lnprobability[0, :, nburn:].reshape((-1)) lnprobs = sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = sampler.lnlikelihood[0, :, nburn:].reshape((-1)) lnlikes = sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = sampler.lnlikelihood[:, :, nburn:] all_lnlikelihood = sampler.loglikelihood[:, :, nburn:]
self.samples = samples self.samples = samples
self.lnprobs = lnprobs self.lnprobs = lnprobs
self.lnlikes = lnlikes self.lnlikes = lnlikes
...@@ -1053,7 +1050,7 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -1053,7 +1050,7 @@ class MCMCSearch(core.BaseSearchClass):
if len(axes) == ndim: if len(axes) == ndim:
axes.append(fig.add_subplot(ndim+1, 1, ndim+1)) axes.append(fig.add_subplot(ndim+1, 1, ndim+1))
lnl = sampler.lnlikelihood[temp, :, :] lnl = sampler.loglikelihood[temp, :, :]
if burnin_idx and add_det_stat_burnin: if burnin_idx and add_det_stat_burnin:
burn_in_vals = lnl[:, :burnin_idx].flatten() burn_in_vals = lnl[:, :burnin_idx].flatten()
try: try:
...@@ -1135,8 +1132,8 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -1135,8 +1132,8 @@ class MCMCSearch(core.BaseSearchClass):
""" """
temp_idx = 0 temp_idx = 0
pF = sampler.chain[temp_idx, :, :, :] pF = sampler.chain[temp_idx, :, :, :]
lnl = sampler.lnlikelihood[temp_idx, :, :] lnl = sampler.loglikelihood[temp_idx, :, :]
lnp = sampler.lnprobability[temp_idx, :, :] lnp = sampler.logprobability[temp_idx, :, :]
# General warnings about the state of lnp # General warnings about the state of lnp
if np.any(np.isnan(lnp)): if np.any(np.isnan(lnp)):
...@@ -2027,7 +2024,7 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch): ...@@ -2027,7 +2024,7 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
def run(self, run_setup=None, proposal_scale_factor=2, NstarMax=10, def run(self, run_setup=None, proposal_scale_factor=2, NstarMax=10,
Nsegs0=None, create_plots=True, log_table=True, gen_tex_table=True, Nsegs0=None, create_plots=True, log_table=True, gen_tex_table=True,
fig=None, axes=None, return_fig=False, **kwargs): fig=None, axes=None, return_fig=False, window=50, **kwargs):
""" Run the follow-up with the given run_setup """ Run the follow-up with the given run_setup
Parameters Parameters
...@@ -2040,10 +2037,10 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch): ...@@ -2040,10 +2037,10 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
it by increasing the a parameter [Foreman-Mackey (2013)]. it by increasing the a parameter [Foreman-Mackey (2013)].
create_plots: bool create_plots: bool
If true, save trace plots of the walkers If true, save trace plots of the walkers
c: int window: int
The minimum number of autocorrelation times needed to trust the The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see result when estimating the autocorrelation time (see
emcee.autocorr.integrated_time for further details). Default is 5 ptemcee.Sampler.get_autocorr_time for further details).
**kwargs: **kwargs:
Passed to _plot_walkers to control the figures Passed to _plot_walkers to control the figures
...@@ -2085,8 +2082,9 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch): ...@@ -2085,8 +2082,9 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
self.search.nsegs = nseg self.search.nsegs = nseg
self.update_search_object() self.update_search_object()
self.search.init_semicoherent_parameters() self.search.init_semicoherent_parameters()
sampler = emcee.PTSampler( sampler = PTSampler(
self.ntemps, self.nwalkers, self.ndim, self.logl, self.logp, ntemps=self.ntemps, nwalkers=self.nwalkers, dim=self.ndim,
logl=self.logl, logp=self.logp,
logpargs=(self.theta_prior, self.theta_keys, self.search), logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,), betas=self.betas, loglargs=(self.search,), betas=self.betas,
a=proposal_scale_factor) a=proposal_scale_factor)
...@@ -2095,9 +2093,10 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch): ...@@ -2095,9 +2093,10 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
logging.info(('Running {}/{} with {} steps and {} nsegs ' logging.info(('Running {}/{} with {} steps and {} nsegs '
'(Tcoh={:1.2f} days)').format( '(Tcoh={:1.2f} days)').format(
j+1, len(run_setup), (nburn, nprod), nseg, Tcoh)) j+1, len(run_setup), (nburn, nprod), nseg, Tcoh))
sampler = self._run_sampler(sampler, p0, nburn=nburn, nprod=nprod) sampler = self._run_sampler(sampler, p0, nburn=nburn, nprod=nprod,
window=window)
logging.info('Max detection statistic of run was {}'.format( logging.info('Max detection statistic of run was {}'.format(
np.max(sampler.lnlikelihood))) np.max(sampler.loglikelihood)))
if create_plots: if create_plots:
fig, axes = self._plot_walkers( fig, axes = self._plot_walkers(
...@@ -2109,9 +2108,9 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch): ...@@ -2109,9 +2108,9 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
nsteps_total += nburn+nprod nsteps_total += nburn+nprod
samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim)) samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = sampler.lnprobability[0, :, nburn:].reshape((-1)) lnprobs = sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = sampler.lnlikelihood[0, :, nburn:].reshape((-1)) lnlikes = sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = sampler.lnlikelihood all_lnlikelihood = sampler.loglikelihood
self.samples = samples self.samples = samples
self.lnprobs = lnprobs self.lnprobs = lnprobs
self.lnlikes = lnlikes self.lnlikes = lnlikes
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment