Commit 0428e2aa authored by Gregory Ashton's avatar Gregory Ashton
Browse files

Comment out all convergence testing methods

 This removes the convergence testing ideas previously implemented
 (currently just commented out, but to be fully removed later). These are
clearly not useful without further study, which in itself would be a
better time to develop an implementation.
parent 34df7108
...@@ -250,40 +250,6 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -250,40 +250,6 @@ class MCMCSearch(core.BaseSearchClass):
return p0 return p0
def setup_burnin_convergence_testing(
self, n=10, test_type='autocorr', windowed=False, **kwargs):
""" Set up convergence testing during the MCMC simulation
Parameters
----------
n: int
Number of steps after which to test convergence
test_type: str ['autocorr', 'GR']
If 'autocorr' use the exponential autocorrelation time (kwargs
passed to `get_autocorr_convergence`). If 'GR' use the Gelman-Rubin
statistic (kwargs passed to `get_GR_convergence`)
windowed: bool
If True, only calculate the convergence test in a window of length
`n`
**kwargs:
Passed to either `_test_autocorr_convergence()` or
`_test_GR_convergence()` depending on `test_type`.
"""
logging.info('Setting up convergence testing')
self.convergence_n = n
self.convergence_windowed = windowed
self.convergence_test_type = test_type
self.convergence_kwargs = kwargs
self.convergence_diagnostic = []
self.convergence_diagnosticx = []
if test_type in ['autocorr']:
self._get_convergence_test = self._test_autocorr_convergence
elif test_type in ['GR']:
self._get_convergence_test = self._test_GR_convergence
else:
raise ValueError('test_type {} not understood'.format(test_type))
def setup_initialisation(self, nburn0, scatter_val=1e-10): def setup_initialisation(self, nburn0, scatter_val=1e-10):
""" Add an initialisation step to the MCMC run """ Add an initialisation step to the MCMC run
...@@ -308,84 +274,116 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -308,84 +274,116 @@ class MCMCSearch(core.BaseSearchClass):
self.nsteps = [nburn0] + self.nsteps self.nsteps = [nburn0] + self.nsteps
self.scatter_val = scatter_val self.scatter_val = scatter_val
def _test_autocorr_convergence(self, i, sampler, test=True, n_cut=5): # def setup_burnin_convergence_testing(
try: # self, n=10, test_type='autocorr', windowed=False, **kwargs):
acors = np.zeros((self.ntemps, self.ndim)) # """ Set up convergence testing during the MCMC simulation
for temp in range(self.ntemps): #
if self.convergence_windowed: # Parameters
j = i-self.convergence_n # ----------
else: # n: int
j = 0 # Number of steps after which to test convergence
x = np.mean(sampler.chain[temp, :, j:i, :], axis=0) # test_type: str ['autocorr', 'GR']
acors[temp, :] = emcee.autocorr.exponential_time(x) # If 'autocorr' use the exponential autocorrelation time (kwargs
c = np.max(acors, axis=0) # passed to `get_autocorr_convergence`). If 'GR' use the Gelman-Rubin
except emcee.autocorr.AutocorrError: # statistic (kwargs passed to `get_GR_convergence`)
logging.info('Failed to calculate exponential autocorrelation') # windowed: bool
c = np.zeros(self.ndim) + np.nan # If True, only calculate the convergence test in a window of length
except AttributeError: # `n`
logging.info('Unable to calculate exponential autocorrelation') # **kwargs:
c = np.zeros(self.ndim) + np.nan # Passed to either `_test_autocorr_convergence()` or
# `_test_GR_convergence()` depending on `test_type`.
self.convergence_diagnosticx.append(i - self.convergence_n/2.) #
self.convergence_diagnostic.append(list(c)) # """
# logging.info('Setting up convergence testing')
if test: # self.convergence_n = n
return i > n_cut * np.max(c) # self.convergence_windowed = windowed
# self.convergence_test_type = test_type
def _test_GR_convergence(self, i, sampler, test=True, R=1.1): # self.convergence_kwargs = kwargs
if self.convergence_windowed: # self.convergence_diagnostic = []
s = sampler.chain[0, :, i-self.convergence_n+1:i+1, :] # self.convergence_diagnosticx = []
else: # if test_type in ['autocorr']:
s = sampler.chain[0, :, :i+1, :] # self._get_convergence_test = self._test_autocorr_convergence
N = float(self.convergence_n) # elif test_type in ['GR']:
M = float(self.nwalkers) # self._get_convergence_test = self._test_GR_convergence
W = np.mean(np.var(s, axis=1), axis=0) # else:
per_walker_mean = np.mean(s, axis=1) # raise ValueError('test_type {} not understood'.format(test_type))
mean = np.mean(per_walker_mean, axis=0) #
B = N / (M-1.) * np.sum((per_walker_mean-mean)**2, axis=0) #
Vhat = (N-1)/N * W + (M+1)/(M*N) * B # def _test_autocorr_convergence(self, i, sampler, test=True, n_cut=5):
c = np.sqrt(Vhat/W) # try:
self.convergence_diagnostic.append(c) # acors = np.zeros((self.ntemps, self.ndim))
self.convergence_diagnosticx.append(i - self.convergence_n/2.) # for temp in range(self.ntemps):
# if self.convergence_windowed:
if test and np.max(c) < R: # j = i-self.convergence_n
return True # else:
else: # j = 0
return False # x = np.mean(sampler.chain[temp, :, j:i, :], axis=0)
# acors[temp, :] = emcee.autocorr.exponential_time(x)
def _test_convergence(self, i, sampler, **kwargs): # c = np.max(acors, axis=0)
if np.mod(i+1, self.convergence_n) == 0: # except emcee.autocorr.AutocorrError:
return self._get_convergence_test(i, sampler, **kwargs) # logging.info('Failed to calculate exponential autocorrelation')
else: # c = np.zeros(self.ndim) + np.nan
return False # except AttributeError:
# logging.info('Unable to calculate exponential autocorrelation')
def _run_sampler_with_conv_test(self, sampler, p0, nprod=0, nburn=0): # c = np.zeros(self.ndim) + np.nan
logging.info('Running {} burn-in steps with convergence testing' #
.format(nburn)) # self.convergence_diagnosticx.append(i - self.convergence_n/2.)
iterator = tqdm(sampler.sample(p0, iterations=nburn), total=nburn) # self.convergence_diagnostic.append(list(c))
for i, output in enumerate(iterator): #
if self._test_convergence(i, sampler, test=True, # if test:
**self.convergence_kwargs): # return i > n_cut * np.max(c)
logging.info( #
'Converged at {} before max number {} of steps reached' # def _test_GR_convergence(self, i, sampler, test=True, R=1.1):
.format(i, nburn)) # if self.convergence_windowed:
self.convergence_idx = i # s = sampler.chain[0, :, i-self.convergence_n+1:i+1, :]
break # else:
iterator.close() # s = sampler.chain[0, :, :i+1, :]
logging.info('Running {} production steps'.format(nprod)) # N = float(self.convergence_n)
j = nburn # M = float(self.nwalkers)
iterator = tqdm(sampler.sample(output[0], iterations=nprod), # W = np.mean(np.var(s, axis=1), axis=0)
total=nprod) # per_walker_mean = np.mean(s, axis=1)
for result in iterator: # mean = np.mean(per_walker_mean, axis=0)
self._test_convergence(j, sampler, test=False, # B = N / (M-1.) * np.sum((per_walker_mean-mean)**2, axis=0)
**self.convergence_kwargs) # Vhat = (N-1)/N * W + (M+1)/(M*N) * B
j += 1 # c = np.sqrt(Vhat/W)
return sampler # self.convergence_diagnostic.append(c)
# self.convergence_diagnosticx.append(i - self.convergence_n/2.)
def _run_sampler(self, sampler, p0, nprod=0, nburn=0): #
if hasattr(self, 'convergence_n'): # if test and np.max(c) < R:
self._run_sampler_with_conv_test(sampler, p0, nprod, nburn) # return True
else: # else:
# return False
#
# def _test_convergence(self, i, sampler, **kwargs):
# if np.mod(i+1, self.convergence_n) == 0:
# return self._get_convergence_test(i, sampler, **kwargs)
# else:
# return False
#
# def _run_sampler_with_conv_test(self, sampler, p0, nprod=0, nburn=0):
# logging.info('Running {} burn-in steps with convergence testing'
# .format(nburn))
# iterator = tqdm(sampler.sample(p0, iterations=nburn), total=nburn)
# for i, output in enumerate(iterator):
# if self._test_convergence(i, sampler, test=True,
# **self.convergence_kwargs):
# logging.info(
# 'Converged at {} before max number {} of steps reached'
# .format(i, nburn))
# self.convergence_idx = i
# break
# iterator.close()
# logging.info('Running {} production steps'.format(nprod))
# j = nburn
# iterator = tqdm(sampler.sample(output[0], iterations=nprod),
# total=nprod)
# for result in iterator:
# self._test_convergence(j, sampler, test=False,
# **self.convergence_kwargs)
# j += 1
# return sampler
def _run_sampler(self, sampler, p0, nprod=0, nburn=0, window=50):
for result in tqdm(sampler.sample(p0, iterations=nburn+nprod), for result in tqdm(sampler.sample(p0, iterations=nburn+nprod),
total=nburn+nprod): total=nburn+nprod):
pass pass
...@@ -994,20 +992,20 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -994,20 +992,20 @@ class MCMCSearch(core.BaseSearchClass):
idxs = np.arange(chain.shape[1]) idxs = np.arange(chain.shape[1])
burnin_idx = chain.shape[1] - nprod burnin_idx = chain.shape[1] - nprod
if hasattr(self, 'convergence_idx'): #if hasattr(self, 'convergence_idx'):
convergence_idx = self.convergence_idx # last_idx = self.convergence_idx
else: #else:
convergence_idx = burnin_idx last_idx = burnin_idx
if ndim > 1: if ndim > 1:
for i in range(ndim): for i in range(ndim):
axes[i].ticklabel_format(useOffset=False, axis='y') axes[i].ticklabel_format(useOffset=False, axis='y')
cs = chain[:, :, i].T cs = chain[:, :, i].T
if burnin_idx > 0: if burnin_idx > 0:
axes[i].plot(xoffset+idxs[:convergence_idx+1], axes[i].plot(xoffset+idxs[:last_idx+1],
cs[:convergence_idx+1]-subtractions[i], cs[:last_idx+1]-subtractions[i],
color="C3", alpha=alpha, color="C3", alpha=alpha,
lw=lw) lw=lw)
axes[i].axvline(xoffset+convergence_idx, axes[i].axvline(xoffset+last_idx,
color='k', ls='--', lw=0.5) color='k', ls='--', lw=0.5)
axes[i].plot(xoffset+idxs[burnin_idx:], axes[i].plot(xoffset+idxs[burnin_idx:],
cs[burnin_idx:]-subtractions[i], cs[burnin_idx:]-subtractions[i],
...@@ -1022,22 +1020,22 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -1022,22 +1020,22 @@ class MCMCSearch(core.BaseSearchClass):
symbols[i]+'$-$'+symbols[i]+'$^\mathrm{s}$', symbols[i]+'$-$'+symbols[i]+'$^\mathrm{s}$',
labelpad=labelpad) labelpad=labelpad)
if hasattr(self, 'convergence_diagnostic'): # if hasattr(self, 'convergence_diagnostic'):
ax = axes[i].twinx() # ax = axes[i].twinx()
axes[i].set_zorder(ax.get_zorder()+1) # axes[i].set_zorder(ax.get_zorder()+1)
axes[i].patch.set_visible(False) # axes[i].patch.set_visible(False)
c_x = np.array(self.convergence_diagnosticx) # c_x = np.array(self.convergence_diagnosticx)
c_y = np.array(self.convergence_diagnostic) # c_y = np.array(self.convergence_diagnostic)
break_idx = np.argmin(np.abs(c_x - burnin_idx)) # break_idx = np.argmin(np.abs(c_x - burnin_idx))
ax.plot(c_x[:break_idx], c_y[:break_idx, i], '-C0', # ax.plot(c_x[:break_idx], c_y[:break_idx, i], '-C0',
zorder=-10) # zorder=-10)
ax.plot(c_x[break_idx:], c_y[break_idx:, i], '-C0', # ax.plot(c_x[break_idx:], c_y[break_idx:, i], '-C0',
zorder=-10) # zorder=-10)
if self.convergence_test_type == 'autocorr': # if self.convergence_test_type == 'autocorr':
ax.set_ylabel(r'$\tau_\mathrm{exp}$') # ax.set_ylabel(r'$\tau_\mathrm{exp}$')
elif self.convergence_test_type == 'GR': # elif self.convergence_test_type == 'GR':
ax.set_ylabel('PSRF') # ax.set_ylabel('PSRF')
ax.ticklabel_format(useOffset=False) # ax.ticklabel_format(useOffset=False)
else: else:
axes[0].ticklabel_format(useOffset=False, axis='y') axes[0].ticklabel_format(useOffset=False, axis='y')
cs = chain[:, :, temp].T cs = chain[:, :, temp].T
......
...@@ -326,7 +326,6 @@ class TestMCMCSearch(Test): ...@@ -326,7 +326,6 @@ class TestMCMCSearch(Test):
sftfilepattern='{}/*{}*sft'.format(Writer.outdir, Writer.label), sftfilepattern='{}/*{}*sft'.format(Writer.outdir, Writer.label),
minStartTime=minStartTime, maxStartTime=maxStartTime, minStartTime=minStartTime, maxStartTime=maxStartTime,
nsteps=[100, 100], nwalkers=100, ntemps=2, log10beta_min=-1) nsteps=[100, 100], nwalkers=100, ntemps=2, log10beta_min=-1)
search.setup_burnin_convergence_testing()
search.run(create_plots=False) search.run(create_plots=False)
_, FS = search.get_max_twoF() _, FS = search.get_max_twoF()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment