diff --git a/Paper/AllSkyMC/AllSkyMC_repeat.sh b/Paper/AllSkyMC/AllSkyMC_repeat.sh
deleted file mode 100755
index badf6b84cc8d83c1f1d544ea548cb965a855135c..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/AllSkyMC_repeat.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<10;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-template-counting --no-interactive
-done
-cp /local/user/gregory.ashton/MCResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/AllSkyMC/generate_data.py b/Paper/AllSkyMC/generate_data.py
deleted file mode 100644
index e5f93237d94cd692882007b31dbf59d093ae1af9..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/generate_data.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'run_{}'.format(ID)
-data_label = '{}_data'.format(label)
-results_file_name = '{}/MCResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = -1e-10
-F2 = 0
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-DeltaAlpha = 0.02
-DeltaDelta = 0.02
-
-depths = np.linspace(100, 400, 9)
-depths = [118.75, 156.25]
-
-nsteps = 50
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 11, False),
-             ((nsteps, 0), 6, False),
-             ((nsteps, 0), 3, False),
-             ((nsteps, nsteps), 1, False)]
-
-
-for depth in depths:
-    h0 = sqrtSX / float(depth)
-    F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-    F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-    Alpha_center = np.random.uniform(DeltaAlpha, 2*np.pi-DeltaAlpha)
-    Delta_center = np.arccos(2*np.random.uniform(0, 1)-1)-np.pi/2
-    Alpha = Alpha_center + np.random.uniform(-0.5, 0.5)*DeltaAlpha
-    Delta = Delta_center + np.random.uniform(-0.5, 0.5)*DeltaDelta
-    psi = np.random.uniform(-np.pi/4, np.pi/4)
-    phi = np.random.uniform(0, 2*np.pi)
-    cosi = np.random.uniform(-1, 1)
-
-    data = pyfstat.Writer(
-        label=data_label, outdir=outdir, tref=tref,
-        tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-        Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-        detector='H1,L1')
-    data.make_data()
-
-    startTime = time.time()
-    theta_prior = {'F0': {'type': 'unif',
-                          'lower': F0_center-DeltaF0,
-                          'upper': F0_center+DeltaF0},
-                   'F1': {'type': 'unif',
-                          'lower': F1_center-DeltaF1,
-                          'upper': F1_center+DeltaF1},
-                   'F2': F2,
-                   'Alpha': {'type': 'unif',
-                             'lower': Alpha_center-DeltaAlpha,
-                             'upper': Alpha_center+DeltaAlpha},
-                   'Delta': {'type': 'unif',
-                             'lower': Delta_center-DeltaDelta,
-                             'upper': Delta_center+DeltaDelta},
-                   }
-
-    ntemps = 2
-    log10temperature_min = -1
-    nwalkers = 100
-
-    mcmc = pyfstat.MCMCFollowUpSearch(
-        label=label, outdir=outdir,
-        sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-        theta_prior=theta_prior,
-        tref=tref, minStartTime=tstart, maxStartTime=tend,
-        nwalkers=nwalkers, ntemps=ntemps,
-        log10temperature_min=log10temperature_min)
-    mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
-             gen_tex_table=False)
-    d, maxtwoF = mcmc.get_max_twoF()
-    dF0 = F0 - d['F0']
-    dF1 = F1 - d['F1']
-    runTime = time.time() - startTime
-    with open(results_file_name, 'a') as f:
-        f.write('{} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {}\n'
-                .format(depth, h0, dF0, dF1, maxtwoF, runTime))
-    os.system('rm {}/*{}*'.format(outdir, label))
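Note on the deleted script above: the injection amplitude is parametrized by the sensitivity depth, and the uniform prior widths follow from the fixed per-dimension volumes VF0 = VF1 = 100. With T = Tspan, the assignments above implement

$$
\mathcal{D} = \frac{\sqrt{S_{\rm X}}}{h_0}, \qquad
\Delta F_0 = \mathcal{V}_{F_0}\,\frac{\sqrt{3}}{\pi T}, \qquad
\Delta F_1 = \mathcal{V}_{F_1}\,\frac{\sqrt{45/4}}{\pi T^{2}}.
$$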
diff --git a/Paper/AllSkyMC/generate_failures.py b/Paper/AllSkyMC/generate_failures.py
deleted file mode 100644
index 68aab6db0d756a7b439eaefb349809274fa7898b..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/generate_failures.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import time
-
-outdir = 'data'
-
-label = 'run_failures'
-data_label = '{}_data'.format(label)
-results_file_name = '{}/MCResults_failures.txt'.format(outdir)
-
-# Properties of the GW data
-sqrtSX = 2e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = 1e-10
-F2 = 0
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-DeltaAlpha = 0.02
-DeltaDelta = 0.02
-
-depths = [140]
-
-nsteps = 50
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 11, False),
-             ((nsteps, 0), 6, False),
-             ((nsteps, 0), 3, False),
-             ((nsteps, nsteps), 1, False)]
-
-
-for depth in depths:
-    h0 = sqrtSX / float(depth)
-    F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-    F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-    Alpha_center = np.random.uniform(0, 2*np.pi)
-    Delta_center = np.arccos(2*np.random.uniform(0, 1)-1)-np.pi/2
-    Alpha = Alpha_center + np.random.uniform(-0.5, 0.5)*DeltaAlpha
-    Delta = Delta_center + np.random.uniform(-0.5, 0.5)*DeltaDelta
-    psi = np.random.uniform(-np.pi/4, np.pi/4)
-    phi = np.random.uniform(0, 2*np.pi)
-    cosi = np.random.uniform(-1, 1)
-
-    data = pyfstat.Writer(
-        label=data_label, outdir=outdir, tref=tref,
-        tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-        Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-        detector='H1,L1')
-    data.make_data()
-    predicted_twoF = data.predict_fstat()
-
-    startTime = time.time()
-    theta_prior = {'F0': {'type': 'unif',
-                          'lower': F0_center-DeltaF0,
-                          'upper': F0_center+DeltaF0},
-                   'F1': {'type': 'unif',
-                          'lower': F1_center-DeltaF1,
-                          'upper': F1_center+DeltaF1},
-                   'F2': F2,
-                   'Alpha': {'type': 'unif',
-                             'lower': Alpha_center-DeltaAlpha,
-                             'upper': Alpha_center+DeltaAlpha},
-                   'Delta': {'type': 'unif',
-                             'lower': Delta_center-DeltaDelta,
-                             'upper': Delta_center+DeltaDelta},
-                   }
-
-    ntemps = 2
-    log10temperature_min = -1
-    nwalkers = 100
-
-    mcmc = pyfstat.MCMCFollowUpSearch(
-        label=label, outdir=outdir,
-        sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-        theta_prior=theta_prior,
-        tref=tref, minStartTime=tstart, maxStartTime=tend,
-        nwalkers=nwalkers, ntemps=ntemps,
-        log10temperature_min=log10temperature_min)
-    mcmc.run(run_setup=run_setup, create_plots=True, log_table=False,
-             gen_tex_table=False)
-    d, maxtwoF = mcmc.get_max_twoF()
-    print 'MaxtwoF = {}'.format(maxtwoF)
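The sky-position draw shared by both deleted scripts above deserves a gloss: Delta = arccos(2u - 1) - pi/2 with u uniform in [0, 1] gives a declination density proportional to cos(Delta), i.e. injection directions uniform over the celestial sphere rather than clustered at the poles. A minimal self-contained check (illustrative snippet, not part of the deleted code):

```python
import numpy as np

rng = np.random.default_rng(42)
n = 100000

# Same construction as the generate_data.py / generate_failures.py draws:
Alpha = rng.uniform(0, 2*np.pi, n)                       # right ascension
Delta = np.arccos(2*rng.uniform(0, 1, n) - 1) - np.pi/2  # declination

# Uniform on the sphere <=> z = sin(Delta) is uniform on [-1, 1]
z = np.sin(Delta)
print(np.histogram(z, bins=10, range=(-1, 1))[0])  # roughly equal counts
```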
diff --git a/Paper/AllSkyMC/generate_table.py b/Paper/AllSkyMC/generate_table.py
deleted file mode 100644
index 01ddfe5e1591981c8c9452a627875628b790107b..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/generate_table.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import pyfstat
-import numpy as np
-
-outdir = 'data'
-
-label = 'allsky_setup'
-data_label = '{}_data'.format(label)
-
-# Properties of the GW data
-sqrtSX = 2e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = 1e-10
-F2 = 0
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-DeltaAlpha = 0.02
-DeltaDelta = 0.02
-
-depth = 100
-
-nsteps = 50
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 11, False),
-             ((nsteps, 0), 6, False),
-             ((nsteps, 0), 3, False),
-             ((nsteps, nsteps), 1, False)]
-
-h0 = sqrtSX / float(depth)
-r = np.random.uniform(0, 1)
-theta = np.random.uniform(0, 2*np.pi)
-F0 = F0_center + 3*np.sqrt(r)*np.cos(theta)/(np.pi**2 * Tspan**2)
-F1 = F1_center + 45*np.sqrt(r)*np.sin(theta)/(4*np.pi**2 * Tspan**4)
-
-Alpha_center = 0
-Delta_center = 0
-Alpha = Alpha_center + np.random.uniform(-0.5, 0.5)*DeltaAlpha
-Delta = Delta_center + np.random.uniform(-0.5, 0.5)*DeltaDelta
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detector='H1,L1')
-data.make_data()
-predicted_twoF = data.predict_fstat()
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0_center-DeltaF0,
-                      'upper': F0_center+DeltaF0},
-               'F1': {'type': 'unif',
-                      'lower': F1_center-DeltaF1,
-                      'upper': F1_center+DeltaF1},
-               'F2': F2,
-               'Alpha': {'type': 'unif',
-                         'lower': Alpha_center-DeltaAlpha,
-                         'upper': Alpha_center+DeltaAlpha},
-               'Delta': {'type': 'unif',
-                         'lower': Delta_center-DeltaDelta,
-                         'upper': Delta_center+DeltaDelta},
-               }
-
-ntemps = 2
-log10temperature_min = -1
-nwalkers = 100
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend,
-    nwalkers=nwalkers, ntemps=ntemps, nsteps=[nsteps, nsteps],
-    log10temperature_min=log10temperature_min)
-mcmc.run(Nsegs0=20, R=10)
-#mcmc.run(run_setup)
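The F0/F1 draw in generate_table.py places the injection uniformly over an ellipse in frequency/spin-down space using the standard sqrt(r) trick: mapping uniform (r, theta) to (sqrt(r) cos(theta), sqrt(r) sin(theta)) gives a constant area density over the unit disk. A short sketch of the idea in isolation (variable names are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
n = 5000

r = rng.uniform(0, 1, n)
theta = rng.uniform(0, 2*np.pi, n)

# sqrt(r) compensates the linear growth of circumference with radius;
# using r directly would overweight the centre of the disk.
x = np.sqrt(r)*np.cos(theta)
y = np.sqrt(r)*np.sin(theta)

# Fraction inside radius 0.5 should approach the area ratio 0.25
print(np.mean(x**2 + y**2 < 0.25))
```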
diff --git a/Paper/AllSkyMC/plot_data.py b/Paper/AllSkyMC/plot_data.py
deleted file mode 100644
index ac7fd8670c2bca30f27091fa41b689bde283dfc7..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/plot_data.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-
-def Recovery(Tspan, Depth, twoFstar=60, detectors='H1,L1'):
-    numDetectors = len(detectors.split(','))
-    cmd = ("DetectionProbabilityStackSlide('Nseg', 1, 'Tdata', {},"
-           "'misHist', createDeltaHist(0), 'avg2Fth', {}, 'detectors', '{}',"
-           "'Depth', {})"
-           ).format(numDetectors*Tspan, twoFstar, detectors, Depth)
-    return octave.eval(cmd, verbose=False)
-
-
-def binomialConfidenceInterval(N, K, confidence=0.95):
-    cmd = '[fLow, fUpper] = binomialConfidenceInterval({}, {}, {})'.format(
-        N, K, confidence)
-    [l, u] =  octave.eval(cmd, verbose=False, return_both=True)[0].split('\n')
-    return float(l.split('=')[1]), float(u.split('=')[1])
-
-df_list = []
-for fn in filenames:
-    df = pd.read_csv(
-        fn, sep=' ', names=['depth', 'h0', 'dF0', 'dF1', 'twoF', 'runTime'])
-    df['CLUSTER_ID'] = fn.split('_')[1]
-    df_list.append(df)
-df = pd.concat(df_list)
-
-twoFstar = 70
-depths = np.unique(df.depth.values)
-recovery_fraction = []
-recovery_fraction_CI = []
-for d in depths:
-    twoFs = df[df.depth == d].twoF.values
-    N = len(twoFs)
-    K = np.sum(twoFs > twoFstar)
-    print d, N, K
-    recovery_fraction.append(K/float(N))
-    [fLower, fUpper] = binomialConfidenceInterval(N, K)
-    recovery_fraction_CI.append([fLower, fUpper])
-
-yerr = np.abs(recovery_fraction - np.array(recovery_fraction_CI).T)
-fig, ax = plt.subplots()
-ax.errorbar(depths, recovery_fraction, yerr=yerr, fmt='sr', marker='s', ms=2,
-            capsize=1, capthick=0.5, elinewidth=0.5,
-            label='Monte-Carlo result', zorder=10)
-
-fname = 'analytic_data_{}.txt'.format(twoFstar)
-if os.path.isfile(fname):
-    depths_smooth, recovery_analytic = np.loadtxt(fname)
-else:
-    depths_smooth = np.linspace(10, 550, 100)
-    recovery_analytic = []
-    for d in tqdm(depths_smooth):
-        recovery_analytic.append(Recovery(Tspan, d, twoFstar))
-    np.savetxt(fname, np.array([depths_smooth, recovery_analytic]))
-depths_smooth = np.concatenate(([0], depths_smooth))
-recovery_analytic = np.concatenate(([1], recovery_analytic))
-ax.plot(depths_smooth, recovery_analytic, '-k', label='Theoretical maximum')
-
-
-ax.set_ylim(0, 1.05)
-ax.set_xlabel(r'Sensitivity depth', size=10)
-ax.set_ylabel(r'Recovered fraction', size=10)
-ax.legend(loc=1, frameon=False)
-
-fig.tight_layout()
-fig.savefig('allsky_recovery.png')
-
-
-total_number_steps = 6 * 50.
-fig, ax = plt.subplots()
-ax.hist(df.runTime/total_number_steps, bins=50)
-ax.set_xlabel('run-time per step [s]')
-fig.tight_layout()
-fig.savefig('runTimeHist.png')
-
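A fragility worth flagging in the deleted plot_data.py: binomialConfidenceInterval shells out to Octave and parses the printed assignments by splitting on newlines and '='. For readers without the Octave dependency, a Clopper-Pearson interval can be computed directly with scipy; this is a hedged stand-in that may differ in convention from the Octave routine actually used:

```python
from scipy.stats import beta

def binomial_confidence_interval(N, K, confidence=0.95):
    """Clopper-Pearson (exact) interval for K successes in N trials."""
    alpha = 1.0 - confidence
    lower = beta.ppf(alpha/2.0, K, N - K + 1) if K > 0 else 0.0
    upper = beta.ppf(1.0 - alpha/2.0, K + 1, N - K) if K < N else 1.0
    return lower, upper

print(binomial_confidence_interval(100, 95))  # roughly (0.887, 0.984)
```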
diff --git a/Paper/AllSkyMC/runTimeHist.png b/Paper/AllSkyMC/runTimeHist.png
deleted file mode 100644
index dd96cf963d456bf02beac3c1455eaf1f067ae9a0..0000000000000000000000000000000000000000
Binary files a/Paper/AllSkyMC/runTimeHist.png and /dev/null differ
diff --git a/Paper/AllSkyMC/submitfile b/Paper/AllSkyMC/submitfile
deleted file mode 100644
index 4be5e0c5217bd26e13796bafe6566b60c94ba99e..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMC/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable=AllSkyMC_repeat.sh
-Arguments=$(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group = ligo.dev.o2.cw.explore.test
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 1
diff --git a/Paper/AllSkyMCNoiseOnly/AllSkyMCNoiseOnly_repeat.sh b/Paper/AllSkyMCNoiseOnly/AllSkyMCNoiseOnly_repeat.sh
deleted file mode 100755
index ab5264cadeaf1a1a5b65202b21442e114eeeb538..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMCNoiseOnly/AllSkyMCNoiseOnly_repeat.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<90;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-template-counting --no-interactive
-done
-cp /local/user/gregory.ashton/NoiseOnlyMCResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/AllSkyMCNoiseOnly/generate_data.py b/Paper/AllSkyMCNoiseOnly/generate_data.py
deleted file mode 100644
index ec082f540b7d15a0d07787a1c07a811197392878..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMCNoiseOnly/generate_data.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'run_{}'.format(ID)
-data_label = '{}_data'.format(label)
-results_file_name = '{}/NoiseOnlyMCResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = -1e-10
-F2 = 0
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-DeltaAlpha = 0.02
-DeltaDelta = 0.02
-
-nsteps = 50
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 11, False),
-             ((nsteps, 0), 6, False),
-             ((nsteps, 0), 3, False),
-             ((nsteps, nsteps), 1, False)]
-
-
-h0 = 0
-F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-Alpha_center = np.random.uniform(DeltaAlpha, 2*np.pi-DeltaAlpha)
-Delta_center = np.arccos(2*np.random.uniform(0, 1)-1)-np.pi/2
-Alpha = Alpha_center + np.random.uniform(-0.5, 0.5)*DeltaAlpha
-Delta = Delta_center + np.random.uniform(-0.5, 0.5)*DeltaDelta
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detector='H1,L1')
-data.make_data()
-
-startTime = time.time()
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0_center-DeltaF0,
-                      'upper': F0_center+DeltaF0},
-               'F1': {'type': 'unif',
-                      'lower': F1_center-DeltaF1,
-                      'upper': F1_center+DeltaF1},
-               'F2': F2,
-               'Alpha': {'type': 'unif',
-                         'lower': Alpha_center-DeltaAlpha,
-                         'upper': Alpha_center+DeltaAlpha},
-               'Delta': {'type': 'unif',
-                         'lower': Delta_center-DeltaDelta,
-                         'upper': Delta_center+DeltaDelta},
-               }
-
-ntemps = 2
-log10temperature_min = -1
-nwalkers = 100
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
-         gen_tex_table=False)
-d, maxtwoF = mcmc.get_max_twoF()
-dF0 = F0 - d['F0']
-dF1 = F1 - d['F1']
-runTime = time.time() - startTime
-with open(results_file_name, 'a') as f:
-    f.write('{:1.8e} {:1.8e} {:1.8e} {}\n'
-            .format(dF0, dF1, maxtwoF, runTime))
-os.system('rm {}/*{}*'.format(outdir, label))
diff --git a/Paper/AllSkyMCNoiseOnly/generate_table.py b/Paper/AllSkyMCNoiseOnly/generate_table.py
deleted file mode 100644
index e304f107748cf58b9c78625f9c1f816cb70bf8ac..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMCNoiseOnly/generate_table.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import pyfstat
-import numpy as np
-
-outdir = 'data'
-
-label = 'allsky_setup'
-data_label = '{}_data'.format(label)
-
-# Properties of the GW data
-sqrtSX = 2e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = 1e-10
-F2 = 0
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-DeltaAlpha = 0.05
-DeltaDelta = 0.05
-
-depth = 100
-
-nsteps = 50
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 11, False),
-             ((nsteps, 0), 6, False),
-             ((nsteps, 0), 3, False),
-             ((nsteps, nsteps), 1, False)]
-
-h0 = sqrtSX / float(depth)
-r = np.random.uniform(0, 1)
-theta = np.random.uniform(0, 2*np.pi)
-F0 = F0_center + 3*np.sqrt(r)*np.cos(theta)/(np.pi**2 * Tspan**2)
-F1 = F1_center + 45*np.sqrt(r)*np.sin(theta)/(4*np.pi**2 * Tspan**4)
-
-Alpha = 0
-Delta = 0
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detector='H1,L1')
-data.make_data()
-predicted_twoF = data.predict_fstat()
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2.},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2.},
-               'F2': F2,
-               'Alpha': {'type': 'unif',
-                         'lower': Alpha-DeltaAlpha/2.,
-                         'upper': Alpha+DeltaAlpha/2.},
-               'Delta': {'type': 'unif',
-                         'lower': Delta-DeltaDelta/2.,
-                         'upper': Delta+DeltaDelta/2.},
-               }
-
-ntemps = 1
-log10temperature_min = -1
-nwalkers = 100
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-mcmc.run(run_setup)
diff --git a/Paper/AllSkyMCNoiseOnly/plot_data.py b/Paper/AllSkyMCNoiseOnly/plot_data.py
deleted file mode 100644
index 9cb727909dce900dedffa5659cc146f7955e88a0..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMCNoiseOnly/plot_data.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import pyfstat
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-from scipy.stats import rv_continuous, chi2
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-
-class maxtwoFinNoise_gen(rv_continuous):
-    def _pdf(self, twoF, Ntrials):
-        F = twoF/2.0
-        alpha = (1 + F)*np.exp(-F)
-        a = Ntrials/2.0*F*np.exp(-F)
-        b = (1 - alpha)**(Ntrials-1)
-        return a*b
-
-
-df_list = []
-for fn in filenames:
-    df = pd.read_csv(
-        fn, sep=' ', names=['dF0', 'dF1', 'twoF', 'runTime'])
-    df['CLUSTER_ID'] = fn.split('_')[1]
-    df_list.append(df)
-df = pd.concat(df_list)
-print 'Number of samples = ', len(df)
-
-fig, ax = plt.subplots()
-
-ax.hist(df.twoF, bins=50, histtype='step', color='k', normed=True, linewidth=1,
-        label='Monte-Carlo histogram')
-
-maxtwoFinNoise = maxtwoFinNoise_gen(a=0)
-Ntrials_effective, loc, scale = maxtwoFinNoise.fit(df.twoF.values, floc=0, fscale=1)
-print 'Ntrials effective = {:1.2e}'.format(Ntrials_effective)
-twoFsmooth = np.linspace(0, df.twoF.max(), 1000)
-best_fit_pdf = maxtwoFinNoise.pdf(twoFsmooth, Ntrials_effective)
-ax.plot(twoFsmooth, best_fit_pdf, '-r',
-        label=r'$p(2\mathcal{{F}}_{{\rm max}})$ for {} $N_{{\rm trials}}$'
-        .format(pyfstat.texify_float(Ntrials_effective, d=2)))
-
-pval = 1e-6
-twoFsmooth_HD = np.linspace(
-    twoFsmooth[np.argmax(best_fit_pdf)], df.twoF.max(), 100000)
-best_fit_pdf_HD = maxtwoFinNoise.pdf(twoFsmooth_HD, Ntrials_effective)
-spacing = twoFsmooth_HD[1]-twoFsmooth_HD[0]
-print twoFsmooth_HD[np.argmin(np.abs(best_fit_pdf_HD - pval))], spacing
-
-ax.set_xlabel('$\widetilde{2\mathcal{F}}$')
-ax.set_xlim(0, 60)
-ax.legend(frameon=False, fontsize=6, loc=2)
-fig.tight_layout()
-fig.savefig('allsky_noise_twoF_histogram.png')
-
-from latex_macro_generator import write_to_macro
-write_to_macro('AllSkyMCNoiseOnlyMaximum', '{:1.1f}'.format(np.max(df.twoF)),
-               '../macros.tex')
-write_to_macro('AllSkyMCNoiseN', len(df), '../macros.tex')
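The maxtwoFinNoise_gen class in the deleted plot_data.py encodes the distribution of the loudest of N_trials independent 2F values in Gaussian noise. A single fully coherent 2F is chi-squared distributed with 4 degrees of freedom, so writing F = 2F/2 (as the code does) the per-trial pdf is (F/2) e^{-F} and the cdf is 1 - (1 + F) e^{-F}; the loudest-of-N density implemented in _pdf is therefore

$$
p(2\mathcal{F}_{\max}) = \frac{N_{\rm trials}}{2}\,\mathcal{F}\,e^{-\mathcal{F}}
\left[1-(1+\mathcal{F})\,e^{-\mathcal{F}}\right]^{N_{\rm trials}-1}.
$$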
diff --git a/Paper/AllSkyMCNoiseOnly/submitfile b/Paper/AllSkyMCNoiseOnly/submitfile
deleted file mode 100644
index 6add374c6853fd6816ae8ed6dc7690d1d48ef04d..0000000000000000000000000000000000000000
--- a/Paper/AllSkyMCNoiseOnly/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable= AllSkyMCNoiseOnly_repeat.sh
-Arguments= $(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group = ligo.dev.o2.cw.explore.test
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 100
diff --git a/Paper/DirectedMC/DirectedMC_repeat.sh b/Paper/DirectedMC/DirectedMC_repeat.sh
deleted file mode 100755
index badf6b84cc8d83c1f1d544ea548cb965a855135c..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/DirectedMC_repeat.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<10;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-template-counting --no-interactive
-done
-cp /local/user/gregory.ashton/MCResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/DirectedMC/generate_data.py b/Paper/DirectedMC/generate_data.py
deleted file mode 100644
index 3e57334a7f948f549c4b2c6f78c0b6fa71ddf2ea..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/generate_data.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'run_{}'.format(ID)
-data_label = '{}_data'.format(label)
-results_file_name = '{}/MCResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = -1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-depths = np.linspace(100, 400, 9)
-depths = [118.75, 156.25]
-
-nsteps = 25
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 7, False),
-             ((nsteps, 0), 2, False),
-             ((nsteps, nsteps), 1, False)]
-
-for depth in depths:
-    h0 = sqrtSX / float(depth)
-    F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-    F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-
-    psi = np.random.uniform(-np.pi/4, np.pi/4)
-    phi = np.random.uniform(0, 2*np.pi)
-    cosi = np.random.uniform(-1, 1)
-
-    data = pyfstat.Writer(
-        label=data_label, outdir=outdir, tref=tref,
-        tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-        Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-        detector='H1,L1')
-    data.make_data()
-
-    startTime = time.time()
-    theta_prior = {'F0': {'type': 'unif',
-                          'lower': F0_center-DeltaF0,
-                          'upper': F0_center+DeltaF0},
-                   'F1': {'type': 'unif',
-                          'lower': F1_center-DeltaF1,
-                          'upper': F1_center+DeltaF1},
-                   'F2': F2,
-                   'Alpha': Alpha,
-                   'Delta': Delta
-                   }
-
-    ntemps = 2
-    log10temperature_min = -1
-    nwalkers = 100
-
-    mcmc = pyfstat.MCMCFollowUpSearch(
-        label=label, outdir=outdir,
-        sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-        theta_prior=theta_prior,
-        tref=tref, minStartTime=tstart, maxStartTime=tend,
-        nwalkers=nwalkers, ntemps=ntemps,
-        log10temperature_min=log10temperature_min)
-    mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
-             gen_tex_table=False)
-    mcmc.print_summary()
-    d, maxtwoF = mcmc.get_max_twoF()
-    dF0 = F0 - d['F0']
-    dF1 = F1 - d['F1']
-    runTime = time.time() - startTime
-    with open(results_file_name, 'a') as f:
-        f.write('{} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {}\n'
-                .format(depth, h0, dF0, dF1, maxtwoF, runTime))
-    os.system('rm {}/*{}*'.format(outdir, label))
diff --git a/Paper/DirectedMC/generate_table.py b/Paper/DirectedMC/generate_table.py
deleted file mode 100644
index c98489f8562c1205c8a6fd60b817c685258f1433..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/generate_table.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import pyfstat
-import numpy as np
-
-outdir = 'data'
-
-label = 'directed_setup'
-data_label = '{}_data'.format(label)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = 1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-tref = .5*(tstart+tend)
-
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-depth = 100
-
-nsteps = 25
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 7, False),
-             ((nsteps, 0), 2, False),
-             ((nsteps, nsteps), 1, False)]
-
-h0 = sqrtSX / float(depth)
-r = np.random.uniform(0, 1)
-theta = np.random.uniform(0, 2*np.pi)
-F0 = F0_center + 3*np.sqrt(r)*np.cos(theta)/(np.pi**2 * Tspan**2)
-F1 = F1_center + 45*np.sqrt(r)*np.sin(theta)/(4*np.pi**2 * Tspan**4)
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detector='H1,L1')
-data.make_data()
-predicted_twoF = data.predict_fstat()
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0,
-                      'upper': F0+DeltaF0},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1,
-                      'upper': F1+DeltaF1},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-ntemps = 1
-log10temperature_min = -1
-nwalkers = 100
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend,
-    nwalkers=nwalkers, ntemps=ntemps, nsteps=[nsteps, nsteps],
-    log10temperature_min=log10temperature_min)
-#mcmc.run(Nsegs0=20, R=10)
-mcmc.run(run_setup)
diff --git a/Paper/DirectedMC/plot_data.py b/Paper/DirectedMC/plot_data.py
deleted file mode 100644
index 73155f49af389a5d629d7788f060252f73b02387..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/plot_data.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-
-def Recovery(Tspan, Depth, twoFstar=60, detectors='H1,L1'):
-    numDetectors = len(detectors.split(','))
-    cmd = ("DetectionProbabilityStackSlide('Nseg', 1, 'Tdata', {},"
-           "'misHist', createDeltaHist(0), 'avg2Fth', {}, 'detectors', '{}',"
-           "'Depth', {})"
-           ).format(numDetectors*Tspan, twoFstar, detectors, Depth)
-    return octave.eval(cmd, verbose=False)
-
-
-def binomialConfidenceInterval(N, K, confidence=0.95):
-    cmd = '[fLow, fUpper] = binomialConfidenceInterval({}, {}, {})'.format(
-        N, K, confidence)
-    [l, u] = octave.eval(cmd, verbose=False, return_both=True)[0].split('\n')
-    return float(l.split('=')[1]), float(u.split('=')[1])
-
-df_list = []
-for fn in filenames:
-    df = pd.read_csv(
-        fn, sep=' ', names=['depth', 'h0', 'dF0', 'dF1', 'twoF', 'runTime'])
-    df['CLUSTER_ID'] = fn.split('_')[1]
-    df_list.append(df)
-df = pd.concat(df_list)
-
-twoFstar = 60
-depths = np.unique(df.depth.values)
-recovery_fraction = []
-recovery_fraction_CI = []
-for d in depths:
-    twoFs = df[df.depth == d].twoF.values
-    N = len(twoFs)
-    K = np.sum(twoFs > twoFstar)
-    print d, N, K
-    recovery_fraction.append(K/float(N))
-    [fLower, fUpper] = binomialConfidenceInterval(N, K)
-    recovery_fraction_CI.append([fLower, fUpper])
-
-yerr = np.abs(recovery_fraction - np.array(recovery_fraction_CI).T)
-fig, ax = plt.subplots()
-ax.errorbar(depths, recovery_fraction, yerr=yerr, fmt='sr', marker='s', ms=2,
-            capsize=1, capthick=0.5, elinewidth=0.5,
-            label='Monte-Carlo result', zorder=10)
-
-fname = 'analytic_data_{}.txt'.format(twoFstar)
-if os.path.isfile(fname):
-    depths_smooth, recovery_analytic = np.loadtxt(fname)
-else:
-    depths_smooth = np.linspace(10, 550, 100)
-    recovery_analytic = []
-    for d in tqdm(depths_smooth):
-        recovery_analytic.append(Recovery(Tspan, d, twoFstar))
-    np.savetxt(fname, np.array([depths_smooth, recovery_analytic]))
-depths_smooth = np.concatenate(([0], depths_smooth))
-recovery_analytic = np.concatenate(([1], recovery_analytic))
-ax.plot(depths_smooth, recovery_analytic, '-k', label='Theoretical maximum')
-
-
-ax.set_ylim(0, 1.05)
-ax.set_xlabel(r'Sensitivity depth', size=10)
-ax.set_ylabel(r'Recovered fraction', size=10)
-ax.legend(loc=1, frameon=False)
-
-fig.tight_layout()
-fig.savefig('directed_recovery.png')
-
-
-total_number_steps = 5*25.
-fig, ax = plt.subplots()
-ax.hist(df.runTime/total_number_steps, bins=50)
-ax.set_xlabel('run-time per step [s]')
-fig.tight_layout()
-fig.savefig('runTimeHist.png')
-
diff --git a/Paper/DirectedMC/plot_recovery.py b/Paper/DirectedMC/plot_recovery.py
deleted file mode 100644
index cdcc231ce212e92221fed76fd46b4a03139b3771..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/plot_recovery.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import matplotlib.pyplot as plt
-import numpy as np
-import scipy.stats
-
-
-def Recovery(Tspan, Depth, twoFstar=60):
-    rho2 = 4*Tspan/25./Depth**2
-    twoF_Hs = scipy.stats.distributions.ncx2(df=4, nc=rho2)
-    return 1 - twoF_Hs.cdf(twoFstar)
-
-N = 500
-Tspan = np.linspace(0.1, 365*86400, N)
-Depth = np.linspace(10, 300, N)
-
-X, Y = np.meshgrid(Tspan, Depth)
-X = X / 86400
-
-Z = [[Recovery(t, d) for t in Tspan] for d in Depth]
-
-fig, ax = plt.subplots()
-pax = ax.pcolormesh(X, Y, Z, cmap=plt.cm.viridis)
-CS = ax.contour(X, Y, Z, [0.95])
-plt.clabel(CS, inline=1, fontsize=12, fmt='%s', manual=[(200, 180)])
-plt.colorbar(pax, label='Recovery fraction')
-ax.set_xlabel(r'$T_{\rm span}$ [days]', size=16)
-ax.set_ylabel(r'Depth=$\frac{\sqrt{S_{\rm n}}}{h_0}$', size=14)
-ax.set_xlim(min(Tspan)/86400., max(Tspan)/86400.)
-ax.set_ylim(min(Depth), max(Depth))
-
-fig.savefig('recovery.png')
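The deleted plot_recovery.py computes the detection probability analytically: with a signal present, the fully coherent 2F follows a noncentral chi-squared distribution with 4 degrees of freedom and noncentrality rho^2, and recovery at threshold 2F* is its survival function. The rho2 line corresponds to

$$
\rho^{2} = \frac{4\,T_{\rm span}}{25\,\mathcal{D}^{2}}
         = \frac{4}{25}\,\frac{h_0^{2}\,T_{\rm span}}{S_{\rm n}}, \qquad
P_{\rm det} = 1 - F_{\chi^{2}_{4}(\rho^{2})}\!\left(2\mathcal{F}^{\star}\right),
$$

where D is the sensitivity depth and 4/25 is the averaged-response prefactor as coded.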
diff --git a/Paper/DirectedMC/runTimeHist.png b/Paper/DirectedMC/runTimeHist.png
deleted file mode 100644
index 73fb0cb48e185013d430665c3ff00119ac12f81b..0000000000000000000000000000000000000000
Binary files a/Paper/DirectedMC/runTimeHist.png and /dev/null differ
diff --git a/Paper/DirectedMC/submitfile b/Paper/DirectedMC/submitfile
deleted file mode 100644
index 50a1130d98f23c532f6b0f9c67085800940541cc..0000000000000000000000000000000000000000
--- a/Paper/DirectedMC/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable=DirectedMC_repeat.sh
-Arguments=$(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group = ligo.dev.o2.cw.explore.test
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 1
diff --git a/Paper/DirectedMCNoiseOnly/DirectedMCNoiseOnly_repeat.sh b/Paper/DirectedMCNoiseOnly/DirectedMCNoiseOnly_repeat.sh
deleted file mode 100755
index 7b4d26d400c71a2f0d5c9a86e624a66fdbf3ca52..0000000000000000000000000000000000000000
--- a/Paper/DirectedMCNoiseOnly/DirectedMCNoiseOnly_repeat.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<100;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-template-counting --no-interactive
-done
-cp /local/user/gregory.ashton/NoiseOnlyMCResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/DirectedMCNoiseOnly/generate_data.py b/Paper/DirectedMCNoiseOnly/generate_data.py
deleted file mode 100644
index 5e388df7d554e62fe381c994e0dda3864aea3fd8..0000000000000000000000000000000000000000
--- a/Paper/DirectedMCNoiseOnly/generate_data.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'run_{}'.format(ID)
-data_label = '{}_data'.format(label)
-results_file_name = '{}/NoiseOnlyMCResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = -1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-tref = .5*(tstart+tend)
-
-VF0 = VF1 = 100
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-nsteps = 25
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 7, False),
-             ((nsteps, 0), 2, False),
-             ((nsteps, nsteps), 1, False)]
-
-h0 = 0
-F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detector='H1,L1')
-data.make_data()
-
-startTime = time.time()
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0_center-DeltaF0,
-                      'upper': F0_center+DeltaF0},
-               'F1': {'type': 'unif',
-                      'lower': F1_center-DeltaF1,
-                      'upper': F1_center+DeltaF1},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-ntemps = 2
-log10temperature_min = -1
-nwalkers = 100
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
-         gen_tex_table=False)
-mcmc.print_summary()
-d, maxtwoF = mcmc.get_max_twoF()
-dF0 = F0 - d['F0']
-dF1 = F1 - d['F1']
-runTime = time.time() - startTime
-with open(results_file_name, 'a') as f:
-    f.write('{:1.8e} {:1.8e} {:1.8e} {}\n'
-            .format(dF0, dF1, maxtwoF, runTime))
-os.system('rm {}/*{}*'.format(outdir, label))
diff --git a/Paper/DirectedMCNoiseOnly/plot_data.py b/Paper/DirectedMCNoiseOnly/plot_data.py
deleted file mode 100644
index 0d7ec4d4e4900c89168197049f5881e87b5bf2c9..0000000000000000000000000000000000000000
--- a/Paper/DirectedMCNoiseOnly/plot_data.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import pyfstat
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-from scipy.stats import rv_continuous, chi2
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-
-class maxtwoFinNoise_gen(rv_continuous):
-    def _pdf(self, twoF, Ntrials):
-        F = twoF/2.0
-        alpha = (1 + F)*np.exp(-F)
-        a = Ntrials/2.0*F*np.exp(-F)
-        b = (1 - alpha)**(Ntrials-1)
-        return a*b
-
-
-df_list = []
-for fn in filenames:
-    df = pd.read_csv(
-        fn, sep=' ', names=['dF0', 'dF1', 'twoF', 'runTime'])
-    df['CLUSTER_ID'] = fn.split('_')[1]
-    df_list.append(df)
-df = pd.concat(df_list)
-print 'Number of samples = ', len(df)
-
-fig, ax = plt.subplots()
-ax.hist(df.twoF, bins=50, histtype='step', color='k', normed=True, linewidth=1,
-        label='Monte-Carlo histogram')
-
-maxtwoFinNoise = maxtwoFinNoise_gen(a=0)
-Ntrials_effective, loc, scale = maxtwoFinNoise.fit(df.twoF.values, floc=0, fscale=1)
-print 'Ntrials effective = {:1.2e}'.format(Ntrials_effective)
-twoFsmooth = np.linspace(0, df.twoF.max(), 1000)
-best_fit_pdf = maxtwoFinNoise.pdf(twoFsmooth, Ntrials_effective)
-ax.plot(twoFsmooth, best_fit_pdf, '-r',
-        label=r'$p(2\mathcal{{F}}_{{\rm max}})$ for {} $N_{{\rm trials}}$'
-        .format(pyfstat.texify_float(Ntrials_effective, d=2)))
-
-pval = 1e-6
-twoFsmooth_HD = np.linspace(
-    twoFsmooth[np.argmax(best_fit_pdf)], df.twoF.max(), 100000)
-best_fit_pdf_HD = maxtwoFinNoise.pdf(twoFsmooth_HD, Ntrials_effective)
-spacing = twoFsmooth_HD[1]-twoFsmooth_HD[0]
-print twoFsmooth_HD[np.argmin(np.abs(best_fit_pdf_HD - pval))], spacing
-
-ax.set_xlabel('$\widetilde{2\mathcal{F}}$')
-ax.set_xlim(0, 60)
-ax.legend(frameon=False, fontsize=6)
-fig.tight_layout()
-fig.savefig('directed_noise_twoF_histogram.png')
-
-from latex_macro_generator import write_to_macro
-write_to_macro('DirectedMCNoiseOnlyMaximum', '{:1.1f}'.format(np.max(df.twoF)),
-               '../macros.tex')
-write_to_macro('DirectedMCNoiseN', len(df), '../macros.tex')
diff --git a/Paper/DirectedMCNoiseOnly/submitfile b/Paper/DirectedMCNoiseOnly/submitfile
deleted file mode 100644
index 77d69ee4d0b86673b02ec07c08b2ad850c054343..0000000000000000000000000000000000000000
--- a/Paper/DirectedMCNoiseOnly/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable= DirectedMCNoiseOnly_repeat.sh
-Arguments= $(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group = ligo.dev.o2.cw.explore.test
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 1
diff --git a/Paper/Examples/Makefile b/Paper/Examples/Makefile
deleted file mode 100644
index 3557f777a60519dacd872c5ed0c83adf44d031da..0000000000000000000000000000000000000000
--- a/Paper/Examples/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-frequency_grid_files = grided_frequency_search_1D.png
-fully_coherent_files = fully_coherent_search_using_MCMC_walkers.png
-follow_up_files = follow_up_run_setup.tex follow_up_walkers.png
-transient_files = transient_search_initial_stage_twoFcumulative.png transient_search_corner.png
-glitch_files = single_glitch_F0F1_grid_2D.png single_glitch_glitchSearch_corner.png
-
-all_files = $(frequency_grid_files) $(fully_coherent_files) $(follow_up_files) $(transient_files) $(glitch_files)
-
-copyfiles:
-	cd data; cp $(all_files) ../../
diff --git a/Paper/Examples/follow_up.py b/Paper/Examples/follow_up.py
deleted file mode 100644
index 3093daa0ca7e9219e695c87f74a8ee1249833b4b..0000000000000000000000000000000000000000
--- a/Paper/Examples/follow_up.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import pyfstat
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib
-
-F0 = 30.0
-F1 = -1e-10
-F2 = 0
-Alpha = 1.0
-Delta = 0.5
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-duration = 100*86400
-tend = tstart+duration
-tref = .5*(tstart+tend)
-
-depth = 50
-data_label = 'follow_up'
-
-h0 = sqrtSX / depth
-
-data = pyfstat.Writer(
-    label=data_label, outdir='data', tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX)
-data.make_data()
-
-# The predicted twoF, given by lalapps_predictFstat can be accessed by
-twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
-
-# Search
-VF0 = VF1 = 500
-DeltaF0 = VF0 * np.sqrt(3)/(np.pi*duration)
-DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*duration**2)
-DeltaAlpha = 1e-1
-DeltaDelta = 1e-1
-theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2},
-               'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2},
-               'F2': F2,
-               'Alpha': {'type': 'unif', 'lower': Alpha-DeltaAlpha,
-                         'upper': Alpha+DeltaAlpha},
-               'Delta': {'type': 'unif', 'lower': Delta-DeltaDelta,
-                         'upper': Delta+DeltaDelta},
-               }
-
-ntemps = 3
-log10temperature_min = -0.5
-nwalkers = 100
-scatter_val = 1e-10
-nsteps = [200, 200]
-
-mcmc = pyfstat.MCMCFollowUpSearch(
-    label='follow_up', outdir='data',
-    sftfilepath='data/*'+data_label+'*sft', theta_prior=theta_prior, tref=tref,
-    minStartTime=tstart, maxStartTime=tend, nwalkers=nwalkers, nsteps=nsteps,
-    ntemps=ntemps, log10temperature_min=log10temperature_min,
-    scatter_val=scatter_val)
-
-fig, axes = plt.subplots(nrows=2, ncols=2)
-fig, axes = mcmc.run(
-    R=10, Nsegs0=100, subtractions=[F0, F1, Alpha, Delta], labelpad=0.01,
-    fig=fig, axes=axes, plot_det_stat=False, return_fig=True)
-axes[3].set_xlabel(r'$\textrm{Number of steps}$', labelpad=0.1)
-for ax in axes:
-    ax.set_xlim(0, axes[0].get_xlim()[-1])
-    ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(5))
-fig.tight_layout()
-fig.savefig('{}/{}_walkers.png'.format(mcmc.outdir, mcmc.label), dpi=400)
-
-mcmc.plot_corner(add_prior=True)
-mcmc.print_summary()
diff --git a/Paper/Examples/fully_coherent_search_using_MCMC.py b/Paper/Examples/fully_coherent_search_using_MCMC.py
deleted file mode 100644
index ec34696d281e3f31bb69fa7b3d296511dbfd8eca..0000000000000000000000000000000000000000
--- a/Paper/Examples/fully_coherent_search_using_MCMC.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import pyfstat
-import numpy as np
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-duration = 100*86400
-tend = tstart + duration
-
-# Properties of the signal
-F0 = 30.0
-F1 = -1e-10
-F2 = 0
-Alpha = 5e-3
-Delta = 6e-2
-tref = .5*(tstart+tend)
-
-depth = 10
-h0 = sqrtSX / depth
-data_label = 'fully_coherent_search_using_MCMC'
-
-data = pyfstat.Writer(
-    label=data_label, outdir='data', tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX)
-data.make_data()
-
-# The predicted twoF, given by lalapps_predictFstat can be accessed by
-twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
-
-DeltaF0 = 1e-7
-DeltaF1 = 1e-13
-VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
-VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2.},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2.},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-ntemps = 1
-log10temperature_min = -1
-nwalkers = 1000
-nsteps = [50, 50]
-
-mcmc = pyfstat.MCMCSearch(
-    label='fully_coherent_search_using_MCMC', outdir='data',
-    sftfilepath='data/*'+data_label+'-*sft', theta_prior=theta_prior, tref=tref,
-    minStartTime=tstart, maxStartTime=tend, nsteps=nsteps, nwalkers=nwalkers,
-    ntemps=ntemps, log10temperature_min=log10temperature_min)
-mcmc.run(context='paper', subtractions=[30, -1e-10])
-mcmc.plot_corner(add_prior=True)
-mcmc.print_summary()
-
-from latex_macro_generator import write_to_macro
-write_to_macro('BasicExampleF0', '{:1.0f}'.format(F0), '../macros.tex')
-write_to_macro('BasicExampleF1', F1, '../macros.tex')
-write_to_macro('BasicExampleh0', h0, '../macros.tex')
-write_to_macro('BasicExampleSqrtSn', sqrtSX, '../macros.tex')
-write_to_macro('BasicExampleDepth', depth, '../macros.tex')
-write_to_macro('BasicExampleDeltaF0', DeltaF0, '../macros.tex')
-write_to_macro('BasicExampleDeltaF1', DeltaF1, '../macros.tex')
-write_to_macro('BasicExampleVF0', VF0, '../macros.tex')
-write_to_macro('BasicExampleVF1', VF1, '../macros.tex')
-write_to_macro('BasicExampleV', VF0*VF1, '../macros.tex')
-write_to_macro('BasicExamplenburn', nsteps[0], '../macros.tex')
-write_to_macro('BasicExamplenprod', nsteps[1], '../macros.tex')
-
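The VF0/VF1 assignments in the deleted example are the per-dimension metric volumes the script prints before searching; in terms of the prior widths and the span T = duration they read

$$
\mathcal{V}_{F_0} = \frac{(\pi T\,\Delta F_0)^{2}}{3}, \qquad
\mathcal{V}_{F_1} = \frac{4}{45}\,(\pi T^{2}\,\Delta F_1)^{2}, \qquad
\mathcal{V} = \mathcal{V}_{F_0}\,\mathcal{V}_{F_1}.
$$

(Note: the Monte-Carlo generate_data.py scripts earlier in this diff instead set DeltaF0 = VF0*sqrt(3)/(pi*Tspan), i.e. linear rather than quadratic in the width, so the two conventions are not interchangeable.)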
diff --git a/Paper/Examples/fully_coherent_search_using_MCMC_convergence.py b/Paper/Examples/fully_coherent_search_using_MCMC_convergence.py
deleted file mode 100644
index e440eb37dae67e156b02c08f16247a76cd7aebca..0000000000000000000000000000000000000000
--- a/Paper/Examples/fully_coherent_search_using_MCMC_convergence.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pyfstat
-import numpy as np
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-duration = 100*86400
-tend = tstart + duration
-
-# Properties of the signal
-F0 = 30.0
-F1 = -1e-10
-F2 = 0
-Alpha = 5e-3
-Delta = 6e-2
-tref = .5*(tstart+tend)
-
-depth = 10
-h0 = sqrtSX / depth
-data_label = 'fully_coherent_search_using_MCMC_convergence'
-
-data = pyfstat.Writer(
-    label=data_label, outdir='data', tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX)
-data.make_data()
-
-# The predicted twoF, given by lalapps_predictFstat can be accessed by
-twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
-
-DeltaF0 = 5e-7
-DeltaF1 = 1e-12
-VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
-VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2.},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2.},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-ntemps = 1
-log10temperature_min = -1
-nwalkers = 100
-nsteps = [900, 100]
-
-mcmc = pyfstat.MCMCSearch(
-    label='fully_coherent_search_using_MCMC_convergence', outdir='data',
-    sftfilepath='data/*'+data_label+'*sft', theta_prior=theta_prior, tref=tref,
-    minStartTime=tstart, maxStartTime=tend, nsteps=nsteps, nwalkers=nwalkers,
-    ntemps=ntemps, log10temperature_min=log10temperature_min)
-mcmc.setup_convergence_testing(
-    convergence_threshold_number=5, convergence_plot_upper_lim=10,
-    convergence_burnin_fraction=0.1)
-mcmc.run(context='paper', subtractions=[30, -1e-10])
-mcmc.plot_corner(add_prior=True)
-mcmc.print_summary()
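setup_convergence_testing is a PyFstat hook whose internals are not shown in this diff. Purely for orientation, a conventional multi-chain diagnostic of the kind such a test might monitor is the Gelman-Rubin statistic; the sketch below is a generic illustration under that assumption, not PyFstat's actual implementation:

```python
import numpy as np

def gelman_rubin(chains):
    """Potential scale reduction factor (PSRF) for one parameter.

    chains: array of shape (nwalkers, nsteps); values near 1 indicate
    that independent walkers have mixed into the same distribution.
    """
    m, n = chains.shape
    chain_means = chains.mean(axis=1)
    W = chains.var(axis=1, ddof=1).mean()  # mean within-chain variance
    B = n*chain_means.var(ddof=1)          # between-chain variance
    var_hat = (n - 1)/n*W + B/n            # pooled variance estimate
    return np.sqrt(var_hat/W)

samples = np.random.default_rng(1).normal(size=(100, 900))
print(gelman_rubin(samples))  # ~1.0 for well-mixed chains
```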
diff --git a/Paper/Examples/grided_frequency_search.py b/Paper/Examples/grided_frequency_search.py
deleted file mode 100644
index 4d677656f8372b8e3252241ed814983faf229ab5..0000000000000000000000000000000000000000
--- a/Paper/Examples/grided_frequency_search.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import pyfstat
-import numpy as np
-import matplotlib.pyplot as plt
-from latex_macro_generator import write_to_macro
-
-plt.style.use('paper')
-
-F0 = 100.0
-F1 = 0
-F2 = 0
-Alpha = 5.98
-Delta = -0.1
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-duration = 30 * 60 * 60
-tend = tstart+duration
-tref = .5*(tstart+tend)
-
-psi = 2.25
-phi = 0.2
-cosi = 0.5
-
-depth = 2
-data_label = 'grided_frequency_depth_{:1.0f}'.format(depth)
-
-h0 = sqrtSX / depth
-
-data = pyfstat.Writer(
-    label=data_label, outdir='data', tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, detectors='H1', cosi=cosi, phi=phi,
-    psi=psi)
-data.make_data()
-
-DeltaF0 = 1.5e-4
-F0s = [F0-DeltaF0/2., F0+DeltaF0/2., DeltaF0/2000]
-F1s = [F1]
-F2s = [F2]
-Alphas = [Alpha]
-Deltas = [Delta]
-search = pyfstat.GridSearch(
-    'grided_frequency_search', 'data', 'data/*'+data_label+'-*.sft', F0s, F1s,
-    F2s, Alphas, Deltas, tref, tstart, tend)
-search.run()
-
-fig, ax = plt.subplots()
-xidx = search.keys.index('F0')
-frequencies = np.unique(search.data[:, xidx])
-twoF = search.data[:, -1]
-
-#mismatch = np.sign(x-F0)*(duration * np.pi * (x - F0))**2 / 12.0
-ax.plot(frequencies-F0, twoF, 'k', lw=0.5)
-
-DeltaF = frequencies - F0
-ax.set_ylabel('$\widetilde{2\mathcal{F}}$')
-ax.set_xlabel('Template frequency')
-dF0 = np.sqrt(12*1)/(np.pi*duration)
-xticks = [-4/86400., -3/86400., -2/86400., -1/86400., 0,
-          1/86400., 2/86400., 3/86400., 4/86400.]
-#ax.set_xticks(xticks)
-#xticklabels = [r'$f_0 {-} \frac{4}{1\textrm{-day}}$', '$f_0$',
-#               r'$f_0 {+} \frac{4}{1\textrm{-day}}$']
-#ax.set_xticklabels(xticklabels)
-ax.grid(linewidth=0.1)
-plt.tight_layout()
-fig.savefig('{}/{}_1D.png'.format(search.outdir, search.label), dpi=300)
-
-write_to_macro('GridedFrequencySqrtSx', '{}'.format(
-    pyfstat.helper_functions.texify_float(sqrtSX)),
-               '../macros.tex')
-write_to_macro('GridedFrequencyh0', '{}'.format(
-    pyfstat.helper_functions.texify_float(h0)),
-               '../macros.tex')
-write_to_macro('GridedFrequencyT', '{}'.format(int(duration/86400)),
-               '../macros.tex')
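The unused dF0 line in the deleted script, dF0 = sqrt(12*1)/(pi*duration), is the frequency spacing at which a single template incurs a one-dimensional metric mismatch of m = 1; more generally

$$
\delta f = \frac{\sqrt{12\,m}}{\pi T},
$$

so the grid above, stepping by DeltaF0/2000 across a width of DeltaF0 = 1.5e-4 Hz, heavily oversamples this scale in order to resolve the 2F profile.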
diff --git a/Paper/Examples/matplotlibrc b/Paper/Examples/matplotlibrc
deleted file mode 100644
index c2f237718bc52015e0edaf8a286e6f1371d99973..0000000000000000000000000000000000000000
--- a/Paper/Examples/matplotlibrc
+++ /dev/null
@@ -1,36 +0,0 @@
-figure.figsize: 3.4, 2.5
-figure.facecolor: w
-
-axes.labelsize: 6
-axes.titlesize: 6
-xtick.labelsize: 4
-ytick.labelsize: 4
-legend.fontsize: 6
-font.size: 6
-
-grid.linewidth: 0.8
-lines.linewidth: 1.0
-patch.linewidth: 0.24
-lines.markersize: 3.6
-lines.markeredgewidth: 0
-
-xtick.major.size: 2.0
-ytick.major.size: 2.0
-xtick.minor.size: 1.5
-xtick.minor.visible: True
-ytick.minor.size: 1.5
-ytick.minor.visible: True
-
-xtick.major.pad: 2.5
-ytick.major.pad: 2.5
-
-pdf.compression: 9
-savefig.format: png
-savefig.dpi: 300
-
-font.family: serif
-font.serif: Computer Modern
-text.usetex: True
-axes.formatter.use_mathtext: True
-axes.formatter.useoffset: False
-axes.formatter.limits : -3, 4
diff --git a/Paper/Examples/single_glitch.py b/Paper/Examples/single_glitch.py
deleted file mode 100644
index 1da8f5d8f965e5cacf5b58f75d5ad444a6188c4d..0000000000000000000000000000000000000000
--- a/Paper/Examples/single_glitch.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import pyfstat
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib
-
-sqrtSX = 1e-22
-tstart = 1000000000
-duration = 100*86400
-tend = tstart+duration
-
-# Define parameters of the Crab pulsar as an example
-tref = .5*(tstart + tend)
-F0 = 30.0
-F1 = -1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-
-# Signal strength
-depth = 10
-h0 = sqrtSX / depth
-
-VF0 = VF1 = 200
-dF0 = np.sqrt(3)/(np.pi*duration)
-dF1 = np.sqrt(45/4.)/(np.pi*duration**2)
-DeltaF0 = VF0 * dF0
-DeltaF1 = VF1 * dF1
-
-# Next, taking the same signal parameters, we include a glitch half way through
-R = 0.5
-dtglitch = R*duration
-delta_F0 = 0.25*DeltaF0
-delta_F1 = -0.1*DeltaF1
-
-glitch_data = pyfstat.Writer(
-    label='single_glitch', outdir='data', tref=tref, tstart=tstart, F0=F0,
-    F1=F1, F2=F2, duration=duration, Alpha=Alpha, Delta=Delta, h0=h0,
-    sqrtSX=sqrtSX, dtglitch=dtglitch, delta_F0=delta_F0, delta_F1=delta_F1)
-glitch_data.make_data()
-FCtwoFPM = glitch_data.predict_fstat()
-
-F0s = [F0-DeltaF0/2., F0+DeltaF0/2., 1*dF0]
-F1s = [F1-DeltaF1/2., F1+DeltaF1/2., 1*dF1]
-F2s = [F2]
-Alphas = [Alpha]
-Deltas = [Delta]
-search = pyfstat.GridSearch(
-    'single_glitch_F0F1_grid', 'data', 'data/*single_glitch*sft', F0s, F1s,
-    F2s, Alphas, Deltas, tref, tstart, tend)
-search.run()
-ax = search.plot_2D('F0', 'F1', save=False)
-ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
-plt.tight_layout()
-plt.savefig('data/single_glitch_F0F1_grid_2D.png', dpi=500)
-FCtwoF = search.get_max_twoF()['twoF']
-
-
-theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2},
-               'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2},
-               'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta,
-               'tglitch': {'type': 'unif', 'lower': tstart+0.1*duration,
-                           'upper': tend-0.1*duration},
-               'delta_F0': {'type': 'halfnorm', 'loc': 0, 'scale': 1e-7*F0},
-               'delta_F1': {'type': 'norm', 'loc': 0, 'scale': abs(1e-3*F1)},
-               }
-ntemps = 3
-log10temperature_min = -0.1
-nwalkers = 100
-nsteps = [1000, 1000]
-glitch_mcmc = pyfstat.MCMCGlitchSearch(
-    'single_glitch_glitchSearch', 'data',
-    sftfilepath='data/*_single_glitch*.sft', theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend, nsteps=nsteps,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-glitch_mcmc.write_prior_table()
-glitch_mcmc.run()
-glitch_mcmc.plot_corner(figsize=(6, 6))
-glitch_mcmc.print_summary()
-GlitchtwoF = glitch_mcmc.get_max_twoF()[1]
-
-from latex_macro_generator import write_to_macro
-write_to_macro('SingleGlitchTspan', '{:1.1f}'.format(duration/86400),
-               '../macros.tex')
-write_to_macro('SingleGlitchR', '{:1.1f}'.format(R), '../macros.tex')
-write_to_macro('SingleGlitchDepth',
-               '{}'.format(pyfstat.texify_float(sqrtSX/h0)), '../macros.tex')
-write_to_macro('SingleGlitchF0', '{}'.format(F0), '../macros.tex')
-write_to_macro('SingleGlitchF1', '{}'.format(F1), '../macros.tex')
-write_to_macro('SingleGlitchdeltaF0',
-               '{}'.format(pyfstat.texify_float(delta_F0)), '../macros.tex')
-write_to_macro('SingleGlitchdeltaF1',
-               '{}'.format(pyfstat.texify_float(delta_F1)), '../macros.tex')
-write_to_macro('SingleGlitchFCtwoFPM', '{:1.1f}'.format(FCtwoFPM),
-               '../macros.tex')
-write_to_macro('SingleGlitchFCtwoF', '{:1.1f}'.format(FCtwoF),
-               '../macros.tex')
-write_to_macro('SingleGlitch_FCMismatch',
-               '{:1.1f}'.format((FCtwoFPM-FCtwoF)/FCtwoFPM), '../macros.tex')
-write_to_macro('SingleGlitchGlitchtwoF', '{:1.1f}'.format(GlitchtwoF),
-               '../macros.tex')
-
diff --git a/Paper/Examples/transient_search_using_MCMC.py b/Paper/Examples/transient_search_using_MCMC.py
deleted file mode 100644
index c115edbf01bb975a1d33fa129d1a250048e135d0..0000000000000000000000000000000000000000
--- a/Paper/Examples/transient_search_using_MCMC.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import pyfstat
-import numpy as np
-import matplotlib.pyplot as plt
-
-plt.style.use('thesis')
-
-F0 = 30.0
-F1 = -1e-10
-F2 = 0
-Alpha = 5e-3
-Delta = 6e-2
-
-tstart = 1000000000
-duration = 100*86400
-data_tstart = tstart - duration
-data_tend = data_tstart + 3*duration
-tref = .5*(data_tstart+data_tend)
-
-h0 = 4e-24
-sqrtSX = 1e-22
-
-transient = pyfstat.Writer(
-    label='transient', outdir='data', tref=tref, tstart=tstart, F0=F0, F1=F1,
-    F2=F2, duration=duration, Alpha=Alpha, Delta=Delta, h0=h0, sqrtSX=sqrtSX,
-    minStartTime=data_tstart, maxStartTime=data_tend)
-transient.make_data()
-print(transient.predict_fstat())
-
-DeltaF0 = 1e-7
-DeltaF1 = 1e-13
-VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
-VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
-
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2.},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2.},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta
-               }
-
-ntemps = 3
-log10temperature_min = -1
-nwalkers = 100
-nsteps = [100, 100]
-
-mcmc = pyfstat.MCMCSearch(
-    label='transient_search_initial_stage', outdir='data',
-    sftfilepath='data/*transient*sft', theta_prior=theta_prior, tref=tref,
-    minStartTime=data_tstart, maxStartTime=data_tend, nsteps=nsteps,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-mcmc.run()
-fig, ax = plt.subplots()
-mcmc.write_par()
-mcmc.generate_loudest()
-mcmc.plot_cumulative_max(ax=ax)
-ax.set_xlabel('Days from $t_\mathrm{start}$')
-ax.legend_.remove()
-fig.savefig('data/transient_search_initial_stage_twoFcumulative')
-mcmc.print_summary()
-
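-# Second stage: repeat the search with the prior extended by the transient
-# window parameters (start time and duration), letting the MCMC marginalise
-# over when the signal is present. The halfnorm prior places most of its
-# weight on window durations shorter than the injected one.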
-theta_prior = {'F0': {'type': 'unif',
-                      'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2.},
-               'F1': {'type': 'unif',
-                      'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2.},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta,
-               'transient_tstart': {'type': 'unif',
-                                    'lower': data_tstart,
-                                    'upper': data_tend-0.2*duration},
-               'transient_duration': {'type': 'halfnorm',
-                                      'loc': 0.01*duration,
-                                      'scale': 0.5*duration}
-               }
-
-nwalkers = 500
-nsteps = [200, 200]
-
-mcmc = pyfstat.MCMCTransientSearch(
-    label='transient_search', outdir='data',
-    sftfilepath='data/*transient*sft', theta_prior=theta_prior, tref=tref,
-    minStartTime=data_tstart, maxStartTime=data_tend, nsteps=nsteps,
-    nwalkers=nwalkers, ntemps=ntemps,
-    log10temperature_min=log10temperature_min)
-mcmc.run()
-mcmc.plot_corner()
-mcmc.print_summary()
diff --git a/Paper/GlitchMCNoiseOnly/SingleGlitchMCNoiseOnly.sh b/Paper/GlitchMCNoiseOnly/SingleGlitchMCNoiseOnly.sh
deleted file mode 100755
index fb434dbbb7fbe308a66f1ec8b95d394f7923ba81..0000000000000000000000000000000000000000
--- a/Paper/GlitchMCNoiseOnly/SingleGlitchMCNoiseOnly.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<5;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-interactive
-done
-cp /local/user/gregory.ashton/NoiseOnlyMCResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/GlitchMCNoiseOnly/generate_data.py b/Paper/GlitchMCNoiseOnly/generate_data.py
deleted file mode 100644
index 24e2eb50d22059029e5925002db03e911bc5bded..0000000000000000000000000000000000000000
--- a/Paper/GlitchMCNoiseOnly/generate_data.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-nglitch = 2
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'run_{}'.format(ID)
-data_label = '{}_data'.format(label)
-results_file_name = '{}/NoiseOnlyMCResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0_center = 30
-F1_center = -1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-tref = .5*(tstart+tend)
-
-VF0 = VF1 = 200
-dF0 = np.sqrt(3)/(np.pi*Tspan)
-dF1 = np.sqrt(45/4.)/(np.pi*Tspan**2)
-DeltaF0 = VF0 * dF0
-DeltaF1 = VF1 * dF1
-
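-# Follow-up ladder: each stage is ((nburn, nprod), Nseg, flag), i.e. the
-# number of burn-in/production MCMC steps and the number of semi-coherent
-# segments. Nseg decreases 20 -> 7 -> 2 -> 1, ending with a fully-coherent
-# stage that also takes nsteps production steps; the trailing booleans are
-# passed through to run() unchanged.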
-nsteps = 25
-run_setup = [((nsteps, 0), 20, False),
-             ((nsteps, 0), 7, False),
-             ((nsteps, 0), 2, False),
-             ((nsteps, nsteps), 1, False)]
-
-h0 = 0
-F0 = F0_center + np.random.uniform(-0.5, 0.5)*DeltaF0
-F1 = F1_center + np.random.uniform(-0.5, 0.5)*DeltaF1
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-# Next, taking the same signal parameters, we include a glitch half way through
-dtglitch = Tspan/2.0
-delta_F0 = 0.25*DeltaF0
-delta_F1 = -0.1*DeltaF1
-
-glitch_data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref, tstart=tstart, F0=F0,
-    F1=F1, F2=F2, duration=Tspan, Alpha=Alpha, Delta=Delta, h0=h0,
-    sqrtSX=sqrtSX, dtglitch=dtglitch, delta_F0=delta_F0, delta_F1=delta_F1)
-glitch_data.make_data()
-
-
-startTime = time.time()
-# PROBLEM: this prior is centred on the randomly drawn F0 rather than on
-# F0_center; harmless for this noise-only study since h0 = 0.
-theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
-                      'upper': F0+DeltaF0/2},
-               'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
-                      'upper': F1+DeltaF1/2},
-               'F2': F2,
-               'Alpha': Alpha,
-               'Delta': Delta,
-               'tglitch': {'type': 'unif', 'lower': tstart+0.1*Tspan,
-                           'upper': tend-0.1*Tspan},
-               'delta_F0': {'type': 'halfnorm', 'loc': 0, 'scale': DeltaF0},
-               'delta_F1': {'type': 'norm', 'loc': 0, 'scale': DeltaF1},
-               }
-ntemps = 2
-log10temperature_min = -0.1
-nwalkers = 100
-nsteps = [500, 500]
-glitch_mcmc = pyfstat.MCMCGlitchSearch(
-    label=label, outdir=outdir,
-    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-    theta_prior=theta_prior,
-    tref=tref, minStartTime=tstart, maxStartTime=tend, nsteps=nsteps,
-    nwalkers=nwalkers, ntemps=ntemps, nglitch=nglitch,
-    log10temperature_min=log10temperature_min)
-glitch_mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
-                gen_tex_table=False)
-glitch_mcmc.print_summary()
-d, maxtwoF = glitch_mcmc.get_max_twoF()
-dF0 = F0 - d['F0']
-dF1 = F1 - d['F1']
-#tglitch = d['tglitch']
-#R = (tglitch - tstart) / Tspan
-#delta_F0 = d['delta_F0']
-#delta_F1 = d['delta_F1']
-runTime = time.time() - startTime
-with open(results_file_name, 'a') as f:
-    f.write('{} {:1.8e} {:1.8e} {:1.8e} {:1.1f}\n'
-            .format(nglitch, dF0, dF1, maxtwoF, runTime))
-os.system('rm {}/*{}*'.format(outdir, label))
diff --git a/Paper/GlitchMCNoiseOnly/plot_data.py b/Paper/GlitchMCNoiseOnly/plot_data.py
deleted file mode 100644
index 75a9a5b5153f7abc00179af9b5d50030791c8654..0000000000000000000000000000000000000000
--- a/Paper/GlitchMCNoiseOnly/plot_data.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import pyfstat
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-from scipy.stats import rv_continuous, chi2
-from scipy.special import gammaincc
-from latex_macro_generator import write_to_macro
-
-
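-# Characteristic-function (in theta) integrand for the maximum of Nt
-# independent twoF draws: in Gaussian noise 2F follows a chi^2 distribution
-# with 4 degrees of freedom, so the density of the maximum is Nt times the
-# single-trial density times the single-trial CDF to the power Nt-1.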
-def CF_twoFmax_integrand(theta, twoFmax, Nt):
-    Fmax = twoFmax/2.0
-    return (np.exp(1j*theta*twoFmax) * Nt/2.0 * Fmax * np.exp(-Fmax)
-            * (1 - (1+Fmax)*np.exp(-Fmax))**(Nt-1.))
-
-
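-# pdf of twoFhat, the sum of the per-segment maxima over the Ng+1 segments
-# of an Ng-glitch search: multiply the characteristic functions of the
-# individual maxima and invert the product numerically (trapezoidal
-# quadrature) to recover the density.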
-def pdf_twoFhat(twoFhat, Ng, Nts, twoFtildemax=100, dtwoF=0.05):
-    if np.ndim(Nts) == 0:
-        Nts = np.zeros(Ng+1) + Nts
-
-    twoFtildemax_int = np.arange(0, twoFtildemax, dtwoF)
-    theta_int = np.arange(-1./dtwoF, 1./dtwoF, 1./twoFtildemax)
-
-    CF_twoFtildemax_theta = np.array(
-        [[np.trapz(CF_twoFmax_integrand(t, twoFtildemax_int, Nt), twoFtildemax_int)
-          for t in theta_int]
-         for Nt in Nts])
-
-    CF_twoFhat_theta = np.prod(CF_twoFtildemax_theta, axis=0)
-    print(CF_twoFhat_theta.shape, theta_int.shape)
-    pdf = (1/(2*np.pi)) * np.array(
-        [np.trapz(np.exp(-1j*theta_int*twoFhat_val)*CF_twoFhat_theta,
-                  theta_int) for twoFhat_val in twoFhat])
-    print(np.trapz(pdf.real, x=twoFhat))
-    return pdf
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-df_list = []
-for fn in filenames:
-    df = pd.read_csv(
-        fn, sep=' ', names=['nglitches', 'dF0', 'dF1', 'twoF', 'runTime'])
-    df['CLUSTER_ID'] = fn.split('_')[1]
-    df_list.append(df)
-df = pd.concat(df_list)
-
-colors = ['C0', 'C1']
-fig, ax = plt.subplots()
-handles = []
-labels = []
-for ng, c in zip(np.unique(df.nglitches.values), colors):
-    print('ng={}'.format(ng))
-    df_temp = df[df.nglitches == ng]
-    #df_temp = df_temp[[str(x).isalpha() for x in df_temp.CLUSTER_ID.values]]
-    print(df_temp.tail())
-    Nsamples = len(df_temp)
-    MaxtwoF = df_temp.twoF.max()
-    print('Number of samples = {}'.format(Nsamples))
-    print('Max twoF = {}'.format(MaxtwoF))
-    print(np.any(np.isnan(df_temp.twoF.values)))
-    ax.hist(df_temp.twoF, bins=40, histtype='stepfilled',
-            normed=True, align='mid', alpha=0.5,
-            linewidth=1, label='$N_\mathrm{{glitches}}={}$'.format(ng),
-            color=c)
-
-    write_to_macro('DirectedMC{}GlitchNoiseOnlyMaximum'.format(ng),
-                   '{:1.1f}'.format(MaxtwoF), '../macros.tex')
-    write_to_macro('DirectedMC{}GlitchNoiseN'.format(ng),
-                   '{:1.0f}'.format(Nsamples), '../macros.tex')
-
-    twoFmax = np.linspace(0, 100, 200)
-    ax.plot(twoFmax, pdf_twoFhat(twoFmax, ng, Nsamples,
-                                 twoFtildemax=2*MaxtwoF, dtwoF=0.1),
-            color=c, label='$N_\mathrm{{glitches}}={}$ predicted'.format(ng))
-
-ax.set_xlabel('$\widehat{2\mathcal{F}}$')
-ax.set_xlim(0, 90)
-handles, labels = ax.get_legend_handles_labels()
-idxs = np.argsort(labels)
-ax.legend(np.array(handles)[idxs], np.array(labels)[idxs], frameon=False,
-          fontsize=6)
-fig.tight_layout()
-fig.savefig('glitch_noise_twoF_histogram.png')
-
-
diff --git a/Paper/GlitchMCNoiseOnly/submitfile b/Paper/GlitchMCNoiseOnly/submitfile
deleted file mode 100644
index 99f6657352f9552bb5415ebf4ee4764436aed140..0000000000000000000000000000000000000000
--- a/Paper/GlitchMCNoiseOnly/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable= SingleGlitchMCNoiseOnly.sh
-Arguments= $(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group=ligo.dev.o1.cw.directedisolatedother.semicoherent 
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 1806
diff --git a/Paper/Makefile b/Paper/Makefile
deleted file mode 100644
index 9c0c202082280561c2a74c6b9f07fd7753a5247a..0000000000000000000000000000000000000000
--- a/Paper/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-SHELL = /bin/bash
-
-main = paper_cw_mcmc.pdf
-
-.PHONY : main clean figclean arxiv jones
-
-main : $(main)
-
-git_tag.tex :
-	./git-tag.sh $@
-
-$(main): *.tex git_tag.tex
-	pdflatex ${@:.pdf=} && bibtex ${@:.pdf=} && pdflatex ${@:.pdf=} && pdflatex ${@:.pdf=}
-
-clean :
-	rm -f git_tag.tex $(main:.pdf=){.aux,.bbl,.blg,.log,.out,.pdf,Notes.bib} $(texfigs) $(texonlyfigs)
-
-arxiv :
-	rm -f $(main)
-	rm -f git_tag.tex
-	tar -cvf arxiv_submission.tar *tex *bib bibliography.bib *pdf *.bbl
diff --git a/Paper/Todo.txt b/Paper/Todo.txt
deleted file mode 100644
index dbe2e6f0a96114cee717888a4517a3067dbc9121..0000000000000000000000000000000000000000
--- a/Paper/Todo.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-- Add macros for the directed MC results
-- Add macros for the all-sky MC results
-- Add macros for the example values
-- Investigate cause of all-sky losses again
-- Run the transient search for a weaker signal
-- Add results from Messenger et al for the transient - is there a fit?
-- Weaken the Bayes factor definitions since they are not used; just put them in context
-
diff --git a/Paper/VolumeConvergenceInvestigation.png b/Paper/VolumeConvergenceInvestigation.png
deleted file mode 100644
index bb30ba721db0fa546b945861ce479d1cecb7854c..0000000000000000000000000000000000000000
Binary files a/Paper/VolumeConvergenceInvestigation.png and /dev/null differ
diff --git a/Paper/VolumeConvergenceInvestigation/VolumeConvergence_repeater.sh b/Paper/VolumeConvergenceInvestigation/VolumeConvergence_repeater.sh
deleted file mode 100755
index 7a50c2490f56a23d0d9f2eaa37218d16e9ba8bcd..0000000000000000000000000000000000000000
--- a/Paper/VolumeConvergenceInvestigation/VolumeConvergence_repeater.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
-export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
-export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
-
-for ((n=0;n<1;n++))
-do
-/home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --no-template-counting --no-interactive
-done
-mv /local/user/gregory.ashton/VolumeConvergenceResults_"$1".txt $(pwd)/CollectedOutput
diff --git a/Paper/VolumeConvergenceInvestigation/generate_data.py b/Paper/VolumeConvergenceInvestigation/generate_data.py
deleted file mode 100644
index 413027d569979609ece20d3707d2320cb7d1ccf2..0000000000000000000000000000000000000000
--- a/Paper/VolumeConvergenceInvestigation/generate_data.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import pyfstat
-import numpy as np
-import os
-import sys
-import time
-
-ID = sys.argv[1]
-outdir = sys.argv[2]
-
-label = 'VCrun_{}'.format(ID)
-data_label = 'VCrunData_{}_'.format(ID)
-results_file_name = '{}/VolumeConvergenceResults_{}.txt'.format(outdir, ID)
-
-# Properties of the GW data
-sqrtSX = 1e-23
-tstart = 1000000000
-Tspan = 100*86400
-tend = tstart + Tspan
-
-# Fixed properties of the signal
-F0 = 30
-F1 = -1e-10
-F2 = 0
-Alpha = np.radians(83.6292)
-Delta = np.radians(22.0144)
-tref = .5*(tstart+tend)
-
-depth = 100
-
-h0 = sqrtSX / float(depth)
-
-psi = np.random.uniform(-np.pi/4, np.pi/4)
-phi = np.random.uniform(0, 2*np.pi)
-cosi = np.random.uniform(-1, 1)
-
-data = pyfstat.Writer(
-    label=data_label, outdir=outdir, tref=tref,
-    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
-    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
-    detectors='L1')
-data.make_data()
-twoF_PM = data.predict_fstat()
-
-nsteps = [500, 500]
-
-Vs = np.arange(50, 550, 50)
-Vs = [50, 150, 200, 250, 350, 400, 450]
-
-for V in Vs:
-
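-    # Prior widths chosen so that the per-parameter metric volumes are
-    # VF0 = VF1 = V, i.e. roughly V unit-mismatch templates across each
-    # prior (cf. the spacings sqrt(3)/(pi T) and sqrt(45/4)/(pi T^2)).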
-    DeltaF0 = V * np.sqrt(3)/(np.pi*Tspan)
-    DeltaF1 = V * np.sqrt(45/4.)/(np.pi*Tspan**2)
-
-    startTime = time.time()
-    theta_prior = {'F0': {'type': 'unif',
-                          'lower': F0-DeltaF0/2.0,
-                          'upper': F0+DeltaF0/2.0},
-                   'F1': {'type': 'unif',
-                          'lower': F1-DeltaF1/2.0,
-                          'upper': F1+DeltaF1/2.0},
-                   'F2': F2,
-                   'Alpha': Alpha,
-                   'Delta': Delta
-                   }
-
-    ntemps = 3
-    log10temperature_min = -0.5
-    nwalkers = 100
-
-    mcmc = pyfstat.MCMCSearch(
-        label=label, outdir=outdir, nsteps=nsteps,
-        sftfilepath='{}/*{}*sft'.format(outdir, data_label),
-        theta_prior=theta_prior,
-        tref=tref, minStartTime=tstart, maxStartTime=tend,
-        nwalkers=nwalkers, ntemps=ntemps,
-        log10temperature_min=log10temperature_min)
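-    # Monitor convergence with the Gelman-Rubin statistic (PSRF), computed
-    # periodically over chunks of the chains; 1.1 is a common rule-of-thumb
-    # threshold for approximate convergence. Early stopping is disabled so
-    # every value of V uses the full nsteps budget.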
-    mcmc.setup_convergence_testing(
-        convergence_period=10, convergence_length=10,
-        convergence_burnin_fraction=0.1, convergence_threshold_number=5,
-        convergence_threshold=1.1, convergence_early_stopping=False)
-    mcmc.run(create_plots=False,
-             log_table=False, gen_tex_table=False
-             )
-    mcmc.print_summary()
-    cdF0, cdF1 = mcmc.convergence_diagnostic[-1]
-    d, maxtwoF = mcmc.get_max_twoF()
-    d_med_std = mcmc.get_median_stds()
-    dF0 = F0 - d['F0']
-    dF1 = F1 - d['F1']
-    F0_std = d_med_std['F0_std']
-    F1_std = d_med_std['F1_std']
-    runTime = time.time() - startTime
-    with open(results_file_name, 'a') as f:
-        f.write('{} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.5e} {:1.5e} {}\n'
-                .format(V, dF0, dF1, F0_std, F1_std, DeltaF0, DeltaF1, maxtwoF, twoF_PM, cdF0, cdF1,
-                        runTime))
-    os.system('rm {}/*{}*'.format(outdir, label))
-
-os.system('rm {}/*{}*'.format(outdir, data_label))
diff --git a/Paper/VolumeConvergenceInvestigation/plot_data.py b/Paper/VolumeConvergenceInvestigation/plot_data.py
deleted file mode 100644
index a88534298f0081a3f7a91898858ebec4386f0735..0000000000000000000000000000000000000000
--- a/Paper/VolumeConvergenceInvestigation/plot_data.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import matplotlib.pyplot as plt
-import pandas as pd
-import numpy as np
-import os
-from tqdm import tqdm
-from oct2py import octave
-import glob
-import scipy.stats
-
-filenames = glob.glob("CollectedOutput/*.txt")
-
-plt.style.use('paper')
-
-Tspan = 100 * 86400
-
-df_list = []
-for fn in filenames:
-    if '3356003' in fn:
-        continue
-    else:
-        df = pd.read_csv(
-            fn, sep=' ', names=['V', 'dF0', 'dF1', 'F0_std', 'F1_std', 'DeltaF0',
-                                'DeltaF1', 'MaxtwoF', 'twoF_PM', 'cdF0', 'cdF1', 'runTime'])
-        df['mismatch'] = (df.twoF_PM - df.MaxtwoF)/df.twoF_PM
-        df['F0_fraction'] = df.F0_std / df.DeltaF0
-        df['F1_fraction'] = df.F1_std / df.DeltaF1
-        df['CLUSTER_ID'] = fn.split('_')[1]
-        df['RUN_ID'] = fn.split('_')[2]
-        df_list.append(df)
-
-df = pd.concat(df_list)
-
-df.sort_values('V', inplace=True)
-
-df['cd_ave'] = df[['cdF0', 'cdF1']].mean(axis=1)
-df['fraction_ave'] = df[['F0_fraction', 'F1_fraction']].mean(axis=1)
-df = df[df.V <= 500]
-
-fig, (ax2, ax3) = plt.subplots(nrows=2, figsize=(3.2, 4), sharex=True)
-
-print(df.groupby('V')['dF0'].count())
-
-
-Vs = df['V'].unique()
-mismatch_ave = df.groupby('V')['mismatch'].mean()
-ave_cd_ave = df.groupby('V')['cd_ave'].apply(scipy.stats.mstats.gmean)
-cdF0_ave = df.groupby('V')['cdF0'].mean()
-cdF1_ave = df.groupby('V')['cdF1'].mean()
-ave_fraction_ave = df.groupby('V')['fraction_ave'].median()
-F0_fraction_ave = df.groupby('V')['F0_fraction'].mean()
-F1_fraction_ave = df.groupby('V')['F1_fraction'].mean()
-
-ax2.plot(df.V, df.cd_ave, 'o', color='k', alpha=0.5)
-#ax2.plot(Vs, ave_cd_ave, color='r')
-ax2.set_ylim(0, 10)
-ax2.set_ylabel(r'$\langle \textrm{PSRF} \rangle$')
-
-ax3.plot(df.V, df.fraction_ave, 'o', color='k', alpha=0.2)
-#ax3.plot(Vs, ave_fraction_ave, color='r')
-ax3.set_ylabel(r'$\langle \textrm{posterior std. / prior width} \rangle$')
-ax3.set_xlabel(
-    r'$\mathcal{V}_\mathrm{PE}^{(0)}=\mathcal{V}_\mathrm{PE}^{(1)}=\sqrt{\mathcal{V}}$')
-ax3.set_xlim(0, 525)
-
-ax3.set_xticks(Vs)
-
-fig.tight_layout()
-fig.savefig('VolumeConvergenceInvestigation.png')
diff --git a/Paper/VolumeConvergenceInvestigation/submitfile b/Paper/VolumeConvergenceInvestigation/submitfile
deleted file mode 100644
index e656190d806b6217d70673dab20228e08f733316..0000000000000000000000000000000000000000
--- a/Paper/VolumeConvergenceInvestigation/submitfile
+++ /dev/null
@@ -1,12 +0,0 @@
-Executable=VolumeConvergence_repeater.sh
-Arguments=$(Cluster)_$(Process)
-Universe=vanilla
-Input=/dev/null
-accounting_group=ligo.dev.o2.cw.directedisolatedother.semicoherent 
-Output=CollectedOutput/out.$(Cluster).$(Process)
-Error=CollectedOutput/err.$(Cluster).$(Process)
-Log=CollectedOutput/log.$(Cluster).$(Process)
-request_cpus = 1
-request_memory = 16 GB
-
-Queue 100
diff --git a/Paper/allsky_noise_twoF_histogram.png b/Paper/allsky_noise_twoF_histogram.png
deleted file mode 100644
index cc7db29c229e8af0217d8a219d97ebc17c5b8ff5..0000000000000000000000000000000000000000
Binary files a/Paper/allsky_noise_twoF_histogram.png and /dev/null differ
diff --git a/Paper/allsky_recovery.png b/Paper/allsky_recovery.png
deleted file mode 100644
index 456f4a0917ce122a02ca34c5d0c59a03d3c83c68..0000000000000000000000000000000000000000
Binary files a/Paper/allsky_recovery.png and /dev/null differ
diff --git a/Paper/allsky_setup_run_setup.tex b/Paper/allsky_setup_run_setup.tex
deleted file mode 100644
index 5cf90e6b384dd79060730d0c99107b317c8f4cb3..0000000000000000000000000000000000000000
--- a/Paper/allsky_setup_run_setup.tex
+++ /dev/null
@@ -1,8 +0,0 @@
-\begin{tabular}{c|cccccc}
-Stage & $\Nseg$ & $\Tcoh^{\rm days}$ &$\Nsteps$ & $\V$ & $\Vsky$ & $\Vpe$ \\ \hline
-0 & 20 & 5.0 & 50 & $4.2{\times}10^{2}$ & 7.5 & 56.0 \\
-1 & 11 & 9.1 & 50 & $4.4{\times}10^{3}$ & 24.0 & $1.8{\times}10^{2}$ \\
-2 & 6 & 16.7 & 50 & $4.2{\times}10^{4}$ & 69.0 & $6.1{\times}10^{2}$ \\
-3 & 3 & 33.3 & 50 & $2.8{\times}10^{5}$ & $1.2{\times}10^{2}$ & $2.4{\times}10^{3}$ \\
-4 & 1 & 100.0 & 50,50 & $2.0{\times}10^{6}$ & $2.0{\times}10^{2}$ & $1.0{\times}10^{4}$ \\
-\end{tabular}
diff --git a/Paper/bibliography.bib b/Paper/bibliography.bib
deleted file mode 100644
index 7fba00b6fbe1503c2221d75c12828bc91ff19b79..0000000000000000000000000000000000000000
--- a/Paper/bibliography.bib
+++ /dev/null
@@ -1,561 +0,0 @@
-@ARTICLE{jks1998,
-   author = {{Jaranowski}, P. and {Kr{\'o}lak}, A. and {Schutz}, B.~F.},
-    title = "{Data analysis of gravitational-wave signals from spinning neutron stars: The signal and its detection}",
-  journal = {\prd},
-   eprint = {gr-qc/9804014},
- keywords = {Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Gravitational wave detectors and experiments, Mathematical procedures and computer techniques, Pulsars},
-     year = 1998,
-    month = sep,
-   volume = 58,
-   number = 6,
-      eid = {063001},
-    pages = {063001},
-      doi = {10.1103/PhysRevD.58.063001},
-   adsurl = {http://adsabs.harvard.edu/abs/1998PhRvD..58f3001J},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{brady1998,
-   author = {{Brady}, P.~R. and {Creighton}, T. and {Cutler}, C. and {Schutz}, B.~F.
-	},
-    title = "{Searching for periodic sources with LIGO}",
-  journal = {\prd},
-   eprint = {gr-qc/9702050},
- keywords = {Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Gravitational wave detectors and experiments, Mathematical procedures and computer techniques, Pulsars},
-     year = 1998,
-    month = feb,
-   volume = 57,
-    pages = {2101-2116},
-      doi = {10.1103/PhysRevD.57.2101},
-   adsurl = {http://adsabs.harvard.edu/abs/1998PhRvD..57.2101B},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{brady2000,
-   author = {{Brady}, P.~R. and {Creighton}, T.},
-    title = "{Searching for periodic sources with LIGO. II. Hierarchical searches}",
-  journal = {\prd},
-   eprint = {gr-qc/9812014},
- keywords = {Gravitational wave detectors and experiments, Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Mathematical procedures and computer techniques, Pulsars},
-     year = 2000,
-    month = apr,
-   volume = 61,
-   number = 8,
-      eid = {082001},
-    pages = {082001},
-      doi = {10.1103/PhysRevD.61.082001},
-   adsurl = {http://adsabs.harvard.edu/abs/2000PhRvD..61h2001B},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{shaltev2013,
-   author = {{Shaltev}, M. and {Prix}, R.},
-    title = "{Fully coherent follow-up of continuous gravitational-wave candidates}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {1303.2471},
- primaryClass = "gr-qc",
- keywords = {Gravitational waves: theory},
-     year = 2013,
-    month = apr,
-   volume = 87,
-   number = 8,
-      eid = {084057},
-    pages = {084057},
-      doi = {10.1103/PhysRevD.87.084057},
-   adsurl = {http://adsabs.harvard.edu/abs/2013PhRvD..87h4057S},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-
-@ARTICLE{prix2012,
-   author = {{Prix}, R. and {Shaltev}, M.},
-    title = "{Search for continuous gravitational waves: Optimal StackSlide method at fixed computing cost}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {1201.4321},
- primaryClass = "gr-qc",
- keywords = {Gravitational waves: theory, Gravitational-wave astrophysics, Gravitational wave detectors and experiments, Data analysis: algorithms and implementation, data management},
-     year = 2012,
-    month = apr,
-   volume = 85,
-   number = 8,
-      eid = {084010},
-    pages = {084010},
-      doi = {10.1103/PhysRevD.85.084010},
-   adsurl = {http://adsabs.harvard.edu/abs/2012PhRvD..85h4010P},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{krishnan2004,
-   author = {{Krishnan}, B. and {Sintes}, A.~M. and {Papa}, M.~A. and {Schutz}, B.~F. and 
-	{Frasca}, S. and {Palomba}, C.},
-    title = "{Hough transform search for continuous gravitational waves}",
-  journal = {\prd},
-   eprint = {gr-qc/0407001},
- keywords = {Gravitational wave detectors and experiments, Wave generation and sources, Data analysis: algorithms and implementation, data management, Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques},
-     year = 2004,
-    month = oct,
-   volume = 70,
-   number = 8,
-      eid = {082001},
-    pages = {082001},
-      doi = {10.1103/PhysRevD.70.082001},
-   adsurl = {http://adsabs.harvard.edu/abs/2004PhRvD..70h2001K},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-
-@ARTICLE{astone2014,
-   author = {{Astone}, P. and {Colla}, A. and {D'Antonio}, S. and {Frasca}, S. and 
-	{Palomba}, C.},
-    title = "{Method for all-sky searches of continuous gravitational wave signals using the frequency-Hough transform}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {1407.8333},
- primaryClass = "astro-ph.IM",
- keywords = {Gravitational waves: theory, Gravitational wave detectors and experiments, Neutron stars},
-     year = 2014,
-    month = aug,
-   volume = 90,
-   number = 4,
-      eid = {042002},
-    pages = {042002},
-      doi = {10.1103/PhysRevD.90.042002},
-   adsurl = {http://adsabs.harvard.edu/abs/2014PhRvD..90d2002A},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{cutler2005,
-   author = {{Cutler}, C. and {Gholami}, I. and {Krishnan}, B.},
-    title = "{Improved stack-slide searches for gravitational-wave pulsars}",
-  journal = {\prd},
-   eprint = {gr-qc/0505082},
- keywords = {Gravitational wave detectors and experiments, Mathematical procedures and computer techniques, Pulsars},
-     year = 2005,
-    month = aug,
-   volume = 72,
-   number = 4,
-      eid = {042004},
-    pages = {042004},
-      doi = {10.1103/PhysRevD.72.042004},
-   adsurl = {http://adsabs.harvard.edu/abs/2005PhRvD..72d2004C},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{prix2009,
-   author = {{Prix}, R. and {Krishnan}, B.},
-    title = "{Targeted search for continuous gravitational waves: Bayesian versus maximum-likelihood statistics}",
-  journal = {Classical and Quantum Gravity},
-archivePrefix = "arXiv",
-   eprint = {0907.2569},
- primaryClass = "gr-qc",
-     year = 2009,
-    month = oct,
-   volume = 26,
-   number = 20,
-      eid = {204013},
-    pages = {204013},
-      doi = {10.1088/0264-9381/26/20/204013},
-   adsurl = {http://adsabs.harvard.edu/abs/2009CQGra..26t4013P},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@Article{	  foreman-mackay2013,
-  author	= {{Foreman-Mackey}, D. and {Hogg}, D.~W. and {Lang}, D. and
-		  {Goodman}, J. },
-  title		= "{emcee: The MCMC Hammer}",
-  journal	= {PASP},
-  archiveprefix	= "arXiv",
-  eprint	= {1202.3665},
-  primaryclass	= "astro-ph.IM",
-  keywords	= {Data Analysis and Techniques},
-  year		= 2013,
-  month		= mar,
-  volume	= 125,
-  pages		= {306-312},
-  doi		= {10.1086/670067},
-  adsurl	= {http://adsabs.harvard.edu/abs/2013PASP..125..306F},
-  adsnote	= {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@Article{	  goodman2010,
-  author	= {{Goodman}, J. and {Weare}, J.},
-  title		= {Ensemble samplers with affine invariance},
-  journal	= {Comm. App. Math. Comp. Sci.},
-  year		= 2010,
-  volume	= 5,
-  pages     = {65-80},
-}
-
-@article{     swendsen1986,
-  title = {Replica Monte Carlo Simulation of Spin-Glasses},
-  author = {Swendsen, Robert H. and Wang, Jian-Sheng},
-  journal = {Phys. Rev. Lett.},
-  volume = {57},
-  issue = {21},
-  pages = {2607--2609},
-  numpages = {0},
-  year = {1986},
-  month = {Nov},
-  publisher = {American Physical Society},
-  doi = {10.1103/PhysRevLett.57.2607},
-  url = {http://link.aps.org/doi/10.1103/PhysRevLett.57.2607}
-}
-
-@Article{goggans2004,
-  author = "Goggans, Paul M. and Chi, Ying",
-  title = "Using Thermodynamic Integration to Calculate the Posterior Probability in Bayesian Model Selection Problems",
-  journal = "AIP Conference Proceedings",
-  year = "2004",
-  volume = "707",
-  number = "1", 
-  pages = "59-66",
-  url = "http://scitation.aip.org/content/aip/proceeding/aipcp/10.1063/1.1751356",
-  doi = "http://dx.doi.org/10.1063/1.1751356" 
-}
-
-@ARTICLE{wette2013,
-   author = {{Wette}, K. and {Prix}, R.},
-    title = "{Flat parameter-space metric for all-sky searches for gravitational-wave pulsars}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {1310.5587},
- primaryClass = "gr-qc",
- keywords = {Gravitational wave detectors and experiments, Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Mathematical procedures and computer techniques, Neutron stars},
-     year = 2013,
-    month = dec,
-   volume = 88,
-   number = 12,
-      eid = {123005},
-    pages = {123005},
-      doi = {10.1103/PhysRevD.88.123005},
-   adsurl = {http://adsabs.harvard.edu/abs/2013PhRvD..88l3005W},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{wette2015,
-   author = {{Wette}, K.},
-    title = "{Parameter-space metric for all-sky semicoherent searches for gravitational-wave pulsars}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {1508.02372},
- primaryClass = "gr-qc",
- keywords = {Gravitational wave detectors and experiments, Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Mathematical procedures and computer techniques, Neutron stars},
-     year = 2015,
-    month = oct,
-   volume = 92,
-   number = 8,
-      eid = {082003},
-    pages = {082003},
-      doi = {10.1103/PhysRevD.92.082003},
-   adsurl = {http://adsabs.harvard.edu/abs/2015PhRvD..92h2003W},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@ARTICLE{prix2007,
-   author = {{Prix}, R.},
-    title = "{Template-based searches for gravitational waves: efficient lattice covering of flat parameter spaces}",
-  journal = {Classical and Quantum Gravity},
-archivePrefix = "arXiv",
-   eprint = {0707.0428},
- primaryClass = "gr-qc",
-     year = 2007,
-    month = oct,
-   volume = 24,
-    pages = {S481-S490},
-      doi = {10.1088/0264-9381/24/19/S11},
-   adsurl = {http://adsabs.harvard.edu/abs/2007CQGra..24S.481P},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@article{prix2007metric,
-    archivePrefix = {arXiv},
-    arxivId = {gr-qc/0606088},
-    author = {Prix, Reinhard},
-    doi = {10.1103/PhysRevD.75.023004},
-    eprint = {gr-qc/0606088},
-    issn = {15507998},
-    journal = {Physical Review D - Particles, Fields, Gravitation and Cosmology},
-    number = {2},
-    pages = {1--20},
-    primaryClass = {gr-qc},
-    title = {{Search for continuous gravitational waves: Metric of the multidetector F-statistic}},
-    volume = {75},
-    year = {2007}
-}
-
-
-@ARTICLE{cutlershutz2005,
-   author = {{Cutler}, C. and {Schutz}, B.~F.},
-    title = "{Generalized F-statistic: Multiple detectors and multiple gravitational wave pulsars}",
-  journal = {\prd},
-   eprint = {gr-qc/0504011},
- keywords = {Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Gravitational wave detectors and experiments, Mathematical procedures and computer techniques, Pulsars},
-     year = 2005,
-    month = sep,
-   volume = 72,
-   number = 6,
-      eid = {063006},
-    pages = {063006},
-      doi = {10.1103/PhysRevD.72.063006},
-   adsurl = {http://adsabs.harvard.edu/abs/2005PhRvD..72f3006C},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@Article{	  prix2005,
-  author	= {{Prix}, R. and {Itoh}, Y.},
-  title		= "{Global parameter-space correlations of coherent searches
-		  for continuous gravitational waves}",
-  journal	= {Classical and Quantum Gravity},
-  eprint	= {gr-qc/0504006},
-  year		= 2005,
-  month		= sep,
-  volume	= 22,
-  pages		= {1003},
-  doi		= {10.1088/0264-9381/22/18/S14},
-  adsurl	= {http://adsabs.harvard.edu/abs/2005CQGra..22S1003P},
-  adsnote	= {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@misc{lalsuite,
-  title = {{LALSuite: FreeSoftware (GPL) Tools for Data-Analysis}},
-  author = {{LIGO Scientific Collaboration}},
-  year   = 2014,
-  note    = {\url{https://www.lsc-group.phys.uwm.edu/daswg/projects/lalsuite.html}}
-}
-
-@ARTICLE{allyskyS42008,
-   author = {{Abbott}, B. and {Abbott}, R. and {Adhikari}, R. and {Agresti}, J. and 
-	{Ajith}, P. and {Allen}, B. and {Amin}, R. and {Anderson}, S.~B. and 
-	{Anderson}, W.~G. and {Arain}, M. and et al.},
-    title = "{All-sky search for periodic gravitational waves in LIGO S4 data}",
-  journal = {\prd},
-archivePrefix = "arXiv",
-   eprint = {0708.3818},
- primaryClass = "gr-qc",
- keywords = {Gravitational wave detectors and experiments, Data analysis: algorithms and implementation, data management, Gravitational radiation detectors, mass spectrometers, and other instrumentation and techniques, Pulsars},
-     year = 2008,
-    month = jan,
-   volume = 77,
-   number = 2,
-      eid = {022001},
-    pages = {022001},
-      doi = {10.1103/PhysRevD.77.022001},
-   adsurl = {http://adsabs.harvard.edu/abs/2008PhRvD..77b2001A},
-  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
-}
-
-@article{pletsch2010,
-    archivePrefix = {arXiv},
-    arxivId = {1005.0395},
-    author = {Pletsch, Holger J.},
-    doi = {10.1103/PhysRevD.82.042002},
-    eprint = {1005.0395},
-    issn = {15507998},
-    journal = {Physical Review D},
-    number = {4},
-    title = {{Parameter-space metric of semicoherent searches for continuous gravitational waves}},
-    volume = {82},
-    year = {2010}
-}
-
-@article{leaci2015,
-    archivePrefix = {arXiv},
-    arxivId = {1502.00914},
-    author = {Leaci, Paola and Prix, Reinhard},
-    doi = {10.1103/PhysRevD.91.102003},
-    eprint = {1502.00914},
-    issn = {15502368},
-    journal = {Physical Review D},
-    number = {10},
-    pages = {1--25},
-    title = {{Directed searches for continuous gravitational waves from binary systems: Parameter-space metrics and optimal scorpius X-1 sensitivity}},
-    volume = {91},
-    year = {2015}
-}
-
-@phdthesis{veitch2007,
-  title={Applications of Markov Chain Monte Carlo methods to continuous gravitational wave data analysis},
-  author={Veitch, John D},
-  year={2007},
-  school={University of Glasgow}
-}
-
-@article{prix2011,
-author = {Prix, R. and Giampanis, S. and Messenger, C.},
-doi = {10.1103/PhysRevD.84.023007},
-isbn = {0556-2821},
-issn = {15507998},
-journal = {Physical Review D},
-number = {2},
-pages = {1--20},
-title = {{Search method for long-duration gravitational-wave transients from neutron stars}},
-volume = {84},
-year = {2011}
-}
-
-@article{ashton2015,
-archivePrefix = {arXiv},
-arxivId = {1410.8044},
-author = {Ashton, G. and Jones, D. I. and Prix, R.},
-doi = {10.1103/PhysRevD.91.062009},
-eprint = {1410.8044},
-issn = {15502368},
-journal = {Physical Review D - Particles, Fields, Gravitation and Cosmology},
-number = {6},
-pages = {1--9},
-title = {{Effect of timing noise on targeted and narrow-band coherent searches for continuous gravitational waves from pulsars}},
-volume = {91},
-year = {2015}
-}
-
-@article{wette2012,
-    arxivId = {1111.5650},
-    author = {Wette, Karl},
-    doi = {10.1103/PhysRevD.85.042003},
-    eprint = {1111.5650},
-    isbn = {0556-2821},
-    issn = {15507998},
-    journal = {Physical Review D},
-    number = {4},
-    title = {{Estimating the sensitivity of wide-parameter-space searches for gravitational-wave pulsars}},
-    volume = {85},
-    year = {2012}
-}
-
-@article{melatos2008,
-    archivePrefix = {arXiv},
-    arxivId = {0710.1021},
-    author = {Melatos, A. and Peralta, C. and Wyithe, J. S. B.},
-    doi = {10.1086/523349},
-    eprint = {0710.1021},
-    issn = {0004-637X},
-    journal = {The Astrophysical Journal},
-    keywords = {dense matter — pulsars: general — stars: interiors},
-    pages = {1103},
-    title = {{Avalanche dynamics of radio pulsar glitches}},
-    url = {http://arxiv.org/abs/0710.1021},
-    volume = {672},
-    year = {2008}
-}
-
-@article{espinoza2011,
-    archivePrefix = {arXiv},
-    arxivId = {1102.1743},
-    author = {Espinoza, Crist{\'{o}}bal and Lyne, Andrew and Stappers, Ben and Kramer, Michael},
-    doi = {10.1063/1.3615093},
-    eprint = {1102.1743},
-    isbn = {9780735409156},
-    issn = {0094243X},
-    journal = {AIP Conference Proceedings},
-    keywords = {general,glitches,pulsars,stars: neutron},
-    number = {March},
-    pages = {117--120},
-    title = {{Glitches in the rotation of pulsars}},
-    volume = {1357},
-    year = {2011}
-}
-
-@article{cowles1996markov,
-  title={Markov chain Monte Carlo convergence diagnostics: a comparative review},
-  author={Cowles, Mary Kathryn and Carlin, Bradley P},
-  journal={Journal of the American Statistical Association},
-  volume={91},
-  number={434},
-  pages={883--904},
-  year={1996},
-  publisher={Taylor \& Francis}
-}
-
-@article{gelman1992,
-  title={Inference from iterative simulation using multiple sequences},
-  author={Gelman, Andrew and Rubin, Donald B},
-  journal={Statistical science},
-  pages={457--472},
-  year={1992},
-  publisher={JSTOR}
-}
-
-@article{brooks1998,
-  title={General methods for monitoring convergence of iterative simulations},
-  author={Brooks, Stephen P and Gelman, Andrew},
-  journal={Journal of computational and graphical statistics},
-  volume={7},
-  number={4},
-  pages={434--455},
-  year={1998},
-  publisher={Taylor \& Francis}
-}
-
-@Article{	  hobbs2010,
-  author	= {Hobbs, G. and Lyne, A.~G. and Kramer, M.},
-  doi		= {10.1111/j.1365-2966.2009.15938.x},
-  issn		= {00358711},
-  journal	= {Monthly Notices of the Royal Astronomical Society},
-  month		= feb,
-  number	= {2},
-  pages		= {1027-1048},
-  title		= {{An analysis of the timing irregularities for 366
-		  pulsars}},
-  url		= {http://doi.wiley.com/10.1111/j.1365-2966.2009.15938.x},
-  volume	= {402},
-  year		= {2010}
-}
-
-@article{ashton2017,
-  title     = {{Statistical characterization of pulsar glitches and their
-                potential impact on searches for continuous gravitational
-                 waves}},
-  author = {Ashton, G. and Prix, R. and Jones, D. I.},
-  note = {In prep},
-  year = {2017}
-}
-
-@article{Papa2016,
-  title = {{Hierarchical follow-up of subthreshold candidates of an all-sky Einstein@Home search for continuous gravitational waves on LIGO sixth science run data}},
-  author = {{Papa}, M.~A. and {Eggenstein}, H-B. and {Walsh}, S. and {Di Palma}, I. and {Allen}, B. and {Astone}, P. and {Bock}, O. and {Creighton}, T.~D. and {Keitel}, D. and {Machenschalk}, B. and {Prix}, R. and {Siemens}, X. and {Singh}, A. and {Zhu}, S.~J. and {Schutz}, B.~F.},
-  journal = {Phys. Rev. D},
-  volume = {94},
-  issue = {12},
-  pages = {122006},
-  numpages = {14},
-  year = {2016},
-  month = {Dec},
-  publisher = {American Physical Society},
-  doi = {10.1103/PhysRevD.94.122006},
-  url = {http://link.aps.org/doi/10.1103/PhysRevD.94.122006}
-}
-
-@article{dupuis2005,
-  title = {Bayesian estimation of pulsar parameters from gravitational wave data},
-  author = {Dupuis, R\'ejean J. and Woan, Graham},
-  journal = {Phys. Rev. D},
-  volume = {72},
-  issue = {10},
-  pages = {102002},
-  numpages = {9},
-  year = {2005},
-  month = {Nov},
-  publisher = {American Physical Society},
-  doi = {10.1103/PhysRevD.72.102002},
-  url = {http://link.aps.org/doi/10.1103/PhysRevD.72.102002}
-}
-
-@book{mackay2003information,
-      title={Information theory, inference and learning algorithms},
-      author={MacKay, David JC},
-      year={2003},
-      publisher={Cambridge university press}
-}
diff --git a/Paper/definitions.tex b/Paper/definitions.tex
deleted file mode 100644
index 6001da63333382190bce9c7a4c7baaa2486a027d..0000000000000000000000000000000000000000
--- a/Paper/definitions.tex
+++ /dev/null
@@ -1,36 +0,0 @@
-\newcommand{\fdot}{\dot{f}}
-\newcommand{\F}{{\mathcal{F}}}
-\newcommand{\twoFhat}{\widehat{2\F}}
-\newcommand{\twoFtilde}{\widetilde{2\F}}
-\newcommand{\A}{\boldsymbol{\mathcal{A}}}
-\newcommand{\blambda}{\boldsymbol{\mathbf{\lambda}}}
-\newcommand{\blambdaSignal}{\boldsymbol{\mathbf{\lambda}}^{\rm s}}
-\newcommand{\tglitch}{t_{\rm glitch}}
-\newcommand{\tstart}{t_{\rm start}}
-\newcommand{\tend}{t_{\rm end}}
-\newcommand{\Nglitches}{N_{\rm g}}
-\newcommand{\Tspan}{T}
-\newcommand{\Tcoh}{T_{\rm coh}}
-\newcommand{\tref}{t_{\rm ref}}
-\newcommand{\Nseg}{N_{\rm seg}}
-\newcommand{\Nsteps}{N_{\rm steps}}
-\newcommand{\Ntemps}{N_{\rm temps}}
-\newcommand{\Nstages}{N_{\rm stages}}
-\newcommand{\Nspindown}{N_{\rm spindowns}}
-\renewcommand{\H}{\mathcal{H}}
-\newcommand{\Hs}{\H_{\rm s}}
-\newcommand{\Hn}{\H_{\rm n}}
-\newcommand{\ho}{h_0}
-\newcommand{\homax}{\ho^{\rm max}}
-\newcommand{\Bsn}{B_{\rm S/N}}
-\newcommand{\AmplitudePrior}{\Pi_\mathcal{A}}
-\newcommand{\mutilde}{\tilde{\mu}}
-\newcommand{\Sn}{S_{\rm n}}
-\newcommand{\V}{\mathcal{V}}
-\newcommand{\Vsky}{\V_{\rm Sky}}
-\newcommand{\Vpe}{\V_{\rm PE}}
-\newcommand{\smax}{s_{\textrm{max}}}
-\newcommand{\fmax}{f_{\textrm{max}}}
-\newcommand{\rhohatmax}{\hat{\rho}_{\mathrm{max}}}
-\newcommand{\logmintemp}{\log_{10}(T_\textrm{min})}
-
diff --git a/Paper/directed_noise_twoF_histogram.png b/Paper/directed_noise_twoF_histogram.png
deleted file mode 100644
index 0a4d3929b4b4ca1c5f00fe434b1210dc5d1d52bd..0000000000000000000000000000000000000000
Binary files a/Paper/directed_noise_twoF_histogram.png and /dev/null differ
diff --git a/Paper/directed_recovery.png b/Paper/directed_recovery.png
deleted file mode 100644
index 7c93d66c05c4b7f606a4f9fb68da4cc9ec855f90..0000000000000000000000000000000000000000
Binary files a/Paper/directed_recovery.png and /dev/null differ
diff --git a/Paper/directed_setup_run_setup.tex b/Paper/directed_setup_run_setup.tex
deleted file mode 100644
index 271d19f7d08aed21d5d103aa9bd3b2e0dfc35924..0000000000000000000000000000000000000000
--- a/Paper/directed_setup_run_setup.tex
+++ /dev/null
@@ -1,7 +0,0 @@
-\begin{tabular}{c|cccc}
-Stage & $\Nseg$ & $\Tcoh^{\rm days}$ &$\Nsteps$ & $\Vpe$ \\ \hline
-0 & 20 & 5.0 & 25 & 56.0 \\
-1 & 7 & 14.3 & 25 & $4.5{\times}10^{2}$ \\
-2 & 2 & 50.0 & 25 & $5.0{\times}10^{3}$ \\
-3 & 1 & 100.0 & 25,25 & $1.0{\times}10^{4}$ \\
-\end{tabular}
diff --git a/Paper/follow_up_run_setup.tex b/Paper/follow_up_run_setup.tex
deleted file mode 100644
index b7b46fff1e8bd0096a67689a0216ea6cfd8f6299..0000000000000000000000000000000000000000
--- a/Paper/follow_up_run_setup.tex
+++ /dev/null
@@ -1,11 +0,0 @@
-\begin{tabular}{c|cccccc}
-Stage & $\Nseg$ & $\Tcoh^{\rm days}$ &$\Nsteps$ & $\V$ & $\Vsky$ & $\Vpe$ \\ \hline
-0 & 100 & 1.0 & 200 & 95.0 & 6.8 & 14.0 \\
-1 & 56 & 1.8 & 200 & $9.6{\times}10^{2}$ & 22.0 & 45.0 \\
-2 & 31 & 3.2 & 200 & $1.0{\times}10^{4}$ & 69.0 & $1.5{\times}10^{2}$ \\
-3 & 17 & 5.9 & 200 & $1.1{\times}10^{5}$ & $2.3{\times}10^{2}$ & $4.8{\times}10^{2}$ \\
-4 & 9 & 11.1 & 200 & $1.3{\times}10^{6}$ & $7.5{\times}10^{2}$ & $1.7{\times}10^{3}$ \\
-5 & 5 & 20.0 & 200 & $1.1{\times}10^{7}$ & $2.0{\times}10^{3}$ & $5.5{\times}10^{3}$ \\
-6 & 2 & 50.0 & 200 & $1.0{\times}10^{8}$ & $3.3{\times}10^{3}$ & $3.1{\times}10^{4}$ \\
-7 & 1 & 100.0 & 200,200 & $2.7{\times}10^{8}$ & $4.4{\times}10^{3}$ & $6.2{\times}10^{4}$ \\
-\end{tabular}
diff --git a/Paper/follow_up_walkers.png b/Paper/follow_up_walkers.png
deleted file mode 100644
index 5a6c9fdc23cc6fdfbd6b40751041fe5171f8d2be..0000000000000000000000000000000000000000
Binary files a/Paper/follow_up_walkers.png and /dev/null differ
diff --git a/Paper/fully_coherent_search_using_MCMC_convergence_walkers.png b/Paper/fully_coherent_search_using_MCMC_convergence_walkers.png
deleted file mode 100644
index 09ca2a1925d72be1979c0f7c03c0dda1089b197f..0000000000000000000000000000000000000000
Binary files a/Paper/fully_coherent_search_using_MCMC_convergence_walkers.png and /dev/null differ
diff --git a/Paper/fully_coherent_search_using_MCMC_walkers.png b/Paper/fully_coherent_search_using_MCMC_walkers.png
deleted file mode 100644
index e50c8205bf3a63fd851b3fab2f1f1fc5461db0a1..0000000000000000000000000000000000000000
Binary files a/Paper/fully_coherent_search_using_MCMC_walkers.png and /dev/null differ
diff --git a/Paper/git-tag.sh b/Paper/git-tag.sh
deleted file mode 100755
index e6c8b6b93e0077397d998e496a2d8cb335515b7c..0000000000000000000000000000000000000000
--- a/Paper/git-tag.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-FILE="$1"
-if [ "x$FILE" = x ]; then
-    echo "usage: $0 <file.tex>" >&2
-    exit 1
-fi
-TMPFILE="$FILE.tmp"
-
-gitLOG=`git log -1 --pretty="format:%% generated by $0
-\\newcommand{\\commitDATE}{%ai}
-\\newcommand{\\commitID}{commitID: %H}
-\\newcommand{\\commitIDshort}{commitID: %h}
-"`
-echo "$gitLOG" > $TMPFILE
-
-diffcmd="git diff -- .";
-diff=`$diffcmd`;
-if test -n "$diff"; then
-    echo "\\newcommand{\\commitSTATUS}{UNCLEAN}" >> $TMPFILE
-else
-    echo "\\newcommand{\\commitSTATUS}{CLEAN}" >> $TMPFILE
-fi
-
-if cmp -s $TMPFILE $FILE; then
-    rm -f $TMPFILE
-else
-    mv -f $TMPFILE $FILE
-fi
diff --git a/Paper/glitch_noise_twoF_histogram.png b/Paper/glitch_noise_twoF_histogram.png
deleted file mode 100644
index 16ea1846e98658bf2f0330cf1031edd490a8b2ed..0000000000000000000000000000000000000000
Binary files a/Paper/glitch_noise_twoF_histogram.png and /dev/null differ
diff --git a/Paper/grided_frequency_search_1D.png b/Paper/grided_frequency_search_1D.png
deleted file mode 100644
index 5005592fb5a688c71437d82892154b81d2f5830d..0000000000000000000000000000000000000000
Binary files a/Paper/grided_frequency_search_1D.png and /dev/null differ
diff --git a/Paper/macros.tex b/Paper/macros.tex
deleted file mode 100644
index 0de50ac40df897b72567a1f939881dc1a4743743..0000000000000000000000000000000000000000
--- a/Paper/macros.tex
+++ /dev/null
@@ -1,34 +0,0 @@
-\def\AllSkyMCNoiseN{1.0{\times}10^{4}}
-\def\AllSkyMCNoiseOnlyMaximum{59.8}
-\def\BasicExampleDeltaFone{1.0{\times}10^{-13}}
-\def\BasicExampleDeltaFzero{1.0{\times}10^{-7}}
-\def\BasicExampleDepth{10.0}
-\def\BasicExampleFone{-1.0{\times}10^{-10}}
-\def\BasicExampleFzero{30}
-\def\BasicExampleSqrtSn{1.0{\times}10^{-23}}
-\def\BasicExampleV{120.0}
-\def\BasicExampleVFone{49.0}
-\def\BasicExampleVFzero{2.5}
-\def\BasicExamplehzero{1.0{\times}10^{-24}}
-\def\BasicExamplenburn{50.0}
-\def\BasicExamplenprod{50.0}
-\def\DirectedMCNoiseN{1.0{\times}10^{4}}
-\def\DirectedMCNoiseOnlyMaximum{52.4}
-\def\DirectedMConeGlitchNoiseN{10000}
-\def\DirectedMConeGlitchNoiseOnlyMaximum{82.6}
-\def\DirectedMCtwoGlitchNoiseN{9625}
-\def\DirectedMCtwoGlitchNoiseOnlyMaximum{87.8}
-\def\GridedFrequencySqrtSx{$1.0{\times}10^{-23}$}
-\def\GridedFrequencyT{1}
-\def\GridedFrequencyhzero{$5.0{\times}10^{-24}$}
-\def\SingleGlitchDepth{10.0}
-\def\SingleGlitchFCMismatch{0.7}
-\def\SingleGlitchFCtwoF{718.5}
-\def\SingleGlitchFCtwoFPM{2645.7}
-\def\SingleGlitchFone{-1e-10}
-\def\SingleGlitchFzero{30.0}
-\def\SingleGlitchGlitchtwoF{2622.1}
-\def\SingleGlitchR{0.5}
-\def\SingleGlitchTspan{100.0}
-\def\SingleGlitchdeltaFone{$-2.9{\times}10^{-13}$}
-\def\SingleGlitchdeltaFzero{$3.2{\times}10^{-6}$}
diff --git a/Paper/paper_cw_mcmc.tex b/Paper/paper_cw_mcmc.tex
deleted file mode 100644
index 8b0e9a55fb901469be2c46155568b7b8200aa33f..0000000000000000000000000000000000000000
--- a/Paper/paper_cw_mcmc.tex
+++ /dev/null
@@ -1,1524 +0,0 @@
-\documentclass[aps, prd, twocolumn, superscriptaddress, floatfix, showpacs, nofootinbib, longbibliography]{revtex4-1}
-
-% Fixes Adobe error (131)
-\pdfminorversion=4
-
-% Ams math
-\usepackage{amsmath}
-\usepackage{amssymb}
-
-% Packages for setting figures
-\usepackage[]{graphicx}
-\usepackage{subfig}
-
-% Colors
-\usepackage{color}
-
-% Bibliography
-\usepackage{natbib}
-
-\usepackage{placeins}
-
-\usepackage{multirow}
-
-\usepackage{hhline}
-
-% get hyperlinks
-%\usepackage{hyperref}
-
-% Tables
-\newcommand{\specialcell}[2][c]{%
-  \begin{tabular}[#1]{@{}c@{}}#2\end{tabular}}
-
-\input{definitions.tex}
-\input{macros}
-
-% For editing purposes: remove before submission
-\usepackage[normalem]{ulem}	%% only added for 'strikeout' \sout
-\usepackage[usenames,dvipsnames]{xcolor}
-
-\newcommand{\dcc}{LIGO-{\color{red}}}
-
-%% ---------- editing/commenting macros: make sure all are cleared at the end! ----------
-\newcommand{\mygreen}{\color{green!50!black}}
-\newcommand{\addtext}[1]{\textcolor{green!50!black}{#1}}
-\newcommand{\meta}[1]{\addtext{#1}}
-\newcommand{\CHECK}[1]{\textcolor{red}{#1}}
-\newcommand{\strike}[1]{\textcolor{red}{\sout{#1}}}
-\newcommand{\comment}[1]{\textcolor{red}{[#1]}}
-\newcommand{\replace}[2]{\strike{#1}\addtext{$\rightarrow$ #2}}
-%% ---------- end: editing/commenting macros ----------------------------------------
-
-\begin{document}
-
-\title{MCMC follow-up of continuous gravitational wave candidates}
-
-%    \author{G. Ashton}
-%    \email[E-mail: ]{gregory.ashton@ligo.org}
-%    \affiliation{Max Planck Institut f{\"u}r Gravitationsphysik
-%                 (Albert Einstein Institut) and Leibniz Universit\"at Hannover,
-%                 30161 Hannover, Germany}
-%    \author{R. Prix}
-%    \affiliation{Max Planck Institut f{\"u}r Gravitationsphysik
-%                 (Albert Einstein Institut) and Leibniz Universit\"at Hannover,
-%                 30161 Hannover, Germany}
-
-\date{\today}
-
-\begin{abstract}
-
-We describe methods to follow up potential continuous gravitational wave
-candidates (as identified by wide-parameter-space semi-coherent searches)
-leveraging MCMC optimisation of the $\mathcal{F}$-statistic. Such a framework
-provides a unique advantage both in following up standard signals (i.e. during
-the `zoom stage', in which the coherence time is increased with the aim of
-localising the fully-coherent candidate, and in parameter estimation) and in
-the follow-up of non-standard waveforms. In particular, we provide a
-glitch-robust statistic for the follow-up of glitching continuous
-gravitational wave signals.
-
-\end{abstract}
-
-\pacs{04.80.Nn, 97.60.Jd, 04.30.Db}
-\input{git_tag.tex}
-\date{\commitDATE; \commitIDshort-\commitSTATUS, \dcc}
-
-\maketitle
-
-
-\section{Introduction}
-
-One target for the advanced gravitational wave detector network of LIGO
-and Virgo is long-lived periodic sources called continuous gravitational waves (CWs).
-Rapidly rotating nonaxisymmetric neutron stars are potentially capable of
-producing detectable CWs which last much longer than typical observation spans.
-There exist three well-known sources of the nonaxisymmetry: `mountains',
-precession, and r-mode oscillations, each of which makes a prediction for the
-scaling between $\nu$, the neutron star spin frequency, and $f$, the
-gravitational wave frequency. In any case, observing neutron stars through
-their gravitational wave emission would provide a unique astrophysical insight
-and has hence motivated numerous searches.
-
-As shown by \citet{jks1998}, the gravitational wave signal from a
-nonaxisymmetric source produces a strain in the detector $h(t, \A, \blambda)$;
-where $\A{=}\{h_0, \cos\iota, \psi, \phi_0\}$ is a vector of the four
-\emph{amplitude-parameters} (expressed in `physical coordinates') and
-$\blambda$ is a vector of the \emph{Doppler-parameters} consisting of the
-sky-location, frequency $f$, and any spindown terms $\{\dot{f}, \ddot{f},
-\ldots\}$.
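-For concreteness, the strain can be written in the standard form of
-\citet{jks1998} as $h(t) = F_{+}(t;\psi)h_{+}(t) + F_{\times}(t;\psi)h_{\times}(t)$,
-where $F_{+}$ and $F_{\times}$ are the detector antenna-pattern functions and
-\begin{equation}
-h_{+}(t) = h_0\frac{1+\cos^{2}\iota}{2}\cos\Phi(t), \qquad
-h_{\times}(t) = h_0\cos\iota\sin\Phi(t),
-\end{equation}
-with the phase evolution $\Phi(t)$ determined by $\phi_0$ and the
-Doppler-parameters $\blambda$.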
-
-CW searches often use fully-coherent matched-filtering methods whereby a
-\emph{template} (the signal model at some specific set of parameters) is
-convolved against the data, resulting in a detection statistic (in this work, we
-will consider the $\F$-statistic, defined in \citet{jks1998}). Since the signal
-parameters are unknown, it is usual to perform this matched filtering over a
-grid of points to maximise the detection statistic.  Three search categories
-can be identified: \emph{targeted} searches for a signal from a known
-electromagnetic pulsar where the Doppler parameters are considered `known';
-\emph{directed} searches in which the location is known, but not the frequency
-and spin-down (i.e.  searching for the neutron star in a supernova remnant
-which does not have a known pulsar); and \emph{all-sky} searches where none of
-the parameters are considered known.  Searching over more parameters amounts to
-an increase in the dimension of the search space. Since the grid spacing
-required to resolve a signal scales inversely with the total coherence time
-$\Tcoh$ (the span of data used in a fully-coherent matched filter), the
-required density of templates grows with $\Tcoh$, and wide-parameter searches
-(such as the all-sky) with many search dimensions over long durations of data
-are computationally infeasible.
-
-At a fixed computing cost, it has been shown (see for example \citet{brady1998,
-prix2012}) that a semi-coherent search is more sensitive to unknown signals
-than a fully-coherent search. While the exact details depend on the
-implementation, a semi-coherent search involves dividing the
-total observation span $\Tspan$ into $\Nseg$ segments (each lasting for
-$\Tcoh$), computing the fully-coherent detection statistic in each segment, and
-then combining the results from all segments at the same point in parameter
-space to form the semi-coherent detection statistic. Fundamentally, this
-gain in sensitivity arises because the width of a peak in the detection statistic
-due to a signal is inversely proportional to the coherence time: shorter
-coherence times make the peak wider and hence a lower density of templates is
-required.  This idea was first proposed by \citet{brady2000} along with the
-first implementation, the `Stack-slide' search. Since then, several
-modifications such as the `Hough-transform' \citep{krishnan2004, astone2014}
-and the `Powerflux' method (first described in \citet{allyskyS42008}) have been
-proposed, implemented, and applied to gravitational wave searches.
-
-Wide-parameter-space searches produce a list of candidates whose associated
-detection statistics pass some threshold. In order to verify these
-candidates, they are subjected to a \emph{follow-up}: a process of increasing
-the coherence time, eventually aiming to calculate a fully-coherent detection
-statistic over the maximal span of data. In essence, the semi-coherent search
-is powerful because it spreads the significance of a candidate over a wider
-area of parameter space; the follow-up attempts to reverse this process in
-order to recover the maximum significance and tightly constrain the candidate
-parameters. The original hierarchical follow-up of \citet{brady2000} proposed a
-two-stage method (an initial semi-coherent stage followed directly by a
-fully-coherent search). However, it was shown in a numerical study by
-\citet{cutler2005} that allowing an arbitrary number of semi-coherent stages
-before the final fully-coherent stage can significantly improve the efficiency;
-ultimately, they concluded that three semi-coherent stages provide the best
-trade-off between sensitivity and computational cost.
-
-The first implementation of a two-stage follow-up was given by
-\citet{shaltev2013} and used the Mesh Adaptive Direct Search algorithm for
-optimisation. A multi-stage gridded approach was presented in \citet{Papa2016}.
-
-In this paper, we propose an alternative hierarchical follow-up procedure using
-Markov-chain Monte-Carlo (MCMC) as the optimisation tool. An MCMC tool is
-advantageous due to its ability to trace the evolution of multiple modes
-simultaneously through the follow-up procedure, allowing the optimisation to
-decide between them without arbitrary cuts.
-
-The follow-up of canonical CW signals is a well studied problem and, provided
-they behave as expected, the maximum loss of detection statistic (i.e. the
-template-bank mismatch) can be calculated. However, nature does not always
-behave as expected and it is plausible that the CW signals in the data may
-differ from the templates used in wide-parameter-space searches. There are of
-course an unlimited number of ways this may manifest given our ignorance of
-neutron stars, but from the study of pulsars three obvious mechanisms present
-themselves: \emph{transient}, \emph{glitching}, and \emph{noisy} signals.
-A search method for long-duration transient signals (i.e. signals which
-last from hours to weeks) was first presented by \citet{prix2011};
-in Sec.~\ref{sec_transients} we will describe how MCMC
-optimisation can be used to perform parameter estimation using this method.
-In \citet{ashton2017}, we noted the need for a glitch-robust follow-up method
-(i.e. a search capable of finding signals which undergo sudden changes in the
-rotation frequency, as seen in pulsars); in response, in Sec.~\ref{sec_glitches}
-we provide such a statistic, demonstrate its use on simulated glitching
-signals, and discuss significance estimation. As for noisy signals, specifically
-motivated by the observation of so-called timing noise in radio pulsars (see
-\citet{hobbs2010} for a review), in \citet{ashton2015} we determined that these
-were unlikely to be of immediate concern.
-
-The methods introduced in this paper have been implemented in the
-\texttt{python} package \texttt{pyfstat}. All examples in this work, along with
-tutorials, can be found at
-\url{https://gitlab.aei.uni-hannover.de/GregAshton/PyFstat}.
-
-We begin in Section~\ref{sec_hypothesis_testing} with a review of search
-methods from a Bayesian perspective. Then in
-Section~\ref{sec_MCMC_and_the_F_statistic} we introduce the MCMC optimisation
-procedure and give details of our particular implementation. In
-Section~\ref{sec_follow_up} we describe the zoom follow-up procedure and test
-its efficiency. In Sections~\ref{sec_transients} and \ref{sec_glitches} we
-demonstrate how searches can be performed for either transient or glitching
-CWs, before we finally conclude in Section~\ref{sec_conclusion}.
-
-\section{MCMC and the $\F$-statistic}
-\label{sec_MCMC_and_the_F_statistic}
-
-In this section, we will give a brief introduction to the use of the
-$\F$-statistic in a hypothesis testing framework, introduce conceptual ideas
-about the use of Markov-Chain Monte-Carlo (MCMC) algorithms in this context,
-and develop the idea of using MCMC routines in CW searches.
-
-\subsection{Hypothesis testing framework}
-\label{sec_hypothesis_testing}
-
-In \citet{prix2009}, a framework was introduced demonstrating the use of the
-$\F$-statistic (first discussed in \citet{jks1998}) in defining the \emph{Bayes
-factor} $\Bsn(\boldsymbol{x})$ between the signal hypothesis and the noise
-hypothesis computed on some data $\boldsymbol{x}$.  It was shown that for an
-unphysical uniform prior on the amplitude parameters, the log-Bayes factor was
-proportional to the $\F$-statistic. In this work, we will use the modification
-proposed in \citet{prix2011}, which removed the antenna-pattern weighting
-factor; let us now briefly review this framework.
-
-The Bayes-factor between the signal and noise hypothesis, given the
-`$\F$-statistic prior', is
-\begin{equation}
-\Bsn(\boldsymbol{x}) = \int \Bsn(\boldsymbol{x}| \blambda)P(\blambda) d\blambda
-\label{eqn_Bsn_x}
-\end{equation}
-where $\Bsn(\boldsymbol{x}| \blambda)$ is the
-targeted Bayes factor (i.e. computed at a set of
-Doppler parameters $\blambda$):
-\begin{equation}
-\Bsn(\boldsymbol{x}|\blambda) = \frac{70}{\rhohatmax^{4}}
-e^{\F(\boldsymbol{x}; \blambda)}.
-\label{eqn_CW_likelihood}
-\end{equation}
-Here, $\rhohatmax$ is a maximum cutoff SNR required to normalise the prior.
-The original calculation was done for a transient CW; in
-Eqn.~\eqref{eqn_CW_likelihood} we constrain the system to the special case of
-standard CW signals, for which the marginalisation over the transient
-parameters yields unity.
-
-Furthermore, \citet{prix2011} also demonstrated that the semi-coherent Bayes
-factor arises naturally by splitting the likelihood into $N$ independent
-\emph{segments}, resulting in
-\begin{equation}
-\Bsn^{\textrm{SC}}(\boldsymbol{x}|\blambda) =
-\left(\frac{70}{\rhohatmax^{4}}\right)^{N}
-e^{\sum_{\ell=1}^{N}\F(\boldsymbol{x}_{(\ell)}; \blambda)},
-\label{eqn_SC_CW_likelihood}
-\end{equation}
-where $\boldsymbol{x}_{(\ell)}$ refers to the data in the $\ell^{\mathrm{th}}$
-segment. As such, this semi-coherent Bayes factor can replace the
-fully-coherent term $\Bsn(\boldsymbol{x}|\blambda)$ in Eqn.~\eqref{eqn_Bsn_x}.
-
-
-
-%\section{Hypothesis testing}
-%\label{sec_hypothesis_testing}
-%
-%\subsection{Bayes factors}
-%Given some data $x$ and a set of background assumptions $I$, we formulate
-%two hypotheses: $\Hn$, the data contains solely Gaussian noise and $\Hs$, the
-%data contains an additive mixture of noise and a signal $h(t; \A, \blambda)$.
-%In order to make a quantitative comparison, we use Bayes theorem in the usual
-%way to write the odds as
-%\begin{equation}
-%O_{\rm S/N} \equiv \frac{P(\Hs| x, I)}{P(\Hn| x, I)} =
-%\Bsn(x| I) \frac{P(\Hs| I)}{P(\Hn | I)},
-%\end{equation}
-%where the second factor is the \emph{prior odds} while the first is the
-%\emph{Bayes factor}:
-%\begin{equation}
-%\Bsn(x| I) = \frac{P(x| \Hs, I)}{P(x| \Hn, I)}.
-%\end{equation}
-%Typically, we set the prior odds to unity such that it is the Bayes factor
-%which determines our confidence in the signal hypothesis. In this work, we will
-%therefore discuss the Bayes factor with the implied assumption this is
-%equivalent to the odds, unless we have a good reason to change the prior odds.
-%
-%We can rewrite the Bayes factor in terms of the two sets of signal parameters
-%as
-%\begin{equation}
-%\Bsn(x| I) = \frac{P(x, \A, \blambda|\Hs, I)}
-%{P(\A| \Hs, I)P(\blambda| \Hs, I)P(x| \Hn, I)}.
-%\end{equation}
-%Marginalising over the two sets of parameters we find that
-%\begin{equation}
-%\Bsn(x| I)= \iint
-%\mathcal{L}(x; \A, \blambda)
-%P(\A| \Hs, I)P(\blambda| \Hs, I)
-%d\blambda d\A
-%\label{eqn_full_bayes}
-%\end{equation}
-%where
-%\begin{equation}
-%\mathcal{L}(x; \A, \blambda) \equiv \frac{P(x |\A, \blambda, \Hs, I)}{P(x| \Hn, I)},
-%\label{eqn_likelihood}
-%\end{equation}
-%is the \emph{likelihood-ratio}.
-%
-%At this point, we can appreciate the problems of searching for unknown signals:
-%one has four amplitude parameters and several Doppler parameters over which
-%this integral must be performed. If a single signal exists in the data, this
-%corresponds to a single peak in the likelihood-ratio, but at an unknown
-%location. Therefore, one must must first search for peaks (candidates), and
-%then subsequently analyse their significance. If one has prior knowledge this
-%can be used; for example in targeted searches for gravitational waves from
-%known pulsars emitting CWs from a mountain the Doppler parameters are
-%considered known collapsing their integral to a single evaluation (see for
-%example the method proposed by \citet{dupuis2005}).
-%\comment{Should we cite other known-pulsar methodologies?}
-%
-%\subsection{The $\F$-statistic: a frequentist perspective}
-%
-%For directed and all-sky searches, a common method introduced by
-%\citet{jks1998} to reduce the parameter space is the maximum-likelihood
-%approach. In this approach (often referred to as `frequentist'), one starts by
-%defining the likelihood-ratio, Equation~\eqref{eqn_likelihood}, which in this
-%context is a \emph{matched-filtering} amplitude. Then, analytically maximising
-%this likelihood-ratio with respect to the four amplitude parameters results
-%(c.f.~\citet{prix2009}) in a maximised log-likelihood given by $\F(x|
-%\blambda)$: the so-called $\F$-statistic. Picking a particular set of Doppler
-%parameters $\blambda$ (the template) one can then compute a detection statistic
-%(typically $\twoFtilde$ is used\footnote{Note that the `tilde' denote that it
-%is a fully-coherent quantity while we use a `hat' to denote a semi-coherent
-%quantity}) which can be used to quantify the significance of the template.
-%Usually this is done by calculating a corresponding false alarm rate, the
-%probability of seeing such a detection statistic in Gaussian noise.
-%
-%Calculations of the significance are possible due to the properties of the
-%detection statistic: in Gaussian noise, it can be shown \citep{jks1998,
-%cutlershutz2005} that $\widetilde{2\F}$ follows a chi-squared distribution with
-%4 degrees of freedom. In the presence of a signal, $\widetilde{2\F}$ is
-%still chi-squared with 4 degrees of freedom, but has a non-centrality parameter
-%$\tilde{\rho}^{2}$ such that its expectation value is
-%\begin{equation}
-%\textrm{E}[\widetilde{2\F}(x; \blambda)] = 4 + \tilde{\rho}(x; \blambda)^2.
-%\label{eqn_twoF_expectation}
-%\end{equation}
-%The non-centrality parameter in this context is the SNR of the matched-filter
-%given by
-%\begin{equation}
-%\rho^{2} = (h | h) \propto \frac{h_0^2}{\Sn}\Tcoh \mathcal{N}
-%\end{equation}
-%where $(h|h)$ is the inner product of the signal with itself (see for example
-%\citet{prix2009}), $\Sn$ is a (frequency-dependent) measure of the noise in
-%the detector and $\mathcal{N}$ is the number of detectors.
-%
-%
-%\subsection{The $\F$-statistic: a Bayesian perspective}
-%
-%At first, it
-%appears that the $\F$-statistic is independent of the Bayesian framework since
-%it was first derived directly from the likelihood. However, it was shown in
-%\citet{prix2009, prix2011} that if we marginalise over the four amplitude parameters of
-%Equation~\eqref{eqn_full_bayes} (using the ``$\F$-statistic prior'' \citep{prix2011},
-%which we denote by $\AmplitudePrior$)
-%then the integral, when $\homax \gg 1$, is a Gaussian integral and can be
-%computed analytically as
-%\begin{align}
-%\begin{split}
-%\Bsn(x| \blambda, \AmplitudePrior, I) & \equiv
-%\int
-%\mathcal{L}(x ;\A, \blambda)
-%P(\A| \Hs, \AmplitudePrior, I) d\A
-%\label{eqn_B_S_N}
-%\\
-%& \propto e^{\tilde{\F}(x| \blambda)}
-%\end{split}
-%\end{align}
-%where $\mathcal{F}$ is exactly the $\F$-statistic, first derived
-%as a maximised log-likelihood \citep{jks1998}. This result
-%demonstrates that the $\mathcal{F}$-statistic is proportional to the log-Bayes
-%factors when calculated with a uniform prior on the amplitude parameters and
-%fixed Doppler parameters.
-%
-%As such, we can define the Bayes-factor of Equation~\eqref{eqn_full_bayes} as
-%\begin{equation}
-%\Bsn(x| \AmplitudePrior, I) = \int
-%\Bsn(x| \AmplitudePrior, \blambda, I) P(\blambda| \Hs, I)
-% d\blambda,
-%\end{equation}
-%or neglecting the constants
-%\begin{equation}
-%\Bsn(x| \AmplitudePrior, I) \propto \int
-%e^{\tilde{\F}(x| \blambda)} P(\blambda| \Hs, I)
-% d\blambda.
-%\label{eqn_bayes_over_F}
-%\end{equation}
-%
-%Formulating the significance of a CW candidate in this way is pragmatic in that
-%there exists a wealth of well-tested tools (i.e. \texttt{LALSuite}
-%\citep{lalsuite}) capable of computing the $\mathcal{F}$-statistic for CW
-%signals, transient-CWs, and CW signals from binary systems; these can be
-%leveraged to compute Equation~\eqref{eqn_bayes_over_F}.
-
-The MCMC class of optimisation tools is formulated to solve the problem of
-inferring the posterior distribution of some general model parameters $\theta$
-given some data $x$. We will not give a full description here, but
-refer the reader to the literature, e.g. \citet{mackay2003information}.
-In this case, we will be concerned with the posterior
-distribution of $\blambda$, the Doppler parameters. From Bayes theorem, we
-see that this is proportional to the integrand of Eqn.~\eqref{eqn_Bsn_x}:
-\begin{equation}
-P(\blambda | \boldsymbol{x}) \propto \Bsn(\boldsymbol{x}| \blambda)P(\blambda).
-\label{eqn_lambda_posterior}
-\end{equation}
-
-The likelihood function (either for the fully-coherent or semi-coherent case)
-and the prior distributions can be passed to any optimisation routine in order
-to maximise over the unknown signal parameters. In this paper, we will discuss
-the use of an ensemble MCMC optimisation routine; this has the advantages of
-being efficient in high-dimensional spaces, of being able to sample multi-modal
-posteriors, and of allowing the calculation of the marginalised Bayes factor
-(i.e. Eqn.~\eqref{eqn_Bsn_x}).
-
-\subsection{The \texttt{emcee} sampler}
-
-In this work we will use the \texttt{emcee} ensemble sampler
-\citep{foreman-mackay2013}, an implementation of the affine-invariant ensemble
-sampler proposed by \citet{goodman2010}. This choice addresses a key issue with
-the use of MCMC samplers, namely the choice of \emph{proposal distribution}. At
-each step of the MCMC algorithm, the sampler generates from some distribution
-(known as the proposal distribution) a jump in parameter space. Usually, this
-proposal distribution must be `tuned' so that the MCMC sampler efficiently
-walks the parameter space without either jumping too far off the peak, or
-taking such small steps that it takes a long period of time to traverse the
-peak. The \texttt{emcee} sampler addresses this by using an ensemble, a large
-number (${\sim}100$) of parallel \emph{walkers}, in which the proposal for each
-walker is generated from the current distribution of the other walkers.
-Moreover, by applying an affine transformation, the efficiency of the
-algorithm is not diminished when the parameter space is highly anisotropic. As
-such, this sampler requires little in the way of tuning: a single proposal
-scale and the number of steps to take.
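-
-To make this workflow concrete, the following minimal sketch (not the search
-code used in this paper; the Gaussian log-likelihood, prior box, and all
-numerical values are purely illustrative stand-ins for an $\F$-statistic
-setup, and a recent \texttt{emcee} v3 interface is assumed) shows the basic
-pattern of defining a log-posterior, initialising the walkers from the prior,
-and discarding an initial set of burn-in samples:
-\begin{verbatim}
-import numpy as np
-import emcee
-
-# Toy stand-in for the CW log-likelihood: a single Gaussian peak in a
-# two-dimensional (frequency, spin-down)-like parameter space.
-def log_likelihood(theta):
-    f, fdot = theta
-    return -0.5 * ((f - 30.0)**2 / 1e-8 + (fdot + 1e-10)**2 / 1e-22)
-
-def log_prior(theta):
-    f, fdot = theta
-    if 29.999 < f < 30.001 and -2e-10 < fdot < 0:
-        return 0.0  # uniform prior inside the box
-    return -np.inf
-
-def log_posterior(theta):
-    lp = log_prior(theta)
-    return lp + log_likelihood(theta) if np.isfinite(lp) else -np.inf
-
-nwalkers, ndim = 100, 2
-# Initialise each walker by drawing from the uniform prior.
-p0 = np.column_stack([np.random.uniform(29.999, 30.001, nwalkers),
-                      np.random.uniform(-2e-10, 0, nwalkers)])
-sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior)
-sampler.run_mcmc(p0, 500)
-samples = sampler.get_chain(discard=100, flat=True)  # drop burn-in
-\end{verbatim}
-Note that the only sampler-specific tuning here is the number of walkers and
-steps, mirroring the discussion above.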
-
-\subsection{Parallel tempering: sampling multimodal posteriors}
-\label{sec_parallel_tempering}
-Beyond the standard ensemble sampler, we will also use one further
-modification: the parallel-tempered ensemble sampler. A parallel-tempered
-MCMC simulation, first proposed by \citet{swendsen1986}, runs
-$\Ntemps$ simulations in parallel, in which the likelihood in the $i^{\rm th}$
-simulation is raised to the power $1/T_{i}$, where $T_i$ is referred
-to as the \emph{temperature}. As such, Equation~\eqref{eqn_lambda_posterior} is
-written as
-\begin{equation}
-P(\blambda | T_i, x, \AmplitudePrior, \Hs, I)
-\propto \left(e^{\F(x; \blambda)}\right)^{1/T_i} P(\blambda| \Hs, I).
-\end{equation}
-Setting $T_0=1$ with $T_i > T_0 \; \forall \; i > 0$ ensures that the lowest
-temperature recovers Equation~\eqref{eqn_lambda_posterior}, while for higher
-temperatures the likelihood is broadened (for a Gaussian likelihood, the
-standard deviation is larger by a factor of $\sqrt{T_i}$). Periodically, the
-algorithm swaps the positions of walkers between the different
-temperatures. This allows the $T_0$ chain (from which we draw samples of the
-posterior) to efficiently sample from multi-modal posteriors. This method
-introduces two additional tuning parameters, the number and range of the set of
-temperatures $\{T_i\}$; we will discuss their significance when relevant.
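-
-The $\sqrt{T_i}$ broadening can be verified directly with a short numerical
-check (a self-contained sketch; the one-dimensional Gaussian likelihood and
-grid are assumptions made purely for this demonstration):
-\begin{verbatim}
-import numpy as np
-
-# A Gaussian likelihood L(x) ~ exp(-x^2 / (2 sigma^2)) raised to the
-# power 1/T is again Gaussian, with standard deviation sigma*sqrt(T).
-sigma, T = 1.0, 8.0
-x = np.linspace(-20, 20, 100001)
-logL = -0.5 * (x / sigma)**2
-for temp in [1.0, T]:
-    w = np.exp(logL / temp)          # tempered likelihood
-    w /= np.trapz(w, x)              # normalise on the grid
-    std = np.sqrt(np.trapz(w * x**2, x))
-    print(temp, std)                 # ~1.0, then ~sqrt(8) = 2.83
-\end{verbatim}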
-
-\subsection{Parallel tempering: estimating the Bayes factor}
-\comment{Greg: I don't think we actually need this since we aren't using the Bayes factor}
-In addition, parallel tempering also offers a robust method to estimate the
-Bayes factor of Equation~\eqref{eqn_Bsn_x}. If we define the inverse
-temperature $\beta\equiv1/T$ and the tempered evidence $Z(\beta)$, such that
-$Z(1) = \Bsn(x| \AmplitudePrior, I)$, then, as noted by \citet{goggans2004}
-for the general case, we may write
-\begin{align}
-\frac{1}{Z} \frac{\partial Z}{\partial \beta}=
-\frac{
-\int \Bsn(x| \AmplitudePrior, \blambda)^{\beta}
-\log\left(\Bsn(x| \AmplitudePrior, \blambda)\right)P(\blambda| I)\,
-d\blambda
-}
-{
-\int \Bsn(x| \AmplitudePrior, \blambda)^{\beta}P(\blambda| I)\,
-d\blambda
-}.
-\end{align}
-The right-hand side is the average of the log-likelihood over the posterior at
-inverse temperature $\beta$. As such, we have
-\begin{align}
-\frac{\partial \log Z}{\partial \beta} =
-\langle \log\Bsn(x| \AmplitudePrior, \blambda) \rangle_{\beta}.
-\end{align}
-The log-likelihood values are calculated during the MCMC sampling, so one
-can numerically integrate to obtain the Bayes factor, i.e.
-\begin{align}
-\log \Bsn(x| \AmplitudePrior, I) = \log Z(1) = \int_{0}^{1}
-\langle \log\Bsn(x| \AmplitudePrior, \blambda) \rangle_{\beta} \,d\beta.
-\end{align}
-In practice, we use a simple numerical quadrature over a finite ladder of
-$\beta_i$ with the smallest chosen such that choosing a smaller value does not
-change the result beyond other numerical uncertainties. Typically, getting
-accurate results for the Bayes factor requires a substantially larger number of
-temperatures than are required for efficiently sampling multi-modal
-distributions.  Therefore, it is recommended that one uses a few
-temperatures during the search stage, and subsequently a larger number of
-temperatures (suitably initialised close to the target peak) when estimating
-the Bayes factor.
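-
-The quadrature itself is straightforward; the following sketch demonstrates it
-on a toy model where the evidence is known in closed form (for clarity, the
-$\beta$-averages are computed by direct quadrature over a parameter grid
-rather than from MCMC samples, and all values are illustrative assumptions):
-\begin{verbatim}
-import numpy as np
-
-# Toy model with known evidence: uniform prior on [-a, a] and a
-# Gaussian likelihood of width s, so Z ~ sqrt(2*pi)*s / (2*a).
-a, s = 10.0, 0.1
-lam = np.linspace(-a, a, 200001)
-logL = -0.5 * (lam / s)**2
-prior = np.full_like(lam, 1.0 / (2 * a))
-
-betas = np.geomspace(1e-6, 1.0, 200)  # inverse-temperature ladder
-mean_logL = []
-for beta in betas:
-    w = np.exp(beta * logL) * prior   # tempered posterior (unnormalised)
-    w /= np.trapz(w, lam)
-    mean_logL.append(np.trapz(w * logL, lam))  # <log L>_beta
-
-logZ = np.trapz(mean_logL, betas)     # integrate dlogZ/dbeta over [0, 1]
-print(logZ, np.log(np.sqrt(2 * np.pi) * s / (2 * a)))  # ~ -4.4 for both
-\end{verbatim}
-Note how many more temperatures (here 200, log-spaced) are needed for an
-accurate integral than would be required merely to sample a multi-modal
-posterior, in line with the recommendation above.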
-
-\subsection{Evaluating the search space}
-
-In general, MCMC samplers are highly effective at generating samples of the
-posterior in multi-dimensional parameter spaces. However, they will perform
-poorly if the posterior has many small maxima and only a small number of
-large maxima which occupy a small fraction of the prior volume. Later, in
-Section~\ref{sec_topology}, we will show that for a signal in Gaussian noise our
-log-likelihood $\F$ will have a single peak due to the signal, surrounded
-by smaller peaks particular to the given noise realisation. The size of these
-peaks relative to the signal peak depends on the SNR. However,
-regardless of the strength of the signal peak, if its width
-is small compared to the prior volume, the sampler will tend to get `stuck' on
-local maxima and be inefficient at finding the global maximum. This problem is
-exacerbated in higher-dimensional search spaces, where the volume fraction
-occupied by the signal shrinks exponentially with the number of dimensions.
-
-\subsubsection{The metric}
-
-In a traditional CW search which uses a grid of templates (also known as a
-template bank), the spacings of the grid are chosen such that the loss of
-SNR is bounded by $\mu$, the
-\emph{template-bank mismatch}. The optimal choice of grid then consists of
-minimising the computing cost while respecting this maximum template-bank
-mismatch, or vice versa (for examples see \citet{pletsch2010, prix2012,
-wette2013, wette2015}). We will now discuss how the work on setting up these
-grids can be applied to the problem of determining whether a setup is
-appropriate for an MCMC method: i.e. given the prior volume, do we expect a
-signal to occupy a non-negligible fraction of the volume?
-
-For a fully-coherent $\F$-statistic search on data containing Gaussian noise
-and a signal with Doppler parameters $\blambdaSignal$, we define the
-template-bank mismatch at the grid point $\blambda_{l}$ as
-\begin{align}
-\mutilde(\blambdaSignal, \blambda_{l}) \equiv 1 -
-\frac{\tilde{\rho}(\blambda_l;\blambdaSignal)^{2}}
-{\tilde{\rho}(\blambdaSignal; \blambdaSignal)^{2}},
-\end{align}
-where $\tilde{\rho}(\blambda_l; \blambdaSignal)$ is the non-centrality
-parameter (c.f. Equation~\eqref{eqn_twoF_expectation}) at $\blambda_l$, given
-that the signal is at $\blambdaSignal$; as such, $\tilde{\rho}(\blambdaSignal;
-\blambdaSignal)$ is the perfectly-matched non-centrality parameter.  For a
-fully-coherent search, this non-centrality parameter is equivalent to the
-fully-coherent matched-filter SNR. However, as noted by
-\citet{leaci2015}, this equivalence holds for the fully-coherent case only; we
-therefore choose to use the non-centrality parameter, which generalises easily
-to the semi-coherent case.
-
-To make analytic calculations of the template-bank mismatch possible, as first shown by
-\citet{brady1998}, the mismatch can be approximated by
-\begin{equation}
-\mutilde(\blambda, \Delta\blambda) \approx
-\tilde{g}_{ij}^\phi \Delta\lambda^{i}\Delta\lambda^{j}
-+ \mathcal{O}\left(\Delta\blambda^{3}\right)
-\label{eqn_mutilde_expansion}
-\end{equation}
-where we use index notation with an implicit summation over repeated indices.
-Here, $\tilde{g}_{ij}^{\phi}$ is the \emph{phase-metric} given by
-\begin{align}
-\tilde{g}_{ij}^\phi (\blambda) =
-\langle
-\partial_{\Delta\lambda^{i}}\phi
-\partial_{\Delta\lambda^{j}}\phi
-\rangle
--
-\langle
-\partial_{\Delta\lambda^{i}}\phi
-\rangle
-\langle
-\partial_{\Delta\lambda^{j}}\phi
-\rangle,
-\label{eqn_metric}
-\end{align}
-where $\langle \cdot \rangle$ denotes the time-average over $\Tcoh$ and
-$\phi(t; \blambda)$ is the phase evolution of the source. The phase metric is
-in fact an approximation of the full metric which includes modulations of the
-amplitude parameters $\A$. It was shown by \citet{prix2007metric} that
-when using data spans longer than a day and data from multiple detectors, the
-difference between phase metric and full metric is negligible.
-
-The phase metric, Equation~\eqref{eqn_metric}, provides the necessary tool to
-measure distances in the Doppler parameter space in units of mismatch. To
-calculate its components, we define the phase evolution
-of the source as \citep{wette2015}
-\begin{align}
-\phi(t; \blambda) \approx 2\pi\left(
-\sum_{s=0}^{\smax} f^{(s)}\frac{(t-\tref)^{s+1}}{(s+1)!}
-+ \frac{r(t)\cdot\mathbf{n}(\alpha, \delta)}{c} \fmax\right),
-\label{eqn_phi}
-\end{align}
-where $\mathbf{n}(\alpha, \delta)$ is the fixed position of the source with
-respect to the solar system barycenter (with coordinates $\alpha$ and $\delta$,
-the right ascension and declination), $f^{(s)}\equiv d^{s}f/dt^{s}$ evaluated
-at $\tref$, and $\fmax$ a constant approximating $f(t)$, chosen conservatively
-to be the maximum frequency over the data span \citep{wette2013}.
-
-The frequency and spin-down components of the metric can easily be calculated
-due to their linearity in Equation~\eqref{eqn_phi}; for the special case in
-which $\tref$ is in the middle of the data span, the phase-evolution metric
-with $\smax{=}1$ (i.e. the frequency and spin-down components) is given by
-\meta{This is not explained properly - just link to a previous paper (I used Eqn (33) of RP's frequency metrix notes}
-\begin{equation}
-\tilde{g}^{\textrm{PE}}_{ij} \equiv
-\left[
-\begin{array}{cc}
-\frac{\pi^{2}\Tcoh^{2}}{3} & 0 \\
-0 & \frac{4\pi^{2}\Tcoh^{4}}{45}
-\end{array}
-\right]
-\textrm{ for } i, j \in [f, \dot{f}].
-\end{equation}
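-
-As a quick numerical check of these expressions (an illustrative sketch: the
-100-day coherence time is an assumption, and only the frequency/spin-down
-block is treated), the offsets $\Delta f = \sqrt{3}/(\pi\Tcoh)$ and
-$\Delta\dot{f} = \sqrt{45/4}/(\pi\Tcoh^{2})$ each produce a mismatch of unity:
-\begin{verbatim}
-import numpy as np
-
-Tcoh = 100 * 86400.0  # illustrative coherence time: 100 days in seconds
-g_PE = np.diag([np.pi**2 * Tcoh**2 / 3,        # frequency-frequency
-                4 * np.pi**2 * Tcoh**4 / 45])  # spindown-spindown
-
-df = np.sqrt(3) / (np.pi * Tcoh)
-dfdot = np.sqrt(45 / 4) / (np.pi * Tcoh**2)
-# mutilde ~ g_ij dlambda_i dlambda_j: both offsets give mismatch 1.
-for step in [np.array([df, 0.0]), np.array([0.0, dfdot])]:
-    print(step @ g_PE @ step)  # prints 1.0 in both cases
-\end{verbatim}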
-
-Accurately approximating the sky components of the metric is non-trivial, but
-was accomplished by \citet{wette2013} for the fully-coherent case. In
-\citet{wette2015} it was shown how to calculate the equivalent semi-coherent
-metric $\hat{g}_{ij}(\blambda, \Nseg)$; in the following, we
-will work with this generalised semi-coherent method with the understanding
-that $\hat{g}_{ij}(\blambda, \Nseg{=}1)=
-\tilde{g}_{ij}(\blambda)$.
-
-\subsubsection{The metric volume}
-To understand the volume of parameter space which a true signal would occupy,
-we can make use of the \emph{metric-volume}, defined as \citep{prix2007}
-\begin{align}
-\mathcal{V} = \int
-\sqrt{\textrm{det}\hat{g}_{ij}(\blambda, \Nseg)} d\blambda \approx
-\sqrt{\textrm{det}\hat{g}_{ij}(\blambda, \Nseg)} \Delta\blambda
-\end{align}
-where in the second step we assume a constant-coefficient metric. Here,
-$\Delta\blambda$ is the volume element, which is given by
-\begin{equation}
-\Delta\blambda = \frac{\cos(\delta_0)\Delta\delta\Delta\alpha}{2}
-\prod_{s=0}^{\smax} \Delta f^{(s)},
-\end{equation}
-where the numerator of the prefactor is the solid angle of the sky-patch at a
-declination of $\delta_0$, the factor of $1/2$ comes from converting the
-normalised determinant (which is computed over the whole sky) to the solid angle
-of the directed search, and $\Delta f^{(s)}$ is the extent of the frequency and
-spin-down range(s) searched.
-
-The metric volume $\V$ is the approximate number of templates required to cover
-the given Doppler parameter volume at a fixed mismatch of $\approx 1$. As
-such, its inverse gives the approximate (order-of-magnitude) volume fraction of
-the search volume which would be occupied by a signal. This can therefore be
-used as a proxy for determining whether an MCMC search will operate in a regime
-where it is efficient (i.e. where a signal occupies a reasonable fraction of the
-search volume).
-
-The volume $\V$ combines the search volume from all search dimensions. However,
-let us now discuss how we can delve deeper to understand how each dimension
-contributes to the total product. This is done by noticing that the metric has
-a particular block form:
-\begin{align}
-g_{ij} = \left[
-\begin{array}{cc}
-g^{\textrm{Sky}} & 0 \\
-0 & g^{\textrm{PE}}
-\end{array}
-\right],
-\end{align}
-where $g^{\rm Sky}$ is the $2\times2$ sky-metric, while $g^{\rm PE}$ is the
-$(\smax{+}1){\times}(\smax{+}1)$ phase-evolution metric.
-As such, the volume can be decomposed as
-\begin{align}
-\mathcal{V} & =
-\sqrt{\textrm{det}g^{\rm Sky}}\frac{\Delta\Omega}{2} \times
-\sqrt{\textrm{det}g^{\rm PE}}\prod_{s=0}^{\smax}\Delta f^{(s)} \\
-& = \Vsky \times \Vpe.
-\label{eqn_metric_volume}
-\end{align}
-Moreover, if $\tref$ is in the middle of the observation span, the diagonal
-nature of $g^{\rm PE}$ means that one can further identify
-\begin{align}
-\Vpe = \prod_{s=0}^{\smax}\sqrt{g^{\rm PE}_{ss}} \Delta f^{(s)}
-= \prod_{s=0}^{\smax}\Vpe^{(s)}.
-\end{align}
-This decomposition may be useful in setting up MCMC searches.
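-
-For instance, the following sketch evaluates the two per-dimension
-phase-evolution factors $\Vpe^{(s)}$ for an illustrative directed-search prior
-(the coherence time and prior widths are assumptions chosen for the example,
-not values from this paper's studies):
-\begin{verbatim}
-import numpy as np
-
-Tcoh = 100 * 86400.0   # coherence time [s]
-delta_f = 1e-6         # prior width in frequency [Hz]
-delta_fdot = 1e-13     # prior width in spin-down [Hz/s]
-
-# V_PE^(s) = sqrt(g_ss) * Delta f^(s) for s = 0, 1 (tref at mid-span)
-V0 = np.sqrt(np.pi**2 * Tcoh**2 / 3) * delta_f
-V1 = np.sqrt(4 * np.pi**2 * Tcoh**4 / 45) * delta_fdot
-print(V0, V1, V0 * V1)  # per-dimension factors and total V_PE
-\end{verbatim}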
-
-\subsection{The topology of the likelihood}
-\label{sec_topology}
-
-We intend to use the $\F$-statistic as our log-likelihood in MCMC simulations,
-but before continuing, it is worthwhile to acquaint ourselves with the typical
-behaviour of the log-likelihood by considering a specific example.  In
-particular, let us consider the frequency-domain `topology' of the $\twoFtilde$
-likelihood that our MCMC simulations will need to explore.
-
-We simulate a signal with $h_0=$\GridedFrequencyhzero\; in data lasting
-\GridedFrequencyT~days with $\sqrt{S_{x}}=$\GridedFrequencySqrtSx.  In
-Figure~\ref{fig_grid_frequency}, we plot the numerically computed $\twoFtilde$
-for this data as a function of the template frequency (all other parameters are
-held fixed at the simulation values). Three clear peaks can be observed: a
-central peak at $f_0$ (the simulated signal frequency), and two `Doppler lobes'
-at $f_0\pm 4/(1\,\textrm{day})$. \comment{Need to understand the cause of
-these}
-
-In Gaussian noise alone, the expectation of $\widetilde{2\F}$ is $4$; in the
-presence of a signal, $\widetilde{2\F}$ remains $\chi^{2}$-distributed with 4
-degrees of freedom, but with a non-centrality parameter $\tilde{\rho}^{2}$
-(the squared SNR) such that \citep{jks1998, cutlershutz2005}
-\begin{equation}
-\textrm{E}[\widetilde{2\F}(x; \blambda)] = 4 + \tilde{\rho}(x; \blambda)^2.
-\label{eqn_twoF_expectation}
-\end{equation}
-This behaviour is exactly seen in Figure~\ref{fig_grid_frequency}: between the
-peaks one finds noise fluctuations.
-
-\begin{figure}[htb]
-\centering \includegraphics[width=0.45\textwidth]{grided_frequency_search_1D}
-\caption{Numerically computed $\twoFtilde$ for a
-simulated signal with frequency $f_0$ in Gaussian noise.}
-\label{fig_grid_frequency}
-\end{figure}
-
-The widths of the peaks can be estimated using the metric: inverting
-Eqn.~\eqref{eqn_mutilde_expansion} for the frequency component alone, one finds
-that $\Delta f \approx \sqrt{3}/(\pi\Tcoh)$ (taking a mismatch of 1 as a proxy
-for the peak width). In Figure~\ref{fig_grid_frequency} the coherence time is
-short enough that the width of the peak is about an order of magnitude smaller
-than the Doppler window of $\sim 1/(1\,\mathrm{day})$. In general, CW searches
-are performed on much longer stretches of data and therefore the peak width
-will be much narrower.
-
-When running an MCMC simulation we must therefore be aware that, in addition to
-the signal peak, the likelihood can contain multiple other modes, arising
-either from noise fluctuations or from secondary peaks of the signal.
-
-
-\subsection{Example: MCMC optimisation for a signal in noise}
-
-In order to familiarise the reader with the features of an MCMC search, we will
-now describe a simple directed search (over $f$ and $\dot{f}$) for a simulated
-signal in Gaussian noise. The signal will have a frequency of
-$\BasicExampleFzero$~Hz and a spin-down of $\BasicExampleFone$~Hz/s; all other
-Doppler parameters are considered known. Moreover, the signal has an amplitude
-$h_0=\BasicExamplehzero$ while the Gaussian noise has
-$\sqrt{\Sn}=\BasicExampleSqrtSn$~Hz$^{-1/2}$, such that the signal has a
-\emph{depth} of $\sqrt{\Sn}/h_0 = \BasicExampleDepth$.
-
-First, we must define a prior for each search parameter. Typically, we
-recommend either a uniform prior bounding the area of interest, or a normal
-distribution centered on the target with some well-defined width. In defining
-the prior distributions, one must ensure
-that the MCMC simulation has a reasonable chance at finding a peak. To do so,
-we can use the corresponding metric-volume given in
-Equation~\eqref{eqn_metric_volume}. For this example, we will use a uniform
-prior with a frequency range of $\Delta f=\BasicExampleDeltaFzero$~Hz and a
-spin-down range of $\Delta \fdot=\BasicExampleDeltaFone$~Hz/s both centered on
-the simulated signal frequency and spin-down rate. We set the reference time to
-coincide with the middle of the data span, so that the metric volume can be
-decomposed into a frequency contribution,
-\begin{align}
-\Vpe^{(0)} = \frac{\pi\Tcoh\Delta f}{\sqrt{3}} \approx \BasicExampleVFzero,
-\end{align}
-and a spin-down contribution,
-\begin{align}
-\Vpe^{(1)} = \frac{2\pi\Tcoh^{2}\Delta \fdot}{\sqrt{45}} \approx \BasicExampleVFone,
-\end{align}
-such that $\V\approx\BasicExampleV$ (note that $\Vsky$ does not contribute
-since we do not search over the sky parameters). This metric volume indicates
-that the signal will occupy about 1\% of the prior volume, so the MCMC search
-is expected to work efficiently. Alternative priors will need careful thought
-about how to translate them into a metric volume: for example, for a Gaussian
-prior one could use the standard deviation as a proxy for the allowed search
-region.
-
-In addition to defining the prior, one must also consider how to
-\emph{initialise} the walkers of the ensemble sampler. If the prior genuinely
-represents the stated prior knowledge, the usual solution is to initialise the
-walkers from the prior: that is, each walker's starting position is drawn from the prior.
-However, instances do occur when one would like to initialise the walkers from
-a different distribution. For example, if one only needs to estimate the
-evidence (given a particular prior), but is aware from previous searches that
-the only significant peak lies in a small area of parameter space, one could
-initialise the walkers in a small cluster close to that area. In this example,
-we initialise the walkers from the prior such that they have the chance to
-explore the entire prior volume.
-
-Having defined the prior, the final setup step is to define the number of
-\emph{burn-in} and \emph{production} steps the sampler should take, and the
-number of walkers; these are tuning parameters of the MCMC algorithm. The
-number of walkers should typically be a few hundred: the greater the number,
-the more samples will be taken, resulting in improved posterior estimates. The
-burn-in steps refer to an initial set of steps which are discarded, as they are
-taken while the walkers converge. After the walkers have converged, the
-subsequent steps are known as production steps, since they are used to produce
-posterior estimates and calculate the marginal likelihood.
-
-Using these choices, the simulation is run. To illustrate the full MCMC
-process, in Figure~\ref{fig_MCMC_simple_example} we plot the progress of all
-the individual walkers (each represented by an individual line) as a function
-of the total number of steps. The portion of steps before the dashed vertical
-line are ``burn-in'' samples and hence discarded; from this plot we see why:
-the walkers are initialised from the uniform prior and initially spend some
-time exploring the whole parameter space before converging. The fact that they
-converge to a single unique point is due to the strength of the signal
-(substantially elevating the likelihood above that of Gaussian fluctuations)
-and the tight prior which was quantified through the metric volume $\V$. The
-``production'' samples, those after the black dashed vertical line, can be used
-to generate posterior plots. In this instance, the number of burn-in and
-production samples was pre-specified by hand.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.45\textwidth]{fully_coherent_search_using_MCMC_walkers}
-\caption{The progress of the MCMC simulation for a simulated signal in Gaussian
-noise, searching over the frequency and spin-down. The upper two panels show
-the position of all walkers as a function of the number of steps for the
-frequency and spin-down; samples to the left of the dashed vertical line are discarded as
-burn-in (the first 100 steps), while the rest are used
-as production samples.}
-\label{fig_MCMC_simple_example}
-\end{figure}
-
-\subsection{Testing convergence}
-
-In Figure~\ref{fig_MCMC_simple_example}, it is easy to see by eye that the
-MCMC simulation has converged during the burn-in. However, choosing the number
-of steps, balancing the extra computing cost against ensuring the chains have
-converged, is often a matter of trial and error. So, while graphical methods
-for assessing convergence are the most robust and informative, quantitative
-methods (diagnostics) also exist to test convergence and can therefore be used
-to prematurely stop the burn-in period. As such, one can simply
-choose a large number of steps (as compared to a typical burn-in for the given
-problem) and expect that in most cases not all of them will be used. Moreover,
-if so many searches are to be run that inspection of each MCMC
-simulation is impossible, the convergence tests can easily be checked
-numerically.
-
-\subsubsection{The Gelman \& Rubin statistic}
-We will use the ``Gelman \& Rubin statistic'' \citep{gelman1992} to assess
-convergence. This method, developed in the context of single-walker samplers
-(i.e. not ensemble samplers), requires multiple independent simulations to be
-run, each initialised from an overdispersed estimate of the target
-distribution. The statistic, the \emph{potential scale reduction factor}
-(PSRF), is calculated for each scalar quantity of interest and estimates the
-amount by which the scale factor of the target distribution (approximated by a
-Student-t distribution) might shrink if the simulation were run indefinitely.
-This method makes a normality assumption on the posterior distribution, which
-in testing appears to be reasonable for all parameters except the transient
-start time and duration (discussed later in Sec.~\ref{sec_transients}).
-However, paraphrasing \citet{brooks1998}, Gelman and Rubin's approach is
-generally applicable to any situation where the posterior inference can be
-summarised by the mean and variance (or standard deviation), even if the
-posterior distributions are not themselves believed to be normally distributed.
-
-\comment{Note I have not yet fully implemented the method, for simplicity I'm
-         assuming df $\rightarrow$ infty}
-
-In this setting, we depart from the original description by using the walkers
-of the ensemble sampler in place of independent MCMC simulations. Moreover,
-this is done by periodically checking the last $n$ steps of the simulation. As
-such, testing convergence requires only a small overhead in calculating the
-variances: there is no requirement to run multiple independent simulations.
-At each periodic check, the statistic is computed for each model parameter,
-and if the statistic for every model parameter is less than the threshold (with
-a default value of 1.2), the simulation is considered converged. However, the
-burn-in period is only prematurely stopped if convergence is reached a
-pre-specified number of times (the default value is 10). This is a conservative
-system to prevent misdiagnosis of convergence.
-
-While in testing this method was found to agree well with graphical
-determination of convergence, we echo the conclusion of \citet{cowles1996markov}
-that no convergence diagnostic can say with certainty that the posterior
-samples are truly representative of the underlying stationary distribution.
-
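-A minimal sketch of the PSRF computed over the walkers follows (a simplified
-version which, as in the note above, neglects the degrees-of-freedom
-correction; the synthetic chains are purely illustrative):
-\begin{verbatim}
-import numpy as np
-
-def psrf(chain):
-    """chain: shape (n_steps, n_walkers), for a single parameter."""
-    n, m = chain.shape
-    W = chain.var(axis=0, ddof=1).mean()      # mean within-walker variance
-    B = n * chain.mean(axis=0).var(ddof=1)    # between-walker variance
-    var_hat = (n - 1) / n * W + B / n         # pooled variance estimate
-    return np.sqrt(var_hat / W)
-
-rng = np.random.default_rng(42)
-good = rng.normal(0, 1, size=(100, 50))  # all walkers on the same peak
-stuck = good.copy()
-stuck[:, 0] += 10.0                      # one isolated walker inflates B
-print(psrf(good), psrf(stuck))           # ~1.0 versus >> 1.2
-\end{verbatim}
-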
-\subsubsection{Example}
-
-In Figure~\ref{fig_MCMC_convergence_example}, we illustrate our implementation
-of the Gelman-Rubin statistic for a fully-coherent search of data including
-a strong continuous signal. In essence this repeats the simulation in
-Figure~\ref{fig_MCMC_simple_example}, but we have expanded the prior ranges
-on $f$ and $\fdot$. As a result, the sampler takes many more steps to reach
-convergence: it can be seen by eye that the traces of a few walkers remain
-isolated from the bulk of walkers. When this is the case, the PSRF is much
-larger than unity, since the between-walker variance is much larger than the
-within-walker variance. Once these walkers join the main bulk, the PSRF tends
-to unity for several periods and hence the burn-in period is prematurely
-halted. The sampler is then continued, with all subsequent samples saved as
-production samples. In the figure, the gap indicates the `missing' samples
-which would have been taken if convergence had not been reached before the end
-of the maximum burn-in period.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{fully_coherent_search_using_MCMC_convergence_walkers}
-\caption{Demonstration of the potential scale reduction factor (PSRF)
-convergence statistic (in blue), calculated separately for each parameter;
-values below 1.2 are considered converged.}
-\label{fig_MCMC_convergence_example}
-\end{figure}
-
-It is worth noting that this type of long convergence, where a small number of
-walkers remain isolated from the main bulk, can be easily countered by the use
-of parallel tempering (see Sec.~\ref{sec_parallel_tempering}). This was
-intentionally not used here to highlight the utility of the convergence
-statistic.
-
-\section{Choice of the prior metric volume}
-
-The prior metric volume estimates the approximate size of the parameter space
-relative to the size of a signal; any optimisation routine, given finite
-resources, will fail if this volume is too large. We will now investigate
-precisely what this means for an MCMC search given a fixed search setup.
-
-In a data set of 100 days, we simulate Gaussian noise and a signal with a
-sensitivity depth of 100. Then, we perform a directed search (over $f$ and
-$\fdot$) with 500 burn-in and 500 production steps, 100 walkers, and
-parallel tempering with $\Ntemps=3, \logmintemp=-3$. The prior for the search
-is uniform in frequency and spin-down, with the widths defined by an equal and
-fixed metric volume (i.e. $\Vpe^{(0)}=\Vpe^{(1)}$). This process is repeated
-500 times for a range of prior volumes so that we can infer the statistical
-behaviour. In the upper panel of Fig.~\ref{fig_volume_convergence} we plot the
-PSRF (calculated for a subset of the production samples) averaged over the two
-search dimensions for each simulation; this demonstrates that for $\sqrt{\V}
-\gtrsim 200$, the majority of simulations were not converged (i.e. had a PSRF
-above the threshold of 1.2). For $\sqrt{\V}=100$, only one simulation of the
-500 failed, while for $\sqrt{\V}=50$ all simulations reached convergence.
-
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{VolumeConvergenceInvestigation}
-\caption{Results of a study into the convergence of MCMC simulations as a
-function of the prior volume.}
-\label{fig_volume_convergence}
-\end{figure}
-
-It is reasonable to ask what happens in the cases that do not converge.
-For all the simulations considered here, the MCMC algorithm did find the peak
-in the likelihood: this was demonstrated by checking that the minimum mismatch
-for each simulation was $\lesssim 0.1$. However, for those simulations which
-were not converged, this was true only for a small subset of the walkers; the
-other walkers were sampling other points in the parameter space (away from the
-signal) and hence had not converged. This is demonstrated in the lower panel of
-Fig.~\ref{fig_volume_convergence}, where we plot the ratio of the posterior
-standard deviation to the prior width (averaged over the two search
-dimensions). For the converged solutions, this ratio is small, indicating that
-the posterior is narrower than the prior. However, for the solutions which did
-not converge, this ratio is larger; if none of the walkers had converged, such
-that the posterior samples were equivalent to the prior (i.e. uniform over some
-range), we would expect this ratio to be ${\sim}0.29$ (the standard deviation
-of a uniform distribution being its width divided by $\sqrt{12}\approx 3.46$).
-For the largest prior volumes considered here, the ratio approaches this value.
-This equivalence of the posterior to the prior is expected behaviour, since the
-walkers are initialised from the uniform prior (which is much wider than the
-signal); if we had extended the burn-in period, we would expect these walkers
-to eventually converge: the performance at a fixed prior volume will depend
-on the setup.
-
-For a given setup, one can choose the optimal prior volume (i.e. balancing the
-risk of losing the signal due to non-convergence against the computing cost) by
-stipulating a failure rate; for this example one may choose $\sqrt{\V}=100$
-based on an estimated failure rate of 1 in 500.
-
-
-\section{Zoom follow-up}
-\label{sec_follow_up}
-
-Incoherent detection statistics trade significance (the height of the peak) for
-sensitivity (the width of the peak). We will now discuss the advantages of
-using an MCMC sampler to follow up a candidate found incoherently, decreasing
-the number of segments (i.e. increasing the coherence time) until finally
-estimating its parameters and significance fully-coherently. We begin by
-rewriting Equation~\eqref{eqn_lambda_posterior}, the posterior distribution of
-the Doppler parameters, with the explicit dependence on $\Nseg$:
-\begin{equation}
-P(\blambda | \Nseg, x, \AmplitudePrior, \Hs, I)
-\propto e^{\hat{\F}(x; \Nseg, \blambda)} P(\blambda| \Hs, I).
-\end{equation}
-
-Introducing the number of segments $\Nseg$ as a variable provides a free
-parameter which adjusts the width of signal peaks in the likelihood. Given a
-candidate found by a semi-coherent search with $\Nseg^{0}$ segments, a natural
-way to perform a follow-up is to start the MCMC simulations with $\Nseg^{0}$
-segments (such that the signal peak occupies a substantial fraction of the
-prior volume) and then subsequently decrease the number of segments in a
-controlled manner. The aim is, at each decrease in the number of segments, to
-allow the MCMC walkers to converge to the narrower likelihood before again
-decreasing the number of segments. Ultimately, this process continues until
-$\Nseg^{\Nstages}=1$, where $\Nstages$ is the total number of stages in a
-ladder of segment numbers $\Nseg^{j}$. Choosing the ladder of segment numbers
-is a tuning choice, balancing computation time against allowing sufficient
-time for the MCMC walkers to converge at each stage so that the signal is not
-lost.
-
-Before discussing how to choose the ladder, we remark that in some ways this
-method bears a resemblance to `simulated annealing', a method in which the
-likelihood is raised to a power (the inverse temperature) and subsequently
-`cooled' \comment{Add citation?}. The important difference is that the
-semi-coherent likelihood is wider at short coherence times, rather than flatter
-as in the case of high-temperature simulated-annealing stages. For a discussion
-and examples of using simulated annealing in the context of CW searches see
-\citet{veitch2007}.
-
-For an efficient zoom follow-up, we want to choose the ladder of
-segment numbers such that the metric volume $\V_i \equiv \V(\Nseg^i)$ at the
-$i^{\rm th}$ stage is a constant fraction of the volume at adjacent stages.
-That is, we define
-\begin{equation}
-\mathcal{R} \equiv \frac{\V_{i+1}}{\V_{i}},
-\end{equation}
-where $\mathcal{R} \ge 1$ since $\Tcoh^{i+1} > \Tcoh^{i}$.
-
-Assuming then that the candidate comes with a suitably small prior bounding box,
-such that $\V(\Nseg^{0}) \sim \mathcal{O}(100)$, we define $\Nseg^0$ as the
-number of segments used in the search which found the candidate.
-
-We can therefore define a minimisation problem for $\Nseg^{i+1}$ given $\Nseg^{i}$:
-\begin{align}
-\min_{\Nseg^{i+1} \in \mathbb{N}}
-\left| \log\mathcal{R} + \log\V(\Nseg^{i}) - \log\V(\Nseg^{i+1})\right|,
-\end{align}
-which can be solved numerically to a specified tolerance. Due to the difficulty
-of solving such a minimisation with the integer constraint on $\Nseg^{i+1}$, we
-simply solve it as a real scalar and then round to the nearest integer. We now
-have a method to generate a ladder of $\Nseg^{i}$ which keeps the ratio of
-volume fractions fixed.
-
-We run the minimisation problem for a given value of $\mathcal{R}$, iterating
-until $\Nseg^{\Nstages} = 1$, at which point we stop: we have generated the
-ladder of $\Nseg^i$ given a single tuning parameter, $\mathcal{R}$. Note that
-$\Nstages$ is not known prior to running the iterative minimisation problem,
-and will in general depend on both the initial search prior bounding box and
-the setup.
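-
-The following sketch implements this iteration. Since the search is over a
-modest set of integers, it simply minimises over all smaller integers directly
-rather than solving a real-valued problem and rounding; the power-law form of
-$\V(\Nseg)$ is a hypothetical stand-in for the full metric-volume computation:
-\begin{verbatim}
-import numpy as np
-
-def V(Nseg, V_coherent=1e6, power=3.0):
-    # Hypothetical scaling: V grows as segments are merged (Nseg -> 1)
-    return V_coherent / Nseg**power
-
-def make_ladder(Nseg0, R):
-    ladder = [Nseg0]
-    while ladder[-1] > 1:
-        current = ladder[-1]
-        candidates = np.arange(1, current)
-        # minimise |log R + log V(current) - log V(next)|
-        cost = np.abs(np.log(R) + np.log(V(current)) - np.log(V(candidates)))
-        ladder.append(int(candidates[np.argmin(cost)]))
-    return ladder
-
-print(make_ladder(100, R=10))  # e.g. [100, 46, 21, 10, 5, 2, 1]
-\end{verbatim}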
-
-\subsection{Example}
-
-We now provide an illustrative example of the follow-up method. We consider a
-directed search over the sky position and frequency in 100 days of data from a
-single detector, with $\sqrt{\Sn}=10^{-23}$~Hz$^{-1/2}$ (at the fiducial
-frequency of the signal). The simulated signal has an amplitude
-$h_0=2\times10^{-25}$ such that the signal has a depth of $\sqrt{\Sn}/h_0=50$
-in the noise.
-
-First, we must define the setup for the run. Using $\mathcal{R}=10$ and
-$\V^{\rm min}=100$, our optimisation procedure is run and proposes the setup
-laid out in Table~\ref{tab_follow_up_run_setup}. In addition, we show the
-number of MCMC steps taken at each stage.
-\begin{table}[htb]
-\caption{The search setup used in Figure~\ref{fig_follow_up}, generated with
-$\mathcal{R}=10$ and $\Nseg^0=100$.}
-\label{tab_follow_up_run_setup}
-\input{follow_up_run_setup}
-\end{table}
-
-The choice of $\mathcal{R}$ and $\V^{\rm min}$ is a compromise between the
-total computing time and the ability to ensure a candidate will be identified.
-From experimentation, we find that $\V^{\rm min}$ values of 100 or so are
-sufficient to ensure that any peaks are sufficiently broad during the
-initial stage. Values of $\mathcal{R}$ much larger than $10^{3}$ or so were
-found to result in the MCMC simulations `losing' the peaks between stages; we
-conservatively opt for 10 here, but values as large as 100 were also successful.
-
-In Figure~\ref{fig_follow_up} we show the progress of the MCMC sampler during
-the follow-up.  As expected from Table~\ref{tab_follow_up_run_setup}, during
-the initial stage the signal peak is broad with respect to the size of the
-prior volume, so the MCMC simulation quickly converges to it. Subsequently,
-each time the number of segments is reduced, the peak narrows and the walkers
-similarly converge to this new solution. At times the walkers can appear to
-jump to inconsistent positions; this is due to the changing way that the
-Gaussian noise adds to the signal as the coherence time changes.
-Eventually, the walkers all converge to the simulated signal.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{follow_up_walkers}
-\caption{In the top three panels we show the progress of the 500 parallel
-walkers (see Figure~\ref{fig_MCMC_simple_example} for a description) during the
-MCMC simulation for each of the search parameters: frequency $f$,
-right ascension $\alpha$, and declination $\delta$. Each vertical dashed line
-indicates the start of a new stage of the search; the parameters for all stages
-are listed in Table~\ref{tab_follow_up_run_setup}.}
-\label{fig_follow_up}
-\end{figure}
-
-The key advantage to note here is that all walkers successfully converged to the
-signal peak, which occupies $\sim 10^{-6}$ of the initial volume. While it is
-possible for this to occur during an ordinary MCMC simulation (with $\Tcoh$
-fixed at $\Tspan$), it would take substantially longer to converge as the
-chains explore the other `noise peaks' in the data.
-
-\subsection{Monte-Carlo studies}
-\label{sec_MCS_zoom}
-
-In order to understand how well the MCMC follow-up method works, we will test
-its ability to successfully identify simulated signals in Gaussian noise. This
-will be done in a Monte-Carlo study, with independent random realisations of
-the Gaussian noise, amplitude, and Doppler parameters in suitable ranges. Such
-a method is analogous to the studies performed in \citet{shaltev2013}, except
-that we present results as a function of the fixed injected sensitivity depth,
-rather than the squared SNR.
-
-In particular, we will generate a number of 100-day data sets containing
-independent realisations of Gaussian noise and a simulated CW signal. We choose
-the parameters of the signal in such a way as to model the candidates generated
-from directed and all-sky searches, by drawing the signal parameters from
-appropriate distributions. However, we do not draw $h_0$ randomly, but instead
-run the MC study at a number of selected values chosen such that, given the
-fixed $\sqrt{\Sn}=1\times10^{-23}$~Hz$^{-1/2}$, the signals are injected with a
-depth $\mathcal{D} \in [100, 400]$.  To simulate an isotropic distribution of
-sources, we draw the remaining amplitude parameters for each signal uniformly
-from $\phi \in [0, 2\pi]$, $\psi \in [-\pi/4, \pi/4]$, and $\cos\iota \in [-1,
-1]$; a sketch of these draws is given below.
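-
-A short sketch of these injection draws (the noise level follows the examples
-above; the random draws implement the isotropic-orientation distributions just
-listed):
-\begin{verbatim}
-import numpy as np
-
-rng = np.random.default_rng()
-sqrtSn = 1e-23                    # noise level [Hz^(-1/2)]
-depth = 100.0                     # chosen from [100, 400]
-h0 = sqrtSn / depth               # amplitude fixed by the depth
-phi = rng.uniform(0, 2 * np.pi)   # initial phase
-psi = rng.uniform(-np.pi / 4, np.pi / 4)  # polarisation angle
-cosi = rng.uniform(-1, 1)         # inclination, isotropic in cos(iota)
-\end{verbatim}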
-
-To provide a reference, we will compare the MC follow-up study against the
-expected maximum theoretical detection probability for an infinitely dense
-fully-coherent search of data containing isotropically-distributed signals, as
-calculated by \citet{wette2012}. Note, however, that we will parameterise with
-respect to the sensitivity depth (i.e. using Equation~(3.8) of \citet{wette2012}
-to relate the averaged SNR to the depth). The probability is maximal in the
-sense that signals are `lost' during the follow-up simply because they are not
-sufficiently strong to rise above the noise.
-
-
-\subsubsection{Follow-up candidates from a directed search}
-\label{sec_directed_follow_up}
-
-In a directed search, the sky-location parameters $\alpha$ and $\delta$ are
-fixed; in our study we fix them to the location of the Crab pulsar, but this
-choice is arbitrary and holds no particular significance. The output of a
-semi-coherent gridded directed search would provide the grid point with the
-highest detection statistic, and some box bounding the candidate given some
-uncertainty. We will assume in this section that, given the search setup
-$\Nseg^{0}$ of the input search, the bounding box is sufficiently small to
-ensure that $\V(\Nseg^0)\sim \mathcal{O}(100)$. This is not a limiting
-assumption, as any search can (quite cheaply) increase the density of grid
-points around any interesting candidates in order to better constrain their
-uncertainty.
-
-Before applying the directed follow-up to simulated signals in noise, we need
-to characterise its behaviour in Gaussian noise alone. To do so, we simulate
-$\DirectedMCNoiseN$ realisations of Gaussian noise and perform the follow-up
-search on these. A histogram of the results is provided in
-Figure~\ref{fig_hist_DirectedMCNoiseOnly}; the largest observed value was
-found to be $\DirectedMCNoiseOnlyMaximum$. From this, we can set a threshold
-for the detection statistic of $\twoFtilde_{\rm th} = 60$, an arbitrary
-number chosen to be sufficiently larger than the maximum seen in noise and
-consistent with the value chosen in \citet{shaltev2013}.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{directed_noise_twoF_histogram}
-\caption{Histogram of the recovered $\widetilde{2\F}$ values applying the
-directed follow-up routine to $\DirectedMCNoiseN$ simulated Gaussian noise
-realisations.}
-\label{fig_hist_DirectedMCNoiseOnly}
-\end{figure}
-
-The behaviour of the follow-up is independent of the exact frequency and
-spin-down values used (this is not true for an all-sky follow-up, as discussed
-in Section~\ref{sec_all_sky_follow_up}). As such, we can, without loss of
-generality, define our Monte-Carlo follow-up in the following way. First, we
-select an arbitrary frequency and spin-down of $f_0=30$~Hz and
-$\dot{f}_0=10^{-10}$~Hz/s, and a surrounding uncertainty box $\Delta f$ and
-$\Delta \dot{f}$ chosen such that the uncertainties in frequency and spin-down
-are roughly equivalent in terms of mismatch. Then, we pick a candidate uniformly
-from within this uncertainty box; this choice reflects the fact that the grid
-is chosen such that the probability distribution of candidate signals is
-uniform.
-
-Having generated the data according to the prescription above, we proceed to
-perform a hierarchical MCMC follow-up. Given the data span and initial bounding
-box, we compute the optimal hierarchical setup, the details of which are given
-in Table~\ref{tab_directed_MC_follow_up}; this setup was used for all MC
-simulations.
-
-\begin{table}[htb]
-\caption{Run-setup for the directed follow-up Monte-Carlo study, generated with
-$\mathcal{R}=10$ and $\Nseg^0=20$.}
-\label{tab_directed_MC_follow_up}
-\input{directed_setup_run_setup}
-\end{table}
-
-This process yields a maximum detection statistic $\widetilde{2\F}_{\rm max}$.
-The signal is considered `detected' if $\widetilde{2\F}_{\rm max} >
-\widetilde{2\F}_{\rm th}$, where we set the threshold at
-$\widetilde{2\F}_{\rm th}=60$. In Figure~\ref{fig_directed_MC_follow_up} we
-plot the fraction of the MC simulations which were recovered and compare this
-against the theoretical maximum, given the threshold. The figure demonstrates
-that the recovery power of the MCMC follow-up shows negligible differences
-compared to the theoretical maximum.
-
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.45\textwidth]{directed_recovery}
-\caption{Recovery fraction for the directed follow-up. The Monte-Carlo results
-come from searches over randomly drawn signals using the setup described in
-Table~\ref{tab_directed_MC_follow_up}.}
-\label{fig_directed_MC_follow_up}
-\end{figure}
-
-\comment{Need to add discussion on the pmax 'analytic fit'}
-
-\subsubsection{Follow-up candidates from an all-sky search}
-\label{sec_all_sky_follow_up}
-
-We now test the follow-up method when applied to candidates from an all-sky
-search which, by definition, have uncertain sky-position parameters $\alpha$
-and $\delta$. Searching over these parameters, in addition to the frequency
-and spin-down, not only increases the parameter-space volume that needs to be
-searched, but also adds difficulty due to correlations between the sky-position
-and spin-down parameters \comment{Does some of Karl's work discuss this?}.
-
-To replicate the conditions of candidates from an all-sky search, we draw the
-candidate positions isotropically from the unit sphere (i.e. $\alpha$ uniform
-on $[0, 2\pi]$ and $\delta = \cos^{-1}(2u-1)-\pi/2$ where $u$ is uniform
-on $[0, 1]$). We then place an uncertainty box containing the candidates with a
-width $\Delta\alpha=\Delta\delta=0.05$; this box is not centered on the
-candidate, but chosen in such a way that the location of the candidate has a
-uniform probability distribution within the box. This neglects the non-uniform
-variation in $\delta$ over the sky-patch, but given the small area this should
-not cause any significant bias. The frequency, spin-down, and amplitude parameters
-are chosen in the same way as for the directed search
-(Section~\ref{sec_directed_follow_up}).
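-
-For concreteness, a minimal sketch of this sky-position draw (in Python) is:
-\begin{verbatim}
-import numpy as np
-
-# Isotropic draw on the sky, then an uncertainty box positioned
-# so that the candidate is uniformly distributed within it.
-dAlpha = dDelta = 0.05
-u = np.random.uniform(0, 1)
-Alpha = np.random.uniform(0, 2 * np.pi)
-Delta = np.arccos(2 * u - 1) - np.pi / 2
-
-# The candidate then lies uniformly within the box
-# [x_min, x_min + width] in each coordinate.
-Alpha_min = Alpha - np.random.uniform(0, dAlpha)
-Delta_min = Delta - np.random.uniform(0, dDelta)
-\end{verbatim}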
-
-Again, we first characterise the behaviour of the all-sky follow-up by applying
-it to $\AllSkyMCNoiseN$ realisations of Gaussian noise. The resulting histogram
-is provided in Figure~\ref{fig_hist_AllSkyMCNoiseOnly} and the largest $\twoFtilde$
-value was found to be $\AllSkyMCNoiseOnlyMaximum$. This is larger than the
-value found for the directed search; since both use the same number of
-Gaussian noise trials, this must result from the increased number of
-search parameters. \comment{Ask Reinhard about Miroslav's statement on number of
-templates}. As a result we will correspondingly increase our detection threshold
-for the all-sky search to $\twoFtilde_{\rm th} = 70$; again this is an arbitrary
-choice, but it is consistent with the values chosen in \citet{shaltev2013}.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{allsky_noise_twoF_histogram}
-\caption{Histogram of the recovered $\widetilde{2\F}$ values from applying the
-all-sky follow-up routine to $\AllSkyMCNoiseN$ simulated Gaussian noise
-realisations.}
-\label{fig_hist_AllSkyMCNoiseOnly}
-\end{figure}
-
-Producing \CHECK{1000} independent MC simulations, we then perform a follow-up on
-each using the setup given in Table~\ref{tab_allsky_MC_follow_up}. The
-resulting recovery fraction as a function of the injected sensitivity depth is given
-in Figure~\ref{fig_allsky_MC_follow_up}.
-
-\begin{table}[htb]
-\caption{Run-setup for the all-sky follow-up Monte-Carlo study, generated with
-$\mathcal{R}=10$ and $\Nseg^0=20$. Note that the number of representative
-templates will vary over the sky; these values are computed at the equator
-(i.e. $\delta=0$), which was found to produce the largest volumes, potentially
-because the sky patch is kept constant.}
-\label{tab_allsky_MC_follow_up}
-\input{allsky_setup_run_setup}
-\end{table}
-
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.45\textwidth]{allsky_recovery}
-\caption{Recovery fraction for the all-sky follow-up. The Monte-Carlo results
-come from searches over randomly drawn signals using the setup described in
-Table~\ref{tab_allsky_MC_follow_up}.}
-\label{fig_allsky_MC_follow_up}
-\end{figure}
-
-\comment{Need to add a conclusion}
-
-\section{Alternative waveform models: transients}
-\label{sec_transients}
-
-The term \emph{transient-CWs} refers to periodic gravitational wave signals
-with a duration $\mathcal{O}(\textrm{hours--weeks})$ which have a
-phase evolution described by Equation~\eqref{eqn_phi}. \citet{prix2011} coined
-this term and laid out the motivations for searching for such signals: in
-essence it is astrophysically plausible for such signals to exist and we should
-therefore build tools capable of finding them. Moreover, the authors described
-a simple extension to the $\F$-statistic (and by inheritance to all associated
-detection statistics) which provides a method to search for them. This
-introduces three new parameters: the start time, the duration, and a window function
-which determines the evolution of $h_0(t)$ (typical examples being either a
-rectangular window or an exponential decay). These methods are implemented in
-the code-base used by our sampler to compute the likelihood and therefore we
-can expose these search parameters to our MCMC optimisation. In the following
-we will detail a simple example showing when it may be appropriate to search for
-a transient signal and how it is handled by the MCMC sampler.
-
-We simulate a signal in Gaussian noise at a depth of 10. If the signal were
-continuous (i.e. lasted for the entire duration of the data span), it would
-be recovered with a predicted detection statistic of
-$\widetilde{2\F}\approx5162$. However, the signal we inject is transient in
-that it starts one third of the way through the data span and stops abruptly
-two-thirds of the way through (with a constant $h_0$ during this
-period). Since the signal lasts for only $1/3$ of the original data span, the
-expected $\widetilde{2\F}$ of the transient signal in a matched-filter over only
-the portion of data for which it is `on' is $5162/3\approx1720$.
-
-Running a fully-coherent MCMC search over the whole data span, we find a peak
-in the likelihood, but with a detection statistic of $\widetilde{2\F}=596$;
-relative to the continuous-signal prediction, this equates to a mismatch of
-$\approx0.9$: we have lost significance due to the inclusion of noise-only
-data in the matched filter.
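-Explicitly, taking the usual approximation $\widetilde{2\F} \approx 4 + \rho^2$,
-where $\rho^2$ is the squared signal-to-noise ratio, the quoted mismatch
-follows as
-\begin{equation}
-\mu \approx 1 - \frac{596 - 4}{5162 - 4} \approx 0.89.
-\end{equation}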
-
-In a real search, we cannot know beforehand what the $h_0$ of the signal will
-be, so it is not possible to diagnose that the signal is transient from this
-mismatch alone. However, tools do exist which can help in this regard. In
-this case, plotting the cumulative $\widetilde{2\F}$, as shown in
-Figure~\ref{fig_transient_cumulative_twoF}, demonstrates that the first 100
-days contribute no power to the detection statistic, during the middle 100
-days there is an approximately linear increase in $\widetilde{2\F}$ with time
-(as expected for a signal), while in the last 100 days there is a gradual decay
-from the peak as discussed in \citet{prix2011} \meta{Mention 1/t specifically?}.
-Such a figure is characteristic of a transient signal.
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.45\textwidth]{transient_search_initial_stage_twoFcumulative}
-\caption{Plot of the cumulative $\widetilde{2\F}$ for a transient signal with a
-constant $h_0$ which lasts from 100 to 200 days from the observation start
-time.}
-\label{fig_transient_cumulative_twoF}
-\end{figure}
-
-Having identified that the putative signal may in fact be transient, an extension
-of the follow-up procedure is to search over these transient parameters. In our
-MCMC method, these require a prior. For the window function, one must choose it
-to be either rectangular or exponential; one could run both and then use the
-estimated Bayes factors to decide between the two priors. For the start time it
-is sufficient to provide a uniform distribution on the observation span; the
-duration can similarly be chosen as a uniform distribution from zero to the
-total observation span or, more informatively, as the absolute value of a
-zero-mean normal distribution, placing greater weight on shorter transients.
-The choice of prior can allow the transient signal to overlap with epochs
-outside of the data span; in such instances the likelihood is computed where
-possible, but if the computation fails (for example, if there is no data) the
-likelihood is returned as $-\infty$.
-\comment{Probably this is a bit too much detail}
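-
-As a concrete illustration, a minimal sketch of such a prior specification
-(written as a Python dictionary; the key names and the half-normal scale are
-assumptions for this sketch, not the exact interface of any particular code)
-is:
-\begin{verbatim}
-# Illustrative transient-parameter priors; key names and scales
-# are assumptions for this sketch.
-tstart = 1000000000          # observation start [GPS s]
-Tspan = 300 * 86400          # total observation span [s]
-transient_prior = {
-    # start time: uniform over the observation span
-    'transient_tstart': {'type': 'unif',
-                         'lower': tstart,
-                         'upper': tstart + Tspan},
-    # duration: |N(0, Tspan/2)| favours shorter transients; a
-    # uniform alternative would be
-    # {'type': 'unif', 'lower': 0, 'upper': Tspan}
-    'transient_duration': {'type': 'halfnorm',
-                           'loc': 0,
-                           'scale': Tspan / 2.0},
-}
-\end{verbatim}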
-
-Putting all this together, we run the sampler on the
-simulated transient signal and obtain the posterior estimates given in
-Figure~\ref{fig_transient_posterior}. The resulting best-fit has a
-$\widetilde{2\F}\approx 1670$, in line with the expected value. Comparing the
-Bayes factors between the transient and fully-coherent search can quantify whether
-the improvement in fit due to the inclusion of the transient parameters was
-sufficient to compensate for the greater prior volume and produce an
-improvement in the evidence for the model.
-
-\begin{figure*}[htb]
-\centering
-\includegraphics[width=0.8\textwidth]{transient_search_corner}
-\caption{Posterior distributions for a targeted search of data containing
-a simulated transient signal and Gaussian noise.}
-\label{fig_transient_posterior}
-\end{figure*}
-
-
-
-\section{Alternative waveform models: Glitch-robust follow-up search}
-\label{sec_glitches}
-
-Observations of radio pulsars show that occasionally the regular spin-down of a
-neutron star due to electromagnetic and (potentially) gravitational torques is
-interrupted by sudden glitch events during which the rotation frequency
-suddenly \emph{increases} (see for example \citet{espinoza2011} for an
-observational review). The glitch mechanism is not yet fully understood, but as
-argued in \citet{ashton2017}, it seems plausible that the glitch mechanism, as
-it affects the radio pulsars, will also affect gravitars. As such, wide
-parameter-space searches which look for unknown neutron stars spinning down
-primarily by gravitational wave emission (i.e. gravitars) could be hampered by
-the existence of glitches in the signals. This was investigated by
-\citet{ashton2017} and it was found that for many recent and ongoing searches,
-there exists a reasonable probability of one or more glitches occurring in the
-signal during typical observation spans and that these glitches may be
-sufficiently large that the loss of detection statistic could be of order
-unity. This is particularly true for searches looking for signals with a large
-spin-down and over long data spans ($\gtrsim 100$~days). Semi-coherent searches
-are less affected (in terms of the loss of detection statistic) such that,
-provided the number of glitches in a signal during a search is less than, say,
-three, the initial search should still find the signal. However, that signal may
-look like a transient signal, or be `lost' during the follow-up process.
-
-\citet{ashton2017} motivates the development of a \emph{glitch-robust} follow-up
-search which is capable of fitting for the glitch parameters. As such, we will
-now discuss how the $\F$-statistic optimisation routine discussed in this work
-can be used to search for glitching signals.
-
-\subsection{Glitch-robust detection statistic}
-In our analysis, we consider a signal which undergoes $\Nglitches$ glitches.
-We will model a glitch $\ell$ as a sudden discontinuous change in the frequency
-and spin-down Doppler parameters $\delta \blambda^\ell = [0, 0, \delta f,
-\delta \fdot, \ldots]$ at some time $\tglitch^\ell$. This is to be
-interpreted, as is done for radio-pulsar glitches, as the instantaneous change
-in frequency and spin-down at the time of the glitch. For convenience, we
-define $\delta\blambda^0 = [0, 0, 0, 0, \ldots]$, $\tglitch^0 =
-t_\mathrm{start}$ (i.e. the start time of the observation), and
-$\tglitch^{\Nglitches+1}=t_\mathrm{end}$ (i.e. the end time of the
-observation).  We refer to $\{\delta\blambda^{\ell}\}, \{\tglitch^\ell\}$ as
-the set of all glitch parameters.
-
-An ideal glitch search would include these new parameters in the phase model
-itself and compute the $\F$-statistic fully coherently, i.e. rewriting
-Equation~\eqref{eqn_B_S_N} as
-\begin{multline}
-\Bsn(x| \{\tglitch^\ell\}, \{\delta\blambda^\ell\}, \AmplitudePrior, \blambda, I) \equiv \\
-\int
-\mathcal{L}(x ;\A, \blambda, \{\tglitch^\ell\}, \{\delta\blambda^\ell\})
-P(\A| \Hs, I) d\A
-\end{multline}
-where the glitch parameters subsequently modify the phase model. However, we
-propose instead the following pragmatic approach: we use a
-semi-coherent detection statistic with the boundaries between \emph{segments}
-set to the glitch times $\tglitch^\ell$. That is, we define a glitch-robust
-semi-coherent $\mathcal{F}$-statistic:
-\begin{multline}
-\twoFhat(\Nglitches, \blambda, \{\tglitch^\ell\}, \{\delta\blambda^{\ell}\})= \\
-\sum_{\ell=0}^{\Nglitches}
-\twoFtilde(\alpha, \delta, \blambda+\delta\blambda^\ell;
-\tglitch^\ell, \tglitch^{\ell+1})
-\label{eqn_glitch_likelihood}
-\end{multline}
-\comment{Need to think about how to present this better}
-
-This simplistic method leverages readily available and tested code, at the cost
-of a loss of sensitivity relative to the fully-coherent search by a factor of
-$\sqrt{\Nglitches+1}$
-\citep{wette2015}. Such a loss is acceptable, given that the signals must be
-sufficiently strong to have been identified by the initial all-sky or directed
-search.
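-
-To make the construction concrete, the following minimal sketch (in Python;
-\texttt{compute\_twoFtilde} is a hypothetical stand-in for whatever routine
-evaluates $\twoFtilde$ over a given time interval) sums the per-segment
-statistics as in Equation~\eqref{eqn_glitch_likelihood}:
-\begin{verbatim}
-import numpy as np
-
-def twoFhat(lam, delta_lams, tglitches, tstart, tend,
-            compute_twoFtilde):
-    # lam        : Doppler parameters [alpha, delta, f, fdot, ...]
-    # delta_lams : glitch offsets; delta_lams[0] is all zeros
-    # tglitches  : sorted glitch times t^1, ..., t^N
-    bounds = [tstart] + list(tglitches) + [tend]
-    total = 0.0
-    for ell in range(len(bounds) - 1):
-        total += compute_twoFtilde(
-            np.asarray(lam) + np.asarray(delta_lams[ell]),
-            bounds[ell], bounds[ell + 1])
-    return total
-\end{verbatim}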
-
-The general methodology for a glitch-robust statistic (i.e.
-Eq.~\eqref{eqn_glitch_likelihood}) could be implemented in any number of ways,
-the most straightforward of course being a gridded search over the glitch
-parameters. However, for each glitch this introduces at least two new
-parameters to search over. For reasonable priors on the unknown glitch
-parameters, the likelihood peak occupies a small fraction of the total search
-space, and so a gridded search will spend the majority of the compute time
-exploring the empty regions of parameter space. However, provided that the
-initial Doppler parameters $\blambda$ are well constrained, the fraction of the
-search space occupied should not be so small as to make application of the MCMC
-procedures described in this work inefficient. Indeed, in practice we find
-the MCMC parameter estimation to be highly efficient.
-
-\subsection{Case study: directed search for a glitching signal}
-
-As an illustration of the use of this method, we simulate a signal in Gaussian
-noise which undergoes a single glitch; the properties of this signal and the
-data are given in Table~\ref{tab_single_glitch}.
-
-\begin{table}
-\caption{Values used to generate the simulated glitching signal}
-\label{tab_single_glitch}
-\begin{tabular}{c|c}
-Parameter & Value \\ \hline
-$T$ & \SingleGlitchTspan~days \\
-$\sqrt{S_{\rm X}}/h_0$ & \SingleGlitchDepth \\
-$f$ & \SingleGlitchFzero~Hz \\
-$\dot{f}$ & \SingleGlitchFone~Hz/s \\
-$\delta f$ & \SingleGlitchdeltaFzero~Hz \\
-$\delta \dot{f}$ & \SingleGlitchdeltaFone~Hz/s \\
-$\frac{\tglitch - \tstart}{T}$ & \SingleGlitchR
-\end{tabular}
-\end{table}
-
-In Figure~\ref{fig_glitch_grid} we show the fully-coherent $\twoFtilde$ value
-computed on a grid over $f$ and $\dot{f}$ (i.e. with a non-glitching template).
-The maximum computed $\twoFtilde$ value is found to be $\SingleGlitchFCtwoF$.
-This can be compared to the expected $\twoFtilde$ for a perfectly matched
-signal in the same data, which is $\SingleGlitchFCtwoFPM$: the glitch results in
-the loss of $\SingleGlitchFCMismatch$ of the detection statistic. The figure
-distinctly shows two peaks, a feature indicative of a glitching signal.
-
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{single_glitch_F0F1_grid_2D}
-\caption{The fully-coherent $\twoFtilde$ computed over a grid in $f$ and
-$\dot{f}$ for the simulated glitching signal with the properties described in
-Table~\ref{tab_single_glitch}.}
-\label{fig_glitch_grid}
-\end{figure}
-
-To perform the glitch-robust search we define a prior as follows: uniform in
-$f$ and $\dot{f}$ (over the range shown in Figure~\ref{fig_glitch_grid}); for
-$\tglitch$ we define a uniform prior over the entire duration except the first
-and last 10\%; then for $\delta f$ we assign a prior of $\CHECK{\mathcal{N}(0,
-10^{-7} f_0)}$ and for $\delta\dot{f}$ a prior of $\CHECK{|\mathcal{N}(0,
-10^{-3}\dot{f})|}$ (i.e. a range of physically reasonable values preferring no
-glitch, but allowing an arbitrarily large glitch). Running the MCMC simulation
-with such a prior, we find the walkers converge to a single solution after
-$\sim 1000$ steps. Taking the converged samples (after the burn-in period), in
-Figure~\ref{fig_glitch_posteriors} we plot the resulting posteriors. Finally,
-the maximum recovered $\twoFhat$ was found to be $\SingleGlitchGlitchtwoF$.
-Comparing this to the fully-coherent $\twoFtilde=\SingleGlitchFCtwoFPM$, we see
-that we have recovered the signal with negligible mismatch.
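-
-A minimal sketch of this prior specification (written as a Python dictionary;
-the key names, distribution labels, and reference values are assumptions for
-this sketch rather than the exact interface of any particular code) is:
-\begin{verbatim}
-# Illustrative glitch-search prior; names and values are assumed.
-tstart = 1000000000          # observation start [GPS s]
-T = 100 * 86400              # hypothetical data span [s]
-f0, fdot = 30.0, -1e-10      # hypothetical Doppler reference values
-theta_prior = {
-    # glitch time: uniform over the middle 80% of the span
-    'tglitch': {'type': 'unif',
-                'lower': tstart + 0.1 * T,
-                'upper': tstart + 0.9 * T},
-    # frequency jump: N(0, 1e-7 f0)
-    'delta_f': {'type': 'norm', 'loc': 0.0, 'scale': 1e-7 * f0},
-    # spin-down jump: |N(0, 1e-3 fdot)|
-    'delta_fdot': {'type': 'halfnorm', 'loc': 0.0,
-                   'scale': 1e-3 * abs(fdot)},
-}
-\end{verbatim}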
-
-\begin{figure*}[htb]
-\centering
-\includegraphics[width=0.8\textwidth]{single_glitch_glitchSearch_corner}
-\caption{The posterior distributions of the Doppler and glitch parameters
-from the glitch-robust search applied to a simulated glitching signal. The
-simulation parameters are listed
-in Table~\ref{tab_single_glitch}. The extent of these plots does not correspond
-to the prior range.}
-\label{fig_glitch_posteriors}
-\end{figure*}
-
-\subsection{Monte-Carlo studies}
-\label{sec_MCS_glitch}
-
-The glitch-robust statistic is similar to a semi-coherent
-$\mathcal{F}$-statistic, but with additional degrees of freedom due to the
-glitch parameters. As such, to compute a $p$-value (or false alarm rate) for a
-given $\twoFhat$ we must understand the distribution of $\twoFhat(\Nglitches)$
-in noise. In general this will depend on the choice of prior and number of
-glitches; we will now demonstrate the general characteristics for a 100~day
-duration of data, searching for signals with 1 and 2 glitches.
-
-We follow a similar prescription to that laid out in Sec.~\ref{sec_MCS_zoom};
-namely, we repeatedly run the $\Nglitches$-glitch search on independent 100~day
-data spans of Gaussian noise. Taking the maximum $\twoFhat$ from each search,
-we plot the resulting histogram in
-Figure~\ref{fig_single_glitch_twoFhat_histgram}.
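-
-A toy numerical illustration of why the noise distribution shifts with
-$\Nglitches$ (assuming, as a sketch, that in noise each segment contributes
-an independent $\chi^2$ variable with 4 degrees of freedom, and that the
-search maximises over an effective number of independent templates, here set
-arbitrarily to 1000) is:
-\begin{verbatim}
-import numpy as np
-
-# Toy model, not the actual search: 2F-hat summed over N+1
-# segments in noise is chi-squared with 4(N+1) degrees of
-# freedom; maximising over templates shifts it upwards.
-rng = np.random.default_rng(0)
-n_trials, n_templates = 1000, 1000   # assumed effective numbers
-for n_glitch in (1, 2):
-    max_2Fhat = rng.chisquare(
-        4 * (n_glitch + 1),
-        size=(n_trials, n_templates)).max(axis=1)
-    print(n_glitch, max_2Fhat.mean())
-\end{verbatim}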
-
-\begin{figure}[htb]
-\centering
-\includegraphics[width=0.5\textwidth]{glitch_noise_twoF_histogram}
-\caption{The distribution of the maximum $\twoFhat$ recovered by the
-$\Nglitches$-glitch searches applied to independent Gaussian-noise
-realisations.}
-\label{fig_single_glitch_twoFhat_histgram}
-\end{figure}
-
-\comment{Need to attempt to understand this behaviour analytically}
-%This demonstrates that as the number of glitches is increased, the distribution
-%in noise is shifted to large $\twoFhat$ value. This is expected since for
-%
-%Since the $\twoFhat$ for a
-%perfectly matched signal is constant regardless of the number of segments (or
-%glitches), this is precisely why semi-coherent searches are less sensitive for
-%larger numbers of segments. 
-
-
-\section{Conclusion}
-\label{sec_conclusion}
-
-
-
-\section{Acknowledgements}
-
-\bibliography{bibliography}
-
-\end{document}
diff --git a/Paper/single_glitch_F0F1_grid_2D.png b/Paper/single_glitch_F0F1_grid_2D.png
deleted file mode 100644
index d0b2eea7e47eb37fb01b09b1962df6eb403066c7..0000000000000000000000000000000000000000
Binary files a/Paper/single_glitch_F0F1_grid_2D.png and /dev/null differ
diff --git a/Paper/single_glitch_glitchSearch_corner.png b/Paper/single_glitch_glitchSearch_corner.png
deleted file mode 100644
index 95bfceb917d3b8f061e121ed78f57c442addc411..0000000000000000000000000000000000000000
Binary files a/Paper/single_glitch_glitchSearch_corner.png and /dev/null differ
diff --git a/Paper/transient_search_corner.png b/Paper/transient_search_corner.png
deleted file mode 100644
index bff7f78e921bf2684c07f91e1201084b15587867..0000000000000000000000000000000000000000
Binary files a/Paper/transient_search_corner.png and /dev/null differ
diff --git a/Paper/transient_search_initial_stage_twoFcumulative.png b/Paper/transient_search_initial_stage_twoFcumulative.png
deleted file mode 100644
index a08cb6e9137e84f55be5dd1d29f39fd893a3c54a..0000000000000000000000000000000000000000
Binary files a/Paper/transient_search_initial_stage_twoFcumulative.png and /dev/null differ