Commit d09c9d1e authored by Gregory Ashton

Merge branch 'master' of gitlab.aei.uni-hannover.de:GregAshton/PyFstat

parents 00a02b1b da2b4ca8
\begin{tabular}{c|cccccc}
Stage & $\Nseg$ & $\Tcoh$ [days] & $\Nsteps$ & $\V$ & $\Vsky$ & $\Vpe$ \\ \hline
0 & 27 & 3.7 & 100 & 1.0 & 1.0 & 1.0 \\
1 & 15 & 6.7 & 100 & 1.0 & 1.0 & 1.0 \\
2 & 8 & 12.5 & 100 & 1.0 & 1.0 & 1.0 \\
3 & 4 & 25.0 & 100 & 1.0 & 1.0 & 1.0 \\
4 & 1 & 100.0 & 50,50 & 1.0 & 1.0 & 1.0 \\
\end{tabular}
@@ -27,7 +27,7 @@ DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
depth = 100
nsteps = 20
run_setup = [((nsteps, 0), 20, False),
((nsteps, 0), 7, False),
((nsteps, 0), 2, False),
frequency_grid_files = grided_frequency_search_1D.png
fully_coherent_files = fully_coherent_search_using_MCMC_walkers.png
follow_up_files = follow_up_run_setup.tex follow_up_walkers.png
transient_files = transient_search_initial_stage_twoFcumulative.png transient_search_corner.png
glitch_files = single_glitch_F0F1_grid_2D.png single_glitch_glitchSearch_corner.png
all_files = $(frequency_grid_files) $(fully_coherent_files) $(follow_up_files) $(transient_files) $(glitch_files)
copyfiles:
cd data; cp $(all_files) ../../
import pyfstat
import numpy as np
# Properties of the GW data
sqrtSX = 1e-23
tstart = 1000000000
duration = 100*86400
tend = tstart + duration
# Properties of the signal
F0 = 30.0
F1 = -1e-10
F2 = 0
Alpha = 5e-3
Delta = 6e-2
tref = .5*(tstart+tend)
depth = 10
h0 = sqrtSX / depth
data_label = 'fully_coherent_search_using_MCMC'
data = pyfstat.Writer(
label=data_label, outdir='data', tref=tref,
tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
Delta=Delta, h0=h0, sqrtSX=sqrtSX)
data.make_data()
# The predicted twoF, given by lalapps_predictFstat can be accessed by
twoF = data.predict_fstat()
print('Predicted twoF value: {}\n'.format(twoF))
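# Widths of the uniform search priors in F0 and F1, and the corresponding
# per-dimension metric volumes VF0, VF1 (their product gives the total volume V)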
DeltaF0 = 1e-7
DeltaF1 = 1e-13
VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
theta_prior = {'F0': {'type': 'unif',
'lower': F0-DeltaF0/2.,
'upper': F0+DeltaF0/2.},
'F1': {'type': 'unif',
'lower': F1-DeltaF1/2.,
'upper': F1+DeltaF1/2.},
'F2': F2,
'Alpha': Alpha,
'Delta': Delta
}
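# Sampler settings: number of parallel-tempering temperatures, walkers, and
# MCMC steps (our reading: [burn-in, production])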
ntemps = 1
log10temperature_min = -1
nwalkers = 1000
nsteps = [50, 50]
mcmc = pyfstat.MCMCSearch(
label='fully_coherent_search_using_MCMC', outdir='data',
sftfilepath='data/*'+data_label+'*sft', theta_prior=theta_prior, tref=tref,
minStartTime=tstart, maxStartTime=tend, nsteps=nsteps, nwalkers=nwalkers,
ntemps=ntemps, log10temperature_min=log10temperature_min)
mcmc.run(context='paper', subtractions=[30, -1e-10])
mcmc.plot_corner(add_prior=True)
mcmc.print_summary()
import pyfstat
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('paper')
F0 = 30.0
F1 = 0
F2 = 0
Alpha = 1.0
Delta = 1.5
# Properties of the GW data
sqrtSX = 1e-23
tstart = 1000000000
duration = 100*86400
tend = tstart+duration
tref = .5*(tstart+tend)
depth = 70
data_label = 'grided_frequency_depth_{:1.0f}'.format(depth)
h0 = sqrtSX / depth
data = pyfstat.Writer(
label=data_label, outdir='data', tref=tref,
tstart=tstart, F0=F0, F1=F1, F2=F2, duration=duration, Alpha=Alpha,
Delta=Delta, h0=h0, sqrtSX=sqrtSX)
data.make_data()
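# Frequency grid: spacing dF0 chosen for a maximum metric mismatch m, covering
# a band of 800 grid points centred on the injected frequency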
m = 0.001
dF0 = np.sqrt(12*m)/(np.pi*duration)
DeltaF0 = 800*dF0
F0s = [F0-DeltaF0/2., F0+DeltaF0/2., dF0]
F1s = [F1]
F2s = [F2]
Alphas = [Alpha]
Deltas = [Delta]
search = pyfstat.GridSearch(
'grided_frequency_search', 'data', 'data/*'+data_label+'*sft', F0s, F1s,
F2s, Alphas, Deltas, tref, tstart, tend)
search.run()
fig, ax = plt.subplots()
xidx = search.keys.index('F0')
frequencies = np.unique(search.data[:, xidx])
twoF = search.data[:, -1]
#mismatch = np.sign(x-F0)*(duration * np.pi * (x - F0))**2 / 12.0
ax.plot(frequencies, twoF, 'k', lw=1)
DeltaF = frequencies - F0
sinc = np.sinc(DeltaF*duration)  # np.sinc(x) = sin(pi x)/(pi x); avoids 0/0 at DeltaF = 0
A = np.abs((np.max(twoF)-4)*sinc**2 + 4)
ax.plot(frequencies, A, '-r', lw=1)
ax.set_ylabel(r'$\widetilde{2\mathcal{F}}$')
ax.set_xlabel('Frequency')
ax.set_xlim(F0s[0], F0s[1])
dF0 = np.sqrt(12*1)/(np.pi*duration)
xticks = [F0-10*dF0, F0, F0+10*dF0]
ax.set_xticks(xticks)
xticklabels = [r'$f_0 {-} 10\Delta f$', r'$f_0$', r'$f_0 {+} 10\Delta f$']
ax.set_xticklabels(xticklabels)
plt.tight_layout()
fig.savefig('{}/{}_1D.png'.format(search.outdir, search.label), dpi=300)
import pyfstat
import numpy as np
import matplotlib.pyplot as plt
sqrtSX = 1e-22
tstart = 1000000000
duration = 100*86400
tend = tstart+duration
# Define parameters of the Crab pulsar as an example
tref = .5*(tstart + tend)
F0 = 30.0
F1 = -1e-10
F2 = 0
Alpha = 5e-3
Delta = 6e-2
# Signal strength
depth = 10
h0 = sqrtSX / depth
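# Search box in F0 and F1: VF0 and VF1 give the box widths in units of the
# grid spacings dF0 and dF1 defined below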
VF0 = VF1 = 200
dF0 = np.sqrt(3)/(np.pi*duration)
dF1 = np.sqrt(45/4.)/(np.pi*duration**2)
DeltaF0 = VF0 * dF0
DeltaF1 = VF1 * dF1
# Next, taking the same signal parameters, we include a glitch half way through
dtglitch = duration/2.0
delta_F0 = 0.25*DeltaF0
delta_F1 = -0.1*DeltaF1
glitch_data = pyfstat.Writer(
label='single_glitch', outdir='data', tref=tref, tstart=tstart, F0=F0,
F1=F1, F2=F2, duration=duration, Alpha=Alpha, Delta=Delta, h0=h0,
sqrtSX=sqrtSX, dtglitch=dtglitch, delta_F0=delta_F0, delta_F1=delta_F1)
glitch_data.make_data()
F0s = [F0-DeltaF0/2., F0+DeltaF0/2., 1*dF0]
F1s = [F1-DeltaF1/2., F1+DeltaF1/2., 1*dF1]
F2s = [F2]
Alphas = [Alpha]
Deltas = [Delta]
search = pyfstat.GridSearch(
'single_glitch_F0F1_grid', 'data', 'data/*single_glitch*sft', F0s, F1s,
F2s, Alphas, Deltas, tref, tstart, tend)
search.run()
search.plot_2D('F0', 'F1')
theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
'upper': F0+DeltaF0/2},
'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
'upper': F1+DeltaF1/2},
'F2': F2,
'Alpha': Alpha,
'Delta': Delta
}
ntemps = 3
log10temperature_min = -0.05
nwalkers = 100
nsteps = [500, 500]
mcmc = pyfstat.MCMCSearch(
'single_glitch', 'data', sftfilepath='data/*_single_glitch*.sft',
theta_prior=theta_prior, tref=tref, minStartTime=tstart, maxStartTime=tend,
nsteps=nsteps, nwalkers=nwalkers, ntemps=ntemps,
log10temperature_min=log10temperature_min)
mcmc.run()
mcmc.plot_corner(figsize=(3.2, 3.2))
mcmc.print_summary()
theta_prior = {'F0': {'type': 'unif', 'lower': F0-DeltaF0/2.,
'upper': F0+DeltaF0/2},
'F1': {'type': 'unif', 'lower': F1-DeltaF1/2.,
'upper': F1+DeltaF1/2},
'F2': F2,
'Alpha': Alpha,
'Delta': Delta,
'tglitch': {'type': 'unif', 'lower': tstart+0.1*duration,
'upper': tend-0.1*duration},
'delta_F0': {'type': 'halfnorm', 'loc': 0, 'scale': DeltaF0},
'delta_F1': {'type': 'norm', 'loc': 0, 'scale': DeltaF1},
}
ntemps = 3
log10temperature_min = -0.1
nwalkers = 100
nsteps = [1000, 1000]
glitch_mcmc = pyfstat.MCMCGlitchSearch(
'single_glitch_glitchSearch', 'data',
sftfilepath='data/*_single_glitch*.sft', theta_prior=theta_prior,
tref=tref, minStartTime=tstart, maxStartTime=tend, nsteps=nsteps,
nwalkers=nwalkers, ntemps=ntemps,
log10temperature_min=log10temperature_min)
glitch_mcmc.run()
glitch_mcmc.plot_corner(figsize=(6, 6))
glitch_mcmc.print_summary()
import pyfstat
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('thesis')
F0 = 30.0
F1 = -1e-10
F2 = 0
Alpha = 5e-3
Delta = 6e-2
tstart = 1000000000
duration = 100*86400
data_tstart = tstart - duration
data_tend = data_tstart + 3*duration
tref = .5*(data_tstart+data_tend)
h0 = 1e-23
sqrtSX = 1e-22
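# The transient signal is injected at a sensitivity depth sqrtSX / h0 = 10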
transient = pyfstat.Writer(
label='transient', outdir='data', tref=tref, tstart=tstart, F0=F0, F1=F1,
F2=F2, duration=duration, Alpha=Alpha, Delta=Delta, h0=h0, sqrtSX=sqrtSX,
minStartTime=data_tstart, maxStartTime=data_tend)
transient.make_data()
print(transient.predict_fstat())
DeltaF0 = 6e-7
DeltaF1 = 1e-13
VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
theta_prior = {'F0': {'type': 'unif',
'lower': F0-DeltaF0/2.,
'upper': F0+DeltaF0/2.},
'F1': {'type': 'unif',
'lower': F1-DeltaF1/2.,
'upper': F1+DeltaF1/2.},
'F2': F2,
'Alpha': Alpha,
'Delta': Delta
}
ntemps = 3
log10temperature_min = -1
nwalkers = 100
nsteps = [750, 250]
mcmc = pyfstat.MCMCSearch(
label='transient_search_initial_stage', outdir='data',
sftfilepath='data/*transient*sft', theta_prior=theta_prior, tref=tref,
minStartTime=data_tstart, maxStartTime=data_tend, nsteps=nsteps,
nwalkers=nwalkers, ntemps=ntemps,
log10temperature_min=log10temperature_min)
mcmc.run()
mcmc.plot_cumulative_max()
mcmc.print_summary()
theta_prior = {'F0': {'type': 'unif',
'lower': F0-DeltaF0/2.,
'upper': F0+DeltaF0/2.},
'F1': {'type': 'unif',
'lower': F1-DeltaF1/2.,
'upper': F1+DeltaF1/2.},
'F2': F2,
'Alpha': Alpha,
'Delta': Delta,
'transient_tstart': {'type': 'unif',
'lower': data_tstart,
'upper': data_tend},
'transient_duration': {'type': 'halfnorm',
'loc': 0,
'scale': 0.5*duration}
}
nwalkers = 500
nsteps = [200, 200]
mcmc = pyfstat.MCMCTransientSearch(
label='transient_search', outdir='data',
sftfilepath='data/*transient*sft', theta_prior=theta_prior, tref=tref,
minStartTime=data_tstart, maxStartTime=data_tend, nsteps=nsteps,
nwalkers=nwalkers, ntemps=ntemps,
log10temperature_min=log10temperature_min)
mcmc.run()
mcmc.plot_corner()
mcmc.print_summary()
\begin{tabular}{c|cccccc}
Stage & $\Nseg$ & $\Tcoh$ [days] & $\Nsteps$ & $\V$ & $\Vsky$ & $\Vpe$ \\ \hline
0 & 20 & 5.0 & 50 & $2{\times}10^{2}$ & 10.0 & 10.0 \\
1 & 11 & 9.1 & 50 & $2{\times}10^{3}$ & 40.0 & 50.0 \\
2 & 6 & 16.7 & 50 & $2{\times}10^{4}$ & $1{\times}10^{2}$ & $2{\times}10^{2}$ \\
3 & 3 & 33.3 & 50 & $1{\times}10^{5}$ & $2{\times}10^{2}$ & $6{\times}10^{2}$ \\
4 & 1 & 100.0 & 50,50 & $8{\times}10^{5}$ & $3{\times}10^{2}$ & $2{\times}10^{3}$ \\
\end{tabular}
@@ -413,3 +413,54 @@ volume = {91},
year = {2015}
}
@article{wette2012,
arxivId = {1111.5650},
author = {Wette, Karl},
doi = {10.1103/PhysRevD.85.042003},
eprint = {1111.5650},
isbn = {0556-2821},
issn = {15507998},
journal = {Physical Review D},
number = {4},
title = {{Estimating the sensitivity of wide-parameter-space searches for gravitational-wave pulsars}},
volume = {85},
year = {2012}
}
@article{melatos2008,
archivePrefix = {arXiv},
arxivId = {0710.1021},
author = {Melatos, A. and Peralta, C. and Wyithe, J. S. B.},
doi = {10.1086/523349},
eprint = {0710.1021},
issn = {0004-637X},
journal = {The Astrophysical Journal},
keywords = {dense matter — pulsars: general — stars: interiors},
pages = {1103},
title = {{Avalanche dynamics of radio pulsar glitches}},
url = {http://arxiv.org/abs/0710.1021},
volume = {672},
year = {2008}
}
@article{espinoza2011,
archivePrefix = {arXiv},
arxivId = {1102.1743},
author = {Espinoza, Crist{\'{o}}bal and Lyne, Andrew and Stappers, Ben and Kramer, Michael},
doi = {10.1063/1.3615093},
eprint = {1102.1743},
isbn = {9780735409156},
issn = {0094243X},
journal = {AIP Conference Proceedings},
keywords = {general,glitches,pulsars,stars: neutron},
pages = {117--120},
title = {{Glitches in the rotation of pulsars}},
volume = {1357},
year = {2011}
}
@@ -3,6 +3,10 @@
\newcommand{\A}{\boldsymbol{\mathcal{A}}}
\newcommand{\blambda}{\boldsymbol{\mathbf{\lambda}}}
\newcommand{\blambdaSignal}{\boldsymbol{\mathbf{\lambda}}^{\rm s}}
\newcommand{\tglitch}{t_{\rm glitch}}
\newcommand{\tstart}{t_{\rm start}}
\newcommand{\tend}{t_{\rm end}}
\newcommand{\Nglitches}{N_{\rm glitches}}
\newcommand{\Tspan}{T_{\rm span}}
\newcommand{\Tcoh}{T_{\rm coh}}
\newcommand{\tref}{t_{\rm ref}}
\begin{tabular}{c|cccc}
Stage & $\Nseg$ & $\Tcoh$ [days] & $\Nsteps$ & $\Vpe$ \\ \hline
0 & 20 & 5.0 & 20 & 10.0 \\
1 & 7 & 14.3 & 20 & $1{\times}10^{2}$ \\
2 & 2 & 50.0 & 20 & $1{\times}10^{3}$ \\
3 & 1 & 100.0 & 20,20 & $2{\times}10^{3}$ \\
\end{tabular}
Paper/grided_frequency_search_1D.png: image updated (46.9 KB -> 42.1 KB).
@@ -296,6 +296,7 @@ computed analytically as
\int
\mathcal{L}(x ;\A, \blambda)
P(\A| \Hs, I) d\A
\label{eqn_B_S_N}
\\
& = \frac{C (2\pi)^{2} e^{\F(x| \blambda)}}
{\sqrt{\textrm{det} \mathcal{M}}},
@@ -790,6 +791,13 @@ fraction of the volume at adjacent stages. That is we define
\end{equation}
where $\mathcal{R} \ge 1$ as $\Tcoh^{i+1} > \Tcoh^{i}$.
For the MCMC simulations to be successful, this initial bounding
box, given the segment setup which produced the candidate, must be small
compared to the width of the signal (at that segment setup). If we start our
follow-up with the same search setup used in the source search, i.e. we set
$\Nseg^0$ equal to the number of segments used in the input search, then for
the MCMC simulation to work, we require that $\V(\Nseg^{0}) \sim \mathcal{O}(100)$.
Given a fixed prior on the Doppler parameters and a fixed span of data, the
metric volume $\V_{\Nstages}$ for $\Tcoh^{\Nstages} = \Tspan$ is fixed, or in
terms of the number of segments, $\Nseg^{\Nstages} = 1$.
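As a rough illustration (assuming the ratio $\mathcal{R}$ is held exactly
fixed between stages), the volumes at successive stages then follow a
geometric ladder
\begin{equation}
\V(\Nseg^{i}) \approx \V^{\rm min}\,\mathcal{R}^{i},
\end{equation}
so that with $\V^{\rm min}=100$ and $\mathcal{R}=10$ the stages have
$\V \approx 10^{2}, 10^{3}, 10^{4}, \ldots$ until $\Tcoh = \Tspan$
(equivalently $\Nseg = 1$) is reached.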
@@ -824,12 +832,12 @@ in the noise.
First, we must define the setup for the run. Using $\mathcal{R}=10$ and
$\V^{\rm min}=100$, our optimisation procedure is run and proposes the setup
laid out in Table~\ref{tab_follow_up_run_setup}. In addition, we show the
number of steps taken at each stage.
\begin{table}[htb]
\caption{The search setup used in Figure~\ref{fig_follow_up}, generated with
$\mathcal{R}=10$ and $\V^{\rm min}=100$.}
\label{tab_follow_up_run_setup}
\input{follow_up_run_setup}
\end{table}
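For concreteness, the accompanying follow-up script encodes such a setup as a
list of per-stage tuples (cf. the run_setup fragment above); a minimal sketch
of that format is given below. Our reading is that each entry holds the MCMC
step counts, the number of segments, and a flag, and the call through which
the list is consumed is an assumption on our part.
nsteps = 20
# Each stage: ((burn-in steps, production steps), Nseg, flag) -- our interpretation
run_setup = [((nsteps, 0), 20, False),
             ((nsteps, 0), 7, False),
             ((nsteps, 0), 2, False)]
# The final fully-coherent stage (Nseg = 1) is omitted here; see the stage
# tables. Hypothetically, the list would then be passed to the follow-up
# search, e.g. mcmc.run(run_setup=run_setup).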
@@ -842,7 +850,7 @@ found to result in the MCMC simulations `loosing' the peaks between stages, we
conservatively opt for 10 here, but values as large as 100 were also successful.
In Figure~\ref{fig_follow_up} we show the progress of the MCMC sampler during
the follow-up. As expected from Table~\ref{tab_follow_up_run_setup}, during
the initial stage the signal peak is broad with respect to the size of the
prior volume; therefore, the MCMC simulation quickly converges to it. Subsequently,
each time the number of segments is reduced, the peak narrows and the samplers
@@ -855,9 +863,8 @@ Eventually, the walkers all converge to the true signal.
\caption{In the top three panels we show the progress of the 500 parallel
walkers (see Figure~\ref{fig_MCMC_simple_example} for a description) during the
MCMC simulation for each of the search parameters, frequency $f$,
right-ascension $\alpha$ and declination $\delta$. Each vertical dashed line
indicates the start of a new stage of the search; the parameters for all stages
are listed in Table~\ref{tab_follow_up_run_setup}.}
\label{fig_follow_up}
\end{figure}
@@ -869,52 +876,89 @@ chains explore the other `noise peaks' in the data.
\section{Monte Carlo studies}
In order to understand how well the MCMC follow-up method works, we will test
its ability to successfully identify simulated signals in Gaussian noise. This
will be done in a Monte Carlo study, with independent random realisations of
the Gaussian noise, amplitude, and Doppler parameters in suitable ranges. Such
a method is analogous to the studies performed in \citet{shaltev2013}, except
that we present results as a function of the fixed injected signal depth,
rather than the squared SNR.
In particular, we will generate a number of 100-day data sets containing
independent realisations of Gaussian noise and a simulated CW signal. We choose
the parameters of the signal in such a way as to model the candidates generated
from directed and all-sky searches by drawing the signal parameters from
appropriate distributions. However, we do not draw $h_0$ randomly, but instead
run the MC study at a number of selected values chosen such that, given the
fixed $\sqrt{S_n}=2\times10^{3}$, the signals are injected with a depth
$\mathcal{D} \in [100, 400]$. To simulate an isotropic distribution of
sources, we draw the remaining amplitude parameters for each signal uniformly
from $\phi \in [0, 2\pi]$, $\psi \in [-\pi/4, \pi/4]$, and $\cos\iota \in [-1,
1]$.
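To make these draws concrete, a minimal sketch of how one realisation's
amplitude parameters could be generated (illustrative only: the variable names
are ours, and we substitute the noise level used in the accompanying example
scripts for the fixed noise level quoted above):
import numpy as np

sqrtSX = 1e-23                    # noise level from the example scripts (assumed)
depths = [100, 200, 300, 400]     # selected sensitivity depths D = sqrtSX / h0

for depth in depths:
    h0 = sqrtSX / depth
    # isotropic amplitude-parameter draws, as described in the text
    phi = np.random.uniform(0, 2*np.pi)
    psi = np.random.uniform(-np.pi/4, np.pi/4)
    cosi = np.random.uniform(-1, 1)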
To provide a reference, we will compare the MC follow-up study against the
expected maximum theoretical detection probability for an infinitely dense
fully-coherent search of data containing isotropically-distributed signals, as
calculated by \citet{wette2012}. Note, however, that we will parameterise with
respect to the signal depth (i.e. using Equation~(3.8) of \citet{wette2012} to
relate the averaged SNR to the depth). The probability is maximal in the
sense that signals are `lost' during the follow-up simply because they are not
sufficiently strong to rise above the noise.
\subsection{Follow-up candidates from a directed search}
\label{sec_directed_follow_up}
In a directed search, the sky location parameters $\alpha$ and $\delta$ are
fixed; in our study we fix them to the location of the Crab pulsar, but this
choice is arbitrary and holds no particular significance. The output of a
semi-coherent gridded directed search would provide the grid-point with the
highest detection statistic, and a box bounding the candidate at some level of
uncertainty. We will assume in this section that, given the search setup
$\Nseg^{0}$ of the input search, the bounding box is sufficiently small to
ensure that $\V(\Nseg^0)\sim \mathcal{O}(100)$. This is not a limiting
assumption, as any search can (quite cheaply) increase the density of grid
points around any interesting candidates in order to better constrain their
uncertainty.
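As a rough numerical check of this requirement (a sketch using the
fully-coherent volume expressions from the example scripts; extending this to
the per-segment volume $\V(\Nseg^{0})$ is not attempted here), the bounding-box
widths used in the fully-coherent example already give a volume of order 100:
import numpy as np

Tspan = 100*86400        # 100 days of data, as in the examples
DeltaF0 = 1e-7           # bounding-box widths from the fully-coherent example
DeltaF1 = 1e-13

# per-dimension metric volumes, same expressions as in the example scripts
VF0 = (np.pi * Tspan * DeltaF0)**2 / 3.0
VF1 = (np.pi * Tspan**2 * DeltaF1)**2 * 4/45.
print('V = {:1.2e}'.format(VF0*VF1))  # ~1.2e2 for these widths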
The behaviour of the follow-up is independent of the exact frequency and
spin-down values used (this is not true for an all-sky follow-up as discussed
in Section~\ref{sec_all_sky_follow_up}). As such, we can, without loss of
generality, define our Monte Carlo follow-up in the following way. First, we
select an arbitrary frequency and spindown of $f_0=30$~Hz and
$\dot{f}_0=10^{-10}$~Hz/s and a surrounding uncertainty box $\Delta f$ and
$\Delta \dot{f}$ chosen such that the uncertainties in frequency and spindown are
roughly equivalent in terms of mismatch. Then, we pick a candidate uniformly