Commit e14d497b authored by Gregory Ashton

Adds initial results from AllSky MC study

Also adds data from directed study
parent 7597931b
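# Monte-Carlo injection-and-recovery script for the all-sky study; presumably
# the generate_data.py invoked once per job by repeat.sh further down this diff.
# Usage: python generate_data.py <ID> <outdir>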
import pyfstat
import numpy as np
import os
import sys
ID = sys.argv[1]
outdir = sys.argv[2]
label = 'run_{}'.format(ID)
data_label = '{}_data'.format(label)
results_file_name = '{}/MCResults_{}.txt'.format(outdir, ID)
# Properties of the GW data
sqrtSX = 2e-23
tstart = 1000000000
Tspan = 100*86400
tend = tstart + Tspan
# Fixed properties of the signal
F0_center = 30
F1_center = 1e-10
F2 = 0
tref = .5*(tstart+tend)
VF0 = VF1 = 100
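# Prior widths on F0 and F1; the Tspan scalings follow the fully-coherent
# phase metric, so VF0 and VF1 give the prior widths in metric units
# (interpretation inferred from the variable names).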
DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
depths = np.linspace(100, 400, 7)
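# Follow-up ladder: each stage specifies the MCMC steps (presumably burn-in
# and production) and the number of semi-coherent segments, decreasing from
# 27 segments to a single fully-coherent stage; cf. the table at the end of
# this diff.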
run_setup = [((100, 0), 27, False),
             ((100, 0), 15, False),
             ((100, 0), 8, False),
             ((100, 0), 4, False),
             ((50, 50), 1, False)]
DeltaAlpha = 0.05
DeltaDelta = 0.05
for depth in depths:
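    # One injection per depth: h0 is set from the sensitivity depth
    # D = sqrtSX/h0, and the injected (F0, F1) are drawn uniformly (via
    # sqrt(r) and a random angle) from an ellipse around (F0_center, F1_center).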
    h0 = sqrtSX / float(depth)
    r = np.random.uniform(0, 1)
    theta = np.random.uniform(0, 2*np.pi)
    F0 = F0_center + 3*np.sqrt(r)*np.cos(theta)/(np.pi**2 * Tspan**2)
    F1 = F1_center + 45*np.sqrt(r)*np.sin(theta)/(4*np.pi**2 * Tspan**4)
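    # Isotropic sky position; the DeltaAlpha x DeltaDelta search box is placed
    # at a random offset around the injection, and the nuisance parameters
    # psi, phi, cosi are drawn from their full ranges.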
    Alpha = np.random.uniform(0, 2*np.pi)
    Delta = np.arccos(2*np.random.uniform(0, 1)-1)-np.pi/2
    fAlpha = np.random.uniform(0, 1)
    Alpha_min = Alpha - DeltaAlpha*(1-fAlpha)
    Alpha_max = Alpha + DeltaAlpha*fAlpha
    fDelta = np.random.uniform(0, 1)
    Delta_min = Delta - DeltaDelta*(1-fDelta)
    Delta_max = Delta + DeltaDelta*fDelta
    psi = np.random.uniform(-np.pi/4, np.pi/4)
    phi = np.random.uniform(0, 2*np.pi)
    cosi = np.random.uniform(-1, 1)
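    # Generate SFTs for H1 and L1 containing the injection, and compute the
    # perfectly-matched expectation of twoF for reference.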
    data = pyfstat.Writer(
        label=data_label, outdir=outdir, tref=tref,
        tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
        Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
        detector='H1,L1')
    data.make_data()
    predicted_twoF = data.predict_fstat()
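    # Uniform priors centred on the injection in F0 and F1 (widths DeltaF0,
    # DeltaF1) and covering the sky box; F2 is held fixed.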
    theta_prior = {'F0': {'type': 'unif',
                          'lower': F0-DeltaF0/2.,
                          'upper': F0+DeltaF0/2.},
                   'F1': {'type': 'unif',
                          'lower': F1-DeltaF1/2.,
                          'upper': F1+DeltaF1/2.},
                   'F2': F2,
                   'Alpha': {'type': 'unif',
                             'lower': Alpha_min,
                             'upper': Alpha_max},
                   'Delta': {'type': 'unif',
                             'lower': Delta_min,
                             'upper': Delta_max},
                   }
    ntemps = 1
    log10temperature_min = -1
    nwalkers = 100
    mcmc = pyfstat.MCMCFollowUpSearch(
        label=label, outdir=outdir,
        sftfilepath='{}/*{}*sft'.format(outdir, data_label),
        theta_prior=theta_prior,
        tref=tref, minStartTime=tstart, maxStartTime=tend,
        nwalkers=nwalkers, ntemps=ntemps,
        log10temperature_min=log10temperature_min)
    mcmc.run(run_setup=run_setup, create_plots=False, log_table=False,
             gen_tex_table=False)
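    # Append one line per injection: depth, h0, the offsets of the best-fit
    # (F0, F1) from the injection, the predicted twoF and the recovered twoF.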
    d, maxtwoF = mcmc.get_max_twoF()
    dF0 = F0 - d['F0']
    dF1 = F1 - d['F1']
    with open(results_file_name, 'a') as f:
        f.write('{} {:1.8e} {:1.8e} {:1.8e} {:1.8e} {:1.8e}\n'
                .format(depth, h0, dF0, dF1, predicted_twoF, maxtwoF))
    os.system('rm {}/*{}*'.format(outdir, label))
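# Single follow-up example at fixed injection parameters (label 'AllSky'),
# run with the default diagnostic output; the stage table at the end of this
# diff presumably comes from this run.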
import pyfstat
import numpy as np
outdir = 'data'
label = 'AllSky'
data_label = '{}_data'.format(label)
# Properties of the GW data
sqrtSX = 2e-23
tstart = 1000000000
Tspan = 100*86400
tend = tstart + Tspan
# Fixed properties of the signal
F0_center = 30
F1_center = 1e-10
F2 = 0
tref = .5*(tstart+tend)
VF0 = VF1 = 100
DeltaF0 = VF0 * np.sqrt(3)/(np.pi*Tspan)
DeltaF1 = VF1 * np.sqrt(45/4.)/(np.pi*Tspan**2)
depths = np.linspace(100, 400, 7)
run_setup = [((100, 0), 27, False),
             ((100, 0), 15, False),
             ((100, 0), 8, False),
             ((100, 0), 4, False),
             ((50, 50), 1, False)]
DeltaAlpha = 0.05
DeltaDelta = 0.05
depth = 100
h0 = sqrtSX / float(depth)
F0 = F0_center
F1 = F1_center
Alpha = 0
Delta = 0
Alpha_min = Alpha - DeltaAlpha/2
Alpha_max = Alpha + DeltaAlpha/2
Delta_min = Delta - DeltaDelta/2
Delta_max = Delta + DeltaDelta/2
psi = 0
phi = 0
cosi = 0
data = pyfstat.Writer(
    label=data_label, outdir=outdir, tref=tref,
    tstart=tstart, F0=F0, F1=F1, F2=F2, duration=Tspan, Alpha=Alpha,
    Delta=Delta, h0=h0, sqrtSX=sqrtSX, psi=psi, phi=phi, cosi=cosi,
    detector='H1,L1')
data.make_data()
predicted_twoF = data.predict_fstat()
theta_prior = {'F0': {'type': 'unif',
                      'lower': F0-DeltaF0/2.,
                      'upper': F0+DeltaF0/2.},
               'F1': {'type': 'unif',
                      'lower': F1-DeltaF1/2.,
                      'upper': F1+DeltaF1/2.},
               'F2': F2,
               'Alpha': {'type': 'unif',
                         'lower': Alpha_min,
                         'upper': Alpha_max},
               'Delta': {'type': 'unif',
                         'lower': Delta_min,
                         'upper': Delta_max},
               }
ntemps = 1
log10temperature_min = -1
nwalkers = 100
mcmc = pyfstat.MCMCFollowUpSearch(
    label=label, outdir=outdir,
    sftfilepath='{}/*{}*sft'.format(outdir, data_label),
    theta_prior=theta_prior,
    tref=tref, minStartTime=tstart, maxStartTime=tend,
    nwalkers=nwalkers, ntemps=ntemps,
    log10temperature_min=log10temperature_min)
mcmc.run(run_setup=run_setup)
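# Plotting script: reads the collected MCResults.txt, computes the fraction of
# injections recovered above a twoF threshold (with binomial confidence
# intervals from octave), and compares it with the theoretical maximum from
# DetectionProbabilityStackSlide.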
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
from oct2py import octave
plt.style.use('paper')
Tspan = 100 * 86400
def Recovery(Tspan, Depth, twoFstar=60, detectors='H1,L1'):
    numDetectors = len(detectors.split(','))
    cmd = ("DetectionProbabilityStackSlide('Nseg', 1, 'Tdata', {},"
           "'misHist', createDeltaHist(0), 'avg2Fth', {}, 'detectors', '{}',"
           "'Depth', {})"
           ).format(numDetectors*Tspan, twoFstar, detectors, Depth)
    return octave.eval(cmd, verbose=False)


def binomialConfidenceInterval(N, K, confidence=0.95):
    cmd = '[fLow, fUpper] = binomialConfidenceInterval({}, {}, {})'.format(
        N, K, confidence)
    [l, u] = octave.eval(cmd, verbose=False, return_both=True)[0].split('\n')
    return float(l.split('=')[1]), float(u.split('=')[1])
results_file_name = 'MCResults.txt'
df = pd.read_csv(
    results_file_name, sep=' ', names=['depth', 'h0', 'dF0', 'dF1',
                                       'twoF_predicted', 'twoF'])
twoFstar = 60
depths = np.unique(df.depth.values)
recovery_fraction = []
recovery_fraction_CI = []
for d in depths:
    twoFs = df[df.depth == d].twoF.values
    N = len(twoFs)
    K = np.sum(twoFs > twoFstar)
    print(d, N, K)
    recovery_fraction.append(K/float(N))
    [fLower, fUpper] = binomialConfidenceInterval(N, K)
    recovery_fraction_CI.append([fLower, fUpper])
yerr = np.abs(recovery_fraction - np.array(recovery_fraction_CI).T)
fig, ax = plt.subplots()
ax.errorbar(depths, recovery_fraction, yerr=yerr, fmt='sk', marker='s', ms=2,
            capsize=1, capthick=0.5, elinewidth=0.5,
            label='Monte-Carlo result')
fname = 'analytic_data.txt'
if os.path.isfile(fname):
    depths_smooth, recovery_analytic = np.loadtxt(fname)
else:
    depths_smooth = np.linspace(10, 550, 100)
    recovery_analytic = []
    for d in tqdm(depths_smooth):
        recovery_analytic.append(Recovery(Tspan, d, twoFstar))
    np.savetxt(fname, np.array([depths_smooth, recovery_analytic]))
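# Extend the analytic curve to zero depth, where the recovery fraction is 1.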
depths_smooth = np.concatenate(([0], depths_smooth))
recovery_analytic = np.concatenate(([1], recovery_analytic))
ax.plot(depths_smooth, recovery_analytic, '-k', label='Theoretical maximum')
ax.set_ylim(0, 1.05)
ax.set_xlabel(r'Signal depth', size=10)
ax.set_ylabel(r'Recovered fraction', size=10)
ax.legend(loc=1, frameon=False)
fig.tight_layout()
fig.savefig('allsky_recovery.png')
#!/bin/bash
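# Condor job script (presumably repeat.sh, cf. the submit file below): runs 10
# Monte-Carlo injections on the node-local disk, then copies the results back
# to the shared CollectedOutput directory.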
. /home/gregory.ashton/lalsuite-install/etc/lalapps-user-env.sh
export PATH="/home/gregory.ashton/anaconda2/bin:$PATH"
export MPLCONFIGDIR=/home/gregory.ashton/.config/matplotlib
rm /local/user/gregory.ashton/MCResults*txt
for ((n=0; n<10; n++))
do
    /home/gregory.ashton/anaconda2/bin/python generate_data.py "$1" /local/user/gregory.ashton --quite --no-template-counting
done
cp /local/user/gregory.ashton/MCResults*txt /home/gregory.ashton/PyFstat/Paper/AllSkyMC/CollectedOutput
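# HTCondor submit file: queue 10 jobs, each running repeat.sh with a unique
# Cluster_Process tag as its argument.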
Executable = repeat.sh
Arguments = $(Cluster)_$(Process)
Universe = vanilla
Input = /dev/null
accounting_group = ligo.dev.o2.cw.explore.test
Output = CollectedOutput/out.$(Process)
Error = CollectedOutput/err.$(Process)
Log = CollectedOutput/log.$(Process)
request_cpus = 1
request_memory = 16 GB
Queue 10
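% Follow-up setup table (segments, coherence time, MCMC steps, and metric
% volumes per stage), presumably auto-generated by the MCMCFollowUpSearch run
% above.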
\begin{tabular}{c|cccccc}
Stage & $\Nseg$ & $\Tcoh^{\rm days}$ & $\Nsteps$ & $\V$ & $\Vsky$ & $\Vpe$ \\ \hline
0 & 27 & 3.7 & 100 & 1.0 & 1.0 & 1.0 \\
1 & 15 & 6.7 & 100 & 1.0 & 1.0 & 1.0 \\
2 & 8 & 12.5 & 100 & 1.0 & 1.0 & 1.0 \\
3 & 4 & 25.0 & 100 & 1.0 & 1.0 & 1.0 \\
4 & 1 & 100.0 & 50,50 & 1.0 & 1.0 & 1.0 \\
\end{tabular}