In [38]:
"""Generate ringdown templates in the time domain and perform parameter estimation on them.
"""
Out [38]:
'Generate ringdown templates in the time domain and perform parameter estimation on them.\n'
# Import the required modules and helper packages.
import time
import csv
import numpy as np
import corner
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import rc
from configparser import ConfigParser
plt.rcParams.update({'font.size': 16.5})
from multiprocessing import Pool
import random
import dynesty
from dynesty import plotting as dyplot
from dynesty.utils import resample_equal
from dynesty import utils as dyfunc
import os
import argparse
import scipy.optimize as optimization
from scipy.optimize import minimize
import rdown as rd
import rdown_pe as rd_pe
import rdown_utilities as rd_ut
import read_data as rdata
## Loading and running the data: tested with NR data
## Loading and running the data: tested with mock data
# Cell that reads the arguments from your 'config.ini' file.
try:
    parser = argparse.ArgumentParser(description="Simple argument parser")
    parser.add_argument("-c", action="store", dest="config_file")
    result = parser.parse_args()
    config_file = result.config_file
    parser = ConfigParser()
    parser.read(config_file)
    parser.sections()
except SystemExit:
    parser = ConfigParser()
    parser.read('./run_0_mock/config_n0_to_1_mock.ini')
    parser.sections()
usage: ipykernel_launcher.py [-h] [-c CONFIG_FILE]
ipykernel_launcher.py: error: unrecognized arguments: -f /work/francisco.jimenez/.local/share/jupyter/runtime/kernel-eb21fd9f-ea57-481c-b51d-230c5ef5a4a2.json
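For orientation, the configuration file is expected to contain at least the sections queried later in this notebook ('rd-model', 'setup' and 'rd-mock-parameters'). A minimal sketch of that layout, with placeholder values rather than the ones used in this run, could be written as:

from configparser import ConfigParser

example_cfg = ConfigParser()
example_cfg['rd-model'] = {'phase_alignment': 'False'}   # read below via parser.get('rd-model', 'phase_alignment')
example_cfg['setup'] = {'dlogz': '0.01'}                  # optional stopping criterion passed to dynesty
example_cfg['rd-mock-parameters'] = {'nm_mock': '1'}      # number of injected mock modes
with open('example_config.ini', 'w') as f:                # placeholder file name
    example_cfg.write(f)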
# Load variables from the config file
(simulation_path_1, simulation_path_2, metadata_file, simulation_number, output_folder,
 export, overwrite, sampler, nr_code, nbcores, tshift, tend, t_align,
 nmax, npoints, model, error_str, fitnoise, l_int, index_mass, index_spin,
 error_type, error_val, af, mf, tau_var_str, nm_mock) = rdata.read_config_file(parser)
# Show configuration options
dim = nmax+1
ndim = 4*dim
numbins = 32 #corner plot parameter - how many bins you want
print('model:',model)
print('nmax:',nmax)
print('nm_mock:',nm_mock)
print('tshift:',tshift)
print('error:', error_str)
print('error value:',error_val)
print('export:',export)
print('nr code:',nr_code)
print('fit noise:',fitnoise)
model: w-tau
nmax: 0
nm_mock: 1
tshift: 0.2
error: True
error value: 0.002
export: True
nr code: Mock-data
fit noise: False
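As a quick sanity check of the bookkeeping above: with dim = nmax+1 modes and four free parameters per mode (ndim = 4*dim in the cell above), the printed configuration (nmax = 0) gives a four-dimensional parameter space, consistent with the four best-fit values reported further below.

# Worked example of the dimension bookkeeping for the configuration printed above.
nmax_example = 0
dim_example = nmax_example + 1      # one mode (n = 0)
ndim_example = 4 * dim_example      # four free parameters in total
print(dim_example, ndim_example)    # -> 1 4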
# Create output directories
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
    print("Directory " , output_folder , " Created ")

if nr_code == 'Mock-data':
    nm_mock_str = 'rec_with' + parser.get('rd-mock-parameters', 'nm_mock') + '_'
else:
    nm_mock_str = ''

if error_str:
    output_folder_1 = (output_folder + '/' + model + '-nmax' + str(nmax) + '_' + nm_mock_str
                       + str(error_str) + '_' + str(error_type) + '_fitnoise_' + str(fitnoise))
else:
    output_folder_1 = (output_folder + '/' + model + '-nmax' + str(nmax) + '_' + nm_mock_str
                       + str(error_str) + '_' + 'fitnoise_' + str(fitnoise))

if not os.path.exists(output_folder_1):
    os.mkdir(output_folder_1)
    print("Directory " , output_folder_1 , " Created ")
# Define output files
pars = [simulation_number,model,nmax,tshift,npoints]
corner_plot = rdata.create_output_files(output_folder_1,pars,'corner_plot')
corner_plot_extra = rdata.create_output_files(output_folder_1,pars,'corner_plot_extra')
diagnosis_plot = rdata.create_output_files(output_folder_1,pars,'diagnosis')
fit_plot = rdata.create_output_files(output_folder_1,pars,'fit')
samples_file = rdata.create_output_files(output_folder_1,pars,'post_samples')
results_file = rdata.create_output_files(output_folder_1,pars,'sampler_results')
sumary_data = rdata.create_output_files(output_folder_1,pars,'log_z')
best_data = rdata.create_output_files(output_folder_1,pars,'best_vals')
files = [corner_plot,corner_plot_extra,diagnosis_plot,fit_plot,samples_file,results_file,sumary_data,best_data]
# Remove old files if overwrite = True
if overwrite:
    rd_ut.rm_files(files)
# Load the NR data, align them in time and resize. Plot the real part and the amplitude.
# Finally, compute the mismatch and the SNR estimate.
data = rdata.read_data(nr_code, simulation_path_1, RD=True, tshift=tshift, tend=tend, metadata_file=metadata_file, parser=parser)
data_l = rdata.read_data(nr_code, simulation_path_2, RD=True, tshift=tshift, tend=tend, metadata_file=metadata_file, parser=parser)
data_r, data_lr = rdata.nr_resize(data, data_l, tshift=tshift, tend=tend)
times_rd = data_r[:,0]

plt.figure(figsize=(12, 8))
plt.plot(times_rd, data_r[:,1].real, "r", alpha=0.3, lw=3, label=r'$Lev6$: real')
plt.plot(times_rd, np.sqrt((data_r[:,1].real)**2+(data_r[:,1].imag)**2), "r", alpha=0.3, lw=3, label=r'$Lev6\,amp$')
plt.plot(times_rd, data_lr[:,1].real, "b", alpha=0.3, lw=3, label=r'$Lev5$: real')
plt.plot(times_rd, np.sqrt((data_lr[:,1].real)**2+(data_lr[:,1].imag)**2), "b", alpha=0.3, lw=3, label=r'$Lev5\,amp$')
if error_str and error_val == 0:
    error = np.sqrt(data_r[:,1]*data_r[:,1] - 2*data_r[:,1]*data_lr[:,1] + data_lr[:,1]*data_lr[:,1])
    error_est = np.sqrt(error.imag**2 + error.real**2)
    plt.plot(times_rd, error_est, "g", alpha=0.3, lw=2, label='error')
plt.legend()

mismatch = 1 - rd_ut.EasyMatchT(times_rd, data_r[:,1], data_lr[:,1], tshift, tend)
error = np.sqrt(2*mismatch)
print('error estimate:', error)
print('mismatch:', mismatch)
print('snr:', rd_ut.EasySNRT(times_rd, data_r[:,1], data_lr[:,1], tshift, tend)/error**2)
/work/francisco.jimenez/venv/lib/python3.7/site-packages/numpy/core/_asarray.py:85: ComplexWarning: Casting complex values to real discards the imaginary part
  return array(a, dtype, copy=False, order=order)
/work/francisco.jimenez/venv/lib/python3.7/site-packages/ipykernel_launcher.py:19: RuntimeWarning: invalid value encountered in sqrt
error estimate: nan
mismatch: -2.220446049250313e-16
snr: nan
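The nan values above come from floating-point round-off: the two resolutions agree so well that the mismatch comes out marginally negative (-2.2e-16), so np.sqrt(2*mismatch) returns nan, and the SNR estimate, which divides by it, follows. A minimal guard, added here only for illustration and not part of the original pipeline, is to clip the mismatch at zero before taking the square root:

# Clip a tiny negative mismatch caused by round-off before converting it to an error estimate.
mismatch_clipped = max(mismatch.real, 0.0)
error_clipped = np.sqrt(2*mismatch_clipped)
print('error estimate (clipped):', error_clipped)   # 0.0 instead of nan for this data set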
# Phase alignment
if parser.has_option('rd-model', 'phase_alignment'):
    phase_alignment = eval(parser.get('rd-model', 'phase_alignment'))
else:
    phase_alignment = False

if phase_alignment:
    datar_al = rdata.phase_align(data_r, data_lr)
    gwdatanew5 = data_lr[:,1]
    gwdatanew = datar_al[:,1]
    timesrd_final = datar_al[:,0]
    mismatch = 1 - rd_ut.EasyMatchT(timesrd_final, gwdatanew, gwdatanew5, tshift, tend)
    error = np.sqrt(2*mismatch)
    print('error estimate:', error)
    print('mismatch:', mismatch)
    print('snr:', rd_ut.EasySNRT(timesrd_final, gwdatanew, gwdatanew5, tshift, tend)/error)
    if error_str:
        error = np.sqrt(gwdatanew*gwdatanew - 2*gwdatanew*gwdatanew5 + gwdatanew5*gwdatanew5)
        error_est = np.sqrt(error.imag**2 + error.real**2)
    else:
        error = 1
else:
    datar_al = data_r
    timesrd_final = datar_al[:,0]
# Test the new interpolated data
if error_str and error_val == 0:
    plt.figure(figsize=(12, 8))
    plt.plot(timesrd_final, datar_al[:,1].real, "r", alpha=0.3, lw=2, label='Original')
    plt.plot(timesrd_final, data_lr[:,1].real, "b", alpha=0.3, lw=2, label='Aligned')
    plt.plot(timesrd_final, error_est, "b", alpha=0.3, lw=2, label='error')
    plt.legend()
error estimate: nan
mismatch: -2.220446049250313e-16
snr: nan
/work/francisco.jimenez/venv/lib/python3.7/site-packages/ipykernel_launcher.py:13: RuntimeWarning: invalid value encountered in sqrt
del sys.path[0]
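A side note on the error estimate built in the error_str branch above: np.sqrt(g*g - 2*g*g5 + g5*g5) is just np.sqrt((g - g5)**2), so its modulus error_est equals |g - g5|, the pointwise magnitude of the residual between the two waveforms. An equivalent and numerically safer way to write it would be:

# Equivalent form of the residual-based error estimate used above.
error_est_alt = np.abs(gwdatanew - gwdatanew5)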
# Define the noise according to the noise configuration. Load the priors and set up the likelihood with rd_pe.Ringdown_PE.
if error_str and error_val == 0:
    error_final = error_est
    norm_factor = 100*len(error_final)/2*np.log(2*np.pi)
elif error_str and error_val != 0:
    datar_al[:,1] += random.uniform(0, error_val)
    datar_al[:,1] += 1j*random.uniform(0, error_val)
    error_tsh = error_val
    error_final = (error_tsh.real**2 + error_tsh.imag**2)
    norm_factor = 0
else:
    error_tsh = 1
    error_final = (error_tsh.real**2 + error_tsh.imag**2)
    norm_factor = 0

priors = rd_pe.load_priors(model, parser, nmax, fitnoise=fitnoise)
rdown = rd.Ringdown_Spectrum(mf, af, 2, 2, n=nmax, s=-2, time=timesrd_final)
rdown_pe = rd_pe.Ringdown_PE(rdown, datar_al, dim, priors, errors2=error_final, norm_factor=norm_factor, model=model, l_int=l_int)
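For readers without access to rdown_pe, the quantity maximised and sampled below is presumably a standard Gaussian (chi-square) log-likelihood over the complex ringdown residuals, weighted by errors2 and shifted by norm_factor. A minimal sketch under that assumption, not the package's actual implementation, is:

import numpy as np

def gaussian_log_likelihood(data, template, errors2, norm_factor=0.0):
    """Sketch of a Gaussian log-likelihood for complex time-domain residuals.

    data, template : complex arrays of the same length
    errors2        : per-sample noise variance (scalar or array)
    norm_factor    : constant normalisation term subtracted from the result
    """
    residual = data - template
    chi2 = np.sum((residual.real**2 + residual.imag**2) / errors2)
    return -0.5*chi2 - norm_factor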
# Get a first estimate by trying to fit the data.
nll = lambda *args: -rdown_pe.log_likelihood(*args)
if model == 'w-tau-fixed-m-af':
    if fitnoise:
        initial = np.concatenate((np.ones(2*dim), [0.8, 0.9, 1]))
    else:
        initial = np.concatenate((np.ones(2*dim), [0.8, 0.9]))
elif model == 'w-tau-fixed':
    if fitnoise:
        initial = np.concatenate((np.ones(2*dim), [0.2]))
    else:
        initial = np.ones(2*dim)
else:
    if fitnoise:
        initial = np.concatenate((np.ones(ndim), [1]))
    else:
        initial = np.ones(ndim)
soln = minimize(nll, initial, bounds=priors)
vars_ml = soln.x
print("best fit pars from fit: ", vars_ml)
best fit pars from fit: [ 0.2 10.52057151 0.08836181 6.28318531]
mypool = Pool(nbcores)
mypool.size = nbcores

start = time.process_time()
f2 = dynesty.NestedSampler(rdown_pe.log_likelihood, rdown_pe.prior_transform, len(priors), nlive=npoints, sample=sampler, pool=mypool)
if parser.has_option('setup', 'dlogz'):
    dlogz = float(parser.get('setup', 'dlogz'))
    f2.run_nested(dlogz=dlogz, print_progress=False)
else:
    f2.run_nested(print_progress=False)
print(time.process_time() - start)
46474it [21:02, 36.81it/s, +1000 | bound: 286 | nc: 1 | ncall: 1106811 | eff(%): 4.289 | loglstar: -inf < -959286.172 < inf | logz: -959328.014 +/- 0.280 | dlogz: 0.000 > 0.010]
645.863601611
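dynesty samples in a unit hypercube, so rdown_pe.prior_transform has to map each point u in [0,1]^ndim to physical parameter values. For uniform priors this is a simple rescaling; a minimal sketch, with made-up bounds rather than the priors loaded from the config file, looks like:

# Example prior transform for uniform priors; lo/hi are placeholder bounds for the 4 parameters.
lo = np.array([0.0, 0.0, 0.0, 0.0])
hi = np.array([1.0, 20.0, 1.0, 2*np.pi])

def prior_transform_example(u):
    """Map a unit-cube sample u (length ndim) onto the physical parameter ranges."""
    return lo + (hi - lo)*np.asarray(u)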
res = f2.results
res.samples_u.shape
res.summary()
samps = f2.results.samples
postsamps = rd_ut.posterior_samples(f2)
samps_tr = np.transpose(samps)
half_points = int(round(len(samps_tr[0])/1.25))
evidence = res.logz[-1]
evidence_error = res.logzerr[-1]
if export:
    rd_ut.save_object(res, results_file)
Summary
=======
nlive: 1000
niter: 46474
ncall: 1106811
eff(%): 4.289
logz: -959328.014 +/- 0.280
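For reference, a common way to turn a dynesty run into equally weighted posterior samples, which is presumably what rd_ut.posterior_samples does (the resample_equal import at the top points that way), is:

# Re-weight the nested-sampling points into equally weighted posterior samples.
weights = np.exp(res.logwt - res.logz[-1])
weights /= np.sum(weights)
postsamps_alt = resample_equal(res.samples, weights)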
pars = nmax, model, samps_tr, half_points
npamps = rd_ut.get_best_amps(pars, parser=parser, nr_code=nr_code)
if export:
    pars = simulation_number, nmax, tshift, evidence, evidence_error
    rd_ut.export_logz_files(sumary_data, pars)
labels = rd_ut.define_labels(dim, model, fitnoise)
if export:
    pars = tshift, len(priors), labels
    rd_ut.export_bestvals_files(best_data, postsamps, pars)
w, tau = rdown.QNM_spectrum()
pars = w, tau, mf, af, npamps
truths = rd_ut.get_truths(model, pars, fitnoise)
fg = corner.corner(postsamps, quantiles=[0.05, 0.5, 0.95], show_titles=True, max_n_ticks=4,
                   bins=50, truths=truths, labels=labels, truth_color='red')
plt.show()
if export:
    fg.savefig(corner_plot, format='png', bbox_inches='tight')
from importlib import reload
reload(rd_ut)
if model == 'w-tau-fixed-m-af' and export == True:
    truths = np.concatenate((w, tau))
    labels_mf = np.concatenate((w_lab, tau_lab))
    new_samples = rd_ut.convert_m_af_2_w_tau_post(res, fitnoise=False)
    figure = corner.corner(new_samples, truths=truths, quantiles=[0.05, 0.95], labels=labels_mf,
                           smooth=True, color='b', truth_color='r', show_titles=True)
    figure.savefig(corner_plot_extra, format='png', bbox_inches='tight')
#lnz_truth = ndim * -np.log(2 * 10.)  # analytic evidence solution
fig, axes = dyplot.runplot(res)
fig.tight_layout()
if export:
    fig.savefig(diagnosis_plot, format='png', dpi=384, bbox_inches='tight')
if export:
    dict_models = {'w-tau': rdown.rd_model_wtau, 'w-q': rdown.rd_model_wq,
                   'w-tau-fixed': rdown.rd_model_wtau_fixed, 'w-tau-fixed-m-af': rdown.rd_model_wtau_m_af}
    downfactor = 10  # NOTE: not defined anywhere in this notebook; assumed placeholder thinning factor, adjust as needed
    figband = plt.figure(figsize=(12, 9))
    plt.plot(datar_al[:,0].real, datar_al[:,1].real, "green", alpha=0.9, lw=3, label=r'$res_{240}$')
    onesig_bounds = np.array([np.percentile(postsamps[:, i], [5, 95]) for i in range(len(postsamps[0]))]).T
    samples_1sigma = filter(lambda sample: np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]), postsamps)
    samples_1sigma_down = list(samples_1sigma)[::downfactor]
    for sample in samples_1sigma_down:
        plt.plot(datar_al[:,0].real, dict_models[model](sample).real, "r-", alpha=0.1, lw=1)
    plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
    plt.legend()
    plt.xlabel('t')
    plt.ylabel('h')
    plt.show()
    figband.savefig(fit_plot)
if export:
    with open(samples_file, 'w') as file:
        writer = csv.writer(file)
        writer.writerow(labels)
        writer.writerows(samps[::downfactor])
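To load the exported posterior samples back later, something along these lines works (samples_file and labels as defined above):

# Read the header row and the thinned samples back from the CSV file written above.
with open(samples_file) as f:
    reader = csv.reader(f)
    header = next(reader)
    samples_back = np.array([[float(x) for x in row] for row in reader])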