Skip to content
Snippets Groups Projects
Commit b6bb8c60 authored by Francisco Jimenez Forteza's avatar Francisco Jimenez Forteza
Browse files

cleaning

parent 231ddd74
No related branches found
No related tags found
No related merge requests found
default: paper
clean:
rm -rf config*.ini run*
Source diff could not be displayed: it is too large. Options to address this: view the blob.
#!/usr/bin/env python
# coding: utf-8
# In[38]:
"""Generate ringdown templates in the time and perform parameter estimation on them.
"""
# In[1]:
# Import relevant modules, import data and all that
import time
import numpy as np
import corner
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import rc
from configparser import ConfigParser
plt.rcParams.update({'font.size': 16.5})
from multiprocessing import Pool
import random
import dynesty
from dynesty import plotting as dyplot
from dynesty.utils import resample_equal
from dynesty import utils as dyfunc
import os
import argparse
import scipy.optimize as optimization
from scipy.optimize import minimize
import rdown as rd
import rdown_pe as rd_pe
import rdown_utilities as rd_ut
import read_data as rdata
# In[2]:
## Loading and running data tested with NR data
## Loading and running data tested with Mock data
# In[3]:
# Cell that calls the arguments from your 'config.ini' file.
# The except branch is a fallback for interactive/notebook runs, where
# argparse sees unknown arguments and raises SystemExit: in that case a
# hard-coded mock-run config is loaded instead.
try:
    parser = argparse.ArgumentParser(description="Simple argument parser")
    parser.add_argument("-c", action="store", dest="config_file")
    result = parser.parse_args()
    config_file=result.config_file
    # `parser` is deliberately re-bound from the argparse parser to the INI parser.
    parser = ConfigParser()
    parser.read(config_file)
    parser.sections()
except SystemExit:
    parser = ConfigParser()
    parser.read('./run_0_mock/config_n0_to_1_mock.ini')
    parser.sections()
    pass
# In[4]:
# Load variables from config file
(simulation_path_1,simulation_path_2, metadata_file , simulation_number, output_folder,
export, overwrite, sampler,nr_code, nbcores,tshift,tend,t_align,
nmax , npoints, model, error_str, fitnoise, l_int, index_mass,index_spin,
error_type, error_val, af, mf,tau_var_str,nm_mock)=rdata.read_config_file(parser)
# In[5]:
# Show configuration options
dim = nmax+1   # number of ringdown modes: overtones 0..nmax
ndim = 4*dim   # free parameters of the generic model: (w, tau, amp, phase) per mode
numbins = 32 #corner plot parameter - how many bins you want
print('model:',model)
print('nmax:',nmax)
print('nm_mock:',nm_mock)
print('tshift:',tshift)
print('error:', error_str)
print('error value:',error_val)
print('export:',export)
print('nr code:',nr_code)
print('fit noise:',fitnoise)
# In[6]:
# Create output directories
# NOTE(review): os.mkdir requires the parent to exist and is race-prone;
# os.makedirs(..., exist_ok=True) would be more robust — confirm intent.
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
    print("Directory " , output_folder , " Created ")
if nr_code == 'Mock-data':
    nm_mock_str = 'rec_with'+parser.get('rd-mock-parameters','nm_mock')+'_'
else:
    nm_mock_str=''
# Output folder name encodes model, nmax, mock tag, error and noise-fit settings.
if error_str:
    output_folder_1=(output_folder+'/'+model+'-nmax'+str(nmax)+'_'+nm_mock_str+str(error_str)+'_'+str(error_type)+'_fitnoise_'+str(fitnoise))
else:
    output_folder_1=output_folder+'/'+model+'-nmax'+str(nmax)+'_'+nm_mock_str+str(error_str)+'_'+'fitnoise_'+str(fitnoise)
if not os.path.exists(output_folder_1):
    os.mkdir(output_folder_1)
    print("Directory " , output_folder_1 , " Created ")
# In[7]:
# Define output files
pars = [simulation_number,model,nmax,tshift,npoints]
corner_plot = rdata.create_output_files(output_folder_1,pars,'corner_plot')
corner_plot_extra = rdata.create_output_files(output_folder_1,pars,'corner_plot_extra')
diagnosis_plot = rdata.create_output_files(output_folder_1,pars,'diagnosis')
fit_plot = rdata.create_output_files(output_folder_1,pars,'fit')
samples_file = rdata.create_output_files(output_folder_1,pars,'post_samples')
results_file = rdata.create_output_files(output_folder_1,pars,'sampler_results')
sumary_data = rdata.create_output_files(output_folder_1,pars,'log_z')
best_data = rdata.create_output_files(output_folder_1,pars,'best_vals')
files = [corner_plot,corner_plot_extra,diagnosis_plot,fit_plot,samples_file,results_file,sumary_data,best_data]
# In[8]:
# Remove old files if overwrite = True
if overwrite:
    rd_ut.rm_files(files)
# In[46]:
#Load NR data, align in time and resize. Plot real part and amplitude. Finally compute the mismatch and the snr estimate
data = rdata.read_data(nr_code,simulation_path_1,RD=True,tshift=tshift,tend = tend,metadata_file=metadata_file,parser=parser)
data_l = rdata.read_data(nr_code,simulation_path_2,RD=True,tshift=tshift,tend = tend,metadata_file=metadata_file,parser=parser)
data_r, data_lr = rdata.nr_resize(data,data_l,tshift=tshift,tend=tend)
times_rd = data_r[:,0]
plt.figure(figsize = (12, 8))
plt.plot(times_rd, data_r[:,1].real, "r", alpha=0.3, lw=3, label=r'$Lev6$: real')
plt.plot(times_rd, np.sqrt((data_r[:,1].real)**2+(data_r[:,1].imag)**2), "r", alpha=0.3, lw=3, label=r'$Lev5\,amp$')
plt.plot(times_rd, data_lr[:,1].real, "b", alpha=0.3, lw=3, label=r'$Lev5: real$')
plt.plot(times_rd, np.sqrt((data_lr[:,1].real)**2+(data_lr[:,1].imag)**2), "b", alpha=0.3, lw=3, label=r'$Lev5\,amp$')
if error_str and error_val==0:
error = np.sqrt(data_r[:,1]*data_r[:,1]-2*data_r[:,1]*data_lr[:,1]+data_lr[:,1]*data_lr[:,1])
error_est=np.sqrt(error.imag**2+error.real**2)
plt.plot(times_rd, error_est, "g", alpha=0.3, lw=2, label='error')
plt.legend()
mismatch=1-rd_ut.EasyMatchT(times_rd,data_r[:,1],data_lr[:,1],tshift,tend)
error=np.sqrt(2*mismatch)
print('error estimate:',error)
print('mismatch:', mismatch)
print('snr:', rd_ut.EasySNRT(times_rd,data_r[:,1],data_lr[:,1],tshift,tend)/error**2)
# In[47]:
# Phase alignement
if parser.has_option('rd-model','phase_alignment'):
phase_alignment=eval(parser.get('rd-model','phase_alignment'))
else:
phase_alignment=False
if phase_alignment:
datar_al = rdata.phase_align(data_r,data_lr)
gwdatanew5 = data_lr[:,1]
gwdatanew = datar_al[:,1]
timesrd_final = datar_al[:,0]
mismatch=1-rd_ut.EasyMatchT(timesrd_final,gwdatanew,gwdatanew5,tshift,tend)
error=np.sqrt(2*mismatch)
print('error estimate:',error)
print('mismatch:', mismatch)
print('snr:', rd_ut.EasySNRT(timesrd_final,gwdatanew,gwdatanew5,tshift,tend)/error)
if error_str:
error = np.sqrt(gwdatanew*gwdatanew-2*gwdatanew*gwdatanew5+gwdatanew5*gwdatanew5)
error_est=np.sqrt(error.imag**2+error.real**2)
else :
error = 1
else:
datar_al = data_r
timesrd_final = datar_al[:,0]
#Test the new interpolated data
if error_str and error_val==0:
plt.figure(figsize = (12, 8))
plt.plot(timesrd_final, datar_al[:,1].real, "r", alpha=0.3, lw=2, label='Original')
plt.plot(timesrd_final, data_lr[:,1].real, "b", alpha=0.3, lw=2, label='Aligned')
plt.plot(timesrd_final, error_est, "b", alpha=0.3, lw=2, label='error')
plt.legend()
# In[48]:
# Define your noise depending on the noise configuration. Load priors and
# setup the likelihood with rd_pe.Ringdown_PE.
if error_str and error_val==0:
    # Use the resolution-difference error estimate as the noise level and
    # precompute the constant part of the Gaussian normalisation.
    error_final = error_est
    norm_factor = 100*len(error_final)/2*np.log(2*np.pi)
elif error_str and error_val!=0:
    # Inject one constant random offset (<= error_val) in each quadrature.
    datar_al[:,1]+=random.uniform(0, error_val)
    datar_al[:,1]+=1j*random.uniform(0, error_val)
    error_tsh = error_val
    error_final=(error_tsh.real**2+error_tsh.imag**2)
    norm_factor = 0
else:
    # No error model: unit variance.
    error_tsh=1
    error_final=(error_tsh.real**2+error_tsh.imag**2)
    norm_factor = 0
priors = rd_pe.load_priors(model,parser,nmax,fitnoise=fitnoise)
# Ringdown model for the (2,2) harmonic with overtones 0..nmax.
rdown=rd.Ringdown_Spectrum(mf,af,2,2,n=nmax,s=-2,time=timesrd_final)
rdown_pe = rd_pe.Ringdown_PE(rdown,datar_al,dim,priors,errors2=error_final,norm_factor=norm_factor,model=model,l_int=l_int)
# In[49]:
# Get a first estimate by trying to fit the data.
nll = lambda *args: -rdown_pe.log_likelihood(*args)
if model == 'w-tau-fixed-m-af':
    if fitnoise:
        # amplitudes+phases, then (mass, spin, noise) starting guesses.
        initial = np.concatenate((np.ones(2*dim),[0.8,0.9,1]))
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
    else:
        initial = np.concatenate((np.ones(2*dim),[0.8,0.9]))
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
elif model == 'w-tau-fixed':
    if fitnoise:
        initial = np.concatenate((np.ones(2*dim),[0.2]))
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
    else:
        initial = np.ones(2*dim)
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
else:
    # Generic models: 4 parameters per mode (plus optional noise parameter).
    if fitnoise:
        initial = np.concatenate((np.ones(ndim),[1]))
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
    else:
        initial = np.ones(ndim)
        soln = minimize(nll, initial,bounds=priors)
        vars_ml=soln.x
print("best fit pars from fit: ",vars_ml)
# In[50]:
# Run the dynesty nested sampler over the ringdown likelihood, using a
# multiprocessing pool with nbcores workers.
mypool = Pool(nbcores)
mypool.size = nbcores
start = time.process_time()
f2 = dynesty.NestedSampler(rdown_pe.log_likelihood, rdown_pe.prior_transform,
                           len(priors), nlive=npoints, sample=sampler, pool=mypool)
if parser.has_option('setup', 'dlogz'):
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float parses the config string equally well.
    dlogz = float(parser.get('setup', 'dlogz'))
    f2.run_nested(dlogz=dlogz, print_progress=False)
else:
    # Default stopping criterion when no evidence tolerance is configured.
    f2.run_nested(print_progress=False)
print(time.process_time() - start)
# In[52]:
# Collect sampler results, resample to equal-weight posterior samples and
# extract the evidence.
res = f2.results
res.samples_u.shape
res.summary()
samps=f2.results.samples
postsamps = rd_ut.posterior_samples(f2)
samps_tr=np.transpose(samps)
# Keep only the last ~80% of the chain for the best-amplitude estimates.
half_points=int(round((len(samps_tr[0])/1.25)))
evidence = res.logz[-1]
evidence_error = res.logzerr[-1]
if export:
    rd_ut.save_object(res, results_file)
# In[53]:
pars = nmax,model,samps_tr, half_points
npamps = rd_ut.get_best_amps(pars,parser=parser,nr_code=nr_code)
# In[54]:
if export:
    pars = simulation_number, nmax, tshift, evidence, evidence_error
    rd_ut.export_logz_files(sumary_data,pars)
# In[55]:
labels = rd_ut.define_labels(dim,model,fitnoise)
if export:
    pars = tshift, len(priors), labels
    rd_ut.export_bestvals_files(best_data,postsamps,pars)
# In[56]:
# True QNM values used as reference lines in the corner plot.
w, tau = rdown.QNM_spectrum()
pars = w, tau, mf, af, npamps
truths = rd_ut.get_truths(model,pars,fitnoise)
# In[57]:
fg=corner.corner(postsamps,quantiles=[0.05,0.5,0.95],show_titles=True,max_n_ticks = 4,bins=50,truths=truths,labels=labels,truth_color='red')
plt.show()
if export:
    fg.savefig(corner_plot, format = 'png', bbox_inches = 'tight')
# In[58]:
# NOTE(review): the reload() below is a notebook-debugging leftover.
from importlib import reload
reload(rd_ut)
if model == 'w-tau-fixed-m-af' and export == True:
    truths=np.concatenate((w,tau))
    # NOTE(review): w_lab and tau_lab are not defined anywhere in this script,
    # so this branch raises NameError as written — TODO confirm where they
    # were meant to come from (rd_ut.define_labels?).
    labels_mf = np.concatenate((w_lab,tau_lab))
    new_samples = rd_ut.convert_m_af_2_w_tau_post(res,fitnoise=False)
    figure = corner.corner(new_samples,truths=truths,quantiles=[0.05,0.95],labels=labels_mf,smooth=True,color='b',truth_color='r',show_titles=True)
    figure.savefig(corner_plot_extra, format = 'png', bbox_inches = 'tight')
# In[151]:
#lnz_truth = ndim * -np.log(2 * 10.) # analytic evidence solution
fig, axes = dyplot.runplot(res)
fig.tight_layout()
if export:
    fig.savefig(diagnosis_plot, format = 'png', dpi = 384, bbox_inches = 'tight')
# In[166]:
if export:
    # NOTE(review): `dict` shadows the builtin; also `downfactor` is not
    # defined in this script (NameError at the slicing below) — presumably it
    # comes from the config, TODO confirm.
    dict = {'w-tau':rdown.rd_model_wtau , 'w-q': rdown.rd_model_wq, 'w-tau-fixed':rdown.rd_model_wtau_fixed,'w-tau-fixed-m-af': rdown.rd_model_wtau_m_af}
    figband = plt.figure(figsize = (12, 9))
    plt.plot(datar_al[:,0].real,datar_al[:,1].real, "green", alpha=0.9, lw=3, label=r'$res_{240}$')
    # Keep only samples inside the joint 5-95% band, thinned by downfactor.
    onesig_bounds = np.array([np.percentile(postsamps[:, i], [5, 95]) for i in range(len(postsamps[0]))]).T
    samples_1sigma = filter(lambda sample: np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]), postsamps)
    samples_1sigma_down = list(samples_1sigma)[::downfactor]
    for sample in samples_1sigma_down:
        plt.plot(datar_al[:,0].real, dict[model](sample).real, "r-", alpha=0.1, lw=1)
    plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
    plt.legend()
    plt.xlabel('t')
    plt.ylabel('h')
    plt.show()
    figband.savefig(fit_plot)
# In[162]:
if export:
    # NOTE(review): `csv` is never imported in this script — this cell raises
    # NameError as written; add `import csv` at the top.
    with open(samples_file,'w') as file:
        writer = csv.writer(file)
        writer.writerow(labels)
        writer.writerows(samps[::downfactor])
%% Cell type:code id:square-toddler tags:
``` python
"""
Created by Sumit Kumar on 2020-03-08 && modified by Xisco on 2021-04
Last modified:
"""
import os, sys, numpy, glob, argparse
from datetime import date
import subprocess
from subprocess import call
import re
from configparser import ConfigParser
today = date.today()
date = today.strftime("%Y%m%d")
runname='run_0_mock'
nmax = 0
config_file ='config_n0_to_1_mock.ini'
overwrite = True
######
times = '(0 0.2 0.4 0.8 1.2 2 2.5 5 7.5 10 12 15 18 20)'
accounting_group = 'cbc.test.pe_ringdown'
cpus=8
nlive_points = 2000
req_memory="16GB"
not_user='frjifo@aei.mpg.de'
pythonfile='/work/francisco.jimenez/sio/git/rdstackingproject/code_new/RD_Fits.py'
pythonscript='/work/francisco.jimenez/venv/bin/python'
#######################################################
pwd = os.getcwd()
run_dir = '%s/%s'%(pwd,runname)
logs_dir = '%s/logs'%run_dir
os.system('mkdir -p %s'%logs_dir)
os.system('cp %s %s/'%(config_file,run_dir))
###########################################################################
# Creating Condor submit file
###########################################################################
filename1 = '%s/%s'%(run_dir,'condor_submit')
text_file1 = open(filename1 + ".sub", "w")
text_file1.write("universe = vanilla\n")
text_file1.write("getenv = true\n")
text_file1.write("# run script -- make sure that condor has execute permission for this file (chmod a+x script.py)\n")
text_file1.write("executable = "'%s/%s'%(run_dir,runname+'.sh \n'))
text_file1.write("# file to dump stdout (this directory should exist)\n")
text_file1.write("output = %s/%s-$(Process).out\n"%(logs_dir,runname))
text_file1.write("# file to dump stderr\n")
text_file1.write("error = %s/%s-$(Process).err\n"%(logs_dir,runname))
text_file1.write("# condor logs\n")
text_file1.write("log = %s/%s-$(Process).log\n"%(logs_dir,runname))
text_file1.write("initialdir = %s \n"%run_dir)
text_file1.write("notify_user ="+not_user+' \n')
text_file1.write("notification = Complete\n")
text_file1.write('''arguments = "-processid $(Process)" \n''')
text_file1.write("request_memory = "+str(req_memory)+"\n")
text_file1.write("request_cpus = "+str(cpus)+"\n")
text_file1.write("on_exit_remove = (ExitBySignal == False) || ((ExitBySignal == True) && (ExitSignal != 11))\n")
text_file1.write("accounting_group = %s\n"%accounting_group)
text_file1.write("queue 1\n")
text_file1.write("\n")
text_file1.close()
###########################################################
# Creating python executable file
############################################################
filename2 = run_dir+'/'+runname+'.sh'
text_file2 = open(filename2, "w")
text_file2.write("#! /bin/bash \n")
text_file2.write("\n")
text_file2.write("times="+times+"\n")
text_file2.write("config_file="+config_file+"\n")
text_file2.write("pythonfile="+pythonfile+"\n")
text_file2.write("pythonscript="+pythonscript+"\n")
text_file2.write("\n")
text_file2.write("for i in ${times[@]}; do\n")
text_file2.write(" awk -v a=\"$i\" '/^tshift/ && $3 != \"supplied\" { $3=a } { print }' $config_file > tmp && mv tmp $config_file\n")
text_file2.write(" $pythonscript $pythonfile -c $config_file \n")
text_file2.write("done\n")
text_file2.write("awk -v a=\"0\" '/^tshift/ && $3 != \"supplied\" { $3=a } { print }' $config_file > tmp && mv tmp $config_file \n")
text_file2.write("\n")
text_file2.close()
os.system('chmod u+x %s'%filename2)
os.system('cp '+runname+'.sh %s/'%run_dir)
os.system('chmod u+x ./'+runname+'.sh')
os.system('cd '+run_dir)
###########################################################
# Checking the configuration file and adding some important replacements
############################################################
filename3 = '%s/%s'%(run_dir,config_file)
# change the number of cores
bashCommand = "awk -v a="+str(cpus)+" '/^nb_cores/ && $3 != \"supplied\" { $3=a } { print }' "+str(config_file)+" > tmp && mv tmp "+str(config_file);
subprocess.call(bashCommand,shell=True)
filename3 = '%s/%s'%(run_dir,config_file)
# change the number of nmax
bashCommand = "awk -v a="+str(nmax)+" '/^nmax/ && $3 != \"supplied\" { $3=a } { print }' "+str(config_file)+" > tmp && mv tmp "+str(config_file);
subprocess.call(bashCommand,shell=True)
filename3 = '%s/%s'%(run_dir,config_file)
# change the number of nmax
bashCommand = "awk -v a="+str(nlive_points)+" '/^npoints/ && $3 != \"supplied\" { $3=a } { print }' "+str(config_file)+" > tmp && mv tmp "+str(config_file);
subprocess.call(bashCommand,shell=True)
# change the overwrite parameter
bashCommand = "awk -v a="+str(overwrite)+" '/^overwrite/ && $3 != \"supplied\" { $3=a } { print }' "+str(config_file)+" > tmp && mv tmp "+str(config_file);
subprocess.call(bashCommand,shell=True)
###########################################################
# Submit the job
############################################################
filename4 = '%s/%s'%(run_dir,'condor_submit.sub')
bashCommand = ['condor_submit', str(filename4)]
process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE,stderr=subprocess.PIPE);
output,error = process.communicate()
error
```
%% Output
b''
# Copyright (C) 2021 Xisco Jimenez Forteza
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
# Module to generate RD waveforms.
import numpy as np
import qnm
import os
# Fit coefficients (f1, f2, f3) for the QNM frequency and (q1, q2, q3) for the
# quality factor of the l=m=2 overtones n=0..4, used by the *_Berti inversion
# methods below — presumably the Berti et al. fitting tables, TODO confirm.
f_fpars= [[2.95845, -2.58697, 0.0533469], [2.12539, -1.78054, 0.0865503], [1.74755, -1.44776, 0.123666], [1.78287, -1.53203, 0.129475], [2.04028, -1.83224, 0.112497]]
q_fpars=[[0.584077, 1.52053, -0.480658], [0.00561441, 0.630715, -0.432664], [-0.197965, 0.515956, -0.369706], [-0.275097, 0.455691, -0.331543], [-0.287596, 0.398514, -0.309799]]
# SI constants: speed of light [m/s], Newton's constant, solar mass [kg].
c=2.99792458*10**8;G=6.67259*10**(-11);MS=1.9885*10**30;
class Ringdown_Spectrum:
    """RDown model generator.

    Builds ringdown (RD) waveforms for the (l, m) harmonic with overtones
    0..n, with QNM frequencies/damping times taken from the `qnm` package
    for a remnant of mass mf and dimensionless spin af (NR units).
    """
    def __init__(self, mf, af, l, m, n=4, s=-2, time=[], fixed=False, qnm_model='berti'):
        """
        mf, af : remnant mass and spin.
        l, m   : harmonic indices.
        n      : highest overtone included (dim = n + 1 modes).
        s      : spin weight of the perturbation.
        time   : time grid; defaults to arange(0, 100, 0.1) when empty.
                 (The mutable default is never mutated here, only replaced.)
        fixed  : if True, precompute self.w / self.tau at (mf, af).
        qnm_model : 'berti' (tabulated data) or 'qnm' (qnm package).
        """
        self.mf = mf
        self.af = af
        self.l = l
        self.m = m
        self.n = n
        self.time = time
        # One cached qnm-package mode object per overtone 0..n.
        self.grav_220 = [qnm.modes_cache(s=s, l=self.l, m=self.m, n=i) for i in range(0, self.n + 1)]
        self.dim = self.n + 1
        self.fixed = fixed
        self.qnm_model = qnm_model
        # FIX: these dispatch tables were plain locals in the original
        # __init__ (dead code); store them on the instance so they are usable.
        self.dict_omega = {'berti': self.QNM_Berti, 'qnm': self.QNM_spectrum}
        self.dict = {'w-tau': self.rd_model_wtau,
                     'w-q': self.rd_model_wq,
                     'w-tau-fixed': self.rd_model_wtau_fixed,
                     'w-tau-fixed-m-af': self.rd_model_wtau_m_af}
        if len(self.time) == 0:
            self.time = np.arange(0, 100, 0.1)
        if self.fixed:
            omegas_new = np.asarray([self.grav_220[i](a=self.af)[0] for i in range(0, self.dim)])
            self.w = (np.real(omegas_new)) / self.mf
            self.tau = -1 / (np.imag(omegas_new)) * self.mf

    def QNM_spectrum(self):
        """Return (w, tau) arrays in NR units for the remnant (self.mf, self.af)."""
        omegas_new = np.asarray([self.grav_220[i](a=self.af)[0] for i in range(0, self.n + 1)])
        w_m_a = (np.real(omegas_new)) / self.mf
        tau_m_a = -1 / (np.imag(omegas_new)) * self.mf
        return (w_m_a, tau_m_a)

    def _qnm_spectrum_m_af(self, mass, spin):
        """(w, tau) for an arbitrary (mass, spin), mirroring QNM_spectrum."""
        omegas_new = np.asarray([self.grav_220[i](a=spin)[0] for i in range(0, self.n + 1)])
        return (np.real(omegas_new) / mass, -1 / np.imag(omegas_new) * mass)

    def QNM_Berti(self, rdowndata):
        """Return (w, tau) read from tabulated spectra in rdowndata at spin self.af."""
        # Index of the first tabulated spin >= self.af.
        position = np.argmax(rdowndata[0, 0] >= (self.af))
        w_m_a = [None] * (self.n + 1)
        tau_ma_a = [None] * (self.n + 1)
        for i in range(self.n + 1):
            # FIX: renamed the local from `qnm`, which shadowed the qnm module.
            qnm_row = rdowndata[i, 1:3, position]
            w_m_a[i] = qnm_row[0] / self.mf
            tau_ma_a[i] = -1 / (qnm_row[1]) * self.mf
        return w_m_a, tau_ma_a

    def w_fpars_Berti(self, n):
        """Berti fit coefficients (f1, f2, f3) for the frequency of overtone n."""
        return f_fpars[n]

    def tau_qpars_Berti(self, n):
        """Berti fit coefficients (q1, q2, q3) for the quality factor of overtone n."""
        return q_fpars[n]

    def mass_from_wtau(self, n, w, tau):
        """Remnant mass implied by (w, tau) of overtone n (inverted Berti fits)."""
        # FIX: the coefficient getters are methods and must be called through
        # self — the original unqualified calls raised NameError.
        f1, f2, f3 = self.w_fpars_Berti(n)
        q1, q2, q3 = self.tau_qpars_Berti(n)
        res = (f1 + f2 * (2**(-1 / q3) * ((-2 * q1 + w * tau) / q2)**(1 / q3))**f3) / w
        return res

    def spin_from_wtau(self, n, w, tau):
        """Remnant spin implied by (w, tau) of overtone n (inverted Berti fits)."""
        f1, f2, f3 = self.w_fpars_Berti(n)   # FIX: self-qualified (was NameError)
        q1, q2, q3 = self.tau_qpars_Berti(n)
        res = 1 - 2**(-1 / q3) * ((-2 * q1 + w * tau) / q2)**(1 / q3)
        return res

    def mass_from_wtau_loop(self, w, tau, l, m):
        """Per-overtone version of mass_from_wtau for arrays w, tau."""
        # FIX: the original used the undefined global `dim`; use self.dim.
        res = [None] * self.dim
        for n in range(0, self.dim):
            f1, f2, f3 = self.w_fpars_Berti(n)
            q1, q2, q3 = self.tau_qpars_Berti(n)
            res[n] = (f1 + f2 * (2**(-1 / q3) * ((-2 * q1 + w[n] * tau[n]) / q2)**(1 / q3))**f3) / w[n]
        return res

    def spin_from_wtau_loop(self, w, tau, l, m):
        """Per-overtone version of spin_from_wtau for arrays w, tau."""
        res = [None] * self.dim   # FIX: self.dim instead of undefined global
        for n in range(0, self.dim):
            f1, f2, f3 = self.w_fpars_Berti(n)
            q1, q2, q3 = self.tau_qpars_Berti(n)
            res[n] = 1 - 2**(-1 / q3) * ((-2 * q1 + w[n] * tau[n]) / q2)**(1 / q3)
        return res

    def rd_model_wtau(self, theta):
        """RD model parametrized with the damping time tau.

        theta = [w_0..w_n, tau_0..tau_n, amp_0..amp_n, phase_0..phase_n].
        """
        assert int(len(theta) / 4) == self.dim, 'Please recheck your n and parameters'
        wvars = theta[: (self.dim)]
        tvars = theta[(self.dim): 2 * (self.dim)]
        xvars = theta[2 * (self.dim): 3 * (self.dim)]
        yvars = theta[3 * (self.dim):]
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time / tvars[i]) * (np.cos(wvars[i] * self.time) - 1j * np.sin(wvars[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz

    def rd_model_wtau_m_af(self, theta):
        """RD model with the GR QNM spectrum derived from sampled (mass, spin).

        theta = [amp_0.., phase_0.., mass, spin].
        FIX: the original definition lacked `self` and referenced undefined
        globals (dim, index_mass, index_spin, timesrd_final_tsh, dict_omega).
        Mass/spin are taken from the last two entries, matching
        rd_model_wq_m_a — NOTE(review): if a trailing noise parameter is
        appended, the caller must slice theta before calling; confirm.
        """
        xvars = theta[: (self.dim)]
        yvars = theta[(self.dim): 2 * (self.dim)]
        mass_vars = theta[-2]
        spin_vars = theta[-1]
        w_m_a, tau_m_a = self._qnm_spectrum_m_af(mass_vars, spin_vars)
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time / tau_m_a[i]) * (np.cos(w_m_a[i] * self.time) - 1j * np.sin(w_m_a[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz

    def rd_model_wtau_fixed(self, theta):
        """RD model with (w, tau) frozen to GR at (self.mf, self.af).

        Requires fixed=True at construction so self.w / self.tau exist.
        FIX: the original lacked `self` and used undefined globals
        (dim, timesrd_final_tsh, w, tau).
        """
        xvars = theta[: (self.dim)]
        yvars = theta[(self.dim): 2 * (self.dim)]
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time / self.tau[i]) * (np.cos(self.w[i] * self.time) - 1j * np.sin(self.w[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz

    def rd_model_wq(self, theta):
        """RD model parametrized with the quality factor q."""
        assert int(len(theta) / 4) == self.dim, 'Please recheck your n and parameters'
        wvars = theta[: (self.dim)]
        qvars = theta[(self.dim): 2 * (self.dim)]
        xvars = theta[2 * (self.dim): 3 * (self.dim)]
        yvars = theta[3 * (self.dim):]
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time * np.pi * wvars[i] / qvars[i]) * (np.cos(wvars[i] * self.time) - 1j * np.sin(wvars[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz

    def rd_model_wq_fixed(self, theta):
        """RD model with (w, tau) frozen to GR (requires fixed=True)."""
        xvars = theta[: (self.dim)]
        yvars = theta[(self.dim): 2 * (self.dim)]
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time / self.tau[i]) * (np.cos(self.w[i] * self.time) - 1j * np.sin(self.w[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz

    def rd_model_wq_m_a(self, theta):
        """RD model with the GR QNM spectrum from sampled (mass, spin).

        FIX: the original called the undefined bare name QNM_spectrum() and
        ignored the extracted mass/spin; it also used undefined globals
        (dim, timesrd_final_tsh). Both are corrected here.
        """
        xvars = theta[: (self.dim)]
        yvars = theta[(self.dim): 2 * (self.dim)]
        mass_vars = theta[-2]
        spin_vars = theta[-1]
        w_m_a, tau_m_a = self._qnm_spectrum_m_af(mass_vars, spin_vars)
        ansatz = 0
        for i in range(0, self.dim):
            ansatz += (xvars[i] * np.exp(1j * yvars[i])) * np.exp(-self.time / tau_m_a[i]) * (np.cos(w_m_a[i] * self.time) - 1j * np.sin(w_m_a[i] * self.time))
        # -1j to agree with SXS convention
        return ansatz
\ No newline at end of file
# Copyright (C) 2021 Xisco Jimenez Forteza
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
# Module to run PE on RD data
import random
from multiprocessing import Pool
import dynesty
import numpy as np
import rdown
class Ringdown_PE:
    """Likelihood and prior machinery for ringdown parameter estimation.

    Wraps a Ringdown_Spectrum-like object (rdown_fun) and complex strain
    data, exposing log_likelihood and prior_transform for nested samplers.
    """
    def __init__(self, rdown_fun, data, dim, priors, errors2=1, theta=[], model='w-tau', norm_factor=0, l_int=0):
        # Data: column 0 is time, column 1 the complex strain.
        self.times = data[:, 0]
        self.datare = data[:, 1].real
        self.dataim = data[:, 1].imag
        # Prior bounds: priors is an (N, 2) array of [min, max] rows.
        self.priors = priors
        self.priors_min = priors[:, 0]
        self.priors_max = priors[:, 1]
        self.prior_dim = len(priors)
        # Model configuration.
        self.dim = dim
        self.rdown_fun = rdown_fun
        self.errors2 = errors2
        self.norm_factor = norm_factor
        self.model = model
        self.l_int = l_int
        self.theta = theta
        # Dispatch from model name to the waveform-model callable.
        self.dict = {'w-tau': rdown_fun.rd_model_wtau,
                     'w-q': rdown_fun.rd_model_wq,
                     'w-tau-fixed': rdown_fun.rd_model_wtau_fixed,
                     'w-tau-fixed-m-af': rdown_fun.rd_model_wtau_m_af}

    def log_likelihood(self, theta, sigma=1):
        """chi2 likelihood.

        Gaussian log-likelihood of the data given parameters theta; when
        l_int is 1 a multiplicative noise term exp(2*theta[-1]) scales with
        the data power and the Gaussian normalisation is included.
        """
        prediction = self.dict[self.model](theta)
        res_re = self.datare - prediction.real
        res_im = self.dataim - prediction.imag
        sigma2 = self.errors2 + self.l_int * (self.datare ** 2 + self.dataim ** 2) * np.exp(2 * theta[-1])
        logl = -0.5 * np.sum((res_re ** 2 + res_im ** 2) / sigma2 + self.l_int * (np.log(2 * np.pi * sigma2))) - self.l_int * self.norm_factor
        # Guard against NaNs from pathological parameter values.
        return -np.inf if np.isnan(logl) else logl

    def prior_transform(self, cube):
        """RD uniform priors: map unit-cube samples onto [min, max] in place.

        priors_min / priors_max must be set before this is called.
        """
        for k in range(self.prior_dim):
            low = self.priors_min[k]
            cube[k] = low + cube[k] * (self.priors_max[k] - low)
        return cube
def _amp_phase_bounds(config_parser, nmax):
    """Read per-overtone amplitude and phase prior bounds from the config.

    Phases are stored in the config in units of 2*pi and converted to
    radians here. Returns (a_mins, a_maxs, ph_mins, ph_maxs), each of
    length nmax + 1.
    """
    a_mins = np.empty(nmax + 1)
    a_maxs = np.empty(nmax + 1)
    ph_mins = np.empty(nmax + 1)
    ph_maxs = np.empty(nmax + 1)
    for i in range(nmax + 1):
        amp_sec = 'prior-amp' + str(i)
        a_mins[i] = float(config_parser.get(amp_sec, 'amp' + str(i) + '_min'))
        a_maxs[i] = float(config_parser.get(amp_sec, 'amp' + str(i) + '_max'))
        ph_sec = 'prior-phase' + str(i)
        ph_mins[i] = float(config_parser.get(ph_sec, 'phase' + str(i) + '_min')) * 2 * np.pi
        ph_maxs[i] = float(config_parser.get(ph_sec, 'phase' + str(i) + '_max')) * 2 * np.pi
    return a_mins, a_maxs, ph_mins, ph_maxs


def load_priors(model, config_parser, nmax, fitnoise=True):
    """Build the (prior_dim, 2) array of uniform prior [min, max] bounds.

    model         : 'w-tau', 'w-q', 'w-tau-fixed' or 'w-tau-fixed-m-af'.
    config_parser : a ConfigParser holding the prior-* sections.
    nmax          : highest overtone (nmax + 1 modes are read).
    fitnoise      : when True, append the noise-parameter prior.

    FIXES vs the original:
    - np.float (deprecated in NumPy 1.20, removed in 1.24) -> builtin float;
    - the 'w-q' model prepared tau_var_str='q' but never entered the
      frequency branch, leaving `priors` undefined (NameError); 'w-q' is
      now handled together with 'w-tau';
    - an unknown model now raises ValueError instead of a late NameError.
    """
    tau_var_str = 'q' if model == 'w-q' else 'tau'
    a_mins, a_maxs, ph_mins, ph_maxs = _amp_phase_bounds(config_parser, nmax)
    if model in ('w-tau', 'w-q'):
        w_mins = np.empty(nmax + 1)
        w_maxs = np.empty(nmax + 1)
        tau_mins = np.empty(nmax + 1)
        tau_maxs = np.empty(nmax + 1)
        for i in range(nmax + 1):
            w_sec = 'prior-w' + str(i)
            w_mins[i] = float(config_parser.get(w_sec, 'w' + str(i) + '_min'))
            w_maxs[i] = float(config_parser.get(w_sec, 'w' + str(i) + '_max'))
            tau_sec = 'prior-' + tau_var_str + str(i)
            tau_mins[i] = float(config_parser.get(tau_sec, tau_var_str + str(i) + '_min'))
            tau_maxs[i] = float(config_parser.get(tau_sec, tau_var_str + str(i) + '_max'))
        priors_min = np.concatenate((w_mins, tau_mins, a_mins, ph_mins))
        priors_max = np.concatenate((w_maxs, tau_maxs, a_maxs, ph_maxs))
    elif model == 'w-tau-fixed':
        priors_min = np.concatenate((a_mins, ph_mins))
        priors_max = np.concatenate((a_maxs, ph_maxs))
    elif model == 'w-tau-fixed-m-af':
        mass_min = [float(config_parser.get('prior-mass', 'mass_min'))]
        mass_max = [float(config_parser.get('prior-mass', 'mass_max'))]
        spin_min = [float(config_parser.get('prior-spin', 'spin_min'))]
        spin_max = [float(config_parser.get('prior-spin', 'spin_max'))]
        priors_min = np.concatenate((a_mins, ph_mins, mass_min, spin_min))
        priors_max = np.concatenate((a_maxs, ph_maxs, mass_max, spin_max))
    else:
        raise ValueError('unknown ringdown model: ' + str(model))
    if fitnoise:
        priors_min = np.concatenate((priors_min, [float(config_parser.get('prior-noise', 'noise_min'))]))
        priors_max = np.concatenate((priors_max, [float(config_parser.get('prior-noise', 'noise_max'))]))
    return np.column_stack((priors_min, priors_max))
\ No newline at end of file
# Copyright (C) 2021 Xisco Jimenez Forteza
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
# Module to run PE on RD data
import numpy as np
from dynesty.utils import resample_equal
from dynesty import utils as dyfunc
import os
import csv
import pandas as pd
import pickle
def posterior_samples(sampler):
    """
    Resample dynesty nested samples into equal-weight posterior samples,
    using the importance weights stored in the sampler's results.
    """
    results = sampler.results
    # Importance weights relative to the final evidence estimate.
    raw_wt = np.exp(results['logwt'] - results['logz'][-1])
    # Normalise so the weights sum to one before resampling.
    normed_wt = raw_wt / np.sum(raw_wt)
    return dyfunc.resample_equal(results['samples'], normed_wt)
def FFT_FreqBins(times):
    """Return FFT frequency bins (positives then negatives) for a uniform time grid."""
    npts = len(times)
    dt = (times[-1] - times[0]) / (npts - 1)
    dnu = 1 / (npts * dt)          # frequency resolution
    nyquist = 1 / (2 * dt)         # maximum (Nyquist) frequency
    eps = dnu / 4                  # guard so arange includes its endpoint
    pos = np.arange(0.0, nyquist + eps, dnu)
    neg = np.arange(pos[-1] - (2 * nyquist) + dnu, -dnu / 2 + eps, dnu)
    return np.concatenate((pos, neg))
def hFromPsi4FFI(tpsi4,f0):
    # NOTE(review): this function is unfinished/broken as written:
    # - FT_FreqBins and xaxis are undefined (presumably FFT_FreqBins(times));
    # - fft/ifft are not imported, and myTable/floor are undefined here;
    # - the last two expressions compute values that are discarded, and the
    #   function returns None.
    # Left byte-identical pending clarification of the intended FFI integration.
    timecheck1=tpsi4[-2,0]-tpsi4[-1,0]
    timecheck2=tpsi4[1,0]-tpsi4[0,0]
    # Warn when the sampling interval is not uniform.
    if np.abs(timecheck1-timecheck2)>=0.0001:
        print("The data might not be equally sampled!!")
    times,data= tpsi4[:,0],tpsi4[:,1]
    freqs = FT_FreqBins(xaxis.real).real
    position = np.argmax(freqs >= f0)
    # Clamp all frequencies below f0 to f0 (fixed-frequency integration floor).
    freqs[:position]=f0*np.ones(len(freqs[:position]))
    freqs=2*np.pi*freqs
    fdata=fft(data)
    len(myTable)*ifft(- fdata/floor**2);
    np.stack((times,data)).T
def twopoint_autocovariance(t,n):
    """ It computes the two-point autocovariance function.

    Returns (lag_times, covariances) for lags 0 .. len(n)//2 - 1, where the
    covariance at lag k is Re(sum(n * roll(n, k))).
    """
    dt=t[1]-t[0]
    half = int(len(n) / 2)
    lag_times = np.zeros(len(n))
    covs = np.zeros(len(n))
    for lag in range(0, half):
        shifted = np.roll(n, lag)
        lag_times[lag] = t[lag]
        covs[lag] = np.sum(n * shifted).real
    return (lag_times[:half], covs[:half])
def save_object(obj, filename):
    """Pickle *obj* to *filename*, overwriting any existing file."""
    with open(filename, 'wb') as out_stream:
        pickle.dump(obj, out_stream, pickle.HIGHEST_PROTOCOL)
def EasyMatchT(t, h1, h2, tmin, tmax):
    """
    Time-domain match (normalised overlap) between two complex waveforms.

    Only samples from the first index with t >= tmin onwards enter the
    match; tmax is accepted for interface compatibility but not used.
    """
    start = np.argmax(t >= tmin)
    a = h1[start:]
    b = h2[start:]
    overlap = np.sum(a * np.conjugate(b))
    norm = np.sqrt(np.sum(np.abs(a) ** 2) * np.sum(np.abs(b) ** 2))
    return (overlap / norm).real
def EasySNRT(t, h1, h2, tmin, tmax):
    """
    Time-domain SNR-like quantity 2*sqrt(Re<h1|h2>) for complex waveforms,
    accumulated from the first sample with t >= tmin onwards.  tmax is
    accepted for interface compatibility but not used.
    """
    start = np.argmax(t >= tmin)
    inner = np.sum(h1[start:] * np.conjugate(h2[start:]))
    return 2 * np.sqrt(inner.real)
def FindTmaximum(y):
    """
    Return the time (column 0) at which the complex waveform stored in
    columns 1 (real part) and 2 (imaginary part) of y peaks in absolute
    value.  Ties resolve to the earliest peak.
    """
    amplitude = np.sqrt(y[:, 1] ** 2 + y[:, 2] ** 2)
    peak_index = np.argmax(amplitude)
    return y[peak_index, 0]
def export_logz_files(output_file, pars):
    """
    Append the evidence results of a run to a logz summary CSV file.

    A header row ['n','id','t_shift','dlogz','dlogz_err'] is written only
    when the file does not exist yet.

    Parameters
    ----------
    output_file : str. Path of the CSV file to create or append to.
    pars : sequence of (sim_num, nmax, tshift, evidence, evidence_error).
    """
    sim_num, nmax, tshift, evidence, evidence_error = pars
    header = ['n', 'id', 't_shift', 'dlogz', 'dlogz_err']
    row = [nmax, sim_num, tshift, evidence, evidence_error]
    # Decide on the header before opening in append mode (which creates the file).
    write_header = not os.path.exists(output_file)
    with open(output_file, 'a') as file:
        writer = csv.writer(file)
        if write_header:
            writer.writerow(header)
        writer.writerow(row)
    return
def export_bestvals_files(best_data_file, postsamps, pars):
    """
    Export the median, maximum-likelihood sample and 90% credible bounds of
    the posterior samples to a CSV file (appending if the file exists).

    Parameters
    ----------
    best_data_file : str. Output CSV path.
    postsamps : (nsamples, lenpriors) array of posterior samples; the last
        row is taken as the maximum-likelihood sample.
    pars : sequence of (tshift, lenpriors, labels).
    """
    tshift, lenpriors, labels = pars
    quantile_low = np.empty(lenpriors)
    quantile_high = np.empty(lenpriors)
    medians = np.empty(lenpriors)
    max_like = np.empty(lenpriors)
    for i in range(lenpriors):
        column = postsamps[:, i]
        quantile_low[i] = np.quantile(column, 0.05)
        medians[i] = np.quantile(column, 0.5)
        # Nested-sampling chains store the maximum-likelihood sample last.
        max_like[i] = postsamps[-1, i]
        quantile_high[i] = np.quantile(column, 0.95)
    # (The original code built a throwaway list here before np.stack;
    # the dead assignment has been removed.)
    summary = np.stack([medians, max_like, quantile_low, quantile_high], axis=0)
    key = ['max val', 'max val ml', 'lower bound', 'higher bound']
    dfslist = [pd.DataFrame(
        np.concatenate(([tshift], summary[i])).reshape((-1, lenpriors + 1)),
        columns=np.concatenate((['tshift'], labels)),
        index=[key[i]]) for i in range(4)]
    df2 = pd.concat(dfslist)
    # Only write the column header when creating the file.
    if os.path.exists(best_data_file):
        df2.to_csv(best_data_file, mode='a', header=False, index=True)
    else:
        df2.to_csv(best_data_file, index=True)
def define_labels(dim, model, fitnoise):
    """
    Build the LaTeX axis labels for the sampled parameters of a model.

    Parameters
    ----------
    dim : int. Number of tones; labels are generated for indices 0..dim-1.
    model : str. One of 'w-tau', 'w-q', 'w-tau-fixed', 'w-tau-fixed-m-af'.
    fitnoise : bool. If True, a trailing 'noise' label is appended.

    Returns
    -------
    numpy array of label strings, in the order the sampler orders parameters.
    """
    wstr = r'$\omega_'
    if model == 'w-tau':
        taustr = r'$\tau_'
    elif model == 'w-q':
        taustr = r'$q_'
    elif model == 'w-tau-fixed':
        # Placeholder only: the tau labels are dropped below for this model.
        taustr = r'$dumb_var}'
    elif model == 'w-tau-fixed-m-af':
        taustr = r'$\tau_'
    ampstr = r'$A_'
    phasestr = r'$\phi_'
    w_lab = [wstr + str(i) + '$' for i in range(dim)]
    tau_lab = [taustr + str(i) + '$' for i in range(dim)]
    amp_lab = [ampstr + str(i) + '$' for i in range(dim)]
    pha_lab = [phasestr + str(i) + '$' for i in range(dim)]
    mass_lab = ['mass']
    spin_lab = ['spin']
    labels = np.concatenate((w_lab, tau_lab, amp_lab, pha_lab))
    if model == 'w-tau-fixed':
        # Frequencies and damping times are fixed: only amplitudes and phases.
        labels = np.concatenate((amp_lab, pha_lab))
    if model == 'w-tau-fixed-m-af':
        # Amplitudes and phases plus remnant mass and spin.
        labels = np.concatenate((amp_lab, pha_lab, mass_lab, spin_lab))
    if fitnoise:
        labels = np.concatenate((labels, ['noise']))
    return labels
def get_truths(model, pars, fitnoise):
    """
    Assemble the vector of true parameter values for a given RD model,
    e.g. for reference lines in corner plots.

    Parameters
    ----------
    model : str. One of 'w-q', 'w-tau', 'w-tau-fixed', 'w-tau-fixed-m-af'.
    pars : sequence of (w, tau, mf, af, npamps).
    fitnoise : bool. If True a trailing noise value of 1 is appended.
    """
    w, tau, mf, af, npamps = pars
    if model == 'w-q':
        # Quality factor: q = pi * f * tau.
        truths = np.concatenate((w, np.pi * w * tau, npamps))
    elif model == 'w-tau':
        truths = np.concatenate((w, tau, npamps))
    elif model == 'w-tau-fixed':
        truths = npamps
    elif model == 'w-tau-fixed-m-af':
        truths = np.concatenate((npamps, [mf], [af]))
    if fitnoise:
        truths = np.concatenate((truths, [1]))
    return truths
def get_best_amps(pars, parser=None, nr_code=None):
    """
    Compute the median posterior amplitudes (and phases) of the tones.

    Parameters
    ----------
    pars : sequence of (nmax, model, samps_tr, half_points) where samps_tr
        is the transposed chain (one row per parameter) and only samples
        from index half_points onwards are used (burn-in removal).
    parser : ConfigParser with the mock injection values (Mock-data only).
    nr_code : str or None. If 'Mock-data', the injected amplitudes and
        phases from the config file are returned instead.

    Returns
    -------
    numpy array of median amplitudes (and phases).
    """
    nmax, model, samps_tr, half_points = pars
    # NOTE(review): the second branch repeats the first condition and is
    # unreachable; it was probably meant to be 'w-tau-fixed-m-af' -- confirm.
    if model == 'w-tau-fixed':
        rg = (nmax + 1)
    elif model == 'w-tau-fixed':
        rg = (nmax + 1) + 2
    else:
        rg = (nmax + 1) * 2
    # NOTE(review): 'w-tau-fixed-a-mf' does not match the model name used
    # elsewhere ('w-tau-fixed-m-af'), so this branch never triggers -- confirm.
    if model == 'w-tau-fixed-a-mf':
        npamps = np.empty((nmax + 1))
        for i in range(0, (nmax + 1)):
            amps_aux = samps_tr[i + rg][half_points:-1]
            npamps[i] = np.quantile(amps_aux, 0.5)
    else:
        # Median of each amplitude and phase chain after burn-in.
        npamps = np.empty((nmax + 1) * 2)
        for i in range(0, (nmax + 1) * 2):
            amps_aux = samps_tr[i][half_points:-1]
            npamps[i] = np.quantile(amps_aux, 0.5)
    if nr_code == 'Mock-data':
        # Return the injected values instead.  np.int/np.float were removed
        # in NumPy >= 1.20/1.24, so the builtins are used here.
        nm_mock = int(parser.get('rd-mock-parameters', 'nm_mock'))
        amp_mock = np.empty(nm_mock + 1)
        ph_mock = np.empty(nm_mock + 1)
        for i in range(nm_mock + 1):
            amp_mock[i] = float(parser.get('rd-mock-parameters', 'amp' + str(i)))
            ph_mock[i] = float(parser.get('rd-mock-parameters', 'phase' + str(i)))
        npamps = np.concatenate((amp_mock, ph_mock))
    return npamps
def convert_m_af_2_w_tau_post(res,fitnoise=False):
    """Resample a (mass, spin) posterior into a (frequency, damping-time)
    posterior using QNM fits.

    NOTE(review): this function references names that are not defined in
    this module (`f2`, `dict_omega`, `qnm_model`) and would raise a
    NameError if called as-is -- it looks pasted from a notebook; confirm
    before use.
    """
    # NOTE(review): `samples_2` is assigned but never used, and `f2` is
    # undefined here (presumably the dynesty results object `res` was meant).
    samples_2=res.samples
    samps=f2.results.samples
    if fitnoise:
        # With a sampled noise term the chain ends in (..., mass, spin, noise).
        fmass_spin=(samps.T)[-3:-1].T
    else:
        # Otherwise the last two columns are (mass, spin).
        fmass_spin=(samps.T)[-2:].T
    #fmass_spin=new_samples[-2:]
    fmass_spin_dist=[None]*len(fmass_spin)
    # Importance weights of the nested-sampling run: exp(logwt - logZ).
    weight=np.exp(res.logwt - res.logz[-1])
    for i in range(len(fmass_spin)):
        # Map each (mass, spin) sample to l=2, m=2 QNM values.
        # NOTE(review): `dict_omega` and `qnm_model` are undefined in this module.
        fmass_spin_dist[i]=np.concatenate(dict_omega[qnm_model](fmass_spin[i,0],fmass_spin[i,1],2,2))
    fmass_spin_dist_v2=np.asarray(fmass_spin_dist)
    # Resample to equally-weighted posterior samples.
    new_samples = dyfunc.resample_equal(fmass_spin_dist_v2, weight)
    return new_samples
def save_object(obj, filename):
    """Pickle *obj* to *filename*, overwriting any existing file.

    NOTE(review): exact duplicate of the `save_object` defined earlier in
    this module; this later definition is the one that survives at import
    time.  Consider removing one of the two.
    """
    with open(filename, 'wb') as output:  # Overwrites any existing file.
        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def rm_files(files):
    """Delete every path in *files* that currently exists; skip the rest."""
    for path in files:
        if os.path.exists(path):
            os.remove(path)
\ No newline at end of file
# Copyright (C) 2021 Xisco Jimenez Forteza
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
# Module to run PE on RD data
import numpy as np
import rdown_utilities as rd_ut
import romspline
import rdown as rd
import h5py
import json
from scipy import interpolate
from scipy.interpolate import interp1d
def read_data(nr_code,sim_path,mf=1,af=0,parser=None,RD=True,tshift=0,tend = 100,metadata_file=None):
    """Load NR ringdown data ('SXS', 'Maya', 'LaZeV') or synthesise a mock
    ringdown ('Mock-data'), returning a (N, 2) array of (time, complex
    strain) with t = 0 aligned to the waveform peak.

    NOTE(review): only the 'SXS' and 'Mock-data' branches reach the final
    return with `times`/`gw_data` defined.  The 'Maya' and 'LaZeV' branches
    never assign `gw_data` and reference `t_align` (and, for 'LaZeV',
    `simulation_path_1`), which are undefined in this scope, so they would
    raise NameError if exercised -- confirm these paths are unused.
    Also, np.int/np.float below were removed in NumPy >= 1.20/1.24.
    """
    if nr_code=='SXS':
        gw = {}
        gw = h5py.File(sim_path, 'r')
        # l=2, m=2 mode at the N3 extrapolation order stored in the file.
        gw_data = gw["Extrapolated_N3.dir"]["Y_l2_m2.dat"]
        times = gw_data[:,0]
        metadata = {}
        with open(metadata_file) as file:
            metadata = json.load(file)
        # Remnant properties from the SXS metadata (override the defaults).
        af = metadata['remnant_dimensionless_spin'][-1]
        mf = metadata['remnant_mass']
        # Put t=0 at the strain peak; search only the second half of the
        # data to avoid picking up junk radiation at early times.
        tmax=rd_ut.FindTmaximum(gw_data[round(len(gw_data)/2):])
        times = times - tmax
        if RD:
            # Keep only the post-peak (ringdown) part, as complex strain.
            position = np.argmax(times >= 0)
            gw_data = gw_data[:,1][position:]+1j*gw_data[:,2][position:]
            times = times[times >= 0]
    elif nr_code=='Maya':
        # NOTE(review): broken branch -- see the docstring above.
        dt=0.1
        gw = {}
        gw = h5py.File(sim_path, 'r')
        # Drop the first 6 samples of the amplitude and phase series.
        gw_sxs_bbh_0305_amp = np.asarray(gw['amp_l2_m2/Y'])[6:]
        times_1 = np.asarray(gw['amp_l2_m2/X'])[6:]
        gw_sxs_bbh_0305_amp_int = romspline.ReducedOrderSpline(times_1, gw_sxs_bbh_0305_amp)
        gw_sxs_bbh_0305_pha = np.asarray(gw['phase_l2_m2/Y'])[6:]
        times = np.asarray(gw['phase_l2_m2/X'])[6:]
        gw_sxs_bbh_0305_pha_int = romspline.ReducedOrderSpline(times, gw_sxs_bbh_0305_pha)
        # Resample amplitude and phase splines onto a common uniform grid.
        tmin=max(times_1[0],times[0])
        tmax=min(times_1[-1],times[-1])
        times=np.arange(tmin,tmax,dt)
        amps=gw_sxs_bbh_0305_amp_int(times)
        phs=gw_sxs_bbh_0305_pha_int(times)
        # Rebuild the complex strain from amplitude and phase.
        gw_sxs_bbh_0305 = np.asarray([times,amps*np.cos(phs),amps*np.sin(phs)]).T
        gw5_sxs_bbh_0305 = gw_sxs_bbh_0305
        times = gw_sxs_bbh_0305[:,0]
        # Align t=0 to the peak (second half only, to skip junk radiation).
        tmax=rd_ut.FindTmaximum(gw_sxs_bbh_0305[round(len(gw_sxs_bbh_0305)/2):])
        times = times - tmax
        #times 6--> x axis of your data
        times5 = gw5_sxs_bbh_0305[:,0]
        tmax5=rd_ut.FindTmaximum(gw5_sxs_bbh_0305[round(len(gw_sxs_bbh_0305)/2):])
        times5 = times5 - tmax5
        #Select the data from 0 onwards
        # NOTE(review): `t_align` is not defined in this function.
        position = np.argmax( times >= (t_align))
        position5 = np.argmax(times5 >= (t_align))
        # NOTE(review): the variables below are computed but never returned.
        gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position+1:]
        gw_sxs_bbh_0305rd5=gw5_sxs_bbh_0305[position5+1:]
        timesrd=gw_sxs_bbh_0305[position:-1][:,0][:]
        timesrd5=gw5_sxs_bbh_0305[position5:-1][:,0][:]
    elif nr_code=='LaZeV':
        # NOTE(review): near-duplicate of the 'Maya' branch, except that it
        # opens `simulation_path_1` (undefined here) instead of `sim_path`;
        # broken for the same reasons -- see the docstring above.
        dt=0.1
        gw = {}
        gw = h5py.File(simulation_path_1, 'r')
        gw_sxs_bbh_0305_amp = np.asarray(gw['amp_l2_m2/Y'])[6:]
        times_1 = np.asarray(gw['amp_l2_m2/X'])[6:]
        gw_sxs_bbh_0305_amp_int = romspline.ReducedOrderSpline(times_1, gw_sxs_bbh_0305_amp)
        gw_sxs_bbh_0305_pha = np.asarray(gw['phase_l2_m2/Y'])[6:]
        times = np.asarray(gw['phase_l2_m2/X'])[6:]
        gw_sxs_bbh_0305_pha_int = romspline.ReducedOrderSpline(times, gw_sxs_bbh_0305_pha)
        # Resample amplitude and phase splines onto a common uniform grid.
        tmin=max(times_1[0],times[0])
        tmax=min(times_1[-1],times[-1])
        times=np.arange(tmin,tmax,dt)
        amps=gw_sxs_bbh_0305_amp_int(times)
        phs=gw_sxs_bbh_0305_pha_int(times)
        # Rebuild the complex strain from amplitude and phase.
        gw_sxs_bbh_0305 = np.asarray([times,amps*np.cos(phs),amps*np.sin(phs)]).T
        gw5_sxs_bbh_0305 = gw_sxs_bbh_0305
        times = gw_sxs_bbh_0305[:,0]
        tmax=rd_ut.FindTmaximum(gw_sxs_bbh_0305[round(len(gw_sxs_bbh_0305)/2):])
        times = times - tmax
        #times 6--> x axis of your data
        times5 = gw5_sxs_bbh_0305[:,0]
        tmax5=rd_ut.FindTmaximum(gw5_sxs_bbh_0305[round(len(gw_sxs_bbh_0305)/2):])
        times5 = times5 - tmax5
        #Select the data from 0 onwards
        # NOTE(review): `t_align` is not defined in this function.
        position = np.argmax( times >= (t_align))
        position5 = np.argmax(times5 >= (t_align))
        # NOTE(review): the variables below are computed but never returned.
        gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position+1:]
        gw_sxs_bbh_0305rd5=gw5_sxs_bbh_0305[position5+1:]
        timesrd=gw_sxs_bbh_0305[position:-1][:,0][:]
        timesrd5=gw5_sxs_bbh_0305[position5:-1][:,0][:]
    elif nr_code=='Mock-data':
        # Synthesise a damped-sinusoid ringdown from the injection
        # parameters stored in the config file.
        times = np.arange(tshift,tend+10,0.1)
        nm_mock = parser.get('rd-mock-parameters','nm_mock')
        # NOTE(review): np.int/np.float were removed in NumPy >= 1.20/1.24;
        # these calls fail on modern NumPy (use the int/float builtins).
        nm_mock = np.int(nm_mock)
        mf = parser.get('rd-mock-parameters','mf')
        mf = np.float(mf)
        af = np.float(parser.get('rd-mock-parameters','af'))
        af = np.float(af)
        rdown=rd.Ringdown_Spectrum(mf,af,2,2,n=nm_mock,s=-2,time=times)
        w_mock=np.empty(nm_mock+1)
        tau_mock=np.empty(nm_mock+1)
        amp_mock=np.empty(nm_mock+1)
        ph_mock=np.empty(nm_mock+1)
        # Read frequency, damping time, amplitude and phase of every tone.
        for i in range(nm_mock+1):
            wp_mock = parser.get('rd-mock-parameters','w'+str(i))
            w_mock[i] = np.float(wp_mock)
            tp_mock=parser.get('rd-mock-parameters','tau'+str(i))
            tau_mock[i] = np.float(tp_mock)
            amp_mockp = parser.get('rd-mock-parameters','amp'+str(i))
            amp_mock[i] = np.float(amp_mockp)
            ph_mockp=parser.get('rd-mock-parameters','phase'+str(i))
            ph_mock[i] = np.float(ph_mockp)
        pars = np.concatenate((w_mock,tau_mock,amp_mock,ph_mock))
        gw_data=rdown.rd_model_wtau(pars)
    # Pack times and complex strain together as a (N, 2) array.
    return np.stack((times,gw_data)).T
def nr_resize(data_1, data_2, tshift=0, tend=100):
    """
    Interpolate two complex waveforms onto a common time grid and crop both
    to the window [tshift, tend).

    Parameters
    ----------
    data_1, data_2 : (N, 2) arrays of (time, complex strain).
    tshift, tend : floats bounding the window kept after resampling.

    Returns
    -------
    Pair of resampled, cropped (N', 2) arrays on the shared time grid.
    """
    grid_1 = data_1[:, 0].real
    grid_2 = data_2[:, 0].real
    # Cubic interpolants for the real and imaginary parts of each waveform.
    interp_1_re = interpolate.interp1d(grid_1, data_1[:, 1].real, kind='cubic')
    interp_1_im = interpolate.interp1d(grid_1, data_1[:, 1].imag, kind='cubic')
    interp_2_re = interpolate.interp1d(grid_2, data_2[:, 1].real, kind='cubic')
    interp_2_im = interpolate.interp1d(grid_2, data_2[:, 1].imag, kind='cubic')
    # Use whichever grid ends first so both interpolants stay in range.
    if grid_2[-1] >= grid_1[-1]:
        common = grid_1
    else:
        common = grid_2
    wave_1 = interp_1_re(common) + 1j * interp_1_im(common)
    wave_2 = interp_2_re(common) + 1j * interp_2_im(common)
    # Crop both waveforms to [tshift, tend).
    start = np.argmax(common >= tshift)
    stop = np.argmax(common >= tend)
    common = common[start:stop]
    wave_1 = wave_1[start:stop]
    wave_2 = wave_2[start:stop]
    return (np.stack((common, wave_1)).T, np.stack((common, wave_2)).T)
def phase_align(data_1, data_2, t_align=0):
    """
    Rotate the phase of data_1 so that it matches data_2 at t = 0.

    The phase difference between the unwrapped phases of the two waveforms
    is evaluated at the first sample with time >= 0 and applied globally to
    data_1.  t_align is accepted for interface compatibility but not used.

    Parameters
    ----------
    data_1, data_2 : (N, 2) arrays of (time, complex strain).

    Returns
    -------
    (N, 2) array: data_1 with its strain rotated by the phase offset.
    """
    unwrapped_1 = np.unwrap(np.angle(data_1[:, 1]))
    unwrapped_2 = np.unwrap(np.angle(data_2[:, 1]))
    anchor = np.argmax(data_1[:, 0] >= (0))
    offset = unwrapped_2[anchor] - unwrapped_1[anchor]
    rotated = data_1[:, 1] * np.exp(1j * offset)
    return np.stack((data_1[:, 0], rotated)).T
def create_output_files(output_folder, pars, file_type):
    """
    Build the output file path for a given artefact of the run.

    Parameters
    ----------
    output_folder : str. Folder the file will live in.
    pars : sequence of (sim_num, model, nmax, tshift, npoints).
    file_type : str. One of 'corner_plot', 'corner_plot_extra', 'diagnosis',
        'fit', 'post_samples', 'sampler_results', 'log_z', 'best_vals'.

    Returns
    -------
    str path, or None (with a message printed) for an unknown file_type.
    """
    sim_num, model, nmax, tshift, npoints = pars
    # Shared name fragments; spellings kept identical to the historical layout.
    head = output_folder + '/'
    run_tag = str(sim_num) + '_' + model + '_nmax_' + str(nmax)
    sampling_tag = run_tag + '_tshift_' + str(tshift) + '_' + str(npoints)
    fit_tag = str(sim_num) + 'tshift_' + str(tshift) + '_' + model + '_nmax_' + str(nmax)
    templates = {
        'corner_plot': head + 'Dynesty_' + sampling_tag + 'corner_plot.png',
        'corner_plot_extra': head + 'Dynesty_' + sampling_tag + 'corner_plot_extra.png',
        'diagnosis': head + 'Dynesty_diagnosis' + sampling_tag + '.png',
        'fit': head + 'Fit_results_' + fit_tag + '.png',
        'post_samples': head + 'posterior_samples-' + fit_tag + '.csv',
        'sampler_results': head + 'results_' + fit_tag + '.pkl',
        'log_z': head + 'summary' + run_tag + '.csv',
        'best_vals': head + 'best_values_' + run_tag + '.csv',
    }
    if file_type not in templates:
        print('Something went wrong')
        return
    return templates[file_type]
def read_config_file(parser):
    """
    Read every run option from a ConfigParser and return them as one tuple.

    Parameters
    ----------
    parser : configparser.ConfigParser that has already read the config file.

    Returns
    -------
    Tuple (simulation_path_1, simulation_path_2, metadata_file,
    simulation_number, output_folder, export, overwrite, sampler, nr_code,
    nbcores, tshift, tend, t_align, nmax, npoints, model, error_str,
    fitnoise, l_int, index_mass, index_spin, error_type, error_val, af, mf,
    tau_var_str, nm_mock).

    Notes
    -----
    Several boolean options are parsed with eval(); the config file is
    therefore trusted input.  np.int/np.float (removed in NumPy >= 1.20/1.24)
    have been replaced by the builtins.
    """
    # Setup paths and output folders.
    rootpath = parser.get('nr-paths', 'rootpath')
    simulation_path_1 = parser.get('nr-paths', 'simulation_path_1')
    # Fall back to the first simulation when no second path is given.
    if parser.get('nr-paths', 'simulation_path_2'):
        simulation_path_2 = parser.get('nr-paths', 'simulation_path_2')
    else:
        simulation_path_2 = simulation_path_1
    metadata_file = parser.get('nr-paths', 'metadata_json')
    simulation_number = int(parser.get('nr-paths', 'simulation_number'))
    output_folder = parser.get('output-folder', 'output-folder')
    if parser.has_option('setup', 'export'):
        # NOTE: eval() of a config value -- the file must be trusted.
        export = eval(parser.get('setup', 'export'))
    else:
        export = True
    # Sampler and output options.
    overwrite = eval(parser.get('setup', 'overwrite'))
    # NOTE(review): downfactor is parsed but not returned -- kept for parity.
    downfactor = int(parser.get('setup', 'plot_down_factor'))
    sampler = parser.get('setup', 'sampler')
    nr_code = parser.get('setup', 'nr_code')
    if parser.has_option('setup', 'nb_cores'):
        nbcores = int(parser.get('setup', 'nb_cores'))
    else:
        nbcores = 1
    # Time shift, end and alignment options.
    tshift = float(parser.get('time-setup', 'tshift'))
    tend = float(parser.get('time-setup', 'tend'))
    t_align = float(parser.get('time-setup', 't_align'))
    # Number of tones and live points.
    nmax = int(parser.get('n-tones', 'nmax'))
    npoints = int(parser.get('n-live-points', 'npoints'))
    # Ringdown model options.
    model = parser.get('rd-model', 'model')
    error_str = eval(parser.get('rd-model', 'error_str'))
    fitnoise = eval(parser.get('rd-model', 'fit_noise'))
    if fitnoise:
        # With a sampled noise term the chain ends in (..., mass, spin, noise).
        l_int = 1
        index_mass = -3
        index_spin = -2
    else:
        index_mass = -2
        index_spin = -1
        l_int = 0
    if error_str:
        error_val = float(parser.get('rd-model', 'error_val'))
        if error_val == 0:
            error_type = ''
        else:
            error_type = error_val
    else:
        error_type = 'False'
        error_val = 0
    # Remnant mass and spin: from the SXS metadata file or the mock section.
    if nr_code == 'SXS':
        with open(metadata_file) as file:
            metadata = json.load(file)
        af = metadata['remnant_dimensionless_spin'][-1]
        mf = metadata['remnant_mass']
    else:
        mf = float(parser.get('rd-mock-parameters', 'mf'))
        af = float(parser.get('rd-mock-parameters', 'af'))
    if model == 'w-q':
        tau_var_str = 'q'
    else:
        tau_var_str = 'tau'
    if nr_code == 'Mock-data':
        nm_mock = int(parser.get('rd-mock-parameters', 'nm_mock'))
    else:
        nm_mock = None
    res = (simulation_path_1, simulation_path_2, metadata_file,
           simulation_number, output_folder, export, overwrite, sampler,
           nr_code, nbcores, tshift, tend, t_align, nmax, npoints, model,
           error_str, fitnoise, l_int, index_mass, index_spin, error_type,
           error_val, af, mf, tau_var_str, nm_mock)
    return res
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment