#!/usr/bin/env python
# coding: utf-8
# ### Let's try the NR_Interpolate for the 0.001 stepsize.
# In[57]:
#Import relevant modules, import data and all that
import numpy as np
from scipy import interpolate
import corner
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import rc
#plt.rcParams['font.family'] = 'DejaVu Sans'
#rc('text', usetex=True)
plt.rcParams.update({'font.size': 16.5})
import ptemcee
from pycbc.pool import choose_pool
import h5py
import inspect
import pandas as pd
import json
import qnm
import random
#Remember to change the following global variables:
#rootpath: root path to the NR data
#npoints: number of points you're using for your sampling
#nmax: tone index --> nmax = 0 if fitting the fundamental tone only
#tshift: time shift after the strain peak
#vary_fund: whether you vary the fundamental frequency. Used in the model_dv function.
#rootpath= "/Users/RayneLiu"#"/work/rayne.liu"
rootpath= "/work/francisco.jimenez/sio"#"/work/rayne.liu"
project_path=rootpath+"/git/rdstackingproject"
nmax=1
tshift=5
vary_fund = True
tsampling_factor=100
#sampler parameters
npoints = 2000
nwalkers = 1000
ntemps=16
#npoints = 100
#nwalkers = 32
#ntemps = 1
dim = nmax+1
ndim = 4*dim
burnin = 1000 #How many samples to discard before making the corner plot. Watch the convergence of the chain plot to choose it.
#This is trivial but often forgotten: burnin cannot exceed npoints! Usually 1/5 to 1/4 of npoints works well.
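#A cheap guard against that mistake (a small addition, not in the original notebook):
assert burnin < npoints, 'burnin must be smaller than npoints'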
#burnin = 20
numbins = 42 #corner plot parameter - how many bins you want
datacolor = '#105670' #'#4fa3a7'
pkcolor = '#f2c977' #'#ffb45f'
mediancolor = '#f7695c' #'#9b2814'
#Import data and necessary functions
#TimeOfMaximum
def FindTmaximum(y):
    #Returns the time at which the squared amplitude of the complex waveform peaks
    absval = y[:,1]*y[:,1] + y[:,2]*y[:,2]
    index = np.argmax(absval)
    timemax = y[index,0]
    return timemax
#This loads the 22 mode data
gw = {}
gw["SXS:BBH:0305"] = h5py.File(rootpath+"/git/rdstackingproject/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/rhOverM_Asymptotic_GeometricUnits_CoM.h5", 'r')
gw_sxs_bbh_0305 = gw["SXS:BBH:0305"]["Extrapolated_N2.dir"]["Y_l2_m2.dat"]
# Remember to download metadata.json from the simulation with number: 0305. Download Lev6/metadata.json
# This postprocesses the metadata file to find the final mass and final spin
metadata = {}
with open(rootpath+"/git/rdstackingproject/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/metadata.json") as file:
    metadata["SXS:BBH:0305"] = json.load(file)
af = metadata["SXS:BBH:0305"]['remnant_dimensionless_spin'][-1]
mf = metadata["SXS:BBH:0305"]['remnant_mass']
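#Optional sanity check on the metadata (a small addition, not in the original notebook):
#in SXS geometric units the remnant mass should sit a little below the total mass 1,
#and the dimensionless spin must respect the Kerr bound.
print('remnant mass mf =', mf, ' remnant spin af =', af)
assert 0 < mf < 1 and abs(af) < 1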
#times --> x axis of your data
times = gw_sxs_bbh_0305[:,0]
tmax=FindTmaximum(gw_sxs_bbh_0305)
t0=tmax +tshift
#Select the data from t0 onwards and keep the first 920 samples of the ringdown
position = np.argmax(times >= t0)
gw_sxs_bbh_0305rd = gw_sxs_bbh_0305[position:-1]
timesrd = gw_sxs_bbh_0305rd[:,0][:920]
#print(timesrd[0])
#print(t0) (This checks that timesrd[0] is indeed t0)
timespan = timesrd - t0
gwdata_re = gw_sxs_bbh_0305rd[:,1][:920]
gwdata_im = gw_sxs_bbh_0305rd[:,2][:920]
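#Optional alignment check (a small addition, not in the original notebook):
#the truncated time and strain arrays must match in length, and the window
#must start at the first sample at or after t0.
assert len(timesrd) == len(gwdata_re) == len(gwdata_im)
assert timesrd[0] >= t0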
# Depending on nmax, you load nmax number of freqs. and damping times from the qnm package
omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (0,dim)]
w = (np.real(omegas))/mf
tau=-1/(np.imag(omegas))*mf
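#Quick look at the loaded QNM spectrum (a small addition, not in the original
#notebook): overtones damp faster, so tau must decrease with tone index.
print('QNM frequencies w =', w)
print('QNM damping times tau =', tau)
assert all(tau[i] > tau[i+1] for i in range(dim-1))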
# In[58]:
chain_file = project_path+'/plotsmc/NR_Int'+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_tsampling='+str(tsampling_factor)+'_'+str(npoints)+'pt_chain.png'
chain_file_dat=project_path+'/plotsmc/NR_Int'+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_tsampling='+str(tsampling_factor)+'_'+str(npoints)+'pt_chain.csv'
corner_file = project_path+'/plotsmc/NR_Int'+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_tsampling='+str(tsampling_factor)+'_'+str(npoints)+'pt_corner.png'
# In[59]:
#Test plot (data was picked in the last cell)
plt.figure(figsize = (12, 8))
plt.plot(timespan, gwdata_re, "r", alpha=0.3, lw=3, label=r'$NR\_re$')
plt.plot(timespan, gwdata_im, "b", alpha=0.3, lw=3, label=r'$NR\_im$')
plt.legend()
# In[60]:
print(gwdata_re.shape)
# In[61]:
gwnew_re = interpolate.interp1d(timespan, gwdata_re, kind = 'cubic')
gwnew_im = interpolate.interp1d(timespan, gwdata_im, kind = 'cubic')
# In[63]:
timespan_new = np.linspace(tshift, timespan[-1], len(timespan)*tsampling_factor)
gwdatanew_re = gwnew_re(timespan_new)
gwdatanew_im = gwnew_im(timespan_new)
# timespan_new[-1]
# In[64]:
print(timespan_new[0])
# In[65]:
print(timespan_new.shape)
# In[66]:
#Test the new interpolated data
plt.figure(figsize = (12, 8))
plt.plot(timespan, gwdata_re, "r", alpha=0.3, lw=2, label='Before_re')
plt.plot(timespan_new, gwdatanew_re, "b", alpha=0.3, lw=2, label='After_re')
plt.plot(timespan, gwdata_im, alpha=0.3, lw=2, label='Before_im')
plt.plot(timespan_new, gwdatanew_im, alpha=0.3, lw=2, label='After_im')
plt.legend()
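#A numeric companion to the plot above (a small addition, not in the original
#notebook): a cubic interpolant passes through its nodes, so re-evaluating it
#at the original sample times should reproduce the data to machine precision.
print('max interpolation residual (re):', np.max(np.abs(gwnew_re(timespan) - gwdata_re)))
print('max interpolation residual (im):', np.max(np.abs(gwnew_im(timespan) - gwdata_im)))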
# ### Now the interpolation looks good in the plot above, so let's start sampling!
# In[67]:
#Fitting
#RD model for nmax tones. Amplitudes are in (xn*Exp[i yn]) version. Used here.
def model_dv(theta):
    #Your nmax might not align with the dim of theta. Better check it here.
    assert len(theta)//4 == dim, 'Please recheck your nmax and parameters'
    avars = theta[ : dim]
    bvars = theta[dim : 2*dim]
    xvars = theta[2*dim : 3*dim]
    yvars = theta[3*dim : ]
    if vary_fund == False:
        avars[0] = 0
        bvars[0] = 0
    ansatz = 0
    for i in range(0, dim):
        #bvars[1]=0
        #avars[1]=0
        ansatz += (xvars[i]*np.exp(1j*yvars[i]))*np.exp(-timespan_new/(tau[i]*(1+bvars[i]))) * (np.cos((1+avars[i])*w[i]*timespan_new)-1j*np.sin((1+avars[i])*w[i]*timespan_new))
        # -1j to agree with the SXS convention
    return ansatz
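#Cheap consistency check of the ansatz (a sketch with hypothetical test values):
#with all fractional deviations and phases zero and unit amplitudes, model_dv
#reduces to a sum of pure Kerr damped sinusoids and must return finite values
#on the resampled grid.
theta_check = np.zeros(ndim)
theta_check[2*dim:3*dim] = 1.0  #x_n = 1; alphas, betas, y_n all zero
h_check = model_dv(theta_check)
assert h_check.shape == timespan_new.shape and np.all(np.isfinite(h_check))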
# Logprior distribution. It defines the allowed ranges the variables can vary over.
#It works for the (xn*Exp[iyn]) version.
def log_prior(theta):
    #Warning: we are specifically working with nmax=1, so the individual priors on the parameters are adjusted by hand. They do not carry over to other nmax's.
    #avars = theta[ : (dim)]
    #bvars = theta[(dim) : 2*(dim)]
    #xvars = theta[2*(dim) : 3*(dim)]
    #yvars = theta[3*(dim) : ]
    alpha0, alpha1, beta0, beta1, xvar0, xvar1, yvar0, yvar1 = theta
    if all([-0.9 <= alpha0 <= 0.9, -0.9 <= alpha1 <= 0.9, -0.7 <= beta0 <= 2.0, -1.0 <= beta1 <= 2.2, 0 <= xvar0 <= 2.4, 0 <= xvar1 <= 3, -np.pi <= yvar0 <= np.pi, -np.pi <= yvar1 <= np.pi]):
        return 0.0
    """
    Older prior boxes, kept for reference:
    if nmax == 0:
        if all([0 <= tshift <= 5, vary_fund == True, -0.45 <= avars[0] <= 0.05, -0.95 <= bvars[0] <= 3.0, 0 <= xvars[0] <= 3.0, -np.pi <= yvars[0] <= np.pi]):
            return 0.0
        elif all([tshift == 19, vary_fund == True, -3.0 <= avars[0] <= 3.0, -2.0 <= bvars[0] <= 5.0, 0 <= xvars[0] <= 1.0, 0 <= yvars[0] <= 2*np.pi]):
            return 0.0
        if all([0 <= tshift <= 5, vary_fund == False, -1.0 <= avars[0] <= 1.0, -1.0 <= bvars[0] <= 1.0, 0 <= xvars[0] <= 3.0, -np.pi <= yvars[0] <= np.pi]):
            return 0.0
        if all([tshift == 19, vary_fund == False, -1.0 <= avars[0] <= 1.0, -1.0 <= bvars[0] <= 1.0, 0 <= xvars[0] <= 3.0, 0 <= yvars[0] <= 2*np.pi]):
            return 0.0
    elif nmax == 1:
        if all([0 <= tshift <= 5, vary_fund == True, -0.1 <= avars[0] <= 0.1, -0.32 <= avars[1] <= 0.1, -0.19 <= bvars[0] <= 1.0, 0. <= bvars[1] <= 1.5, 0 <= xvars[0] <= 2, 0 <= xvars[1] <= 4, -np.pi <= yvars[0] <= np.pi, -np.pi <= yvars[1] <= np.pi]):
            return 0.0
        elif all([tshift == 19, vary_fund == True, -10.0 <= avars[0] <= 10.0, -10.0 <= avars[1] <= 10.0, -20.0 <= bvars[0] <= 30.0, -25.0 <= bvars[1] <= 30.0, 0 <= xvars[0] <= 0.6, 0 <= xvars[1] <= 0.9, 0 <= yvars[0] <= 2*np.pi, -np.pi <= yvars[1] <= np.pi]):
            return 0.0
        elif all([0 <= tshift <= 5, vary_fund == False, -10.0 <= avars[0] <= 10.0, -1.5 <= avars[1] <= 1.5, -9.0 <= bvars[0] <= 9.0, -6.0 <= bvars[1] <= 20.0, 0 <= xvars[0] <= 2.4, 0 <= xvars[1] <= 2.5, -np.pi <= yvars[0] <= np.pi, -np.pi <= yvars[1] <= np.pi]):
            return 0.0
        elif all([tshift == 19, vary_fund == False, -10.0 <= avars[0] <= 10.0, -8.0 <= avars[1] <= 8.0, -9.0 <= bvars[0] <= 9.0, -10.0 <= bvars[1] <= 12.0, 0 <= xvars[0] <= 0.6, 0 <= xvars[1] <= 0.7, 0 <= yvars[0] <= 2*np.pi, 0 <= yvars[1] <= 2*np.pi]):
            return 0.0
    """
    return -np.inf
# LogLikelihood function: an (unnormalised) Gaussian loglikelihood built from the squared residuals
def log_likelihood(theta):
    modelev = model_dv(theta)
    result = -np.sum((gwdatanew_re - modelev.real)**2 + (gwdatanew_im - modelev.imag)**2)
    if np.isnan(result):
        return -np.inf
    return result
# Logposterior distribution for the residuals case.
# The evidence is just a normalization factor
def log_probability(theta):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta)
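#Smoke test of the full posterior (a sketch with hypothetical test points):
#a point inside the prior box must give a finite log posterior, while a point
#outside must return -inf.
theta_in = np.array([0., 0., 0., 0., 1., 1., 0., 0.])
theta_out = np.array([5., 0., 0., 0., 1., 1., 0., 0.])  #alpha0 outside [-0.9, 0.9]
assert np.isfinite(log_probability(theta_in))
assert log_probability(theta_out) == -np.inf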
# In[68]:
#Check if my fit functions are correct using scipy.minimize
from scipy.optimize import minimize
np.random.seed(42)
nll = lambda *args: -log_likelihood(*args)
#This assigns the initial guess
initial = np.array([0, 0, 0, 0, 1, 1, 1, 1])
soln = minimize(nll, initial)
print("Maximum likelihood estimates:") #Maximum likelihood: minimum -log_likelihood. Log_likelihood is easier to calculate
vars_ml=soln.x
print(vars_ml)
#Now plot the NR data against the ansatz data
plt.plot(timespan_new, gwdatanew_re, "r", alpha=0.3, lw=3, label=r'$NR\_re$')
modelfit = model_dv(vars_ml)
plt.plot(timespan_new, modelfit.real,"b", alpha=0.3, lw=3, label=r'$Fit\_re$')
#plt.plot(x0, np.dot(np.vander(x0, 2), w), "--k", label="LS")
plt.legend(fontsize=14)
plt.xlabel("t")
plt.ylabel("h");
# In[13]:
#Ok, nice. Now let's do ptemcee...
np.random.seed(42)
#Start all walkers in a tight ball around one random point inside the prior box
pos = np.array([random.uniform(-0.1,0.), random.uniform(-0.1,0.), random.uniform(-0.1,0.), random.uniform(-0.1,0.), random.uniform(0,1), random.uniform(0, 1), random.uniform(0.5, 0.6), random.uniform(0.5, 0.6)])
pos = pos + 1e-5 * np.random.randn(ntemps, nwalkers, ndim) #broadcasts to shape (ntemps, nwalkers, ndim)
sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps)
sampler.run_mcmc(pos, npoints)
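#A crude, numpy-only convergence glance (a sketch; assumes the ptemcee chain
#is stored as (ntemps, nwalkers, nsteps, ndim) with the cold chain at index 0):
#the across-walker spread should stabilise once the walkers have burnt in.
cold = sampler.chain[0]
print('across-walker std, first 10% of steps:', np.std(cold[:, :npoints//10, :], axis=(0, 1)))
print('across-walker std, last 10% of steps: ', np.std(cold[:, -npoints//10:, :], axis=(0, 1)))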
#dim is already nmax+1 = 2 here; build one label per tone for each parameter type
paramlabels_a = [r'$\alpha_'+str(i)+'$' for i in range(dim)]
paramlabels_b = [r'$\beta_'+str(i)+'$' for i in range(dim)]
paramlabels_x = [r'$x_'+str(i)+'$' for i in range(dim)]
paramlabels_y = [r'$y_'+str(i)+'$' for i in range(dim)]
paramlabels = paramlabels_a + paramlabels_b + paramlabels_x + paramlabels_y
print('The chain plot:')
#Chain plot
fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(12, 4*4))
for i in range(ndim):
    axes[i].plot(sampler.chain[0,:, :, i].T, color="k", alpha=0.4, rasterized=True)
    axes[i].yaxis.set_major_locator(MaxNLocator(5))
    axes[i].set_ylabel(paramlabels[i])
axes[-1].set_xlabel('Iterations')
plt.show()
print('We\'re using ptemcee. Our constraints:')
#Burn samples, calculate peak likelihood value (not necessarily so in atlas) and make corner plot
samples = sampler.chain[0,:, burnin:, :].reshape((-1, ndim))
#samples for corner plot
samples_corn = samples #if vary_fund == True else np.delete(samples, np.s_[0,2], 1)
#print('Values with peak likelihood:')
lglk = np.array([log_likelihood(samples[i]) for i in range(len(samples))])
pk = samples[np.argmax(lglk)]
#print('pk:')
#print(pk)
pk_corn = pk #if vary_fund == True else np.delete(pk, [0,2])
#The y_0 range needs some massaging to make the plot, but to keep the whole picture consistent, better change the range of y_1 too.
#if vary_fund == False:
# samples_corn.T[-dim:] -= np.pi #This indeed changes samples_corn itself
# pk[-dim:] -= np.pi
#print('pkFalse:')
#print(pk)
#print(pk)
#Now calculate median (50-percentile) value
median = np.median(samples_corn, axis=0)
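#Report symmetric 90% credible intervals alongside the medians (a small
#numpy-only addition; the percentiles mirror the 0.05/0.95 quantiles passed
#to corner below).
lo5, hi95 = np.percentile(samples_corn, [5, 95], axis=0)
for label, med, lo, hi in zip(paramlabels, median, lo5, hi95):
    print('{}: {:.4f} (+{:.4f}/-{:.4f})'.format(label, med, hi - med, med - lo))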
#print(samples)
#print(samples_corn)
figcorn = corner.corner(samples_corn, bins = numbins, hist_bin_factor = 5, color = datacolor, truths=pk_corn, truth_color = pkcolor, plot_contours = True, labels = paramlabels, quantiles=(0.05, 0.16, 0.5, 0.84, 0.95), levels=[1-np.exp(-0.5), 1-np.exp(-1.64 ** 2/2)], show_titles=True)
#Extract the axes in order to add more important line plots
naxes = len(pk_corn)
axes = np.array(figcorn.axes).reshape((naxes, naxes))
# Loop over the diagonal panels
for i in range(naxes):
    ax = axes[i, i]
    ax.axvline(median[i], color=mediancolor)
# Loop over the off-diagonal 2D histograms
for yi in range(naxes):
    for xi in range(yi):
        ax = axes[yi, xi]
        ax.axvline(median[xi], color=mediancolor)
        ax.axhline(median[yi], color=mediancolor)
        ax.plot(median[xi], median[yi], color=mediancolor, marker='s')
fig.savefig(chain_file, format='png', dpi=384, bbox_inches='tight')
out = np.concatenate(sampler.chain[0,:])
np.savetxt(chain_file_dat, out) #default float format; fmt='%d' would truncate the samples to integers
figcorn.savefig(corner_file, format='png', dpi=384, bbox_inches='tight')
# In[ ]: