Commit de9ae517 authored by Rayne Liu

Rising from the ashes

parent 25ad86c2
......@@ -4,13 +4,9 @@ import warnings
warnings.simplefilter("ignore") #Used to suppress the RuntimeWarnings. But if you are changing the code for TESTING, MAKE SURE TO COMMENT IT OUT.
'''
This script calculates the RDGW150914 constraints with ptemcee - specifically, the n=1 case varying the fundamental frequencies, tshift = 0. It produces the chain plot, corner plot, parameter constraints, and data plotted with the 1-sigma band. Since we are working specifically with the n=1 case, we also compare alpha_0 and alpha_1 in a histogram, in order to demonstrate that the two are indistinguishable from each other.
Rising from the ashes.
--- R^a_{yne} L^i_u, 08/09/2020
Adapted to include the nmax = 0 case.
--- R^a_{yne} L^i_u, 08/20/2020, don't know what to do now
--- R^a_{yne} L^i_u, 08/22/2020
'''
import numpy as np
......@@ -38,18 +34,18 @@ import random
#tshift: time shift after the strain peak
#vary_fund: whether you vary the fundamental frequency. Works in the model_dv function.
rootpath= "/work/rayne.liu"#"/Users/RayneLiu"
rootpath= "/Users/RayneLiu"#"/work/rayne.liu"
nmax=0
tshift=0
vary_fund = False
#sampler parameters
npoints = 2512
nwalkers = 1256
npoints = 240
nwalkers = 120
ntemps=16
dim = nmax+1
ndim = 4*dim
burnin = 1256 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
burnin = 120 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
#This is trivial but often forgotten: burnin cannot be more than npoints! Usually 1/5~1/4 of npoints is what I observe.
numbins = 42 #corner plot parameter - how many bins you want
datacolor = '#105670' #'#4fa3a7'
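For orientation, the sampler parameters above feed into ptemcee roughly as in the following minimal sketch (illustrative only, not the script's actual setup; the log-prior function name and the uniform starting box for the walkers are placeholders, while log_likelihood is the function used further below):

    import numpy as np
    import ptemcee

    # One set of nwalkers walkers per temperature; ndim = 4*(nmax+1) parameters each.
    pos0 = np.random.uniform(-0.1, 0.1, (ntemps, nwalkers, ndim))  # hypothetical initial positions
    sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps)
    for _ in sampler.sample(pos0, iterations=npoints):
        pass
    # Cold-temperature chain with the first `burnin` iterations dropped, as done further below:
    flat_samples = sampler.chain[0, :, burnin:, :].reshape(-1, ndim)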
......@@ -226,7 +222,7 @@ for i in range(ndim):
axes[i].set_ylabel(paramlabels[i])
axes[-1].set_xlabel('Iterations')
#plt.show()
fig.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_chain.png', format = 'png', bbox_inches = 'tight')
fig.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_chainplot.png', format = 'png', bbox_inches = 'tight')
......@@ -234,6 +230,9 @@ fig.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax=
samples = sampler.chain[0,:, burnin:, :].reshape((-1, ndim))
#samples for corner plot
samples_corn = samples if vary_fund == True else np.delete(samples, np.s_[0,dim], 1)
df0 = pd.DataFrame(samples_corn, columns=paramlabels_corner)
df0.to_csv(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_chain.csv', index = False)
#print('Values with peak likelihood:')
lglk = np.array([log_likelihood(samples[i]) for i in range(len(samples))])
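As a side note on the np.delete call above: when vary_fund == False, columns 0 and dim are the alpha_0 and beta_0 columns (assuming the parameter ordering is the alpha's, then the beta's, then the amplitudes and phases), so dropping them leaves only the amplitude and phase columns for the corner plot. A toy illustration with made-up numbers:

    import numpy as np
    dim = 1                                   # nmax + 1 with nmax = 0
    toy = np.arange(8).reshape(2, 4)          # 2 samples x 4 params: alpha_0, beta_0, A_0, phi_0
    print(np.delete(toy, np.s_[0, dim], 1))   # leaves [[2 3], [6 7]]: only the A_0 and phi_0 columns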
......@@ -283,23 +282,23 @@ samplesT = samples_corn.T
cov_matr=np.cov(samplesT)
#print('Covariance matrix')
df1 = pd.DataFrame(cov_matr, index = paramlabels_corner, columns=paramlabels_corner)
df1.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.h5', key='df')
df1.to_csv(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.csv')
#The correlation matrix
corr_matr=np.corrcoef(samplesT)
#print('Correlation matrix')
df2 = pd.DataFrame(corr_matr,index = paramlabels_corner, columns=paramlabels_corner)
df2.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.h5', key='df')
df2.to_csv(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.csv')
#The logevidence (with error) and save it
logevidence = sampler.log_evidence_estimate(fburnin=0.2)
df3 = pd.DataFrame(logevidence,index = [r'logevidence', r'error'], columns=[r'vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
df3.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.h5', key='df')
df3.to_csv(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.csv')
#The autocorrelation time. This time I don't intend to delete alpha0 and beta0; just keep the NaNs there, since we don't really use this data, we only look at it. It's good to keep for sanity check's sake.
atcrr = sampler.get_autocorr_time()[0]
df4 = pd.DataFrame(atcrr,index = paramlabels, columns=[r'Autocorrelation time, vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
df4.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_autocrrtime.h5', key='df')
df4.to_csv(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_autocrrtime.csv')
#Now plot alpha_0 on top of alpha_1 - only on top, not stacked, and only valid for vary_fund == True
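If the script is run with vary_fund == True and nmax >= 1, the overlay described in this comment is simply two unstacked histograms on the same axes; a minimal matplotlib sketch (styling and output filename are placeholders, not the script's actual ones), assuming the flattened samples array and the column ordering noted above:

    import matplotlib.pyplot as plt
    figh, axh = plt.subplots()
    axh.hist(samples[:, 0], bins=numbins, alpha=0.5, density=True, label=r'$\alpha_0$')
    axh.hist(samples[:, 1], bins=numbins, alpha=0.5, density=True, label=r'$\alpha_1$')
    axh.legend()
    figh.savefig(rootpath + '/git/rdstackingproject/plotsmc/alpha0_alpha1_overlay.png', bbox_inches='tight')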
......
universe = vanilla
getenv = true
# run script -- make sure that condor has execute permission for this file (chmod a+x script.py)
executable = RDGW150914_Phoenix_n0t0.py
# file to dump stdout (this directory should exist)
output = RDGW150914_Phoenix_n0t0.out
# file to dump stderr
error = RDGW150914_Phoenix_n0t0.err
# condor logs
log = RDGW150914_Phoenix_n0t0.log
initialdir = .
notify_user = rl746@cornell.edu
notification = Complete
arguments = "-processid $(Process)"
request_memory = 232GB
request_cpus = 16
on_exit_remove = (ExitBySignal == False) || ((ExitBySignal == True) && (ExitSignal != 11))
accounting_group = aei.dev.test_dynesty
queue 1
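For reference, a submit description like the one above is typically used with the standard HTCondor commands (the .sub filename below is hypothetical; only the executable name comes from the file itself):

    chmod a+x RDGW150914_Phoenix_n0t0.py        # give condor execute permission, as noted in the comment above
    condor_submit RDGW150914_Phoenix_n0t0.sub   # submit the job (hypothetical filename for this description)
    condor_q                                    # monitor the job in the queue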