From 7ff41448d95530d56e731cbc720e85252d9dfc65 Mon Sep 17 00:00:00 2001
From: Rayne Liu <rl746@cornell.edu>
Date: Mon, 17 Aug 2020 22:08:40 -0400
Subject: [PATCH] Saved the covariance and correlation matrices as well as the logevidence, loosened parameter ranges, suppressed warning

---
 code/RDGW150914_ptemcee1.py | 48 +++++++++++++++++++++++++++----------
 code/RDGW150914_ptemcee2.py | 37 ++++++++++++++++++++++------
 code/RDGW150914_ptemcee3.py | 37 ++++++++++++++++++++++------
 code/RDGW150914_ptemcee4.py | 37 ++++++++++++++++++++++------
 4 files changed, 126 insertions(+), 33 deletions(-)

diff --git a/code/RDGW150914_ptemcee1.py b/code/RDGW150914_ptemcee1.py
index d31fa93..422dbae 100755
--- a/code/RDGW150914_ptemcee1.py
+++ b/code/RDGW150914_ptemcee1.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 # coding: utf-8
+import warnings
+warnings.simplefilter("ignore") #Used to suppress the RuntimeWarnings. But if you are changing the code for TESTING, MAKE SURE TO COMMENT IT OUT.
 '''
 This script calculates the RDGW150914 constraints with ptemcee - specifically, the n=1 case both varying or not varying the fundamental frequencies.
 It produces the chain plot, corner plot, parameter constraints, and data plotted with the 1-sigma band. Since we are working specifically for the n=1 case, we also add in the corner plot the combined chain of alpha_0 and alpha_1, in order to demonstrate that the two are indistinguishable with each other.
@@ -39,11 +41,11 @@ tshift=3.6
 vary_fund = True

 #sampler parameters
-npoints=60002
+npoints=1002
 nwalkers = 42
 ntemps=12
 ndim = int(4*(nmax+1))
-burnin = 5000 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
+burnin = 500 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
 #This is trivial but often forgotten: this cannot be more than npoints! Usually 1/5~1/4 npoints is what I observe.
 numbins = 42 #corner plot parameter - how many bins you want
 datacolor = '#105670' #'#4fa3a7'
@@ -137,15 +139,15 @@ def log_prior(theta):
     y_0 = theta[6]
     y_1 = theta[7]

-    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -1.0 <= b_0 <= 9.0, -1.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -2.0 <= b_0 <= 9.0, -2.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 10.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -2.0 <= b_0 <= 10.0, -2.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     #PAY EXTRA ATTENTION TO THESE TWO CASES, SINCE WE SHIFTED y_0. THE CORNER PLOTS LABELS NEED TO BE TREATED WITH CARE.
-    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
+    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     return -np.inf
@@ -154,8 +156,10 @@ def log_prior(theta):
 # LogLikelihood function. It is just a Gaussian loglikelihood based on computing the residuals^2
 def log_likelihood(theta):
     modelev = model_dv(theta)
-    return -np.sum((gw_sxs_bbh_0305rd[:,1] - (modelev.real))**2+(gw_sxs_bbh_0305rd[:,2] - (modelev.imag))**2)
-
+    result = -np.sum((gw_sxs_bbh_0305rd[:,1] - (modelev.real))**2+(gw_sxs_bbh_0305rd[:,2] - (modelev.imag))**2)
+    if np.isnan(result):
+        return -np.inf
+    return result

 # Logposterior distribution for the residuals case.
 # The evidence is just a normalization factor
@@ -172,9 +176,10 @@ def log_probability(theta):

 #Fit with ptemcee
 #Set the number of cores of your processors
-pool = choose_pool(4)
-pool.size = 4
+pool = choose_pool(12)
+pool.size = 12
 vary_param = float(vary_fund)
+np.random.seed(42)
 pos = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), 4.28313743e-01, random.uniform(2.5, 2.6) + (1-vary_param) * np.pi]])
 for i in range (1,nmax+1):
     pos_aux = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), random.uniform(0.3,0.4), random.uniform(2.01, 2.02) + (1-vary_param) * np.pi]])
@@ -255,11 +260,30 @@ for yi in range(naxes):

 figcorn.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_corner.pdf', format = 'pdf')

+#Now export the covariance and correlation matrices, as well as the logevidence
+#The covariant matrix
+samplesT = samples.T
+cov_matr=np.cov(samplesT)
+#print('Covariant matrix')
+df1 = pd.DataFrame(cov_matr, index = paramlabels, columns=paramlabels)
+df1.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.h5', key='df')
+
+#The correlation matrix
+corr_matr=np.corrcoef(samplesT)
+#print('Correlation matrix')
+df2 = pd.DataFrame(corr_matr,index = paramlabels, columns=paramlabels)
+df2.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.h5', key='df')
+
+#The logevidence (with error) and save it
+logevidence = sampler.log_evidence_estimate(fburnin=0.2)
+df3 = pd.DataFrame(logevidence,index = [r'logevidence', r'error'], columns=[r'vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
+df3.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.h5', key='df')
+
 #Now plot alpha_0 on top of alpha_1 - only on top, not stacked, and only valid for vary_fund == True
 if vary_fund == True:
-    samplea0 = samples.T[0]
-    samplea1 = samples.T[1]
+    samplea0 = samplesT[0]
+    samplea1 = samplesT[1]
     fighist1 = plt.figure(figsize = (8, 6))
     n0, bins0, patches0 = plt.hist(samplea0, bins = numbins * 6, alpha = 0.5, label = r'$\alpha_0$')
     n1, bins1, patches1 = plt.hist(samplea1, bins = numbins * 10, alpha = 0.5, label = r'$\alpha_1$')
diff --git a/code/RDGW150914_ptemcee2.py b/code/RDGW150914_ptemcee2.py
index e24ae8e..7eea70a 100755
--- a/code/RDGW150914_ptemcee2.py
+++ b/code/RDGW150914_ptemcee2.py
@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 # coding: utf-8
+import warnings
+warnings.simplefilter("ignore") #Used to suppress the RuntimeWarnings. But if you are changing the code for TESTING, MAKE SURE TO COMMENT IT OUT.
 '''
 This script calculates the RDGW150914 constraints with ptemcee - specifically, the n=1 case both varying or not varying the fundamental frequencies.
 It produces the chain plot, corner plot, parameter constraints, and data plotted with the 1-sigma band. Since we are working specifically for the n=1 case, we also add in the corner plot the combined chain of alpha_0 and alpha_1, in order to demonstrate that the two are indistinguishable with each other.
@@ -40,7 +42,7 @@ vary_fund = True

 #sampler parameters
 npoints=60001
-nwalkers = 42
+nwalkers = 840
 ntemps=12
 ndim = int(4*(nmax+1))
 burnin = 5000 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
@@ -137,15 +139,15 @@ def log_prior(theta):
     y_0 = theta[6]
     y_1 = theta[7]

-    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -1.0 <= b_0 <= 9.0, -1.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -2.0 <= b_0 <= 9.0, -2.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 10.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -2.0 <= b_0 <= 10.0, -2.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     #PAY EXTRA ATTENTION TO THESE TWO CASES, SINCE WE SHIFTED y_0. THE CORNER PLOTS LABELS NEED TO BE TREATED WITH CARE.
-    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
+    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     return -np.inf
@@ -172,9 +174,10 @@ def log_probability(theta):

 #Fit with ptemcee
 #Set the number of cores of your processors
-pool = choose_pool(4)
-pool.size = 4
+pool = choose_pool(12)
+pool.size = 12
 vary_param = float(vary_fund)
+np.random.seed(42)
 pos = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), 4.28313743e-01, random.uniform(2.5, 2.6) + (1-vary_param) * np.pi]])
 for i in range (1,nmax+1):
     pos_aux = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), random.uniform(0.3,0.4), random.uniform(2.01, 2.02) + (1-vary_param) * np.pi]])
@@ -256,6 +259,26 @@ for yi in range(naxes):

 figcorn.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_corner.pdf', format = 'pdf')

+#Now export the covariance and correlation matrices, as well as the logevidence
+#The covariant matrix
+samplesT = samples.T
+cov_matr=np.cov(samplesT)
+#print('Covariant matrix')
+df1 = pd.DataFrame(cov_matr, index = paramlabels, columns=paramlabels)
+df1.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.h5', key='df')
+
+#The correlation matrix
+corr_matr=np.corrcoef(samplesT)
+#print('Correlation matrix')
+df2 = pd.DataFrame(corr_matr,index = paramlabels, columns=paramlabels)
+df2.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.h5', key='df')
+
+
+#The logevidence (with error) and save it
+logevidence = sampler.log_evidence_estimate(fburnin=0.2)
+df3 = pd.DataFrame(logevidence,index = [r'logevidence', r'error'], columns=[r'vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
+df3.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.h5', key='df')
+
 #Now plot alpha_0 on top of alpha_1 - only on top, not stacked, and only valid for vary_fund == True
 if vary_fund == True:
diff --git a/code/RDGW150914_ptemcee3.py b/code/RDGW150914_ptemcee3.py
index 4183c17..c77da00 100755
--- a/code/RDGW150914_ptemcee3.py
+++ b/code/RDGW150914_ptemcee3.py
@@ -1,5 +1,8 @@
 #!/usr/bin/env python
 # coding: utf-8
+import warnings
+warnings.simplefilter("ignore") #Used to suppress the RuntimeWarnings. But if you are changing the code for TESTING, MAKE SURE TO COMMENT IT OUT.
+
 '''
 This script calculates the RDGW150914 constraints with ptemcee - specifically, the n=1 case both varying or not varying the fundamental frequencies.
 It produces the chain plot, corner plot, parameter constraints, and data plotted with the 1-sigma band. Since we are working specifically for the n=1 case, we also add in the corner plot the combined chain of alpha_0 and alpha_1, in order to demonstrate that the two are indistinguishable with each other.
@@ -40,7 +43,7 @@ vary_fund = False

 #sampler parameters
 npoints=60000
-nwalkers = 42
+nwalkers = 840
 ntemps=12
 ndim = int(4*(nmax+1))
 burnin = 5000 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
@@ -137,15 +140,15 @@ def log_prior(theta):
     y_0 = theta[6]
     y_1 = theta[7]

-    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -1.0 <= b_0 <= 9.0, -1.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -2.0 <= b_0 <= 9.0, -2.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 10.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -2.0 <= b_0 <= 10.0, -2.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     #PAY EXTRA ATTENTION TO THESE TWO CASES, SINCE WE SHIFTED y_0. THE CORNER PLOTS LABELS NEED TO BE TREATED WITH CARE.
-    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
+    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     return -np.inf
@@ -172,9 +175,10 @@ def log_probability(theta):

 #Fit with ptemcee
 #Set the number of cores of your processors
-pool = choose_pool(6)
-pool.size = 6
+pool = choose_pool(12)
+pool.size = 12
 vary_param = float(vary_fund)
+np.random.seed(42)
 pos = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), 4.28313743e-01, random.uniform(2.5, 2.6) + (1-vary_param) * np.pi]])
 for i in range (1,nmax+1):
     pos_aux = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), random.uniform(0.3,0.4), random.uniform(2.01, 2.02) + (1-vary_param) * np.pi]])
@@ -263,6 +267,25 @@ for yi in range(naxes):

 figcorn.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_corner.pdf', format = 'pdf')

+#Now export the covariance and correlation matrices, as well as the logevidence
+#The covariant matrix
+samplesT = samples.T
+cov_matr=np.cov(samplesT)
+#print('Covariant matrix')
+df1 = pd.DataFrame(cov_matr, index = paramlabels, columns=paramlabels)
+df1.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.h5', key='df')
+
+#The correlation matrix
+corr_matr=np.corrcoef(samplesT)
+#print('Correlation matrix')
+df2 = pd.DataFrame(corr_matr,index = paramlabels, columns=paramlabels)
+df2.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.h5', key='df')
+
+#The logevidence (with error) and save it
+logevidence = sampler.log_evidence_estimate(fburnin=0.2)
+df3 = pd.DataFrame(logevidence,index = [r'logevidence', r'error'], columns=[r'vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
+df3.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.h5', key='df')
+
 #Now plot alpha_0 on top of alpha_1 - only on top, not stacked, and only valid for vary_fund == True
 if vary_fund == True:
diff --git a/code/RDGW150914_ptemcee4.py b/code/RDGW150914_ptemcee4.py
index 6124cdc..f199312 100755
--- a/code/RDGW150914_ptemcee4.py
+++ b/code/RDGW150914_ptemcee4.py
@@ -1,5 +1,8 @@
 #!/usr/bin/env python
 # coding: utf-8
+import warnings
+warnings.simplefilter("ignore") #Used to suppress the RuntimeWarnings. But if you are changing the code for TESTING, MAKE SURE TO COMMENT IT OUT.
+
 '''
 This script calculates the RDGW150914 constraints with ptemcee - specifically, the n=1 case both varying or not varying the fundamental frequencies.
 It produces the chain plot, corner plot, parameter constraints, and data plotted with the 1-sigma band. Since we are working specifically for the n=1 case, we also add in the corner plot the combined chain of alpha_0 and alpha_1, in order to demonstrate that the two are indistinguishable with each other.
@@ -40,7 +43,7 @@ vary_fund = False

 #sampler parameters
 npoints=1002
-nwalkers = 42
+nwalkers = 840
 ntemps=12
 ndim = int(4*(nmax+1))
 burnin = 20 #How many points do you burn before doing the corner plot. You need to watch the convergence of the chain plot a bit.
@@ -137,15 +140,15 @@ def log_prior(theta):
     y_0 = theta[6]
     y_1 = theta[7]

-    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -1.0 <= b_0 <= 9.0, -1.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    if all([nmax == 1, 0 <= tshift <= 5, vary_fund == True, -0.8 <= a_0 <= 0.8, -1.0 <= a_1 <= 1.0, -2.0 <= b_0 <= 9.0, -2.0 <= b_1 <= 21.0, 0 <= x_0 <= 1.6, 0 <= x_1 <= 1.6, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 10.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == True, -10.0 <= a_0 <= 10.0, -10.0 <= a_1 <= 10.0, -2.0 <= b_0 <= 10.0, -2.0 <= b_1 <= 10.0, 0 <= x_0 <= 1.0, 0 <= x_1 <= 1.2, 0 <= y_0 <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     #PAY EXTRA ATTENTION TO THESE TWO CASES, SINCE WE SHIFTED y_0. THE CORNER PLOTS LABELS NEED TO BE TREATED WITH CARE.
-    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
+    elif all([nmax == 1, 0 <= tshift <= 5, vary_fund == False, -1.0 <= a_0 <= 1.0, -1.5 <= a_1 <= 1.5, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 12.0, 0 <= x_0 <= 2.0, 0 <= x_1 <= 2.4, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi,]):
         return 0.0
-    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -1.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
+    elif all([nmax == 1, tshift == 19, vary_fund == False, -1.0 <= a_0 <= 1.0, -10.0 <= a_1 <= 10.0, -1.0 <= b_0 <= 1.0, -3.0 <= b_1 <= 10.0, 0 <= x_0 <= 0.6, 0 <= x_1 <= 1.2, 0 <= y_0-np.pi <= 2*np.pi, 0 <= y_1 <= 2*np.pi]):
         return 0.0

     return -np.inf
@@ -172,9 +175,10 @@ def log_probability(theta):

 #Fit with ptemcee
 #Set the number of cores of your processors
-pool = choose_pool(6)
-pool.size = 6
+pool = choose_pool(12)
+pool.size = 12
 vary_param = float(vary_fund)
+np.random.seed(42)
 pos = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), 4.28313743e-01, random.uniform(2.5, 2.6) + (1-vary_param) * np.pi]])
 for i in range (1,nmax+1):
     pos_aux = np.array([[random.uniform(-0.1,0.1), random.uniform(-0.1,0.1), random.uniform(0.3,0.4), random.uniform(2.01, 2.02) + (1-vary_param) * np.pi]])
@@ -262,6 +266,25 @@ for yi in range(naxes):

 figcorn.savefig(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_corner.pdf', format = 'pdf')

+#Now export the covariance and correlation matrices, as well as the logevidence
+#The covariant matrix
+samplesT = samples.T
+cov_matr=np.cov(samplesT)
+#print('Covariant matrix')
+df1 = pd.DataFrame(cov_matr, index = paramlabels, columns=paramlabels)
+df1.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_covariance.h5', key='df')
+
+#The correlation matrix
+corr_matr=np.corrcoef(samplesT)
+#print('Correlation matrix')
+df2 = pd.DataFrame(corr_matr,index = paramlabels, columns=paramlabels)
+df2.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_correlation.h5', key='df')
+
+#The logevidence (with error) and save it
+logevidence = sampler.log_evidence_estimate(fburnin=0.2)
+df3 = pd.DataFrame(logevidence,index = [r'logevidence', r'error'], columns=[r'vary='+str(vary_fund)+', nmax='+str(nmax)+', tshift='+str(tshift)+', '+str(npoints)+'pts'])
+df3.to_hdf(rootpath+'/git/rdstackingproject/plotsmc/vary'+str(vary_fund)+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_logevidence.h5', key='df')
+
 #Now plot alpha_0 on top of alpha_1 - only on top, not stacked, and only valid for vary_fund == True
 if vary_fund == True:
-- 
GitLab
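
Note (not part of the patch): a minimal sketch of how the exported HDF5 files could be read back for a quick check, assuming the file-naming convention used in the scripts above and that pandas with a PyTables backend is installed. The rootpath and run parameters below are placeholders, not values taken from an actual run.

    import pandas as pd

    # Placeholders: set these to match the run whose output you want to inspect.
    rootpath = '/path/to/work'
    vary_fund, nmax, tshift, npoints = True, 1, 3.6, 1002

    # Same file-name convention as in the scripts above.
    tag = 'vary' + str(vary_fund) + 'nmax=' + str(nmax) + '_tshift=' + str(tshift) + '_' + str(npoints) + 'pt'
    base = rootpath + '/git/rdstackingproject/plotsmc/' + tag

    cov = pd.read_hdf(base + '_covariance.h5', key='df')    # parameter covariance matrix
    corr = pd.read_hdf(base + '_correlation.h5', key='df')  # parameter correlation matrix
    logz = pd.read_hdf(base + '_logevidence.h5', key='df')  # log-evidence estimate and its error

    print(corr.round(2))
    print(logz)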