From b6f9cb22c610bcdd93a48c8aa58c76a34b8eca3a Mon Sep 17 00:00:00 2001
From: Rayne Liu <rl746@cornell.edu>
Date: Wed, 26 Aug 2020 00:35:34 -0400
Subject: [PATCH] The test case

---
 code/A_test_with_n=1.py             | 137 ++++++++++++++++++++++++++++
 code/condor_submit_Rdowntestn=1.sub |  20 ++++
 2 files changed, 157 insertions(+)
 create mode 100644 code/A_test_with_n=1.py
 create mode 100644 code/condor_submit_Rdowntestn=1.sub

diff --git a/code/A_test_with_n=1.py b/code/A_test_with_n=1.py
new file mode 100644
index 0000000..e9da062
--- /dev/null
+++ b/code/A_test_with_n=1.py
@@ -0,0 +1,137 @@
+"""
+In this script I use mock data generated by combining the n=0 and n=1 overtones, and sample it with the ptemcee sampler under the n=1 scenario. It seems that either there is some fundamental internal problem that prevents us from separating alpha_0 from alpha_1, or ptemcee does not work well for this problem. Either way, something may be going on with our approach for this entire project...
+
+--- R^a_{yne} L^i_u
+"""
+
+import numpy as np
+import corner
+import matplotlib.pyplot as plt
+from matplotlib.ticker import MaxNLocator
+from matplotlib import rc
+plt.rcParams['font.family'] = 'DejaVu Sans'
+rc('text', usetex=True)
+plt.rcParams.update({'font.size': 19})
+import pandas as pd
+import ptemcee
+import qnm
+import random
+
+
+ntemps = 20
+nwalkers = 100
+npoints = 150
+ndim = 8
+burnin = 75
+numbins = 42
+datacolor = '#105670' #'#4fa3a7'
+pkcolor = '#f2c977' #'#ffb45f'
+mediancolor = '#f7695c' #'#9b2814'
+rootpath = "/Users/RayneLiu"
+
+
+t = np.arange(0, 80, 0.1)
+w0, tau0, x0, y0 = [0.55578191, 11.74090006, 0.978518, -2.11289]
+# Can get the w and tau from the example notebook, and the amplitude and phase from the 1910 paper
+w1, tau1, x1, y1 = [0.54351639, 3.88312743, 4.29435, 1.38519]
+mockdata = x0*np.exp(1j*y0)*np.exp(-t/tau0) * (np.cos(w0*t)-1j*np.sin(w0*t)) + \
+           x1*np.exp(1j*y1)*np.exp(-t/tau1) * (np.cos(w1*t)-1j*np.sin(w1*t))
+
+
+def modelmock(theta):
+    """
+    theta: comprises alpha0, alpha1, beta0, beta1, x0, x1, y0, y1
+    """
+
+    alpha0, alpha1, beta0, beta1, xvar0, xvar1, yvar0, yvar1 = theta
+    tauvar0 = tau0*(1+beta0)
+    wvar0 = w0*(1+alpha0)
+    tauvar1 = tau1*(1+beta1)
+    wvar1 = w1*(1+alpha1)
+    ansatz = (xvar0*np.exp(1j*yvar0))*np.exp(-t/tauvar0)*(np.cos(wvar0*t)-1j*np.sin(wvar0*t)) + \
+             (xvar1*np.exp(1j*yvar1))*np.exp(-t/tauvar1)*(np.cos(wvar1*t)-1j*np.sin(wvar1*t))
+    # -1j to agree with the SXS convention
+    return ansatz
+
+def log_prior(theta):
+    alpha0, alpha1, beta0, beta1, xvar0, xvar1, yvar0, yvar1 = theta
+
+    if all([-0.4 <= alpha0 <= 0.4, -1.0 <= beta0 <= 2.0, 0 <= xvar0 <= 2.4, -2*np.pi <= yvar0 <= 0,
+            -0.4 <= alpha1 <= 0.4, -1.0 <= beta1 <= 2.0, 0 <= xvar1 <= 6.0, -np.pi <= yvar1 <= np.pi]):
+        return 0.0
+    return -np.inf
+
+# Log-likelihood function. It is just a Gaussian log-likelihood based on the squared residuals
+def log_likelihood(theta):
+    model_mock = modelmock(theta)
+
+    return -np.sum((mockdata.real - model_mock.real)**2 + (mockdata.imag - model_mock.imag)**2)
+
+# Log-posterior distribution for the residuals case.
+# The evidence is just a normalization factor
+def log_probability(theta):
+    lp = log_prior(theta)
+    if not np.isfinite(lp):
+        return -np.inf
+    return lp + log_likelihood(theta)
+
+
+# Define labels used in plotting
+paramlabels_a = [r'$\alpha_0$', r'$\alpha_1$']
+paramlabels_b = [r'$\beta_0$', r'$\beta_1$']
+paramlabels_x = [r'$x_0$', r'$x_1$']
+paramlabels_y = [r'$y_0$', r'$y_1$']
+paramlabels = paramlabels_a + paramlabels_b + paramlabels_x + paramlabels_y
+
+
+np.random.seed(42)
+pos = [random.uniform(-0.1, 0.1), random.uniform(-0.1, 0.1), random.uniform(-0.1, 0.1), random.uniform(-0.1, 0.1),
+       random.uniform(0.8, 1.0), random.uniform(4, 5), random.uniform(-3, -2), random.uniform(1, 2)]
+pos = np.array(pos) + 1e-5 * np.random.randn(ntemps, nwalkers, ndim)
+
+sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps)
+sampler.run_mcmc(pos, npoints)
+
+
+# Chain plot
+fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(12, 4*4))
+for i in range(ndim):
+    axes[i].plot(sampler.chain[0, :, :, i].T, color="k", alpha=0.4, rasterized=True)
+    axes[i].yaxis.set_major_locator(MaxNLocator(5))
+    axes[i].set_ylabel(paramlabels[i])
+axes[-1].set_xlabel('Iterations')
+
+fig.savefig(rootpath+'/git/rdstackingproject/plotsmc/Test_with_mockdatan=1'+str(npoints)+'pts_chainplot.png', format='png', bbox_inches='tight')
+
+
+# Output chain
+samples = sampler.chain[0, :, burnin:, :].reshape((-1, ndim))
+df0 = pd.DataFrame(samples, columns=paramlabels)
+df0.to_csv(rootpath+'/git/rdstackingproject/plotsmc/Test_with_mockdatan=1'+str(npoints)+'pts_chain.csv', index=False)
+
+
+# Corner plot
+lglk = np.array([log_likelihood(samples[i]) for i in range(len(samples))])
+pk = samples[np.argmax(lglk)]
+median = np.median(samples, axis=0)
+
+figcorn = corner.corner(samples, bins=numbins, hist_bin_factor=5, color=datacolor, truths=pk, truth_color=pkcolor, plot_contours=True, labels=paramlabels, quantiles=(0.05, 0.16, 0.5, 0.84, 0.95), levels=[1-np.exp(-0.5), 1-np.exp(-1.64**2/2)], show_titles=True)
+
+# Extract the axes in order to add more important line plots
+naxes = len(pk)
+axes = np.array(figcorn.axes).reshape((naxes, naxes))
+
+# Loop over the diagonal
+for i in range(naxes):
+    ax = axes[i, i]
+    ax.axvline(median[i], color=mediancolor)
+
+# Loop over the histograms
+for yi in range(naxes):
+    for xi in range(yi):
+        ax = axes[yi, xi]
+        ax.axvline(median[xi], color=mediancolor)
+        ax.axhline(median[yi], color=mediancolor)
+        ax.plot(median[xi], median[yi], color=mediancolor, marker='s')
+
+figcorn.savefig(rootpath+'/git/rdstackingproject/plotsmc/Test_with_mockdatan=1'+str(npoints)+'pts_corner.pdf', format='pdf')
\ No newline at end of file
diff --git a/code/condor_submit_Rdowntestn=1.sub b/code/condor_submit_Rdowntestn=1.sub
new file mode 100644
index 0000000..8a5b9f6
--- /dev/null
+++ b/code/condor_submit_Rdowntestn=1.sub
@@ -0,0 +1,20 @@
+universe = vanilla
+getenv = true
+# run script -- make sure that condor has execute permission for this file (chmod a+x A_test_with_n=1.py)
+executable = A_test_with_n=1.py
+# file to dump stdout (this directory should exist)
+output = A_test_with_n=1.out
+# file to dump stderr
+error = A_test_with_n=1.err
+# condor logs
+log = A_test_with_n=1.log
+initialdir = .
+notify_user = rl746@cornell.edu
+notification = Complete
+arguments = "-processid $(Process)"
+request_memory = 232GB
+request_cpus = 16
+on_exit_remove = (ExitBySignal == False) || ((ExitBySignal == True) && (ExitSignal != 11))
+accounting_group = aei.dev.test_dynesty
+queue 1
+
-- 
GitLab
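With the files above in place, the workflow is the one hinted at in the submit-file comments: give the script the execute bit (chmod a+x A_test_with_n=1.py) -- and, since the vanilla universe executes it directly, it likely also needs a shebang line such as #!/usr/bin/env python at the top -- then submit the job with condor_submit condor_submit_Rdowntestn=1.sub. Once the run finishes, the burned-in chain the script writes out can be inspected without rerunning the sampler. The snippet below is a minimal post-processing sketch, assuming the rootpath and npoints = 150 set above (the CSV path is just the one the script saves to):

    import pandas as pd

    # Load the flattened, burned-in chain saved by A_test_with_n=1.py
    chain = pd.read_csv('/Users/RayneLiu/git/rdstackingproject/plotsmc/'
                        'Test_with_mockdatan=1150pts_chain.csv')

    # Posterior medians and a rough 90% interval for each of the 8 parameters
    print(chain.median())
    print(chain.quantile([0.05, 0.95]))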