diff --git a/code/Mock7tones_Generate-0001_t_10M_wandt.py b/code/Mock7tones_Generate-0001_t_10M_wandt.py
new file mode 100755
index 0000000000000000000000000000000000000000..8aef20d74312057dc544c2e279558f9231cf35d1
--- /dev/null
+++ b/code/Mock7tones_Generate-0001_t_10M_wandt.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# ### Let's try the 0.0001 stepsize, fitting the n=0 and n=1 tones to the 7-overtone mock data.
+
+# In[99]:
+
+
+#Import relevant modules, import data and all that
+import numpy as np
+import corner
+import os
+os.environ['MPLCONFIGDIR'] = '/home/rayne.liu/.config/matplotlib'
+import matplotlib.pyplot as plt
+from matplotlib.ticker import MaxNLocator
+from matplotlib import rc
+#plt.rcParams['font.family'] = 'DejaVu Sans'
+#rc('text', usetex=True)
+plt.rcParams.update({'font.size': 16.5})
+
+import ptemcee
+#from pycbc.pool import choose_pool
+from multiprocessing import Pool
+import h5py
+import inspect
+import pandas as pd
+import json
+import qnm
+import random
+
+#Remember to change the following global variables:
+#rootpath: root path to the NR data
+#npoints: number of points you're using for your sampling
+#nmax: tone index --> nmax = 0 if fitting only the fundamental tone
+#tshift: time shift after the strain peak
+#vary_fund: whether you vary the fundamental frequency. Used in the model_dv function.
+
+rootpath= "/work/rayne.liu/git/rdstackingproject"#"/Users/RayneLiu/git/rdstackingproject"
+nmax=1
+tshift=10
+#vary_fund = True
+
+#sampler parameters
+npoints = 1370
+nwalkers = 300
+ntemps=12
+dim = nmax+1
+ndim = 4*dim
+burnin = 680  #How many points to discard before making the corner plot. Watch the convergence of the chain plot;
+            #this must be less than npoints. I usually use half the points.
+numbins = 32 #corner plot parameter - how many bins you want
+datacolor = '#105670' #'#4fa3a7'
+pkcolor = '#f2c977' #'#ffb45f'
+mediancolor = '#f7695c' #'#9b2814'
+
+
+#WE STILL IMPORT THE ORIGINAL NR DATA FOR COMPARISON
+#TimeOfMaximum
+def FindTmaximum(y):
+    #Determines the time at which the complex strain reaches its maximum absolute value
+    absval = y[:,1]*y[:,1]+y[:,2]*y[:,2]
+    index = np.argmax(absval)
+    timemax = y[index,0]
+    return timemax
+
+
+#This loads the 22 mode data
+gw = {}
+gw["SXS:BBH:0305"] = h5py.File(rootpath+"/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/rhOverM_Asymptotic_GeometricUnits_CoM.h5", 'r')
+gw_sxs_bbh_0305 = gw["SXS:BBH:0305"]["Extrapolated_N2.dir"]["Y_l2_m2.dat"]
+
+# Remember to download metadata.json from the simulation with number: 0305. Download Lev6/metadata.json
+# This postprocesses the metadata file to find the final mass and final spin
+metadata = {}
+with open(rootpath+"/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/metadata.json") as file:
+    metadata["SXS:BBH:0305"] = json.load(file)
+
+af = metadata["SXS:BBH:0305"]['remnant_dimensionless_spin'][-1]
+mf = metadata["SXS:BBH:0305"]['remnant_mass']
+
+# Load the first 8 (l=2, m=2) QNM frequencies and damping times from the qnm package
+omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (8)]
+w = (np.real(omegas))/mf
+tau=-1/(np.imag(omegas))*mf
+#print(w)
+#print(tau)
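+#Here w_n = Re(omega_n)/M_f and tau_n = -M_f/Im(omega_n), i.e. the dimensionless QNM
+#frequencies from the qnm package rescaled by the remnant mass into the NR time units.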
+
+#times --> x axis of your data
+times_nr = gw_sxs_bbh_0305[:,0]
+tmax_nr=FindTmaximum(gw_sxs_bbh_0305)
+t0_nr=tmax_nr +tshift
+
+#Select the data from t0 onwards
+position_nr = np.argmax(times_nr >= (t0_nr-0.1)) #The 0.1 compensates for the step-size offset of the NR data; it gives better alignment between the NR data and our mock fit, and shouldn't interfere with our results because we still use the time points in the NR data.
+gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position_nr:-1]
+timesrd_nr=gw_sxs_bbh_0305[position_nr:-1][:,0][:920]
+#print(timesrd[0])
+#print(t0) #(This checks that timesrd[0] is indeed t0 - actually this is a bit off due to stepsize issues,
+            #but we fix it right away below)
+#print(t0_nr)
+t0_nr = timesrd_nr[0]
+#print(t0_nr)
+#print(t0)
+timespan_nr = timesrd_nr - t0_nr
+gwdata_re = gw_sxs_bbh_0305rd[:,1][:920]
+gwdata_im = gw_sxs_bbh_0305rd[:,2][:920]
+
+
+
+
+#NOW WE DEFINE THE MOCKDATA
+t = np.arange(0, timespan_nr[-1]+tshift, 0.0001)
+#The w and tau come from the qnm example notebook; the amplitudes and phases from the 1910 paper
+pars = np.zeros((8, 4))      
+pars[0] = [w[0], tau[0], 0.98213669, 1.47250547]
+pars[1] = [w[1], tau[1], 4.29386867, -0.76414158]
+pars[2] = [w[2], tau[2], 10.5262146, 2.51507515]
+pars[3] = [w[3], tau[3], 21.1540387, -0.98587955]
+pars[4] = [w[4], tau[4], 33.6658447, 1.73635455]
+pars[5] = [w[5], tau[5], 28.6385750, -1.66304618]
+pars[6] = [w[6], tau[6], 9.94273547, 1.42612096]
+pars[7] = [w[7], tau[7], 2.08423335, -0.90304704]
+mockdata = 0
+#print('Parameters')
+#print(pars[:, 2])
+#print(pars[:, 3])
+for i in range(8):
+    print('The n='+str(i)+' tone frequency, damping time, amplitude and phase:')
+    print(pars[i])
+    mockdata += pars[i][2]*np.exp(1j*pars[i][3])*np.exp(-t/(pars[i][1])) * (np.cos(pars[i][0]*t)-1j*np.sin(pars[i][0]*t))
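+#The mock waveform is the tone sum h(t) = sum_n A_n exp(i phi_n) exp(-t/tau_n) exp(-i w_n t),
+#with pars[n] = [w_n, tau_n, A_n, phi_n]; the (cos - i sin) factor above is exp(-i w_n t),
+#the same sign convention used in model_dv below.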
+
+
+#We also try a tshift for this one
+t0=tshift
+
+#Select the data from t0 onwards
+position = np.argmax(t >= (t0))
+mockdata_rd=mockdata[position:-1]
+timesrd=t[position:-1]
+#print(t0) #(This checks that timesrd[0] is indeed t0 - actually this is a bit off due to stepsize issues,
+          #but we fix it right away below)
+t0 = timesrd[0]
+#print(t0)
+timespan = timesrd - t0
+mockdata_re =mockdata_rd.real
+mockdata_im = mockdata_rd.imag
+
+
+# In[92]:
+
+
+#Test the new generated data
+print('We check the 7 tone mock data against the original NR data:')
+figtest = plt.figure(figsize = (12, 8))
+#plt.plot(timespan, mockdata_re, "r", alpha=0.3, lw=2, label='Mock_before_re')
+plt.plot(timespan, mockdata_re, "r", alpha=0.3, lw=2, label='Mock_raw_re')
+#plt.plot(timespan, mockdata_im, alpha=0.3, lw=2, label='Mock_before_im')
+plt.plot(timespan_nr, gwdata_re, alpha=0.3, lw=2, label='NRraw_re')
+plt.plot(timespan, mockdata_im, alpha=0.3, lw=2, label='Mock_raw_im')
+plt.plot(timespan_nr, gwdata_im, alpha=0.3, lw=2, label='NRraw_im')
+plt.legend()
+figtest.savefig(rootpath + '/plotsmc/0001_'+str(tshift)+'M_mock7tonegenerated_datatest_wandt.png', format='png', bbox_inches='tight', dpi=1000)
+
+
+# ### Now the generated data looks good according to what we have above...let's start sampling!
+
+# In[100]:
+
+
+#Fitting
+#RD model for the nmax+1 tones. Amplitudes are parametrized as (x_n*Exp[i y_n]). Used here.
+def model_dv(theta):
+    #x0, y0= theta
+    #Your nmax might not align with the dim of theta. Better check it here.
+    assert int(len(theta)/4) == dim, 'Please recheck your n and parameters'
+    
+    wvars = theta[ : (dim)]
+    tvars = theta[(dim) : 2*(dim)]
+    xvars = theta[2*(dim) : 3*(dim)]
+    yvars = theta[3*(dim) : ]
+    
+    #if vary_fund == False:
+    #    avars[0]=0
+    #    bvars[0]=0
+        
+    ansatz = 0
+    for i in range (0,dim):
+        #bvars[1]=0
+        #avars[1]=0
+        ansatz += (xvars[i]*np.exp(1j*yvars[i]))*np.exp(-timespan/tvars[i]) * (np.cos(wvars[i]*timespan)-1j*np.sin(wvars[i]*timespan))
+    # -1j to agree with SXS convention
+    return ansatz
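+#The ansatz is h(t) = sum_n x_n exp(i y_n) exp(-t/tau_n) exp(-i w_n t) evaluated on timespan;
+#theta packs the parameters as [w_0..w_nmax, tau_0..tau_nmax, x_0..x_nmax, y_0..y_nmax].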
+
+# Log-prior distribution. It defines the allowed ranges the variables can vary over.
+#It works for the (x_n*Exp[i y_n]) version.
+def log_prior(theta): 
+    #Warning: we are specifically working with nmax=1, so the individual priors on the parameters are manually adjusted here. This does not carry over to other nmax values.
+    #avars = theta[ : (dim)]
+    #bvars = theta[(dim) : 2*(dim)]
+    #xvars = theta[2*(dim) : 3*(dim)]
+    #yvars = theta[3*(dim) : ]
+    omega0, omega1, tau0, tau1, xvar0, xvar1, yvar0, yvar1 = theta
+    if tshift == 0:
+        if all([0.45 <= omega0 <= 0.63, 0.27 <= omega1 <= 0.6, 0. <= tau0 <= 30., 0. <= tau1 <= 20., \
+            0 <= xvar0 <= 6, 0 <= xvar1 <= 6, -np.pi <= yvar0 <= np.pi, 0. <= yvar1 <= 2*np.pi]):        
+            return 0.0
+    
+    elif tshift == 10:
+        if all([0.54 <= omega0 <= 0.58, 0.4 <= omega1 <= 0.6, 9.1 <= tau0 <= 15.7, 0. <= tau1 <= 9., \
+            0. <= xvar0 <= 1.0, 0. <= xvar1 <= 1.2, -np.pi <= yvar0 <= np.pi, -np.pi <= yvar1 <= np.pi]):        
+            return 0.0
+    
+    return -np.inf
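+#The prior is flat: log-prior 0 inside the box above and -inf outside. ptemcee takes the
+#log-prior and log-likelihood separately so that only the likelihood gets tempered.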
+
+
+# Log-likelihood function. It is just a Gaussian log-likelihood built from the squared residuals.
+def log_likelihood(theta):
+    modelev = model_dv(theta)
+    result = -np.sum((mockdata_re - (modelev.real))**2+(mockdata_im - (modelev.imag))**2)
+    if np.isnan(result):
+        return -np.inf
+    return result
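+#This is -sum_i |data_i - model_i|^2 over the complex strain samples, i.e. a Gaussian
+#log-likelihood with a fixed noise variance, up to an additive constant.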
+
+
+# Logposterior distribution for the residuals case.
+# The evidence is just a normalization factor
+def log_probability(theta):
+    lp = log_prior(theta)
+    if not np.isfinite(lp):
+        return -np.inf
+    return lp + log_likelihood(theta)
+
+
+# In[101]:
+
+
+#This cell uses the tshift=10 results
+#Set the number of cores of your processors
+#pool = choose_pool(1)
+#pool.size = 1
+np.random.seed(42)
+pos = np.array([random.uniform(0.55, 0.57), random.uniform(0.5, 0.54), random.uniform(10., 13.7),
+                random.uniform(4., 6.), random.uniform(0.3, 0.5), random.uniform(0.3, 0.5),
+                random.uniform(-1., 1.), random.uniform(-1., 1.)])
+#Broadcast the starting point to all temperatures and walkers with a small random scatter
+pos = pos + 1e-5 * np.random.randn(ntemps, nwalkers, ndim)
+with Pool() as pool:
+    sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps, pool=pool)
+    sampler.run_mcmc(pos,npoints)
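+#If ptemcee exposes acceptance fractions like emcee's PTSampler does, a quick diagnostic
+#would be e.g. print(np.mean(sampler.acceptance_fraction, axis=1)) for each temperature.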
+
+dim = 2
+paramlabels_w = [r'$\omega_'+str(i)+'$' for i in range (dim)]
+paramlabels_t = [r'$\tau_'+str(i)+'$' for i in range (dim)]
+paramlabels_x = [r'$x_'+str(i)+'$' for i in range (dim)]
+paramlabels_y = [r'$y_'+str(i)+'$' for i in range (dim)] 
+
+paramlabels = paramlabels_w + paramlabels_t + paramlabels_x + paramlabels_y
+
+print('The chain plot:')
+#Chain plot
+figchain, axes = plt.subplots(ndim, 1, sharex=True, figsize=(12, 4*(4)))
+for i in range(ndim):
+    axes[i].plot(sampler.chain[0,:, :, i].T, color="k", alpha=0.4, rasterized=True)
+    axes[i].yaxis.set_major_locator(MaxNLocator(5))
+    axes[i].set_ylabel(paramlabels[i])
+axes[-1].set_xlabel('Iterations')
+figchain.savefig(rootpath + '/plotsmc/0001_10M_mock7tonegenerated_chainplot_wandt_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format='png', bbox_inches='tight', dpi=300)
+
+
+for temp in range(ntemps):
+    dftemp = pd.DataFrame(sampler.chain[temp,:, :, :].reshape((-1, ndim)), columns=paramlabels)
+    dftemp.to_csv(rootpath+'/plotsmc/0001_10M_mock7tonegenerated'+'_nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(npoints)+'pt_temp'+str(temp)+'_chain.csv', index = False)
+
+print('We\'re using ptemcee. Our constraints:')
+#Discard the burn-in samples, calculate the peak-likelihood value (not necessarily so on Atlas) and make the corner plot
+samples = sampler.chain[0,:, burnin:, :].reshape((-1, ndim))
+#samples for corner plot
+samples_corn = samples #if vary_fund == True else np.delete(samples, np.s_[0,2], 1)
+
+#print('Values with peak likelihood:')
+lglk = np.array([log_likelihood(samples[i]) for i in range(len(samples))])
+pk = samples[np.argmax(lglk)]
+#print('pk:')
+#print(pk)
+pk_corn = pk #if vary_fund == True else np.delete(pk, [0,2])
+#The y_0 range needs some massaging to make the plot. But to keep the whole picture consistent, better change the range of y_1 too.
+#if vary_fund == False:
+#    samples_corn.T[-dim:] -= np.pi #This indeed changes samples_corn itself
+#    pk[-dim:] -= np.pi
+
+#print('pkFalse:')
+#print(pk)
+    
+#print(pk) 
+#Now calculate median (50-percentile) value
+median = np.median(samples_corn, axis=0)
+#print(samples)
+#print(samples_corn)
+
+figcorn = corner.corner(samples_corn, bins = numbins, hist_bin_factor = 5, color = datacolor, truths=pk_corn, truth_color = pkcolor, plot_contours = True, labels = paramlabels, quantiles=(0.05, 0.16, 0.5, 0.84, 0.95), levels=[1-np.exp(-0.5), 1-np.exp(-1.64 ** 2/2)], show_titles=True)
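+#The contour levels 1-exp(-0.5) and 1-exp(-1.64**2/2) are the 2D-Gaussian masses enclosed
+#within 1 sigma and ~1.64 sigma (about 39% and 74%), the usual 2D analogues of 68%/90%.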
+
+
+#Extract the axes in order to add more important line plots
+naxes = len(pk_corn)
+axes = np.array(figcorn.axes).reshape((naxes, naxes))
+
+# Loop over the diagonal
+for i in range(naxes):
+    ax = axes[i, i]
+    ax.axvline(median[i], color=mediancolor)
+
+# Loop over the histograms
+for yi in range(naxes):
+    for xi in range(yi):
+        ax = axes[yi, xi]
+        ax.axvline(median[xi], color=mediancolor)
+        ax.axhline(median[yi], color=mediancolor)
+        ax.plot(median[xi], median[yi], color = mediancolor, marker = 's')
+figcorn.savefig(rootpath + '/plotsmc/0001_10M_mock7tonegenerated_cornerplot_wandt_'+'nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format='png', bbox_inches='tight', dpi=300)
+
+
+"""
+#Now plot the NR data against the mcmc fit data, together with the 1-sigma varying error data
+onesig_bounds = np.array([np.percentile(samples[:, i], [16, 84]) for i in range(len(samples[0]))]).T
+modelfitpk = model_dv(pk)
+figband = plt.figure(figsize = (12, 9))
+#Plot the 1-sigma_percentile
+for j in range(len(samples)):
+    sample = samples[j]
+    if np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]):
+        plt.plot(timespan, model_dv(sample).real, "#79CAF2", alpha=0.3)
+    
+plt.plot(timespan, mockdata_re, "k", alpha=0.7, lw=2, label=r'Mock_re')
+plt.plot(timespan, modelfitpk.real, "r", alpha=0.7, lw=2, label=r'FitMCmax_re')
+plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
+plt.legend()
+plt.xlabel("t")
+plt.ylabel("h")
+
+figband.savefig(rootpath + '/plotsmc/0001_10M_mock7tonegenerated_waveform_wandt_'+'nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format = 'png', dpi = 384, bbox_inches = 'tight')
+"""
diff --git a/code/Mock7tones_Interpolate-0001_t_10M_wandt.py b/code/Mock7tones_Interpolate-0001_t_10M_wandt.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e87f7143c645f7c221e595fb6c9e85c84be6478
--- /dev/null
+++ b/code/Mock7tones_Interpolate-0001_t_10M_wandt.py
@@ -0,0 +1,357 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# ### Let's try the 0.0001 stepsize, fitting the n=0 and n=1 tones to the 7-overtone mock data.
+
+# In[99]:
+
+
+#Import relevant modules, import data and all that
+import numpy as np
+from scipy import interpolate
+import corner
+import os
+os.environ['MPLCONFIGDIR'] = '/home/rayne.liu/.config/matplotlib'
+import matplotlib.pyplot as plt
+from matplotlib.ticker import MaxNLocator
+from matplotlib import rc
+#plt.rcParams['font.family'] = 'DejaVu Sans'
+#rc('text', usetex=True)
+plt.rcParams.update({'font.size': 16.5})
+
+import ptemcee
+#from pycbc.pool import choose_pool
+from multiprocessing import Pool
+import h5py
+import inspect
+import pandas as pd
+import json
+import qnm
+import random
+
+#Remember to change the following global variables:
+#rootpath: root path to the NR data
+#npoints: number of points you're using for your sampling
+#nmax: tone index --> nmax = 0 if fitting only the fundamental tone
+#tshift: time shift after the strain peak
+#vary_fund: whether you vary the fundamental frequency. Used in the model_dv function.
+
+rootpath= "/work/rayne.liu/git/rdstackingproject"#"/Users/RayneLiu/git/rdstackingproject"
+nmax=1
+tshift=10
+#vary_fund = True
+
+#sampler parameters
+npoints = 40
+nwalkers = 24
+ntemps=12
+dim = nmax+1
+ndim = 4*dim
+burnin = 20  #How many points to discard before making the corner plot. Watch the convergence of the chain plot;
+            #this must be less than npoints. I usually use half the points.
+numbins = 32 #corner plot parameter - how many bins you want
+datacolor = '#105670' #'#4fa3a7'
+pkcolor = '#f2c977' #'#ffb45f'
+mediancolor = '#f7695c' #'#9b2814'
+
+
+#WE STILL IMPORT THE ORIGINAL NR DATA FOR COMPARISON
+#TimeOfMaximum
+def FindTmaximum(y):
+    #Determines the time at which the complex strain reaches its maximum absolute value
+    absval = y[:,1]*y[:,1]+y[:,2]*y[:,2]
+    index = np.argmax(absval)
+    timemax = y[index,0]
+    return timemax
+
+
+#This loads the 22 mode data
+gw = {}
+gw["SXS:BBH:0305"] = h5py.File(rootpath+"/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/rhOverM_Asymptotic_GeometricUnits_CoM.h5", 'r')
+gw_sxs_bbh_0305 = gw["SXS:BBH:0305"]["Extrapolated_N2.dir"]["Y_l2_m2.dat"]
+
+# Remember to download metadata.json from the simulation with number: 0305. Download Lev6/metadata.json
+# This postprocesses the metadata file to find the final mass and final spin
+metadata = {}
+with open(rootpath+"/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/metadata.json") as file:
+    metadata["SXS:BBH:0305"] = json.load(file)
+
+af = metadata["SXS:BBH:0305"]['remnant_dimensionless_spin'][-1]
+mf = metadata["SXS:BBH:0305"]['remnant_mass']
+
+# Load the first 8 (l=2, m=2) QNM frequencies and damping times from the qnm package
+omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (8)]
+w = (np.real(omegas))/mf
+tau=-1/(np.imag(omegas))*mf
+#print(w)
+#print(tau)
+
+#times --> x axis of your data
+times_nr = gw_sxs_bbh_0305[:,0]
+tmax_nr=FindTmaximum(gw_sxs_bbh_0305)
+t0_nr=tmax_nr +tshift
+
+#Select the data from t0 onwards
+position_nr = np.argmax(times_nr >= (t0_nr-0.1)) #The 0.1 compensates for the step-size offset of the NR data; it gives better alignment between the NR data and our mock fit, and shouldn't interfere with our results because we still use the time points in the NR data.
+gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position_nr:-1]
+timesrd_nr=gw_sxs_bbh_0305[position_nr:-1][:,0][:920]
+#print(timesrd[0])
+#print(t0) #(This checks that timesrd[0] is indeed t0 - actually this is a bit off due to stepsize issues,
+            #but we fix it right away below)
+#print(t0_nr)
+t0_nr = timesrd_nr[0]
+#print(t0_nr)
+#print(t0)
+timespan_nr = timesrd_nr - t0_nr
+gwdata_re = gw_sxs_bbh_0305rd[:,1][:920]
+gwdata_im = gw_sxs_bbh_0305rd[:,2][:920]
+
+
+
+
+#NOW WE DEFINE THE MOCKDATA
+t = np.arange(0, 137, 0.1)
+#The w and tau come from the qnm example notebook; the amplitudes and phases from the 1910 paper
+pars = np.zeros((8, 4))      
+pars[0] = [w[0], tau[0], 0.98213669, 1.47250547]
+pars[1] = [w[1], tau[1], 4.29386867, -0.76414158]
+pars[2] = [w[2], tau[2], 10.5262146, 2.51507515]
+pars[3] = [w[3], tau[3], 21.1540387, -0.98587955]
+pars[4] = [w[4], tau[4], 33.6658447, 1.73635455]
+pars[5] = [w[5], tau[5], 28.6385750, -1.66304618]
+pars[6] = [w[6], tau[6], 9.94273547, 1.42612096]
+pars[7] = [w[7], tau[7], 2.08423335, -0.90304704]
+mockdata = 0
+#print('Parameters')
+#print(pars[:, 2])
+#print(pars[:, 3])
+for i in range(8):
+    print('The n='+str(i)+' tone frequency, damping time, amplitude and phase:')
+    print(pars[i])
+    mockdata += pars[i][2]*np.exp(1j*pars[i][3])*np.exp(-t/(pars[i][1])) * (np.cos(pars[i][0]*t)-1j*np.sin(pars[i][0]*t))
+
+
+#We also try a tshift for this one
+t0=tshift
+
+#Select the data from t0 onwards
+position = np.argmax(t >= (t0))
+mockdata_rd=mockdata[position:-1]
+timesrd=t[position:-1][:920]
+#print(t0) #(This checks that timesrd[0] is indeed t0 - actually this is a bit off due to stepsize issues,
+          #but we fix it right away below)
+t0 = timesrd[0]
+#print(t0)
+timespan = timesrd - t0
+mockdata_re =mockdata_rd.real[:920]
+mockdata_im = mockdata_rd.imag[:920]
+
+
+# In[84]:
+
+mocknew_re = interpolate.interp1d(timespan, mockdata_re, kind = 'cubic')
+mocknew_im = interpolate.interp1d(timespan, mockdata_im, kind = 'cubic')
+
+
+# In[87]:
+
+
+timespan_new = np.linspace(0, timespan[-1], len(timespan)*1000)
+mockdatanew_re = mocknew_re(timespan_new)
+mockdatanew_im = mocknew_im(timespan_new)
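+#Optional sanity check (illustrative sketch): compare the cubic interpolant against the
+#analytic tone sum evaluated directly on the fine grid; the maximum absolute deviation
+#estimates the interpolation error.
+#analytic_fine = sum(p[2]*np.exp(1j*p[3])*np.exp(-(timespan_new + t0)/p[1])
+#                    * np.exp(-1j*p[0]*(timespan_new + t0)) for p in pars)
+#print('Max interpolation error (real part):', np.max(np.abs(mockdatanew_re - analytic_fine.real)))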
+
+
+
+
+# In[92]:
+
+
+#Test the new interpolated data
+print('We check the 7 tone mock data against the original NR data:')
+figtest = plt.figure(figsize = (12, 8))
+#plt.plot(timespan, mockdata_re, "r", alpha=0.3, lw=2, label='Mock_before_re')
+plt.plot(timespan_new, mockdatanew_re, "r", alpha=0.3, lw=2, label='Mock_interpolated_re')
+#plt.plot(timespan, mockdata_im, alpha=0.3, lw=2, label='Mock_before_im')
+plt.plot(timespan_nr, gwdata_re, alpha=0.3, lw=2, label='NRraw_re')
+plt.plot(timespan_new, mockdatanew_im, alpha=0.3, lw=2, label='Mock_interpolated_im')
+plt.plot(timespan_nr, gwdata_im, alpha=0.3, lw=2, label='NRraw_im')
+plt.legend()
+figtest.savefig(rootpath + '/plotsmc/0001_'+str(tshift)+'M_mock7toneinterpolated_datatest_wandt.png', format='png', bbox_inches='tight', dpi=1000)
+
+
+# ### Now the interpolation seems nice according to what we have above...let's start sampling!
+
+# In[100]:
+
+
+#Fitting
+#RD model for the nmax+1 tones. Amplitudes are parametrized as (x_n*Exp[i y_n]). Used here.
+def model_dv(theta):
+    #x0, y0= theta
+    #Your nmax might not align with the dim of theta. Better check it here.
+    assert int(len(theta)/4) == dim, 'Please recheck your n and parameters'
+    
+    wvars = theta[ : (dim)]
+    tvars = theta[(dim) : 2*(dim)]
+    xvars = theta[2*(dim) : 3*(dim)]
+    yvars = theta[3*(dim) : ]
+    
+    #if vary_fund == False:
+    #    avars[0]=0
+    #    bvars[0]=0
+        
+    ansatz = 0
+    for i in range (0,dim):
+        #bvars[1]=0
+        #avars[1]=0
+        ansatz += (xvars[i]*np.exp(1j*yvars[i]))*np.exp(-timespan_new/tvars[i]) * (np.cos(wvars[i]*timespan_new)-1j*np.sin(wvars[i]*timespan_new))
+    # -1j to agree with SXS convention
+    return ansatz
+
+# Log-prior distribution. It defines the allowed ranges the variables can vary over.
+#It works for the (x_n*Exp[i y_n]) version.
+def log_prior(theta): 
+    #Warning: we are specifically working with nmax=1, so the individual priors on the parameters are manually adjusted here. This does not carry over to other nmax values.
+    #avars = theta[ : (dim)]
+    #bvars = theta[(dim) : 2*(dim)]
+    #xvars = theta[2*(dim) : 3*(dim)]
+    #yvars = theta[3*(dim) : ]
+    omega0, omega1, tau0, tau1, xvar0, xvar1, yvar0, yvar1 = theta
+    if tshift == 0:
+        if all([0.45 <= omega0 <= 0.63, 0.27 <= omega1 <= 0.6, 0. <= tau0 <= 30., 0. <= tau1 <= 20., \
+            0 <= xvar0 <= 6, 0 <= xvar1 <= 6, -np.pi <= yvar0 <= np.pi, 0. <= yvar1 <= 2*np.pi]):        
+            return 0.0
+    
+    elif tshift == 10:
+        if all([0.54 <= omega0 <= 0.6, 0.35 <= omega1 <= 0.64, 9.1 <= tau0 <= 15.7, 0. <= tau1 <= 9., \
+            0. <= xvar0 <= 1.0, 0. <= xvar1 <= 1.2, -np.pi <= yvar0 <= np.pi, -np.pi <= yvar1 <= np.pi]):        
+            return 0.0
+    
+    return -np.inf
+
+
+# Log-likelihood function. It is just a Gaussian log-likelihood built from the squared residuals.
+def log_likelihood(theta):
+    modelev = model_dv(theta)
+    result = -np.sum((mockdatanew_re - (modelev.real))**2+(mockdatanew_im - (modelev.imag))**2)
+    if np.isnan(result):
+        return -np.inf
+    return result
+
+
+# Logposterior distribution for the residuals case.
+# The evidence is just a normalization factor
+def log_probability(theta):
+    lp = log_prior(theta)
+    if not np.isfinite(lp):
+        return -np.inf
+    return lp + log_likelihood(theta)
+
+
+# In[101]:
+
+
+#This cell uses the tshift=10 results
+#Set the number of cores of your processors
+#pool = choose_pool(1)
+#pool.size = 1
+np.random.seed(42)
+pos = np.array([random.uniform(0.59, 0.64), random.uniform(0.5, 0.54), random.uniform(10., 13.7),
+                random.uniform(2., 5.), random.uniform(0.3, 0.5), random.uniform(0.3, 0.5),
+                random.uniform(-1., 1.), random.uniform(-1., 1.)])
+#Broadcast the starting point to all temperatures and walkers with a small random scatter
+pos = pos + 1e-5 * np.random.randn(ntemps, nwalkers, ndim)
+with Pool() as pool:
+    sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps, pool=pool)
+    sampler.run_mcmc(pos,npoints)
+
+dim = 2
+paramlabels_w = [r'$\omega_'+str(i)+'$' for i in range (dim)]
+paramlabels_t = [r'$\tau_'+str(i)+'$' for i in range (dim)]
+paramlabels_x = [r'$x_'+str(i)+'$' for i in range (dim)]
+paramlabels_y = [r'$y_'+str(i)+'$' for i in range (dim)] 
+
+paramlabels = paramlabels_w + paramlabels_t + paramlabels_x + paramlabels_y
+
+print('The chain plot:')
+#Chain plot
+figchain, axes = plt.subplots(ndim, 1, sharex=True, figsize=(12, 4*(4)))
+for i in range(ndim):
+    axes[i].plot(sampler.chain[0,:, :, i].T, color="k", alpha=0.4, rasterized=True)
+    axes[i].yaxis.set_major_locator(MaxNLocator(5))
+    axes[i].set_ylabel(paramlabels[i])
+axes[-1].set_xlabel('Iterations')
+figchain.savefig(rootpath + '/plotsmc/0001_10M_mock7toneinterpolated_chainplot_wandt_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format='png', bbox_inches='tight', dpi=300)
+
+
+for temp in range(ntemps):
+    dftemp = pd.DataFrame(sampler.chain[temp,:, :, :].reshape((-1, ndim)), columns=paramlabels)
+    dftemp.to_csv(rootpath+'/plotsmc/0001_10M_mock7toneinterpolated'+'_nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(npoints)+'pt_temp'+str(temp)+'_chain.csv', index = False)
+
+print('We\'re using ptemcee. Our constraints:')
+#Discard the burn-in samples, calculate the peak-likelihood value (not necessarily so on Atlas) and make the corner plot
+samples = sampler.chain[0,:, burnin:, :].reshape((-1, ndim))
+#samples for corner plot
+samples_corn = samples #if vary_fund == True else np.delete(samples, np.s_[0,2], 1)
+
+#print('Values with peak likelihood:')
+lglk = np.array([log_likelihood(samples[i]) for i in range(len(samples))])
+pk = samples[np.argmax(lglk)]
+#print('pk:')
+#print(pk)
+pk_corn = pk #if vary_fund == True else np.delete(pk, [0,2])
+#The y_0 range needs some massaging to make the plot. But to keep the whole picture consistent, better change the range of y_1 too.
+#if vary_fund == False:
+#    samples_corn.T[-dim:] -= np.pi #This indeed changes samples_corn itself
+#    pk[-dim:] -= np.pi
+
+#print('pkFalse:')
+#print(pk)
+    
+#print(pk) 
+#Now calculate median (50-percentile) value
+median = np.median(samples_corn, axis=0)
+#print(samples)
+#print(samples_corn)
+
+figcorn = corner.corner(samples_corn, bins = numbins, hist_bin_factor = 5, color = datacolor, truths=pk_corn, truth_color = pkcolor, plot_contours = True, labels = paramlabels, quantiles=(0.05, 0.16, 0.5, 0.84, 0.95), levels=[1-np.exp(-0.5), 1-np.exp(-1.64 ** 2/2)], show_titles=True)
+
+
+#Extract the axes in order to add more important line plots
+naxes = len(pk_corn)
+axes = np.array(figcorn.axes).reshape((naxes, naxes))
+
+# Loop over the diagonal
+for i in range(naxes):
+    ax = axes[i, i]
+    ax.axvline(median[i], color=mediancolor)
+
+# Loop over the histograms
+for yi in range(naxes):
+    for xi in range(yi):
+        ax = axes[yi, xi]
+        ax.axvline(median[xi], color=mediancolor)
+        ax.axhline(median[yi], color=mediancolor)
+        ax.plot(median[xi], median[yi], color = mediancolor, marker = 's')
+figcorn.savefig(rootpath + '/plotsmc/0001_10M_mock7toneinterpolated_cornerplot_wandt_'+'nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format='png', bbox_inches='tight', dpi=300)
+
+
+"""
+#Now plot the NR data against the mcmc fit data, together with the 1-sigma varying error data
+onesig_bounds = np.array([np.percentile(samples[:, i], [16, 84]) for i in range(len(samples[0]))]).T
+modelfitpk = model_dv(pk)
+figband = plt.figure(figsize = (12, 9))
+#Plot the 1-sigma_percentile
+for j in range(len(samples)):
+    sample = samples[j]
+    if np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]):
+        plt.plot(timespan_new, model_dv(sample).real, "#79CAF2", alpha=0.3)
+    
+plt.plot(timespan_new, mockdatanew_re, "k", alpha=0.7, lw=2, label=r'Mock_re')
+plt.plot(timespan_new, modelfitpk.real, "r", alpha=0.7, lw=2, label=r'FitMCmax_re')
+plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
+plt.legend()
+plt.xlabel("t")
+plt.ylabel("h")
+
+figband.savefig(rootpath + '/plotsmc/0001_10M_mock7toneinterpolated_waveform_wandt_'+'nmax'+str(nmax)+'_tshift'+str(tshift)+'_'+str(nwalkers)+'walkers_'+str(npoints)+'pts.png', format = 'png', dpi = 384, bbox_inches = 'tight')
+"""
\ No newline at end of file
diff --git a/code/condor_submit_Mock7tonesGenerate0001_t_10M_wandt.sub b/code/condor_submit_Mock7tonesGenerate0001_t_10M_wandt.sub
new file mode 100755
index 0000000000000000000000000000000000000000..8cc2dcca53834690cf1084061c58919e310d40be
--- /dev/null
+++ b/code/condor_submit_Mock7tonesGenerate0001_t_10M_wandt.sub
@@ -0,0 +1,20 @@
+universe   = vanilla
+getenv     = true
+# run script -- make sure that condor has execute permission for this file (chmod a+x script.py)
+executable = Mock7tones_Generate-0001_t_10M_wandt.py 
+# file to dump stdout (this directory should exist)
+output     = Mock7tones_Generate-0001_t_10M_wandt.out
+# file to dump stderr
+error     = Mock7tones_Generate-0001_t_10M_wandt.err
+# condor logs
+log     = Mock7tones_Generate-0001_t_10M_wandt.log
+initialdir = .
+notify_user = rl746@cornell.edu
+notification = Complete
+arguments  = "-processid $(Process)" 
+request_memory = 192GB
+request_cpus = 1
+on_exit_remove = (ExitBySignal == False) || ((ExitBySignal == True) && (ExitSignal != 11))
+accounting_group = aei.dev.test_dynesty
+queue 1
+
diff --git a/code/condor_submit_Mock7tonesInterpolate0001_t_10M_wandt.sub b/code/condor_submit_Mock7tonesInterpolate0001_t_10M_wandt.sub
new file mode 100644
index 0000000000000000000000000000000000000000..b6a412b6adb86964978712ebdf9c4759eb033061
--- /dev/null
+++ b/code/condor_submit_Mock7tonesInterpolate0001_t_10M_wandt.sub
@@ -0,0 +1,20 @@
+universe   = vanilla
+getenv     = true
+# run script -- make sure that condor has execute permission for this file (chmod a+x script.py)
+executable = Mock7tones_Interpolate-0001_t_10M_wandt.py 
+# file to dump stdout (this directory should exist)
+output     = Mock7tones_Interpolate-0001_t_10M_wandt.out
+# file to dump stderr
+error     = Mock7tones_Interpolate-0001_t_10M_wandt.err
+# condor logs
+log     = Mock7tones_Interpolate-0001_t_10M_wandt.log
+initialdir = .
+notify_user = rl746@cornell.edu
+notification = Complete
+arguments  = "-processid $(Process)" 
+request_memory = 192GB
+request_cpus = 1
+on_exit_remove = (ExitBySignal == False) || ((ExitBySignal == True) && (ExitSignal != 11))
+accounting_group = aei.dev.test_dynesty
+queue 1
+
diff --git a/plotsmc/0001_10M_interpolated_chainplot.png b/plotsmc/0001_10M_interpolated_chainplot.png
index 8e5e9d0b33988191c1336734deeb73666db31c2b..609530d66c0f4a29151dc20b63a8d8ed28c12943 100644
Binary files a/plotsmc/0001_10M_interpolated_chainplot.png and b/plotsmc/0001_10M_interpolated_chainplot.png differ
diff --git a/plotsmc/0001_10M_interpolated_cornerplot.png b/plotsmc/0001_10M_interpolated_cornerplot.png
index 468a33886e70b06923ce4b52d1146cbb0615ded8..b70ff106719c9a262e3763894793794dbc5df03f 100644
Binary files a/plotsmc/0001_10M_interpolated_cornerplot.png and b/plotsmc/0001_10M_interpolated_cornerplot.png differ
diff --git a/plotsmc/0001_10M_mock7tonegenerated_chainplot_wandt_300walkers_1370pts.png b/plotsmc/0001_10M_mock7tonegenerated_chainplot_wandt_300walkers_1370pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e1ef189de6046d4674e559ecf2a9bd00839d75b
Binary files /dev/null and b/plotsmc/0001_10M_mock7tonegenerated_chainplot_wandt_300walkers_1370pts.png differ
diff --git a/plotsmc/0001_10M_mock7tonegenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1370pts.png b/plotsmc/0001_10M_mock7tonegenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1370pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..d403c2763db7bf4ff329f110e4a49d367d525ad5
Binary files /dev/null and b/plotsmc/0001_10M_mock7tonegenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1370pts.png differ
diff --git a/plotsmc/0001_10M_mock7toneinterpolated_chainplot_wandt_320walkers_1256pts.png b/plotsmc/0001_10M_mock7toneinterpolated_chainplot_wandt_320walkers_1256pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..609f00e80409b784cbff1bd474b03a86039d73ff
Binary files /dev/null and b/plotsmc/0001_10M_mock7toneinterpolated_chainplot_wandt_320walkers_1256pts.png differ
diff --git a/plotsmc/0001_10M_mock7toneinterpolated_cornerplot_wandt_nmax1_tshift10_320walkers_1256pts.png b/plotsmc/0001_10M_mock7toneinterpolated_cornerplot_wandt_nmax1_tshift10_320walkers_1256pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..66573c63521274ceba55aa7a690d6169f5140222
Binary files /dev/null and b/plotsmc/0001_10M_mock7toneinterpolated_cornerplot_wandt_nmax1_tshift10_320walkers_1256pts.png differ
diff --git a/plotsmc/0001_10M_mock7toneinterpolated_datatest_wandt.png b/plotsmc/0001_10M_mock7toneinterpolated_datatest_wandt.png
new file mode 100644
index 0000000000000000000000000000000000000000..546659877b7fa1670d32a0f7c90bf70007510bb5
Binary files /dev/null and b/plotsmc/0001_10M_mock7toneinterpolated_datatest_wandt.png differ
diff --git a/plotsmc/0001_10M_mockgenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1200pts.png b/plotsmc/0001_10M_mockgenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1200pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..ead3caa9567390da19460a65b9f153f9544cf1c6
Binary files /dev/null and b/plotsmc/0001_10M_mockgenerated_cornerplot_wandt_nmax1_tshift10_300walkers_1200pts.png differ
diff --git a/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1100pts.png b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1100pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..42d838365b57b46e607c82551ba1e70545f0ff89
Binary files /dev/null and b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1100pts.png differ
diff --git a/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1600pts.png b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1600pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..d3b710f4566b711fc2c7b259e7470e708683930e
Binary files /dev/null and b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_1600pts.png differ
diff --git a/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_500pts.png b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_500pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..123dfa069e4b707c998ca4a02cf32cb5e157ed2a
Binary files /dev/null and b/plotsmc/0001_18M_interpolated_chainplot_wandt_300walkers_500pts.png differ
diff --git a/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_1100pts.png b/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_1100pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..209abadd8e4553f95cf490039288973f91d72f60
Binary files /dev/null and b/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_1100pts.png differ
diff --git a/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_500pts.png b/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_500pts.png
new file mode 100644
index 0000000000000000000000000000000000000000..e53100759b6532d9a2be8aa416d855c5986785a5
Binary files /dev/null and b/plotsmc/0001_18M_interpolated_cornerplot_wandt_nmax1_tshift18_300walkers_500pts.png differ