    '''
    I cannot stand it. This is tooooooooooooooooooooooooooo messy. I'm gonna write a class. Gonna do it real quick.
    
    ---R^a_{yne} Λ^i_u, 08/04/2020
    '''
    
    
    #Import necessary modules
    
    import numpy as np
    import random
    from scipy.optimize import minimize
    
    import corner
    #%matplotlib inline
    import matplotlib.pyplot as plt
    from matplotlib.ticker import MaxNLocator
    from matplotlib import rc
    plt.rcParams['font.family'] = 'DejaVu Sans'
    rc('text', usetex=True)
    plt.rcParams.update({'font.size': 19})
    from IPython.display import display, Math
    
    import emcee
    import ptemcee
    import math
    import h5py
    import inspect
    import pandas as pd
    import json
    import qnm
    
    
    #rootpath: root path to the NR data (change to your desired directory)
    rootpath="/Users/RayneLiu"
    
    #Find the time of peak amplitude, which we need to align the data
    def FindTmaximum(y):
        '''
        Returns: time of maximum amplitude of the waveform.
        y: waveform data with columns [t, Re(h), Im(h)].
        '''
        #Squared amplitude of the complex waveform
        absval = y[:,1]*y[:,1]+y[:,2]*y[:,2]
        #Index of the peak; read the time off the input waveform itself
        index = np.argmax(absval)
        timemax = y[index,0]
        return timemax
    
    #This loads the (2,2)-mode data. This part is subject to change; we might need to handle more data than this.
    gw = {}
    gw["SXS:BBH:0305"] = h5py.File(rootpath+"/git/rdstackingproject/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/rhOverM_Asymptotic_GeometricUnits_CoM.h5", 'r')
    gw_sxs_bbh_0305 = gw["SXS:BBH:0305"]["Extrapolated_N2.dir"]["Y_l2_m2.dat"]
    times = gw_sxs_bbh_0305[:,0]
    tmax=FindTmaximum(gw_sxs_bbh_0305)
    
    # Remember to download Lev6/metadata.json for simulation SXS:BBH:0305.
    # This postprocesses the metadata file to find the final mass and final spin
    metadata = {}
    with open(rootpath+"/git/rdstackingproject/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/metadata.json") as file:
        metadata["SXS:BBH:0305"] = json.load(file)
    
    af = metadata["SXS:BBH:0305"]['remnant_dimensionless_spin'][-1]
    mf = metadata["SXS:BBH:0305"]['remnant_mass']
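    
    # Optional sanity check (illustrative addition; not part of the original analysis):
    # the SXS dataset has columns [t, Re(h), Im(h)], so tmax, mf and af should all be
    # finite numbers at this point. Uncomment to verify:
    #print('Peak time:', tmax, ' remnant mass:', mf, ' remnant dimensionless spin:', af)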
    
    
    
    
    
    class BasicFits(object):
        '''
        Stores the basic fit functions and variables that can be shared across samplers, such as the ansatz function, the scipy fit, and the Bayesian prior. Also performs the fit with scipy.optimize.minimize and produces the corresponding plots.
        '''
        
        def get_nmax(self):
            '''
            Returns: tone index. e.g. nmax = 0 if fitting the fundamental tone.
            '''
            return self._nmax
        
        def set_nmax(self, value):
            '''
            Sets tone index to value. The corresponding omegas would also change.    
            value: int, usually 0, 1, 2.
            '''
            assert type(value) is int, 'Non-integer nmax. Is your research advisor Harry Potter?'
            if value > 3:
                print('Too high an nmax might give you trouble. Don\'t say I didn\'t warn ya!')
            self._nmax = value
            self._omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (0,self._nmax+1)]
            
        def get_tshift(self):
            '''
            Returns: time shift after the strain peak.
            '''
            return self._tshift
        
        def set_tshift(self, value):
            '''
            Sets tshift to value. The corresponding t0 and selected data are updated accordingly.
            value: >= 0. Preferably <= 20.
            '''
            assert value >= 0, 'Not sure if we want to start fitting before the merger'
            if value > 20:
                print('Are you sure you wanna fit this late? If this is what you mean then there\'s something to be taken care of in the fitting function')
            self._tshift = value
            self._t0=tmax +self._tshift
            position = np.argmax(times >= (self._t0))
            self._timesrd=gw_sxs_bbh_0305[position:-1][:,0] #Time from t0 onwards
            self._gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position:-1] #Waveform data selected from t0 onwards        
            
        def get_t0(self):
            '''
            Returns: the time that we start fitting. Not settable.
            '''
            return self._t0
        
        def get_timesrd(self):
            '''
            Returns: time from t0 onwards. Not settable.
            '''
            return self._timesrd
        
        def get_gw0305rd(self):
            '''
            Returns: Waveform data of gw0305 sxs selected from t0 onwards. Not settable.
            '''
            return self._gw_sxs_bbh_0305rd
        
        def get_omegas(self):
            '''
            Returns: the complex frequencies of our base class. Not settable.
            '''
            return self._omegas
        
            
            
        def __init__(self):
            '''
            Initializes the fit. Default is the test data.
            '''
            self._nmax = 0
            self._tshift = 10
            
            self._t0=tmax +self._tshift
            
            position = np.argmax(times >= (self._t0))
            self._timesrd=gw_sxs_bbh_0305[position:-1][:,0] #Time from t0 onwards
            self._gw_sxs_bbh_0305rd=gw_sxs_bbh_0305[position:-1] #Waveform data selected from t0 onwards
            
            # Depending on nmax, load the nmax+1 complex QNM frequencies (encoding both frequencies and damping times) from the qnm package
            self._omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (0,self._nmax+1)]
            
    
        #Functions
        def EradUIB2017(self, eta,chi1,chi2):
            '''
            Returns: the UIB 2017 fit to the radiated energy, E_rad = 1 - final mass (in units of the initial total mass).
            eta: symmetric mass ratio.
            chi1, chi2: respective spins of the binary.
            '''
            m1=0.5*(1+(1-4*eta)**0.5)
            m2=0.5*(1-(1-4*eta)**0.5)
            S= (m1**2*chi1 + m2**2*chi2)/(m1*m1 + m2*m2)
    
            erad=(((1-(2*(2)**0.5)/3)*eta+0.5609904135313374*eta**2-0.84667563764404*eta**3+3.145145224278187*eta**4)*(1+S**3 *(-0.6320191645391563+ 4.952698546796005*eta-10.023747993978121*eta**2)+S**2*(-0.17762802148331427+ 2.176667900182948*eta**2)+S*(-0.13084389181783257- 1.1387311580238488*eta+5.49074464410971*eta**2)))/(1+S*(-0.9919475346968611+ 0.367620218664352*eta+4.274567337924067*eta**2))-0.01978238971523653*S*(1-4.91667749015812*eta)*(1-4*eta)**0.5 *eta *(chi1-chi2)-0.09803730445895877*(1-4*eta)**0.5*(1-3.2283713377939134*eta)*eta**2 *(chi1-chi2)+0.01118530335431078*eta**3 *(chi1-chi2)**2
            return  erad
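        
        # Illustrative cross-check (assumes the mass ratio and spins encoded in the SXS
        # directory name: q = 1.22, chi1 = 0.330, chi2 = -0.440): the fitted remnant mass
        # 1 - EradUIB2017(eta, chi1, chi2) with eta = q/(1+q)**2 should land close to the
        # metadata value mf loaded above. Uncomment (after the class is defined) to check:
        #   eta = 1.22/(1 + 1.22)**2
        #   print(1 - BasicFits().EradUIB2017(eta, 0.330, -0.440), mf)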
        
        def labels(self):
            '''
            Returns: the labels for plotting and the labels for displaying constraints.
            '''
            paramlabels = []
            for i in range (self._nmax+1):
                sublabel = ['$x_'+str(i)+'$', '$y_'+str(i)+'$', r'$\alpha_'+str(i)+'$', r'$\beta_' + str(i) + '$']
                paramlabels += sublabel
                
            displaylabels = []
            for i in range (self._nmax+1):
                sublabel1 = [r'x_' + str(i), r'y_' + str(i), r'\alpha_' + str(i), r'\beta_' + str(i)]
                displaylabels += sublabel1
                
            return paramlabels, displaylabels
        
        #RD model with nmax+1 tones. Amplitudes are parametrized as x_n*exp(i*y_n). Used here.
        def model_dv(self, theta):
            '''
            Returns: the fitting ansatz. This version varies the fundamental frequency.
            theta: array-like, parameters in the order of [x0, y0, a0, b0, x1, y1, a1, b1, ...]
            '''
            #Your nmax might not align with the dim of theta. Better check it here.
            assert int(len(theta)/4) == self._nmax + 1, 'Please recheck your n and parameters'
            w = (np.real(self._omegas))/mf
            tau=-1/(np.imag(self._omegas))*mf
            dim =int(len(theta)/4)        
    
            xvars = [theta[4*i] for i in range (0, dim)]
            yvars = [theta[4*i+1] for i in range (0, dim)]
            avars = [theta[4*i+2] for i in range (0, dim)]
            bvars = [theta[4*i+3] for i in range (0, dim)]    
            
            yvars[0]=0
            bvars[0]=0
            #if self._nmax != 0:
            #    bvars[0]=0
            #    avars[0]=0
            ansatz = 0
            for i in range (0,dim):
                #bvars[1]=0
                #avars[1]=0
                ansatz += (xvars[i]*np.exp(1j*yvars[i]))*np.exp(-(self._timesrd-self._timesrd[0])/(tau[i]*(1+bvars[i])))\
                *(np.cos((1+avars[i])*w[i]*self._timesrd)-1j*np.sin((1+avars[i])*w[i]*self._timesrd))
            # -1j to agree with SXS convention
            return ansatz
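        
        # In equation form, model_dv implements
        #   h(t) = sum_{n=0}^{nmax} x_n * exp(i*y_n)
        #                          * exp(-(t - t_start) / (tau_n*(1 + b_n)))
        #                          * exp(-i*(1 + a_n)*w_n*t),
        # with t_start = self._timesrd[0], w_n = Re(omega_n)/mf and tau_n = -mf/Im(omega_n);
        # (a_n, b_n) are fractional deviations from the Kerr frequency and damping time.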
        
    
        #Bayesian functions
        def log_prior(self, theta):
            '''
            Log-prior distribution: uniform within the allowed parameter ranges and -inf outside. Works for the x_n*exp(i*y_n) parametrization.
            theta: array-like, just like the above.
            '''
            x_s = theta[0::4]
            y_s = theta[1::4]
            a_s = theta[2::4]
            b_s = theta[3::4]
            if self._nmax == 0:
                prior_range = 0.4
            elif self._nmax == 1:
                prior_range = 1.0
            else:
                #Fallback so higher nmax does not crash; the same range as nmax == 1 is assumed
                prior_range = 1.0
            #print('prior_range:')
            #print(prior_range)
            if all(0 <= t <= 3 for t in x_s) and all(0 <= t <= 2*np.pi for t in y_s) and all(-prior_range <= t <= prior_range for t in a_s) and all(-prior_range <= t <= prior_range for t in b_s):
                return 0.0
            return -np.inf
    
        def log_likelihood(self, theta):
            '''
            Log-likelihood function. A Gaussian log-likelihood (with unit variance, up to an additive constant) based on the summed squared residuals.
            theta: array-like, the same.
            '''
            modelev = self.model_dv(theta)
            return  -np.sum((self._gw_sxs_bbh_0305rd[:,1] - (modelev.real))**2+(self._gw_sxs_bbh_0305rd[:,2] - (modelev.imag))**2)
    
        def log_probability(self, theta):
            '''
            Logposterior distribution for the residuals case. The evidence is just a normalization factor, hence is not defined here.
            theta: array-like, the same.
            '''
            lp = self.log_prior(theta)
            if not np.isfinite(lp):
                return -np.inf
            return lp + self.log_likelihood(theta)
    
        #Fits using scipy.minimize
        def minifit(self):
            '''
            Returns: the maximum-likelihood best-fit parameters. (The companion minifitplot() plots the NR data against the ansatz.)
            '''
            #Provide an initial guess for the 4*(nmax+1) parameters
            np.random.seed(42)
            nll = lambda *args: -self.log_likelihood(*args)
            
            #This assigns the initial guess. There's some subtlety we need to take care of about the ansatz.
            initial_block = np.array([0.5, 1, 0, 0])
            
            #if self._nmax == 0:
            #    initial = initial_block
            #else:
            if 0 <= self._tshift <= 5:
                starter = np.array([4.28313743e-01, 1, 0, 0])
                #starter = np.array([4.28313743e-01, 0, 0, 0])
            elif 5 < self._tshift <= 10:
                starter = np.array([3.96224770e-01, 0, 0, 0])
            elif 10 < self._tshift <= 15:
                starter = np.array([2.46933125e-01, 1, 0, 0])
            elif 15 < self._tshift <= 20:
                starter = np.array([1.90743258e-01, 1, 0, 0])
            else:
                #No tuned starter is available for tshift > 20; fall back to the generic block
                starter = initial_block
            #print('starter:')
            #print(starter)
            initial = np.concatenate((starter, np.tile(initial_block, self._nmax)))
            
            #initial = np.concatenate((np.array([1.83115905e-01,-7.25269709e+02,0,0]), np.tile(initial_block, self._nmax)))    
    
            #initial = np.tile(initial_block, self._nmax+1)
            
            #np.array([1.83115905e-01,-7.25269709e+02,0,0])
            #print(initial)
            soln = minimize(nll, initial)
            vars_ml=soln.x    #x0_ml, y0_ml, a0_ml, b0_ml = soln.x
            print("Maximum likelihood estimates:") 
            #Maximum likelihood: minimum -log_likelihood. Log_likelihood is easier to calculate
            print(vars_ml)
            return vars_ml
        
        def minifitplot(self):
            '''
            Plots the NR data against the ansatz data.
            '''
            plt.plot(self._timesrd, self._gw_sxs_bbh_0305rd[:,1], "r", alpha=0.3, lw=3, label=r'$NR\_re$')
            modelfit = self.model_dv(self.minifit())
            plt.plot(self._timesrd, modelfit.real,"b", alpha=0.3, lw=3, label=r'$Fit\_re$')
            #plt.plot(x0, np.dot(np.vander(x0, 2), w), "--k", label="LS")
            plt.legend(fontsize=14)
            plt.xlim(self._timesrd[0], self._timesrd[0]+80)
            plt.xlabel("t")
            plt.ylabel("h");
        
        
    
    class EmceeFits(BasicFits):
        '''
        Fitting with emcee. Subclass of BasicFits. Produces the chain plot and corner plot using corner.py.
        '''
        
        def get_npoints(self):
            '''
            Returns: number of points you're using for your sampling
            '''
            return self._npoints
        
        def set_npoints(self, value):
            '''
            Set the number of points you're using for your sampling to value. 
            value: positive integer
            '''
            assert type(value) is int and value > 0, 'Show me how you divide a point...'
            self._npoints = value
            
        def get_nwalkers(self):
            '''
            Returns: the number of walkers.
            '''
            return self._nwalkers
        
        def set_nwalkers(self, value):
            '''
            Set the number of walkers you're using for your sampling to value. 
            value: positive integer
            '''
            assert type(value) is int and value > 0, 'Is one of your walkers incomplete?'
            self._nwalkers = value
            
        def get_nmax(self):
            '''
            Returns: tone index. e.g. nmax = 0 if fitting the fundamental tone.
            '''
            return self._nmax
        
        def set_nmax(self, value):
            '''
            Sets the tone index to value. The corresponding omegas and ndim also change, so this override repeats the update in the subclass.
            value: int, usually 0, 1, 2.
            '''
            assert type(value) is int, 'Non-integer nmax. Is your research advisor Harry Potter?'
            if value > 3:
                print('Too high an nmax might give you trouble. Don\'t say I didn\'t warn ya!')
            self._nmax = value
            self._omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (0,self._nmax+1)]
            self._ndim = int(4*(self._nmax+1))
        
        #There is no getter/setter override for self._tshift here because the subclass has no new attributes that depend on it; the base-class getter and setter suffice.
        
        def get_ndim(self):
            '''
            Returns: the parameter dimension. Not settable on its own.
            '''
            return self._ndim
            
        def __init__(self):
            '''
            Inherit the base-class setup and add sampler parameters. The default npoints is kept modest, just for testing.
            '''
            super().__init__()
            self._npoints = 5000
            self._nwalkers = 32
            self._ndim = int(4*(self._nmax+1))
     
        
        def pos(self):
            '''
            Returns: the initial walker positions, an array of shape (nwalkers, ndim), for the 4*(nmax+1) variables.
            '''
            #Base point repeated for each tone, then scattered in a small Gaussian ball per walker
            pos = np.array([4.28313743e-01, random.uniform(0, 2*np.pi), random.uniform(-0.1, 0.1), random.uniform(-0.1, 0.1)] * (self._nmax+1))
            pos = pos + 1e-3 * np.random.randn(self._nwalkers, self._ndim)
            return pos
    
        def fitsrun(self, burning = 2000):
            '''
            Runs emcee. Generates the chain plot, corner plot, parameter constraints, and data plotted with the 1-σ band.
            burning: number of initial steps to discard as burn-in.
            '''
            print('Running emcee sampler')
            sampler = emcee.EnsembleSampler(self._nwalkers, self._ndim, self.log_probability)
            sampler.run_mcmc(self.pos(),self._npoints,progress=True)
                
            print('Chain plot:')
            fig, axes = plt.subplots(self._ndim, 1, sharex=True, figsize=(12, 13.5*(self._nmax+1)))
            for i in range(self._ndim):
                axes[i].plot(sampler.chain[:, :, i].T, color="k", alpha=0.4)
                axes[i].yaxis.set_major_locator(MaxNLocator(5))
                axes[i].set_ylabel(self.labels()[0][i])
            axes[-1].set_xlabel('Iterations')
            plt.show()
            
            #Get the peak value (or median value?) as the truths
            print('Values with peak likelihood:')
            burnin = burning
            flat_samples = sampler.get_chain(discard=burnin, thin=15, flat=True)
            lglk = np.array([self.log_likelihood(flat_samples[i]) for i in range(len(flat_samples))])
            pk = flat_samples[np.argmax(lglk)]
            print(pk)
            
            print('Corner plot:')
            fig_corn = corner.corner(
                flat_samples, bins=60, labels=self.labels()[0], truths=pk,quantiles=(0.05, 0.16, 0.84, 0.95)
            )
            plt.show()
            
            print('1-sigma constraints on the parameters:')
            for i in range(len(flat_samples[0])):
                mcmc = np.percentile(flat_samples[:, i], [16, 50, 84])
                #print(mcmc)
                q = np.diff(mcmc)
                #print(q)
                txt = "\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{{2:.3f}}}"
                txt = txt.format(mcmc[1], q[0], q[1], self.labels()[1][i])
                display(Math(txt))
           
            print('Plot the NR data against the ansatz, together with the 1-sigma varying error')
            modelfit = self.model_dv(self.minifit())
            #modelfitmed = model_dv_af(median)
            modelfitpk = self.model_dv(pk)
            #Get the upper and lower bounds
            onesig_bounds = np.array([np.percentile(flat_samples[:, i], [16, 84]) for i in range(len(flat_samples[0]))]).T
            fig_band = plt.figure(figsize = (12, 9))
            #Plot the 68-percentile (1-σ)
            for j in range(len(flat_samples)):
                sample = flat_samples[j]
                if np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]):
                    plt.plot(self._timesrd, self.model_dv(sample).real, "#79CAF2", alpha=0.3)
    
    
            plt.plot(self._timesrd, self._gw_sxs_bbh_0305rd[:,1], "k", alpha=0.7, lw=2, label=r'NR\_re')
            plt.plot(self._timesrd, modelfit.real, "g", alpha=0.7, lw=2, label=r'Fit\_re')
            plt.plot(self._timesrd, modelfitpk.real, "r", alpha=0.7, lw=2, label=r'FitMCmax\_re')
            plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
            plt.legend()
            plt.xlim(self._timesrd[0], self._timesrd[0]+80)
            plt.xlabel("t")
            plt.ylabel("h");
            #fig_band.savefig(rootpath+'/git/rdstackingproject/plots/fit_signal.pdf', format = 'pdf');
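    
    # Minimal usage sketch for the emcee-based fit (illustrative only; the settings below
    # are assumptions for a quick test run):
    #   efits = EmceeFits()
    #   efits.set_nmax(1)
    #   efits.set_tshift(0)
    #   efits.set_npoints(6000)
    #   efits.set_nwalkers(64)
    #   efits.fitsrun(burning=3000)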
    
    
    class PtemceeFits(BasicFits):
        '''
        Fitting with ptemcee. Subclass of BasicFits. Produces the chain plot and corner plot using corner.py. Also calculates and plots percentiles (1-σ band).
        '''
        
        def get_npoints(self):
            '''
            Returns: number of points you're using for your sampling
            '''
            return self._npoints
        
        def set_npoints(self, value):
            '''
            Set the number of points you're using for your sampling to value. 
            value: positive integer
            '''
            assert type(value) is int and value > 0, 'Show me how you divide a point...'
            self._npoints = value
            
        def get_nwalkers(self):
            '''
            Returns: the number of walkers.
            '''
            return self._nwalkers
        
        def set_nwalkers(self, value):
            '''
            Set the number of walkers you're using for your sampling to value. 
            value: positive integer
            '''
            assert type(value) is int and value > 0, 'Is one of your walkers incomplete?'
            self._nwalkers = value
            
        def get_ntemps(self):
            '''
            Returns: the number of temperatures.
            '''
            return self._ntemps
        
        def set_ntemps(self, value):
            '''
            Set the number of temperatures you're using for your sampling to value. 
            value: positive integer
            '''
            assert type(value) is int and value > 0, 'I guess you also set your nwalkers to a non-integer.'
            self._ntemps = value
    
        def get_nmax(self):
            '''
            Returns: tone index. e.g. nmax = 0 if fitting the fundamental tone.
            '''
            return self._nmax
        
        def set_nmax(self, value):
            '''
            Sets the tone index to value. The corresponding omegas and ndim also change, so this override repeats the update in the subclass.
            value: int, usually 0, 1, 2.
            '''
            assert type(value) is int, 'Non-integer nmax. Is your research advisor Harry Potter?'
            if value > 3:
                print('Too high an nmax might give you trouble. Don\'t say I didn\'t warn ya!')
            self._nmax = value
            self._omegas = [qnm.modes_cache(s=-2,l=2,m=2,n=i)(a=af)[0] for i in range (0,self._nmax+1)]
            self._ndim = int(4*(self._nmax+1))
        
        #There is no getter/setter override for self._tshift here because the subclass has no new attributes that depend on it; the base-class getter and setter suffice.
        
        def get_ndim(self):
            '''
            Returns: the parameter dimension. Not settable on its own.
            '''
            return self._ndim
    
        def __init__(self):
            '''
            Inherit the base-class setup and add sampler parameters. The default npoints is kept modest, just for testing.
            '''
            super().__init__()
            self._npoints = 5000
            self._nwalkers = 32
            self._ntemps = 12
            self._ndim = int(4*(self._nmax+1))
    
    
        def pos(self):
            '''
            Returns: the initial walker positions, an array of shape (ntemps, nwalkers, ndim), for the 4*(nmax+1) variables.
            '''
            pos = [4.28313743e-01, random.uniform(0.01,2*np.pi), random.uniform(-0.1,0.1), random.uniform(-0.1,0.1)]
            for i in range (1,self._nmax+1):
                pos_aux = [4.28313743e-01, random.uniform(0.01,0.02), random.uniform(-.1,.1), random.uniform(-.1,.1)]
                pos += pos_aux
    
            pos = np.array(pos) + 1e-5 * np.random.randn(self._ntemps, self._nwalkers, self._ndim)
            return pos
        
        def fitsrun(self, burning = 2000):
            '''
            Runs ptemcee. Generates the chain plot, corner plot, parameter constraints, and the data plotted with the 1-sigma band.
            burning: number of initial steps to discard as burn-in.
            '''
            print('Running ptemcee sampler')
            sampler = ptemcee.Sampler(self._nwalkers, self._ndim, self.log_likelihood, self.log_prior, ntemps=self._ntemps)
            sampler.run_mcmc(self.pos(), self._npoints)
       
            print('Chain plot:')
            fig, axes = plt.subplots(self._ndim, 1, sharex=True, figsize=(12, 13.5*(self._nmax+1)))
            for i in range(self._ndim):
                axes[i].plot(sampler.chain[0,:, :, i].T, color="k", alpha=0.4)
                axes[i].yaxis.set_major_locator(MaxNLocator(5))
                axes[i].set_ylabel(self.labels()[0][i])
            axes[-1].set_xlabel('Iterations')
            plt.show()
            
    
            #Get the peak value (or median value?) as the truths
            print('Values with peak likelihood:')
            burnin = burning
            samples = sampler.chain[0,:, burnin:, :].reshape((-1, self._ndim))
            lglk = np.array([self.log_likelihood(samples[i]) for i in range(len(samples))])
            pk = samples[np.argmax(lglk)]
            print(pk)        
    
            
            print('Corner plot:')
            fig_corn = corner.corner(
                samples, bins=60, labels=self.labels()[0], truths=pk, quantiles=(0.05, 0.16, 0.84, 0.95)
            )
            plt.show()
            
            print('1-sigma constraints on the parameters:')
            for i in range(len(samples[0])):
                mcmc = np.percentile(samples[:, i], [16, 50, 84])
                #print(mcmc)
                q = np.diff(mcmc)
                #print(q)
                txt = "\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{{2:.3f}}}"
                txt = txt.format(mcmc[1], q[0], q[1], self.labels()[1][i])
                display(Math(txt))
            
            print('Plot the NR data against the ansatz, together with the 1-sigma varying error')
            modelfit = self.model_dv(self.minifit())
            #modelfitmed = model_dv_af(median)
            modelfitpk = self.model_dv(pk)
            #Get the upper and lower bounds
            onesig_bounds = np.array([np.percentile(samples[:, i], [16, 84]) for i in range(len(samples[0]))]).T
            fig_band = plt.figure(figsize = (12, 9))
            #Plot the 68-percentile (1-σ)
            for j in range(len(samples)):
                sample = samples[j]
                if np.all(onesig_bounds[0] <= sample) and np.all(sample <= onesig_bounds[1]):
                    plt.plot(self._timesrd, self.model_dv(sample).real, "#79CAF2", alpha=0.3)
    
    
            plt.plot(self._timesrd, self._gw_sxs_bbh_0305rd[:,1], "k", alpha=0.7, lw=2, label=r'NR\_re')
            plt.plot(self._timesrd, modelfit.real, "g", alpha=0.7, lw=2, label=r'Fit\_re')
            plt.plot(self._timesrd, modelfitpk.real, "r", alpha=0.7, lw=2, label=r'FitMCmax\_re')
            plt.title(r'Comparison of the MC fit data and the $1-\sigma$ error band')
            plt.legend()
            plt.xlim(self._timesrd[0], self._timesrd[0]+80)
            plt.xlabel("t")
            plt.ylabel("h");
            #fig_band.savefig(rootpath+'/git/rdstackingproject/plots/fit_signal.pdf', format = 'pdf');
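    
    # Minimal usage sketch for the parallel-tempered fit (illustrative only; the settings
    # below are assumptions for a quick test run):
    #   ptfits = PtemceeFits()
    #   ptfits.set_nmax(1)
    #   ptfits.set_tshift(0)
    #   ptfits.set_ntemps(12)
    #   ptfits.fitsrun(burning=3000)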