Yifan Wang / RDStackingProject / Commits

Commit ebf75f72, authored Aug 19, 2020 by frcojimenez

    test 4 parameters

Parent: b486909f

Changes: 1 changed file, 272 additions, 0 deletions
code/RDGW150914_ptemceeX.py (new file, mode 100755): +272 −0
#!/usr/bin/env python
# coding: utf-8

# In[191]:

import numpy as np
import corner
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib import rc
plt.rcParams['font.family'] = 'DejaVu Sans'
rc('text', usetex=True)
plt.rcParams.update({'font.size': 19})
import ptemcee
import h5py
import inspect
import pandas as pd
import json
import qnm
import random
#import hope
# ### <font text color = 'Goldenrod'>Import data and necessary functions</font>

# In[192]:

#TimeOfMaximum
def FindTmaximum(y):
    #Determines the maximum absolute value of the complex waveform
    absval = y[:,1]*y[:,1] + y[:,2]*y[:,2]
    vmax = np.max(absval)
    index = np.argmax(absval == vmax)
    timemax = gw_sxs_bbh_0305[index,0]
    return timemax
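# Note: FindTmaximum computes |h|^2 from its argument y but reads the peak time from the
# global array gw_sxs_bbh_0305, so it only gives the right answer when y is that same
# dataset (as it is below).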
# You do not need that. This is a fit to the energy radiated = 1 - final_mass
def EradUIB2017(eta, chi1, chi2):
    m1 = 0.5*(1 + (1 - 4*eta)**0.5)
    m2 = 0.5*(1 - (1 - 4*eta)**0.5)
    S = (m1**2*chi1 + m2**2*chi2)/(m1*m1 + m2*m2)
    erad = (((1 - (2*(2)**0.5)/3)*eta + 0.5609904135313374*eta**2 - 0.84667563764404*eta**3 + 3.145145224278187*eta**4)*(1 + S**3*(-0.6320191645391563 + 4.952698546796005*eta - 10.023747993978121*eta**2) + S**2*(-0.17762802148331427 + 2.176667900182948*eta**2) + S*(-0.13084389181783257 - 1.1387311580238488*eta + 5.49074464410971*eta**2)))/(1 + S*(-0.9919475346968611 + 0.367620218664352*eta + 4.274567337924067*eta**2)) - 0.01978238971523653*S*(1 - 4.91667749015812*eta)*(1 - 4*eta)**0.5*eta*(chi1 - chi2) - 0.09803730445895877*(1 - 4*eta)**0.5*(1 - 3.2283713377939134*eta)*eta**2*(chi1 - chi2) + 0.01118530335431078*eta**3*(chi1 - chi2)**2
    return erad
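# Quick check of the fit (illustrative): for an equal-mass, non-spinning binary
# (eta = 0.25, chi1 = chi2 = 0) the spin terms vanish and EradUIB2017 returns ~0.048,
# i.e. roughly 4.8% of the total mass radiated, as expected for such systems.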
# In[193]:

#rootpath: root path to nr data
#npoints: number of points you are using for your sampling
#nmax: tone index --> nmax = 0 if fitting the fundamental tone
#tshift: time shift after the strain peak
rootpath = "/Users/xisco"
#npoints=5000 (This is more conveniently defined in the MCMC section)
nmax = 0
tshift = 0
# In[194]:

print('hello')
#This loads the 22 mode data
gw = {}
gw["SXS:BBH:0305"] = h5py.File(rootpath+"/sio/git/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/rhOverM_Asymptotic_GeometricUnits_CoM.h5", 'r')
gw_sxs_bbh_0305 = gw["SXS:BBH:0305"]["Extrapolated_N2.dir"]["Y_l2_m2.dat"]

# Remember to download metadata.json from the simulation with number: 0305. Download Lev6/metadata.json
# This postprocesses the metadata file to find the final mass and final spin
metadata = {}
with open(rootpath+"/sio/git/rdstackingproject/SXS/BBH_SKS_d14.3_q1.22_sA_0_0_0.330_sB_0_0_-0.440/Lev6/metadata.json") as file:
    metadata["SXS:BBH:0305"] = json.load(file)

af = metadata["SXS:BBH:0305"]['remnant_dimensionless_spin'][-1]
mf = metadata["SXS:BBH:0305"]['remnant_mass']
# In[195]:

#times --> x axis of your data
times = gw_sxs_bbh_0305[:,0]
tmax = FindTmaximum(gw_sxs_bbh_0305)
t0 = tmax + tshift

#Select the data from t0 onwards
position = np.argmax(times >= (t0))
gw_sxs_bbh_0305rd = gw_sxs_bbh_0305[position:-1]
timesrd = gw_sxs_bbh_0305[position:-1][:,0]

# Depending on nmax, you load nmax number of freqs. and damping times from the qnm package
omegas = []
for i in range(0, nmax+1):
    grav_220 = qnm.modes_cache(s=-2, l=2, m=2, n=i)
    omega = grav_220(a=af)[0]
    omegas.append(omega)
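# omegas now holds the dimensionless complex Kerr QNM frequencies M*omega_{22n} for spin af;
# the model below rescales them by the remnant mass mf, and tau comes out positive because
# the qnm package returns Im(omega) < 0 for damped modes.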
# ### <font text color = 'Goldenrod'>Fitting</font>

# In[196]:

#Functions
#RD model for nmax tones. Amplitudes are in (xn*Exp[i yn]) version. Used here.
def model_dv_af(theta):
    #x0, y0= theta
    #Your nmax might not align with the dim of theta. Better check it here.
    assert int(len(theta)/4) == nmax + 1, 'Please recheck your n and parameters'
    w = (np.real(omegas))/mf
    tau = -1/(np.imag(omegas))*mf
    dim = int(len(theta)/4)
    xvars = [theta[4*i] for i in range(0, dim)]
    yvars = [theta[4*i+1] for i in range(0, dim)]
    avars = [theta[4*i+2] for i in range(0, dim)]
    bvars = [theta[4*i+3] for i in range(0, dim)]
    ansatz = 0
    for i in range(0, dim):
        # bvars[0]=0
        # avars[0]=0
        # bvars[1]=0
        # avars[1]=0
        ansatz += (xvars[i]*np.exp(1j*yvars[i]))*np.exp(-(timesrd - timesrd[0])/(tau[i]*(1 + bvars[i])))*(np.cos((1 + avars[i])*w[i]*timesrd) - 1j*np.sin((1 + avars[i])*w[i]*timesrd))
    # -1j to agree with SXS convention
    return ansatz
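# In closed form the ansatz above reads
#   h(t) = sum_n x_n e^{i y_n} e^{-(t - t_0)/[tau_n (1 + b_n)]} e^{-i (1 + a_n) w_n t},
# with t_0 = timesrd[0]: a sum of damped sinusoids whose GR frequencies w_n and damping
# times tau_n are deformed by the fractional deviations a_n and b_n
# (np.cos(x) - 1j*np.sin(x) is just e^{-i x}).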
# LogLikelihood function. It is just a Gaussian loglikelihood based on computing the residuals^2
def log_likelihood(theta):
    #modelev = model_dv(theta)
    modelev = model_dv_af(theta)
    #return -np.sum((gw_sxs_bbh_0305rd[:,1] - (modelev.real))**2+(gw_sxs_bbh_0305rd[:,2] - (modelev.imag))**2)
    return -np.sum((gw_sxs_bbh_0305rd[:,1] - modelev.real)**2 + (gw_sxs_bbh_0305rd[:,2] - modelev.imag)**2)
# LogLikelihood match function. Not used at the moment. It is just a Gaussian match loglikelihood based on computing the match
'''
def log_likelihood_match(theta):
    #model and data
    modelev = model_dv(theta)
    data = gw_sxs_bbh_0305rd[:,1] - 1j*gw_sxs_bbh_0305rd[:,2]
    #norms
    norm1=np.sum(modelev*np.conj(modelev))
    norm2=np.sum(data*np.conj(data))
    #mismatch
    myTable = data*np.conj(modelev);
    return -(1-(np.sum(myTable)).real/np.sqrt(norm1*norm2)).real
'''
# Logposterior distribution for the residuals case.
# The evidence is just a normalization factor
def log_probability(theta):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta)
# Logposterior distribution for the mismatch case. Not used yet.
'''
def log_probability_match(theta):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood_match(theta)
'''
# Logprior distribution. It defines the allowed range over which the variables can vary.
#It works for the (xn*Exp[iyn]) version!!!
def log_prior(theta):
    x_s = theta[0::4]
    y_s = theta[1::4]
    a_s = theta[2::4]
    b_s = theta[3::4]
    if all(0 <= t <= 10 for t in x_s) and all(0 <= t <= 2*np.pi for t in y_s) and all(-1 <= t <= 1 for t in a_s) and all(-1 <= t <= 5 for t in b_s):
        return 0.0
    return -np.inf
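# Illustrative prior check (assuming nmax = 0, so theta has four entries):
#   log_prior(np.array([1.0, 1.0, 0.0, 0.0]))   returns  0.0     (inside the box)
#   log_prior(np.array([20.0, 1.0, 0.0, 0.0]))  returns -np.inf  (x_0 outside [0, 10])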
# Maximum estimator fitting. Same usage as in Mathematica.
# Interesting: the "overflow encountered in square" warning does not appear when the parameter fitting is not being run.
# In[197]:

nwalkers = 10
ntemps = 10
#Why can't I change the number of temperatures here?
ndim = int(4*(nmax + 1))
npoints = 100

#Does the difference in pos and pos_aux matter? Why is the range different here?
pos = [random.uniform(0., 10), random.uniform(0.01, 2*np.pi), random.uniform(-1, 1), random.uniform(-0.99, 1)]
for i in range(1, nmax+1):
    pos_aux = [random.uniform(1, 8), random.uniform(0.01, 0.9*2*np.pi), random.uniform(-.1, .1), random.uniform(-.1, .1)]
    pos = pos + pos_aux

pos = pos + 1e-4*np.random.randn(ntemps, nwalkers, ndim)
sampler = ptemcee.Sampler(nwalkers, ndim, log_likelihood, log_prior, ntemps=ntemps)
sampler.run_mcmc(pos, npoints);
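# Note: as used here, ptemcee expects the initial walker positions with shape
# (ntemps, nwalkers, ndim); broadcasting the list `pos` against the small Gaussian
# perturbation above yields exactly that, a tight ball around one random starting point.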
# In[202]:

paramlabels = []
for i in range(nmax + 1):
    sublabel = [r'$x_'+str(i)+'$', r'$y_'+str(i)+'$', r'$\alpha_'+str(i)+'$', r'$\beta_'+str(i)+'$']
    paramlabels += sublabel
# In[203]:

plt.clf()
fig, axes = plt.subplots(4*(nmax+1), 1, sharex=True, figsize=(12, 13.5*(nmax+1)))
for i in range(4*(nmax+1)):
    axes[i].plot(sampler.chain[0, :, :, i].T, color="k", alpha=0.4)
    axes[i].yaxis.set_major_locator(MaxNLocator(5))
    axes[i].set_ylabel(paramlabels[i])
fig.savefig(rootpath+'/sio/git/rdstackingproject/plotsmc/Test'+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_chain.png', format='png', dpi=384, bbox_inches='tight')
#plt.show()
# In[204]:

atcrr = sampler.get_autocorr_time()[0]
atcrr
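# The bare `atcrr` on the last line echoes the autocorrelation-time estimate when the cell
# is run interactively in a notebook; in a plain script it has no visible effect.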
# In[206]:

burnin = 200
samples = sampler.chain[0, :, burnin:, :].reshape((-1, ndim))
median = np.median(samples, axis=0)
print(median)
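# Caution: with npoints = 100 stored steps and burnin = 200, the slice above is empty and
# `samples` has zero rows; burnin must be smaller than the chain length for the median and
# the corner plot below to be meaningful.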
figcorn = corner.corner(samples, bins=50, labels=paramlabels, truths=median, quantiles=(0.1, 0.9))
figcorn.savefig(rootpath+'/sio/git/rdstackingproject/plotsmc/Test'+'nmax='+str(nmax)+'_tshift='+str(tshift)+'_'+str(npoints)+'pt_corner.pdf', format='pdf')
# In[143]:

#Now plot the NR data against the ansatz data
plt.plot(timesrd, gw_sxs_bbh_0305rd[:,1], "r", alpha=0.3, lw=3, label=r'$NR\_re$')
modelfit = model_dv_af(median)
plt.plot(timesrd, modelfit.real, "b", alpha=0.3, lw=3, label=r'$Fit\_re$')
#plt.plot(x0, np.dot(np.vander(x0, 2), w), "--k", label="LS")
plt.legend(fontsize=14)
plt.xlim(timesrd[0], timesrd[0]+80)
plt.xlabel("t")
plt.ylabel("h");

# In[ ]: