Commit 6a8e4c3b authored by Reinhard Prix

converted to python3 via '2to3 -w'

- tests currently fail
parent aa700d85
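For reference, a minimal Python sketch (not part of the commit) of the conversion step named in the commit message, driving the stock lib2to3 fixers for a single module; the target file chosen below is illustrative only, the commit presumably ran the command-line `2to3 -w` over the whole tree:

# Minimal sketch (assumption, not part of the commit): equivalent of
# running `2to3 -w <file>` on the command line via the lib2to3 API.
import sys
from lib2to3.main import main

# "lib2to3.fixes" is the package holding the standard fixers;
# "-w" writes the converted source back in place (a .bak backup is kept).
# The target path is illustrative only.
sys.exit(main("lib2to3.fixes", args=["-w", "pyfstat/core.py"]))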
@@ -28,13 +28,13 @@ data.make_data()
 # The predicted twoF, given by lalapps_predictFstat can be accessed by
 twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
+print('Predicted twoF value: {}\n'.format(twoF))
 DeltaF0 = 1e-7
 DeltaF1 = 1e-13
 VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
 VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
+print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
 theta_prior = {'F0': {'type': 'unif',
                       'lower': F0-DeltaF0/2.,
...
@@ -28,13 +28,13 @@ data.make_data()
 # The predicted twoF, given by lalapps_predictFstat can be accessed by
 twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
+print('Predicted twoF value: {}\n'.format(twoF))
 DeltaF0 = 1e-7
 DeltaF1 = 1e-13
 VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
 VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
+print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
 theta_prior = {'F0': {'type': 'unif',
                       'lower': F0-DeltaF0/2.,
...
@@ -28,7 +28,7 @@ data.make_data()
 # The predicted twoF, given by lalapps_predictFstat can be accessed by
 twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
+print('Predicted twoF value: {}\n'.format(twoF))
 # Search
 VF0 = VF1 = 1e5
...
@@ -66,5 +66,5 @@ mcmc.plot_corner(label_offset=0.25, truths=[0, 0, 0, 0],
 mcmc.print_summary()
-print('Prior widths =', F0_width, F1_width)
-print("Actual run time = {}".format(dT))
+print(('Prior widths =', F0_width, F1_width))
+print(("Actual run time = {}".format(dT)))
@@ -62,6 +62,6 @@ fig.savefig('{}/{}_projection_matrix.png'.format(outdir, label),
             bbox_inches='tight')
-print('Prior widths =', F0_width, F1_width)
-print("Actual run time = {}".format(dT))
-print("Actual number of grid points = {}".format(search.data.shape[0]))
+print(('Prior widths =', F0_width, F1_width))
+print(("Actual run time = {}".format(dT)))
+print(("Actual number of grid points = {}".format(search.data.shape[0])))
@@ -27,13 +27,13 @@ data.make_data()
 # The predicted twoF, given by lalapps_predictFstat can be accessed by
 twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
+print('Predicted twoF value: {}\n'.format(twoF))
 DeltaF0 = 1e-7
 DeltaF1 = 1e-13
 VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
 VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
+print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
 theta_prior = {'F0': {'type': 'unif',
                       'lower': F0-DeltaF0/2.,
...
@@ -28,13 +28,13 @@ data.make_data()
 # The predicted twoF, given by lalapps_predictFstat can be accessed by
 twoF = data.predict_fstat()
-print 'Predicted twoF value: {}\n'.format(twoF)
+print('Predicted twoF value: {}\n'.format(twoF))
 DeltaF0 = 1e-7
 DeltaF1 = 1e-13
 VF0 = (np.pi * duration * DeltaF0)**2 / 3.0
 VF1 = (np.pi * duration**2 * DeltaF1)**2 * 4/45.
-print '\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1)
+print('\nV={:1.2e}, VF0={:1.2e}, VF1={:1.2e}\n'.format(VF0*VF1, VF0, VF1))
 theta_prior = {'F0': {'type': 'unif',
                       'lower': F0-DeltaF0/2.,
...
+from __future__ import division as _division
 from .core import BaseSearchClass, ComputeFstat, SemiCoherentSearch, SemiCoherentGlitchSearch
 from .make_sfts import Writer, GlitchWriter, FrequencyModulatedArtifactWriter, FrequencyAmplitudeModulatedArtifactWriter
...
""" The core tools used in pyfstat """ """ The core tools used in pyfstat """
from __future__ import division, absolute_import, print_function
import os import os
import logging import logging
......
""" Searches using grid-based methods """ """ Searches using grid-based methods """
from __future__ import division, absolute_import, print_function
import os import os
import logging import logging
...@@ -342,7 +342,7 @@ class GridSearch(BaseSearchClass): ...@@ -342,7 +342,7 @@ class GridSearch(BaseSearchClass):
def print_max_twoF(self): def print_max_twoF(self):
d = self.get_max_twoF() d = self.get_max_twoF()
print('Max twoF values for {}:'.format(self.label)) print('Max twoF values for {}:'.format(self.label))
for k, v in d.iteritems(): for k, v in d.items():
print(' {}={}'.format(k, v)) print(' {}={}'.format(k, v))
def set_out_file(self, extra_label=None): def set_out_file(self, extra_label=None):
...@@ -1006,7 +1006,7 @@ class EarthTest(GridSearch): ...@@ -1006,7 +1006,7 @@ class EarthTest(GridSearch):
vals = [self.minStartTime, self.maxStartTime, self.F0, self.F1, vals = [self.minStartTime, self.maxStartTime, self.F0, self.F1,
self.F2, self.Alpha, self.Delta] self.F2, self.Alpha, self.Delta]
self.special_data = {'zero': [0, 0, 0]} self.special_data = {'zero': [0, 0, 0]}
for key, (dR, dphi, dP) in self.special_data.iteritems(): for key, (dR, dphi, dP) in self.special_data.items():
rescaleRadius = (1 + dR / lal.REARTH_SI) rescaleRadius = (1 + dR / lal.REARTH_SI)
rescalePeriod = (1 + dP / lal.DAYSID_SI) rescalePeriod = (1 + dP / lal.DAYSID_SI)
lalpulsar.BarycenterModifyEarthRotation( lalpulsar.BarycenterModifyEarthRotation(
......
@@ -110,7 +110,7 @@ def get_ephemeris_files():
         logging.warning('No [earth/sun]_ephem found in '+config_file+'. '+please)
         earth_ephem = None
         sun_ephem = None
-    elif env_var in os.environ.keys():
+    elif env_var in list(os.environ.keys()):
         earth_ephem = os.path.join(os.environ[env_var],'earth00-40-DE421.dat.gz')
         sun_ephem = os.path.join(os.environ[env_var],'sun00-40-DE421.dat.gz')
         if not ( os.path.isfile(earth_ephem) and os.path.isfile(sun_ephem) ):
...
""" pyfstat tools to generate sfts """ """ pyfstat tools to generate sfts """
from __future__ import division, absolute_import, print_function
import numpy as np import numpy as np
import logging import logging
...@@ -477,7 +477,7 @@ class FrequencyModulatedArtifactWriter(Writer): ...@@ -477,7 +477,7 @@ class FrequencyModulatedArtifactWriter(Writer):
linePhi = 0 linePhi = 0
lineFreq_old = 0 lineFreq_old = 0
for i in tqdm(range(self.nsfts)): for i in tqdm(list(range(self.nsfts))):
mid_time = self.tstart + (i+.5)*self.Tsft mid_time = self.tstart + (i+.5)*self.Tsft
lineFreq = self.get_frequency(mid_time) lineFreq = self.get_frequency(mid_time)
...@@ -517,7 +517,7 @@ class FrequencyModulatedArtifactWriter(Writer): ...@@ -517,7 +517,7 @@ class FrequencyModulatedArtifactWriter(Writer):
logging.info('Using {} threads'.format(args.N)) logging.info('Using {} threads'.format(args.N))
try: try:
with pathos.pools.ProcessPool(args.N) as p: with pathos.pools.ProcessPool(args.N) as p:
list(tqdm(p.imap(self.make_ith_sft, range(self.nsfts)), list(tqdm(p.imap(self.make_ith_sft, list(range(self.nsfts))),
total=self.nsfts)) total=self.nsfts))
except KeyboardInterrupt: except KeyboardInterrupt:
p.terminate() p.terminate()
...@@ -525,7 +525,7 @@ class FrequencyModulatedArtifactWriter(Writer): ...@@ -525,7 +525,7 @@ class FrequencyModulatedArtifactWriter(Writer):
logging.info( logging.info(
"No multiprocessing requested or `pathos` not install, cont." "No multiprocessing requested or `pathos` not install, cont."
" without multiprocessing") " without multiprocessing")
for i in tqdm(range(self.nsfts)): for i in tqdm(list(range(self.nsfts))):
self.make_ith_sft(i) self.make_ith_sft(i)
self.concatenate_sft_files() self.concatenate_sft_files()
......
""" Searches using MCMC-based methods """ """ Searches using MCMC-based methods """
from __future__ import division, absolute_import, print_function
import sys import sys
import os import os
...@@ -221,7 +221,7 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -221,7 +221,7 @@ class MCMCSearch(core.BaseSearchClass):
self.theta_keys = [] self.theta_keys = []
fixed_theta_dict = {} fixed_theta_dict = {}
for key, val in self.theta_prior.iteritems(): for key, val in self.theta_prior.items():
if type(val) is dict: if type(val) is dict:
fixed_theta_dict[key] = 0 fixed_theta_dict[key] = 0
self.theta_keys.append(key) self.theta_keys.append(key)
...@@ -953,7 +953,7 @@ class MCMCSearch(core.BaseSearchClass): ...@@ -953,7 +953,7 @@ class MCMCSearch(core.BaseSearchClass):
See the pyfstat.core.plot_twoF_cumulative function for further details See the pyfstat.core.plot_twoF_cumulative function for further details
""" """
d, maxtwoF = self.get_max_twoF() d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.iteritems(): for key, val in self.theta_prior.items():
if key not in d: if key not in d:
d[key] = val d[key] = val
@@ -1223,8 +1223,8 @@ class MCMCSearch(core.BaseSearchClass):
     def _generate_scattered_p0(self, p):
         """ Generate a set of p0s scattered about p """
         p0 = [[p + self.scatter_val * p * np.random.randn(self.ndim)
-               for i in xrange(self.nwalkers)]
-              for j in xrange(self.ntemps)]
+               for i in range(self.nwalkers)]
+              for j in range(self.ntemps)]
         return p0

     def _generate_initial_p0(self):
@@ -1349,7 +1349,7 @@ class MCMCSearch(core.BaseSearchClass):
             setattr(self, key, new_d[key])
         mod_keys = []
-        for key in new_d.keys():
+        for key in list(new_d.keys()):
             if key in old_d:
                 if new_d[key] != old_d[key]:
                     mod_keys.append((key, old_d[key], new_d[key]))
@@ -1486,10 +1486,10 @@ class MCMCSearch(core.BaseSearchClass):
             if hasattr(self, 'theta0_index'):
                 f.write('theta0_index = {}\n'.format(self.theta0_idx))
             if method == 'med':
-                for key, val in median_std_d.iteritems():
+                for key, val in median_std_d.items():
                     f.write('{} = {:1.16e}\n'.format(key, val))
             if method == 'twoFmax':
-                for key, val in max_twoF_d.iteritems():
+                for key, val in max_twoF_d.items():
                     f.write('{} = {:1.16e}\n'.format(key, val))

     def generate_loudest(self):
@@ -1514,7 +1514,7 @@ class MCMCSearch(core.BaseSearchClass):
             f.write(r"\begin{tabular}{c l c} \hline" + '\n'
                     r"Parameter & & & \\ \hhline{====}")
-            for key, prior in self.theta_prior.iteritems():
+            for key, prior in self.theta_prior.items():
                 if type(prior) is dict:
                     Type = prior['type']
                     if Type == "unif":
@@ -1546,10 +1546,10 @@ class MCMCSearch(core.BaseSearchClass):
         if hasattr(self, 'theta0_idx'):
             logging.info('theta0 index: {}'.format(self.theta0_idx))
         logging.info('Max twoF: {} with parameters:'.format(max_twoF))
-        for k in np.sort(max_twoFd.keys()):
+        for k in np.sort(list(max_twoFd.keys())):
             print(' {:10s} = {:1.9e}'.format(k, max_twoFd[k]))
         logging.info('Median +/- std for production values')
-        for k in np.sort(median_std_d.keys()):
+        for k in np.sort(list(median_std_d.keys())):
             if 'std' not in k:
                 logging.info(' {:10s} = {:1.9e} +/- {:1.9e}'.format(
                     k, median_std_d[k], median_std_d[k+'_std']))
@@ -1668,7 +1668,7 @@ class MCMCSearch(core.BaseSearchClass):
     def write_evidence_file_from_dict(self, EvidenceDict, evidence_file_name):
         with open(evidence_file_name, 'w+') as f:
-            for key, val in EvidenceDict.iteritems():
+            for key, val in EvidenceDict.items():
                 f.write('{} {} {}\n'.format(key, val[0], val[1]))
@@ -1801,7 +1801,7 @@ class MCMCGlitchSearch(MCMCSearch):
                          r'$\delta$'] + full_glitch_symbols)
         self.theta_keys = []
         fixed_theta_dict = {}
-        for key, val in self.theta_prior.iteritems():
+        for key, val in self.theta_prior.items():
             if type(val) is dict:
                 fixed_theta_dict[key] = 0
                 if key in glitch_keys:
@@ -1863,7 +1863,7 @@ class MCMCGlitchSearch(MCMCSearch):
         fig, ax = plt.subplots()
         d, maxtwoF = self.get_max_twoF()
-        for key, val in self.theta_prior.iteritems():
+        for key, val in self.theta_prior.items():
             if key not in d:
                 d[key] = val
@@ -2223,7 +2223,7 @@ class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
     def check_old_run_setup(self, old_setup, **kwargs):
         try:
-            truths = [val == old_setup[key] for key, val in kwargs.iteritems()]
+            truths = [val == old_setup[key] for key, val in kwargs.items()]
             if all(truths):
                 return True
             else:
@@ -2540,7 +2540,7 @@ class MCMCTransientSearch(MCMCSearch):
         self.theta_keys = []
         fixed_theta_dict = {}
-        for key, val in self.theta_prior.iteritems():
+        for key, val in self.theta_prior.items():
             if type(val) is dict:
                 fixed_theta_dict[key] = 0
                 self.theta_keys.append(key)
...
@@ -3,7 +3,7 @@
 Provides functions to aid in calculating the optimal setup for zoom follow up
 """
+from __future__ import division, absolute_import, print_function
 import logging
 import numpy as np
...
@@ -31,7 +31,7 @@ def _optional_import ( modulename, shorthand=None ):
         logging.debug('Successfully imported module %s%s.'
                       % (modulename, shorthandbit))
         success = True
-    except ImportError, e:
+    except ImportError as e:
         if e.message == 'No module named '+modulename:
             logging.debug('No module {:s} found.'.format(modulename))
             success = False
@@ -111,7 +111,7 @@ def init_transient_fstat_map_features ( wantCuda=False, cudaDeviceName=None ):
                          ' then checking all available devices...')
            try:
                context0 = pycuda.tools.make_default_context()
-           except pycuda._driver.LogicError, e:
+           except pycuda._driver.LogicError as e:
               if e.message == 'cuDeviceGet failed: invalid device ordinal':
                   devn = int(os.environ['CUDA_DEVICE'])
                   raise RuntimeError('Requested CUDA device number {} exceeds' \
...
@@ -315,10 +315,10 @@ class SemiCoherentGlitchSearch(Test):
         Writer.tend = maxStartTime
         FSB = Writer.predict_fstat()
-        print FSA, FSB
+        print(FSA, FSB)
         predicted_FS = (FSA + FSB)
-        print(predicted_FS, FS)
+        print((predicted_FS, FS))
         self.assertTrue(np.abs((FS - predicted_FS))/predicted_FS < 0.3)
@@ -359,8 +359,8 @@ class MCMCSearch(Test):
         search.run(create_plots=False)
         _, FS = search.get_max_twoF()
-        print('Predicted twoF is {} while recovered is {}'.format(
-            predicted_FS, FS))
+        print(('Predicted twoF is {} while recovered is {}'.format(
+            predicted_FS, FS)))
         self.assertTrue(
             FS > predicted_FS or np.abs((FS-predicted_FS))/predicted_FS < 0.3)
...