예제 #1
0
# Spectrum combinations for the temperature(+polarization) high-ell runs.
# FIX(review): variant_tag was referenced below (and by baseTTTEEE, which
# needs a second high-ell entry) but never defined in this snippet; defined
# here to match the sibling configurations in this file.
variant_tag = ['TT', 'TTTEEE']
variant_pol_tag = ['TE', 'EE']
variants = variant_tag

# Accumulators for the generated data-set definitions.
planck_highL_sets = []
planck_pol_sets = []
# Likelihood codes to build data sets for.
planck_vars = ['plikHM', 'CamSpecHM']

# Per-likelihood .ini templates; %s is filled with the spectrum tag.
planck_ini = ['plik_rd12_HM_v22_%s.ini', 'nonclik_v10_7_%s.ini']
clean_ini = ['nonclik_v10_7_TT_clean.ini']
# planck_ini = ['plik_rd12_HM_v22_%s.ini', 'CAMspec_%s_clik14.ini']
# Extra base .ini files per likelihood (none needed here).
planck_base = [[], []]

# Build one dataSet per (likelihood code, spectrum combination).
for planck, ini, base in zip(planck_vars, planck_ini, planck_base):
    for name, var in zip(variant_tag, variants):
        planck_highL_sets.append(
            batchjob.dataSet([planck, name], base + [ini % var]))
    for var in variant_pol_tag:
        planck_pol_sets.append(
            batchjob.dataSet([planck, var], base + [ini % var]))

# Convenience aliases: plikHM TT and plikHM TTTEEE data sets.
baseTT = planck_highL_sets[0]
baseTTTEEE = planck_highL_sets[1]

WMAP9 = [[WMAP], ['WMAP.ini']]

likechecks = []

newCovmats = False

# Importance sampling settings
예제 #2
0
# FIX(review): camspec_detsets is used by the likechecks below but was never
# defined in this snippet; defined here to match the sibling configuration
# in this file.
camspec_detsets = ['nonclik_detsets.ini']
# Extra base .ini files for the CamSpec half-mission likelihood.
camspec_CS = ['nonclik.ini']

# Spectrum combinations for the temperature(+polarization) high-ell runs.
variant_tag = ['TT', 'TTTEEE']
variant_pol_tag = ['TE', 'EE']
variants = variant_tag

# Accumulators for the generated data-set definitions.
planck_highL_sets = []
planck_pol_sets = []
# Likelihood codes, their .ini templates (%s filled with the spectrum tag),
# and the extra base .ini files each one needs.
planck_vars = ['plikHM', 'CamSpecHM']
planck_ini = ['plik_dx11dr2_HM_v18_%s.ini', 'CAMspec_%s.ini']
planck_base = [[], camspec_CS]

# Build one dataSet per (likelihood code, spectrum combination).
for planck, ini, base in zip(planck_vars, planck_ini, planck_base):
    for name, var in zip(variant_tag, variants):
        planck_highL_sets.append(
            batchjob.dataSet([planck, name], base + [ini % var]))
    for var in variant_pol_tag:
        planck_pol_sets.append(
            batchjob.dataSet([planck, var], base + [ini % var]))

# Convenience alias: plikHM TT data set.
baseTT = planck_highL_sets[0]

WMAP9 = [[WMAP], ['WMAP.ini']]

# Alternative-likelihood cross-checks.
likechecks = []
likechecks.append(
    batchjob.dataSet(['CamSpecDS', 'TT'],
                     camspec_detsets + ['CAMspec_TT.ini']))
likechecks.append(
    batchjob.dataSet(['plikDS', 'TT'], ['plik_dx11dr2_DS_v18_TT.ini']))
# likechecks.append(batchjob.dataSet(['Mspec', 'TT'], ['mspec_dx11d_HM_v1_TT.ini']))
예제 #3
0
# Base .ini files applied to every run in this grid.
defaults = ['common.ini']
importanceDefaults = ['importance_sampling.ini']

# Collected job groups for this batch.
groups = []

# Main group: every parameter variation crossed with every data combination.
g = batchjob.jobGroup('main')

# Extra parameters varied on top of the baseline model.
g.params = [[], ['mnu'], ['nnu']]

# Each data set pairs a list of name tags with the .ini files it includes.
g.datasets = [
    batchjob.dataSet(['plikHM', 'TT', 'lowTEB'],
                     ['plik_dx11dr2_HM_v18_TT.ini', 'lowTEB.ini']),
    batchjob.dataSet(['plikHM', 'TT', 'lowTEB', 'lensing'],
                     ['plik_dx11dr2_HM_v18_TT.ini', 'lowTEB.ini',
                      'lensing.ini']),
]

# Importance-run name tags plus the specific .ini files to include (batch1/).
g.importanceRuns = [
    [['BAO'], ['BAO.ini']],
]

groups.append(g)

# Ranges used when a parameter is varied; delete an entry to fall back to
# the defaults.
params = {
    'w': '-0.99 -3. 1 0.02 0.02',
    'wa': '0 -3 2 0.05 0.05',
    'mnu': '0.02 0 5 0.1 0.03',
}
예제 #4
0
# Extra base .ini files for the CamSpec detset / half-mission likelihoods.
camspec_detsets = ['nonclik_detsets.ini']
camspec_CS = ['nonclik.ini']

# Spectrum combinations for the high-ell and polarization-only runs.
variant_tag = ['TT', 'TTTEEE']
variant_pol_tag = ['TE', 'EE']
variants = variant_tag

# Likelihood codes, their .ini templates (%s filled with the spectrum tag),
# and the extra base .ini files each one needs.
planck_vars = ['plikHM', 'CamSpecHM']
planck_ini = ['plik_dx11dr2_HM_v18_%s.ini', 'CAMspec_%s.ini']
planck_base = [[], camspec_CS]

# One dataSet per (likelihood code, spectrum combination).
planck_highL_sets = [
    batchjob.dataSet([planck, name], base + [ini % var])
    for planck, ini, base in zip(planck_vars, planck_ini, planck_base)
    for name, var in zip(variant_tag, variants)
]
planck_pol_sets = [
    batchjob.dataSet([planck, var], base + [ini % var])
    for planck, ini, base in zip(planck_vars, planck_ini, planck_base)
    for var in variant_pol_tag
]

# Convenience alias: plikHM TT data set.
baseTT = planck_highL_sets[0]

WMAP9 = [[WMAP], ['WMAP.ini']]

# Alternative-likelihood cross-checks.
likechecks = [
    batchjob.dataSet(['CamSpecDS', 'TT'],
                     camspec_detsets + ['CAMspec_TT.ini']),
    batchjob.dataSet(['plikDS', 'TT'], ['plik_dx11dr2_DS_v18_TT.ini']),
]
# likechecks.append(batchjob.dataSet(['Mspec', 'TT'], ['mspec_dx11d_HM_v1_TT.ini']))
# likechecks.append(batchjob.dataSet(['cleanCMH', 'TT'], ['cleanCMH.ini']))
# likechecks.append(batchjob.dataSet(['plikLite', 'TT'], ['plik_lite_TT.ini']))
# likechecks.append(batchjob.dataSet(['plikLite', 'TTTEEE'], ['plik_lite_TTTEEE.ini']))
예제 #5
0
# Base .ini files applied to every run in this grid.
defaults = ['common.ini']

# Collected job groups for this batch.
groups = []

# Main group: every parameter variation crossed with every data combination.
g = batchjob.jobGroup('main')

# Only the tensor-to-scalar ratio is varied.
g.params = [['r']]

# skip lensing for now as slow
variants = [
    'fiducial', 'y1y2', '9bins', 'no217', 'relaxbetad', 'relaxalphad',
    'sync000', 'sync100'
]

g.datasets = []

# Variants are numbered 01.. to match the BKPlanck .ini file names; the
# leading dict override clears root_dir so the path is used as written.
for index, tag in enumerate(variants, start=1):
    g.datasets.append(batchjob.dataSet(
        ['BKPlanckonly', tag],
        [{'root_dir': ''},
         'BKPlanck/BKPlanck_0%u_%s.ini' % (index, tag)]))

# add importance name tags, and list of specific .ini files to include (in batch1/)
g.importanceRuns = []

groups.append(g)
예제 #6
0
# Spectrum combinations for the high-ell and polarization-only runs.
variant_tag = ['TT', 'TTTEEE']
variant_pol_tag = ['TE', 'EE']
variants = variant_tag

# Likelihood codes and their .ini templates (%s filled with the spectrum tag).
planck_vars = ['plikHM', 'CamSpecHM']

planck_ini = ['plik_rd12_HM_v22_%s.ini', 'nonclik_v10_7_%s.ini']
clean_ini = ['nonclik_v10_7_TT_clean.ini']
# planck_ini = ['plik_rd12_HM_v22_%s.ini', 'CAMspec_%s_clik14.ini']
# Extra base .ini files per likelihood (none needed here).
planck_base = [[], []]

# One dataSet per (likelihood code, spectrum combination).
planck_highL_sets = [
    batchjob.dataSet([planck, name], base + [ini % var])
    for planck, ini, base in zip(planck_vars, planck_ini, planck_base)
    for name, var in zip(variant_tag, variants)
]
planck_pol_sets = [
    batchjob.dataSet([planck, var], base + [ini % var])
    for planck, ini, base in zip(planck_vars, planck_ini, planck_base)
    for var in variant_pol_tag
]

# Convenience aliases: plikHM TT and plikHM TTTEEE data sets.
baseTT = planck_highL_sets[0]
baseTTTEEE = planck_highL_sets[1]

WMAP9 = [[WMAP], ['WMAP.ini']]

likechecks = []

newCovmats = False


# Importance sampling settings
예제 #7
0
# Base .ini files applied to every run in this grid.
defaults = ['common.ini']
importanceDefaults = ['importance_sampling.ini']

# Collected job groups for this batch.
groups = []

# Main group: every parameter variation crossed with every data combination.
g = batchjob.jobGroup('main')

# Baseline model plus an Alens extension.
g.params = [[], ['Alens']]

g.datasets = []

# CamSpec v12.5 half-mission cleaned likelihood: one data set per spectrum
# combination, each paired with the tau prior.
for spec in ['TT', 'TTTEEE', 'TE', 'EE', 'TEEE']:
    g.datasets.append(batchjob.dataSet(
        ['CamSpec_v12_5_HM_cln', spec, 'tauP'],
        ['camspec_v12_5_HM_cln_%s.ini' % spec, 'tauprior.ini']))

# Importance-run name tags plus the specific .ini files to include (batch1/).
g.importanceRuns = [
    [['BAO'], ['BAO.ini']],
]

groups.append(g)

# Ranges used when a parameter is varied; delete an entry to fall back to
# the defaults.
params = {
    'w': '-0.99 -3. 1 0.02 0.02',
    'wa': '0 -3 2 0.05 0.05',
}
예제 #8
0
# Directory containing the run .ini files.
ini_dir = 'batch2/'

# directory to look for existing covariance matrices
covmat = 'planck_covmats/BKPlanck.covmat'

# Base .ini files applied to every run in this grid.
defaults = ['common.ini']

# Collected job groups for this batch.
groups = []

# Main group: every parameter variation crossed with every data combination.
g = batchjob.jobGroup('main')

# Only the tensor-to-scalar ratio is varied.
g.params = [['r']]

# skip lensing for now as slow
variants = ['fiducial', 'y1y2', '9bins', 'no217', 'relaxbetad', 'relaxalphad', 'sync000', 'sync100']

g.datasets = []

# Variants are numbered 01.. to match the BKPlanck .ini file names; the
# leading dict override clears root_dir so the path is used as written.
for index, tag in enumerate(variants, start=1):
    g.datasets.append(batchjob.dataSet(
        ['BKPlanckonly', tag],
        [{'root_dir': ''},
         'BKPlanck/BKPlanck_0%u_%s.ini' % (index, tag)]))


# add importance name tags, and list of specific .ini files to include (in batch1/)
g.importanceRuns = []

groups.append(g)