def parseForBatch(self, vals=None):
    if self.importanceParameter:
        self.parser.add_argument('--noimportance', action='store_true',
                                 help='original chains only, no importance sampled')
        self.parser.add_argument('--importance', nargs='*', default=None,
                                 help='data names for importance sampling runs to include')
        self.parser.add_argument('--importancetag', nargs='*', default=None,
                                 help='importance tags for importance sampling runs to include')
    self.parser.add_argument('--name', default=None, nargs='+',
                             help='specific chain full name only (base_paramx_data1_data2)')
    self.parser.add_argument('--param', default=None, nargs='+',
                             help='runs including specific parameter only (paramx)')
    self.parser.add_argument('--paramtag', default=None, nargs='+',
                             help='runs with specific parameter tag only (base_paramx)')
    self.parser.add_argument('--data', nargs='+', default=None,
                             help='runs including specific data only (data1)')
    self.parser.add_argument('--datatag', nargs='+', default=None,
                             help='runs with specific data tag only (data1_data2)')
    self.parser.add_argument('--musthave_data', nargs='+', default=None,
                             help='include only runs that include specific data (data1)')
    self.parser.add_argument('--skip_data', nargs='+', default=None,
                             help='skip runs containing specific data (data1)')
    self.parser.add_argument('--skip_param', nargs='+', default=None,
                             help='skip runs containing specific parameter (paramx)')
    self.parser.add_argument('--group', default=None, nargs='+',
                             help='include only runs with given group names')
    self.parser.add_argument('--skip_group', default=None, nargs='+',
                             help='exclude runs with given group names')
    if self.notExist:
        self.parser.add_argument('--notexist', action='store_true',
                                 help="only include chains that don't already exist on disk")
    if self.notall:
        self.parser.add_argument('--notall', type=int, default=None,
                                 help="only include chains where all N chains don't already exist on disk")
    if self.doplots:
        self.parser.add_argument('--paramNameFile', default='clik_latex.paramnames',
                                 help=".paramnames file for custom labels for parameters")
        self.parser.add_argument('--paramList', default=None,
                                 help=".paramnames file listing specific parameters to include (only)")
        self.parser.add_argument('--size_inch', type=float, default=None,
                                 help='output subplot size in inches')
        self.parser.add_argument('--nx', default=None, help='number of plots per row')
        self.parser.add_argument('--outputs', nargs='+', default=['pdf'],
                                 help='output file type (default: pdf)')
    args = self.parser.parse_args(vals)
    self.args = args
    if args.batchPath:
        self.batch = batchjob.readobject(args.batchPath)
        if self.batch is None:
            raise Exception('batchPath %s does not exist or is not initialized with makeGrid.py' % args.batchPath)
        if self.doplots:
            import getdist.plots as plots
            from getdist import paramnames
            if args.paramList is not None:
                args.paramList = paramnames.ParamNames(args.paramList)
            g = plots.GetDistPlotter(chain_dir=self.batch.batchPath)
            if args.size_inch is not None:
                g.settings.set_with_subplot_size(args.size_inch)
            return self.batch, self.args, g
        else:
            return self.batch, self.args
    else:
        return None, self.args
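# Usage sketch (hedged): 'grid_opts' below stands in for an instance of the argument-helper
# class that defines parseForBatch; its construction is not shown in this snippet, so the
# variable name, the positional batchPath argument, and the example paths/tags are assumptions.
# With doplots enabled, parseForBatch returns the grid batch object, the parsed argparse
# namespace, and a getdist GetDistPlotter pointed at the grid's chain directory.
batch, args, plotter = grid_opts.parseForBatch(
    ['/path/to/grid',           # positional batchPath (assumed; checked via args.batchPath above)
     '--paramtag', 'base_mnu',  # flag for runs with this parameter tag (stored in args)
     '--skip_data', 'lensing',  # flag to skip runs containing this data tag (stored in args)
     '--outputs', 'pdf'])       # plot output format
for jobItem in batch.items():   # iterate over the runs in the loaded grid
    print(jobItem.name)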
def load_supported_grid(chain_dir):
    if is_grid_object(chain_dir):
        return chain_dir
    try:
        # If cosmomc is installed, e.g. to allow use of old Planck grids.
        # The 2018 final Planck grid should be OK with getdist default chain grid below.
        from paramgrid import gridconfig, batchjob
        if gridconfig.pathIsGrid(chain_dir):
            return batchjob.readobject(chain_dir)
    except ImportError:
        pass
    return None
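# Minimal usage sketch, assuming load_supported_grid and is_grid_object are importable from
# the module above; the directory path is a placeholder.
chain_dir = '/path/to/planck_grid_or_chains'
grid = load_supported_grid(chain_dir)
if grid is not None:
    # A paramgrid batch object was loaded; e.g. list the runs it contains.
    for jobItem in grid.items():
        print(jobItem.name)
else:
    # Not a recognized grid (or paramgrid is not installed): treat chain_dir as plain chains.
    print('%s is not a supported grid directory' % chain_dir)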
from __future__ import absolute_import
from __future__ import print_function
import os
import sys

from paramgrid import batchjob

if len(sys.argv) < 3:
    print('Usage: python/addGridBatch.py directory_with_outputs directory_with_output_to_add [and_another..]')
    sys.exit()

# Load the existing grid batch (the target grid directory is the first command-line argument)
batch = batchjob.readobject()

# Attach each additional output directory as a sub-batch of the main grid
for subBatch in sys.argv[2:]:
    batchPath2 = os.path.abspath(subBatch) + os.sep
    batch2 = batchjob.readobject(batchPath2)
    batch.subBatches.append(batch2)

# If an importance-sampled run is now superseded by a full run of the same base name,
# drop it from the batch (the chain files themselves are left on disk)
for jobItem in list(batch.items()):
    for x in list(jobItem.importanceJobsRecursive()):
        if batch.hasName(x.name.replace('_post', '')):
            print('replacing importance sampling run (not deleting files): ' + x.name)
            jobItem.removeImportance(x)

batch.save()
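# Example invocation (paths assumed), merging two extra grid output directories into an
# existing grid and re-saving its batch object:
#   python python/addGridBatch.py ./main_grid ./extra_grid1 ./extra_grid2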
def parseForBatch(self, vals=None):
    if self.importanceParameter:
        self.parser.add_argument('--noimportance', action='store_true',
                                 help='original chains only, no importance sampled')
        self.parser.add_argument('--importance', nargs='*', default=None,
                                 help='data names for importance sampling runs to include')
        self.parser.add_argument('--importancetag', nargs='*', default=None,
                                 help='importance tags for importance sampling runs to include')
    self.parser.add_argument('--name', default=None, nargs='+',
                             help='specific chain full name only (base_paramx_data1_data2)')
    self.parser.add_argument('--param', default=None, nargs='+',
                             help='runs including specific parameter only (paramx)')
    self.parser.add_argument('--paramtag', default=None, nargs='+',
                             help='runs with specific parameter tag only (base_paramx)')
    self.parser.add_argument('--data', nargs='+', default=None,
                             help='runs including specific data only (data1)')
    self.parser.add_argument('--datatag', nargs='+', default=None,
                             help='runs with specific data tag only (data1_data2)')
    self.parser.add_argument('--musthave_data', nargs='+', default=None,
                             help='include only runs that include specific data (data1)')
    self.parser.add_argument('--skip_data', nargs='+', default=None,
                             help='skip runs containing specific data (data1)')
    self.parser.add_argument('--skip_param', nargs='+', default=None,
                             help='skip runs containing specific parameter (paramx)')
    self.parser.add_argument('--group', default=None, nargs='+',
                             help='include only runs with given group names')
    self.parser.add_argument('--skip_group', default=None, nargs='+',
                             help='exclude runs with given group names')
    if self.notExist:
        self.parser.add_argument('--notexist', action='store_true',
                                 help="only include chains that don't already exist on disk")
    if self.notall:
        self.parser.add_argument('--notall', type=int, default=None,
                                 help="only include chains where all N chains don't already exist on disk")
    if self.doplots:
        self.parser.add_argument('--plot_data', nargs='*', default=None,
                                 help='directory/ies containing getdist output plot_data')
        self.parser.add_argument('--paramNameFile', default='clik_latex.paramnames',
                                 help=".paramnames file for custom labels for parameters")
        self.parser.add_argument('--paramList', default=None,
                                 help=".paramnames file listing specific parameters to include (only)")
        self.parser.add_argument('--size_inch', type=float, default=None,
                                 help='output subplot size in inches')
        self.parser.add_argument('--nx', default=None, help='number of plots per row')
        self.parser.add_argument('--outputs', nargs='+', default=['pdf'],
                                 help='output file type (default: pdf)')
    args = self.parser.parse_args(vals)
    self.args = args
    if args.batchPath:
        self.batch = batchjob.readobject(args.batchPath)
        if self.batch is None:
            raise Exception('batchPath does not exist or is not initialized with makeGrid.py')
        if self.doplots:
            import getdist.plots as plots
            from getdist import paramnames
            if args.paramList is not None:
                args.paramList = paramnames.ParamNames(args.paramList)
            if args.plot_data is not None:
                g = plots.GetDistPlotter(plot_data=args.plot_data)
            else:
                g = plots.GetDistPlotter(chain_dir=self.batch.batchPath)
            if args.size_inch is not None:
                g.settings.setWithSubplotSize(args.size_inch)
            return self.batch, self.args, g
        else:
            return self.batch, self.args
    else:
        return None, self.args
def getBatch(self):
    if not hasattr(self, 'batch'):
        self.batch = batchjob.readobject(rootdir)
    return self.batch
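# Usage sketch: getBatch lazily loads and caches the grid's batch object on first call.
# 'rootdir' above is assumed to be defined elsewhere in the module (the grid output
# directory); 'opts' below is a hypothetical instance of the class defining getBatch.
batch = opts.getBatch()        # reads the batch object from disk the first time
batch_again = opts.getBatch()  # later calls reuse the cached self.batch
assert batch is batch_again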
from __future__ import absolute_import
from __future__ import print_function
import os
import sys

from paramgrid import batchjob

if len(sys.argv) < 3:
    print('Usage: python/addGridBatch.py directory_with_outputs directory_with_output_to_add [and_another..]')
    sys.exit()

batch = batchjob.readobject()

for subBatch in sys.argv[2:]:
    batchPath2 = os.path.abspath(subBatch) + os.sep
    batch2 = batchjob.readobject(batchPath2)
    batch.subBatches.append(batch2)

for jobItem in list(batch.items()):
    for x in list(jobItem.importanceJobs()):
        if batch.hasName(x.name.replace('_post', '')):
            print('replacing importance sampling run (not deleting files): ' + x.name)
            jobItem.importanceItems.remove(x)

batch.save()