def _makeArgumentParser(cls):
    """Create the command-line argument parser for this task.

    Returns
    -------
    parser : `lsst.pipe.base.ArgumentParser`
        Parser with an ``--id`` argument for the
        ``deepDiff_differenceExp`` dataset type.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    # Fix: the original adjacent string literals joined with no separator,
    # so the help text rendered as "...raw CCD keyse.g. ...".
    parser.add_id_argument("--id", "deepDiff_differenceExp",
                           help="data ID with raw CCD keys, "
                                "e.g. --id visit=12345 ccd")
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser, attaching a tract-aware ``--id`` argument
    for the ``sourceTable_visit`` dataset type.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "sourceTable_visit",
        help="Data ID, e.g. --id visit=6789 tract=9617",
        ContainerClass=TractCheckDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser.

    The ``--id`` argument targets the module-level ``butlerTarget`` dataset
    type, with its container class looked up in ``dataIdContainer``.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        butlerTarget,
        help="Data ID, e.g. --id tract=1234 patch=2,2",
        ContainerClass=dataIdContainer[butlerTarget],
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser for the jointcal-style task.

    Adds a per-tract CCD ``--id`` argument plus options controlling
    diagnostics output and parallel source-catalog reading.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "jointcal_wcs",
        help="data ID, with raw CCD keys + tract",
        ContainerClass=PerTractCcdDataIdContainer,
    )
    # Diagnostics controls.
    parser.add_argument("--diagDir", default=".",
                        help="Directory in which to dump diagnostics")
    parser.add_argument("--diagnostics", default=False, action="store_true",
                        help="Save diagnostics plots?")
    parser.add_argument("--snapshots", default=False, action="store_true",
                        help="Save snapshots of ObsVecs during iteration?")
    # Parallel-read controls.
    parser.add_argument("--numCoresForReadSource", default=1, type=int,
                        help="Number of cores to be used for reading source catalog")
    parser.add_argument("--readTimeout", default=9999, type=float,
                        help="Timeout (sec) for reading inputs with multiple processes")
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser with a ``calexp`` ``--id`` argument."""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "calexp",
        help="Data ID, e.g. --id tract=1234 patch=2,2",
        ContainerClass=pipeBase.DataIdContainer,
    )
    return parser
def testOutputs(self):
    """Test output directories, specified in different ways"""
    parser = pipeBase.ArgumentParser(name="argumentParser")
    # The parser must require an output location by default.
    self.assertTrue(parser.requireOutput)
    # Location of our working repository
    # We'll start by creating this, then use it as the basis for further tests
    # It's removed at the end of the try/finally block below
    repositoryPath = tempfile.mkdtemp()
    try:
        # Given input at DataPath, demonstrate that we can create a new
        # output repository at repositoryPath
        args = parser.parse_args(
            config=self.config, args=[DataPath, "--output", repositoryPath])
        self.assertEqual(args.input, DataPath)
        self.assertEqual(args.output, repositoryPath)
        self.assertIsNone(args.rerun)
        # Now based on our new output repository, demonstrate that we can
        # create a rerun at rerun/foo
        args = parser.parse_args(config=self.config, args=[repositoryPath, "--rerun", "foo"])
        self.assertEqual(args.input, repositoryPath)
        self.assertEqual(args.output, os.path.join(repositoryPath, "rerun", "foo"))
        self.assertEqual(args.rerun, ["foo"])
        # Now check that we can chain the above rerun into another
        # ("foo:bar" means: input from rerun foo, output to rerun bar)
        args = parser.parse_args(
            config=self.config, args=[repositoryPath, "--rerun", "foo:bar"])
        self.assertEqual(args.input, os.path.join(repositoryPath, "rerun", "foo"))
        self.assertEqual(args.output, os.path.join(repositoryPath, "rerun", "bar"))
        self.assertEqual(args.rerun, ["foo", "bar"])
        # Finally, check that the above also works if the rerun directory
        # already exists
        rerunPath = tempfile.mkdtemp(
            dir=os.path.join(repositoryPath, "rerun"))
        rerun = os.path.basename(rerunPath)
        try:
            args = parser.parse_args(
                config=self.config, args=[repositoryPath, "--rerun", rerun])
            self.assertEqual(args.input, repositoryPath)
            self.assertEqual(args.output, os.path.join(repositoryPath, "rerun", rerun))
            self.assertEqual(args.rerun, [rerun])
        finally:
            shutil.rmtree(rerunPath)
    finally:
        shutil.rmtree(repositoryPath)
    # Finally, check that we raise an appropriate error if we don't specify
    # an output location at all (the parser exits rather than raising)
    with self.assertRaises(SystemExit):
        parser.parse_args(config=self.config, args=[
            DataPath,
        ])
def _makeArgumentParser(cls, *args, **kwargs):
    """Build the argument parser for fake-input generation.

    Extra positional/keyword arguments are forwarded to the parser
    constructor.
    """
    parser = pipeBase.ArgumentParser(name="makeFakeInputs", *args, **kwargs)
    parser.add_id_argument(
        "--id",
        datasetType="deepCoadd",
        help="data ID, e.g. --id tract=0",
        ContainerClass=SkyMapIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser with a ``sourceTable_visit`` data ID."""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "sourceTable_visit",
        help="Data ID, e.g. --id visit=6789",
    )
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser.

    Two data-ID arguments are registered: ``--id`` selects the coadd to
    operate on and ``--selectId`` selects the input calexps.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "deepCoadd",
        help="data ID, e.g. --id tract=12345 patch=1,2",
        ContainerClass=CoaddDataIdContainer,
    )
    parser.add_id_argument(
        "--selectId",
        "calexp",
        help="data ID, e.g. --selectId visit=6789 ccd=0..9",
        ContainerClass=SelectDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls, *args, **kwargs):
    """Create the argument parser.

    Parameters
    ----------
    *args, **kwargs
        Forwarded by the caller; a ``doBatch`` keyword, if present, is
        discarded before the parser is built.

    Returns
    -------
    parser : `lsst.pipe.base.ArgumentParser`
    """
    # Pop doBatch so it doesn't leak further; the original bound the popped
    # value to an unused local, which is removed here.
    kwargs.pop("doBatch", False)
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument("--id", "calexp", level="visit",
                           ContainerClass=pipeBase.DataIdContainer,
                           help="data ID, e.g. --id visit=12345")
    return parser
def _makeArgumentParser(cls, *args, **kwargs):
    """Build the argument parser for stamp analysis.

    The ``doBatch`` keyword, if supplied, is stripped before the remaining
    arguments are handed to the parser constructor.
    """
    kwargs.pop("doBatch", False)  # not meaningful to the argument parser
    parser = pipeBase.ArgumentParser(name="StampAnalysis", *args, **kwargs)
    parser.add_id_argument(
        "--id",
        datasetType="donutSrc",
        level="visit",
        help="data ID, e.g. --id visit=12345",
    )
    return parser
def _makeArgumentParser(cls, *args, **kwargs):
    """Build the covariance-building argument parser with a ``deepCoadd``
    data-ID argument.
    """
    parser = pipeBase.ArgumentParser(name="buildCovariance", *args, **kwargs)
    parser.add_id_argument(
        "--id",
        "deepCoadd",
        help="data ID, e.g. --id tract=12345 patch=1,2",
        ContainerClass=CoaddDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser, requiring existing-coadd data IDs."""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        name="--id",
        datasetType="deepCoadd",
        help="data IDs for the deepCoadd, e.g. --id tract=12345 patch=1,2 filter=r",
        ContainerClass=ExistingCoaddDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser with an optional ``calexp`` data ID."""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "calexp",
        help="Data ID, e.g. --id visit=6789 (optional)",
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser for fake-source calexps.

    The data ID uses raw CCD keys and may optionally carry a tract key,
    handled by the per-tract CCD container.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "fakes_calexp",
        help="data ID with raw CCD keys [+ tract optionally], "
             "e.g. --id visit=12345 ccd=1,2 [tract=0]",
        ContainerClass=PerTractCcdDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser with a per-tract ``src`` data ID."""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "src",
        help="Data ID, e.g. --id visit=6789",
        ContainerClass=PerTractCcdDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls):
    """Create the command-line argument parser.

    Returns
    -------
    parser : `lsst.pipe.base.ArgumentParser`
        Parser with a ``deepCoadd`` ``--id`` argument and a ``--psfCache``
        size option.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    # Fix: the original adjacent string literals joined with no separator,
    # so the help text rendered as "...coadd keyse.g. ...".
    parser.add_id_argument("--id", "deepCoadd",
                           help="data ID with coadd keys, "
                                "e.g. --id tract=1234 patch=1,2 filter=r^i")
    parser.add_argument("--psfCache", type=int, default=100,
                        help="Size of CoaddPsf cache")
    return parser
def makeArgumentParser(cls):
    """!Create and return an argument parser

    @param[in] cls  the class object
    @return the argument parser for this task, with an ``icExp`` data-ID
        argument attached.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        name="--id",
        datasetType="icExp",
        help="data IDs, e.g. --id visit=12345 ccd=1,2^0,3",
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser: a coadd forced-source ``--id`` argument
    plus a ``--psfCache`` size option.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "deepCoadd_forced_src",
        help="data ID, with raw CCD keys + tract",
        ContainerClass=coaddUtils.CoaddDataIdContainer,
    )
    parser.add_argument(
        "--psfCache", type=int, default=100,
        help="Size of CoaddPsf cache",
    )
    return parser
def _makeArgumentParser(cls):
    """We want an argument parser that has multiple identifiers"""
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    # Two independent sensor-level "raw" identifiers.
    parser.add_id_argument("--one", "raw", "data identifier one",
                           level="sensor")
    parser.add_id_argument("--two", "raw", "data identifier two",
                           level="sensor")
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser with a profiling flag and a per-tract
    ``calexp`` data-ID argument.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_argument(
        "--profile_jointcal", default=False, action="store_true",
        help="Profile steps of jointcal separately.",
    )
    parser.add_id_argument(
        "--id",
        "calexp",
        help="data ID, e.g. --id visit=6789 ccd=0..9",
        ContainerClass=PerTractCcdDataIdContainer,
    )
    return parser
def _makeArgumentParser(cls, *args, **kwargs):
    """Build the argument parser with group-index range options.

    A ``doBatch`` keyword, if present, is discarded.
    """
    kwargs.pop("doBatch", False)
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_argument('--minIndex', type=int, default=0,
                        help='minimum group index')
    parser.add_argument('--maxIndex', type=int, default=1,
                        help='maximum group index')
    return parser
def _makeArgumentParser(cls):
    """!Create and return an argument parser

    @param[in] cls  the class object
    @return the argument parser for this task.

    This override is used to delay making the data ref list until the
    dataset type is known; this is done in @ref parseAndRun.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    # The dataset type is resolved from the task config field
    # "isr.datasetType" rather than being fixed here.
    parser.add_id_argument(
        name="--id",
        datasetType=pipeBase.ConfigDatasetType(name="isr.datasetType"),
        help="data IDs, e.g. --id visit=12345 ccd=1,2^0,3",
    )
    return parser
def _makeArgumentParser(cls):
    """Build the argument parser.

    Requires a prior-data rerun and registers two image-level data-ID
    arguments: ``--id`` for the image and ``--prior`` for the prior.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_argument("--prior_rerun", required=True,
                        help="rerun for prior data")
    parser.add_id_argument(
        name="--id", datasetType="image", level="image",
        help="data ID, e.g. --id subfield=0",
    )
    parser.add_id_argument(
        name="--prior", datasetType="prior", level="image",
        help="data ID, e.g. --id subfield=0 label=b1",
    )
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser

    Registers the science-exposure ``--id`` argument and an optional
    ``--templateId`` (visit-only) argument for selecting the template.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    parser.add_id_argument(
        "--id",
        "calexp",
        help="data ID, e.g. --id visit=12345 ccd=1,2",
    )
    parser.add_id_argument(
        "--templateId",
        "calexp",
        doMakeDataRefList=True,
        help="Optional template data ID (visit only), e.g. --templateId visit=6789",
    )
    return parser
def _makeArgumentParser(cls):
    """Extend the default argument parser.

    Arguments specifying the output SQLite database and exposure dataset
    type are added in.
    """
    parser = pipe_base.ArgumentParser(name=cls._DefaultName)
    parser.add_argument(
        '--database',
        dest='database',
        required=True,
        help='SQLite 3 database file name',
    )
    # Use DatasetArgument to require dataset type be specified on
    # the command line
    parser.add_id_argument(
        '--id',
        pipe_base.DatasetArgument('dstype'),
        help='Dataset data id to index',
    )
    return parser
def _makeArgumentParser(cls, *args, **kwargs):
    """Create the argument parser for the focal-plane summary task.

    Parameters
    ----------
    *args, **kwargs
        Forwarded to the parser constructor; a ``doBatch`` keyword, if
        present, is removed first.

    Returns
    -------
    parser : `lsst.pipe.base.ArgumentParser`
    """
    # Pop doBatch keyword before passing it along to the argument parser
    kwargs.pop("doBatch", False)
    # Fix: the original adjacent string literals joined with no separator,
    # so the help text rendered as "...repository(e.g. ...)".
    dstype = pipeBase.DatasetArgument(
        '--dstype', default='calexp',
        help="dataset type to process from input data repository "
             "(e.g. eimage, postISRCCD, or calexp)")
    parser = pipeBase.ArgumentParser(name="focalplaneSummary", *args, **kwargs)
    parser.add_id_argument("--id", datasetType=dstype,
                           help="data ID, e.g. --id visit=12345")
    return parser
def _makeArgumentParser(cls):
    """Extend the default argument parser.

    Database-specific arguments and the dataset type of the catalogs to
    read are added in.
    """
    parser = pipe_base.ArgumentParser(name=cls._DefaultName)
    # Database connection options.
    parser.add_argument("--host", dest="host", required=True,
                        help="Database hostname")
    parser.add_argument("--database", dest="db", required=True,
                        help="Database name")
    parser.add_argument("--user", dest="user", default=None,
                        help="Database username (optional)")
    parser.add_argument("--port", dest="port", type=int, default=3306,
                        help="Database port number (optional)")
    # Ingest destination options.
    parser.add_argument("--table", dest="table_name", required=True,
                        help="Table to ingest into")
    parser.add_argument("--view", dest="view_name",
                        help="View to create containing column aliases")
    # Use DatasetArgument to require dataset type be specified on
    # the command line
    parser.add_id_argument("--id", pipe_base.DatasetArgument("dstype"),
                           help="Dataset data id to ingest")
    return parser
def _makeArgumentParser(cls):
    """Create an argument parser.

    This task uses no data-ID arguments, so nothing is added to the
    default parser.
    """
    parser = pipeBase.ArgumentParser(name=cls._DefaultName)
    return parser
) oneStr = pexConfig.Field( dtype=str, doc="Example string value", default="default value", ) intList = pexConfig.ListField( dtype=int, doc="example list of integers", default=[-1, 0, 1], ) floatList = pexConfig.ListField( dtype=float, doc="example list of floats", default=[-2.7, 0, 3.7e42], ) strList = pexConfig.ListField( dtype=str, doc="example list of strings", default=["a", "bb", "ccc"], ) parser = pipeBase.ArgumentParser(name="argumentParser") parser.add_id_argument("--id", "raw", "data identifier", level="sensor") config = ExampleConfig() parsedCmd = parser.parse_args(config=config) pcDict = parsedCmd.__dict__ for key in sorted(pcDict): print("parsedCmd.%s=%r" % (key, pcDict[key]))