def create_knope_parser():
    """
    Create the argument parser for ``cwinpy_knope``.
    """

    description = """\
A script to run the CWInPy known pulsar analysis pipeline; gravitational-wave \
data will be preprocessed based on the phase evolution of a pulsar which will \
then be used to infer the unknown signal parameters.
"""

    parser = ArgumentParser(description=description, allow_abbrev=False)
    parser.add(
        "--heterodyne-config",
        action="append",
        help=(
            "A configuration file for the heterodyne pre-processing using "
            "cwinpy_heterodyne. If requiring multiple detectors then this "
            "option can be used multiple times to pass configuration files "
            "for each detector."
        ),
        required=True,
    )
    parser.add(
        "--pe-config",
        type=str,
        help=(
            "A configuration file for the Bayesian inference stage using "
            "cwinpy_pe."
        ),
        required=True,
    )
    parser.add(
        "--version",
        action="version",
        version="%(prog)s {version}".format(version=cwinpy.__version__),
    )

    return parser
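
# --- Usage sketch (not part of the original source) ---
# A minimal example of driving the parser above from an assumed entry point; the
# function name "knope_main" and the .ini file names are hypothetical. It assumes
# `ArgumentParser` here is a class that provides `.add()` as an alias for
# `add_argument()` (as configargparse does); plain argparse only has `add_argument()`.
def knope_main():
    parser = create_knope_parser()
    args = parser.parse_args()
    # e.g. cwinpy_knope --heterodyne-config H1.ini --heterodyne-config L1.ini --pe-config pe.ini
    heterodyne_configs = args.heterodyne_config  # list, one entry per detector (action="append")
    pe_config = args.pe_config                   # single Bayesian inference configuration file
    return heterodyne_configs, pe_config
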
def parseArgs():
    '''
    Process command line args using argparse or, if it is not available,
    optparse, in a backwards compatible way.

    Returns a tuple of (args, remnants) where args is an object with attributes
    corresponding to the named arguments and remnants is a list of the remaining
    unnamed positional arguments.
    '''
    try:
        # prefer argparse; alias its methods so the calls below stay
        # backwards compatible with the deprecated optparse interface
        from argparse import ArgumentParser as Parser
        Parser.add = Parser.add_argument
        Parser.add_group = Parser.add_argument_group
        Parser.parse = Parser.parse_known_args
    except ImportError as ex:
        from optparse import OptionParser as Parser
        Parser.add = Parser.add_option
        Parser.add_group = Parser.add_option_group
        Parser.parse = Parser.parse_args

    d = "Runs localhost web application wsgi service on given host address and port. "
    d += "\nDefault host:port is 0.0.0.0:8080."
    d += "\n(0.0.0.0 is any interface on localhost)"
    p = Parser(description=d)
    p.add('-l', '--level',
          action='store',
          default='info',
          choices=aiding.LOGGING_LEVELS.keys(),
          help="Logging level.")
    p.add('-s', '--server',
          action='store',
          default='paste',
          help="Web application WSGI server type.")
    p.add('-a', '--host',
          action='store',
          default='0.0.0.0',
          help="Web application WSGI server ip host address.")
    p.add('-p', '--port',
          action='store',
          default='8080',
          help="Web application WSGI server ip port.")
    p.add('-b', '--base',
          action='store',
          default='',
          help="Base Url path prefix for client side web application.")
    p.add('-x', '--cors',
          action='store_true',
          default=False,
          help="Enable CORS Cross Origin Resource Sharing on server.")
    p.add('-t', '--tls',
          action='store_true',
          default=False,
          help="Use TLS/SSL (https).")
    p.add('-c', '--cert',
          action='store',
          default='/etc/pki/tls/certs/localhost.crt',
          help="File path to tls/ssl cacert certificate file.")
    p.add('-k', '--key',
          action='store',
          default='/etc/pki/tls/certs/localhost.key',
          help="File path to tls/ssl private key file.")
    p.add('-e', '--pem',
          action='store',
          default='/etc/pki/tls/certs/localhost.pem',
          help="File path to tls/ssl pem file with both cert and key.")
    p.add('-g', '--gen',
          action='store_true',
          default=False,
          help=("Generate web app load file. Default is 'app/main.html'"
                " or if provided the file specified by -f option."))
    p.add('-f', '--load',
          action='store',
          default='app/main.html',
          help="Filepath to save generated web app load file upon -g option.")
    p.add('-C', '--coffee',
          action='store_true',
          default=False,
          help="Upon -g option generate to load coffeescript.")
    p.add('-d', '--devel',
          action='store_true',
          default=False,
          help="Development mode.")

    return p.parse()
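
# --- Usage sketch (not part of the original source) ---
# Illustrates unpacking the (args, remnants) tuple returned by parseArgs(). With
# argparse, parse_known_args() returns (namespace, leftover args); with the
# optparse fallback, parse_args() returns (options, positional args), so the
# tuple shape is the same either way. The attribute names follow the long
# option names defined above.
if __name__ == "__main__":
    args, remnants = parseArgs()
    scheme = "https" if args.tls else "http"
    print("Serving on %s://%s:%s%s" % (scheme, args.host, args.port, args.base))
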
def parse_args():
    # add this to the training of the model in R&D
    # parser = ArgParser(allow_config=True,
    #                    config_file_parser_class=YAMLConfigFileParser)
    parser = ArgumentParser()
    # NB: argparse never uses `default` for options marked required=True;
    # the required options below must always be passed on the command line.
    parser.add_argument("--csv-file",
                        type=str,
                        required=True,
                        default=r"data\gt.csv",
                        help="Path to the csv file that contains the image "
                             "locations and the corresponding list of keywords.")
    parser.add_argument("--model-name",
                        type=str,
                        required=True,
                        help="Name of the model to save. Specify different names "
                             "for different models when training to save multiple "
                             "models. At each run, the current model name is loaded.")
    parser.add_argument("--epochs",
                        type=int,
                        required=True,
                        default=5,
                        help="Number of times to cycle through the training data.")
    parser.add_argument("--log-interval",
                        type=int,
                        required=True,
                        default=10,
                        help="Interval at which to display training progress, "
                             "in number of samples seen.")
    parser.add_argument("--source-folder",
                        type=str,
                        default=r"data\nondata",
                        help="Path to a directory that contains a set of image "
                             "files or sub-directories of images.")
    parser.add_argument("--destination",
                        type=str,
                        default=r"data\output",
                        help="Path to a folder to save the extracted .txt files.")

    args = parser.parse_args()
    return args
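
# --- Usage sketch (not part of the original source) ---
# Example invocation and consumption of the parsed arguments; the script name
# "train.py" and the paths are placeholders.
#
#   python train.py --csv-file data/gt.csv --model-name keyword_model \
#       --epochs 5 --log-interval 10 --source-folder data/nondata --destination data/output
if __name__ == "__main__":
    args = parse_args()
    print(args.csv_file, args.model_name, args.epochs, args.log_interval)
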
def create_parser() -> ArgumentParser:
    """
    Create a parser for the lookout.style.format utility.

    :return: an ArgumentParser with a handler defined in the handler attribute.
    """
    # Deferred imports to speed up loading __init__
    from lookout.style.format.benchmarks.evaluate_smoke import evaluate_smoke_entry
    from lookout.style.format.benchmarks.generate_smoke import generate_smoke_entry
    from lookout.style.format.benchmarks.general_report import print_reports
    from lookout.style.format.benchmarks.quality_report_noisy import quality_report_noisy

    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatterNoNone)

    # General options
    parser.add("--log-level", default="DEBUG", help="Log verbosity level.")

    subparsers = parser.add_subparsers(help="Commands")

    def add_parser(name, help):
        return subparsers.add_parser(
            name, help=help, formatter_class=ArgumentDefaultsHelpFormatterNoNone)

    # Evaluation
    eval_parser = add_parser("eval", "Evaluate trained model on given dataset.")
    eval_parser.set_defaults(handler=print_reports)
    add_input_pattern_arg(eval_parser)
    add_bblfsh_arg(eval_parser)
    add_model_args(eval_parser)
    eval_parser.add_argument(
        "-n", "--n-files", default=0, type=int,
        help="How many files with the most mispredictions to show. "
             "If n <= 0, show all.")

    # Generate the quality report based on the artificial noisy dataset
    quality_report_noisy_parser = add_parser(
        "quality-report-noisy",
        "Quality report on the artificial noisy dataset.")
    quality_report_noisy_parser.set_defaults(handler=quality_report_noisy)
    add_bblfsh_arg(quality_report_noisy_parser)
    add_rules_thresholds(quality_report_noisy_parser)
    quality_report_noisy_parser.add_argument(
        "-l", "--language", default="javascript",
        help="Programming language to use.")
    quality_report_noisy_parser.add_argument(
        "--repos", type=str,
        help="List of URLs or paths to the repositories to analyze. "
             "Should be strings separated by newlines.")
    quality_report_noisy_parser.add_argument(
        "--precision-threshold", type=float, default=0.95,
        help="Precision threshold tolerated for the model.")
    quality_report_noisy_parser.add_argument(
        "-o", "--dir-output", required=True, type=str,
        help="Path to the output directory where to store the quality report "
             "and the precision-recall curve.")

    # Generate a dataset of different code styles for smoke testing.
    gen_smoke_parser = add_parser(
        "gen-smoke-dataset",
        "Generate a dataset with different styles. "
        "Helps to check the basic system functionality. "
        "Only JavaScript code is supported for now.")
    gen_smoke_parser.set_defaults(handler=generate_smoke_entry)
    gen_smoke_parser.add_argument(
        "inputpath", type=str,
        help="Path to the tar.xz archive containing the initial repositories.")
    gen_smoke_parser.add_argument(
        "outputpath", type=str,
        help="Path to the directory where the generated dataset should be stored.")
    gen_smoke_parser.add_argument(
        "--force", default=False, action="store_true",
        help="Overwrite the output directory if it exists.")

    # Evaluate on the dataset with different styles
    eval_gen_styles_parser = add_parser(
        "eval-smoke-dataset",
        "Evaluate on the dataset with different styles.")
    eval_gen_styles_parser.set_defaults(handler=evaluate_smoke_entry)
    eval_gen_styles_parser.add_argument(
        "inputpath", type=str,
        help="Path to the directory where the generated dataset is stored. "
             "To generate a dataset, run the gen-smoke-dataset command.")
    eval_gen_styles_parser.add_argument(
        "reportdir", type=str,
        help="Path to the report performance output directory.")
    add_bblfsh_arg(eval_gen_styles_parser)
    eval_gen_styles_parser.add_argument(
        "--train-config", type=json.loads, default="{}",
        help="JSON config for the train step.")
    eval_gen_styles_parser.add_argument(
        "--analyze-config", type=json.loads, default="{}",
        help="JSON config for the analyze step.")
    eval_gen_styles_parser.add_argument(
        "--database", type=str, default=None,
        help="Path to the sqlite3 database with trained models metadata. "
             "Enables reusing previously trained models.")

    return parser