def main_cmd():
    """
    Entry point for command line.

    Parses command-line arguments, loads the JSON configuration file,
    constructs the matching analysis object (Experiment, Selection, or one
    of the SeqLib types), applies the requested analysis options, then runs
    the analysis and generates plot/tsv output as requested.
    """
    # build description string based on available methods
    desc_string = "Command-line driver for Enrich2 v{}".format(__version__) + \
        "\n\nscoring methods:\n" + \
        "\n".join([" {:22}{}".format(k, v)
                   for k, v in list(SCORING_METHODS.items())]) + \
        "\n\nlog ratio methods:\n" + \
        "\n".join([" {:22}{}".format(k, v)
                   for k, v in list(LOGR_METHODS.items())])

    # create parser and add description
    parser = ArgumentParser(prog="Enrich2", description=desc_string,
                            formatter_class=RawDescriptionHelpFormatter)

    # add command line arguments
    parser.add_argument("config", help="JSON configuration file")
    parser.add_argument("scoring_method", help="scoring method",
                        choices=list(SCORING_METHODS.keys()))
    parser.add_argument("logr_method", help="log ratio method",
                        choices=list(LOGR_METHODS.keys()))

    # add support for semantic version checking
    parser.add_argument("--version", action="version",
                        version="%(prog)s {}".format(__version__))

    # add analysis options
    parser.add_argument("--log", metavar="FILE", dest="log_file",
                        help="path to log file")
    parser.add_argument("--no-plots", dest="plots_requested",
                        action="store_false", default=True,
                        help="don't make plots")
    parser.add_argument("--no-tsv", dest="tsv_requested",
                        action="store_false", default=True,
                        help="don't generate tsv files")
    parser.add_argument("--recalculate", dest="force_recalculate",
                        action="store_true", default=False,
                        help="force recalculation")
    parser.add_argument("--component-outliers", dest="component_outliers",
                        action="store_true", default=False,
                        help="calculate component outlier stats")
    parser.add_argument("--output-dir", metavar="DIR",
                        dest="output_dir_override",
                        help="override the config file's output directory")
    parser.add_argument("--sfmap-aa-file", metavar="FILE",
                        dest="sfmap_aa_file",
                        help="amino acid groups for sequence-function maps")

    args = parser.parse_args()

    # start the logs
    start_logging(args.log_file, logging.DEBUG)

    # read the JSON file
    # FIX: mode "U" was deprecated in Python 3 and removed in 3.11; the
    # context manager also guarantees the file handle is closed
    try:
        with open(args.config) as config_file:
            cfg = json.load(config_file)
    except IOError:
        raise IOError("Failed to open '{}' [{}]".format(
            args.config, DRIVER_NAME))
    except ValueError:
        raise ValueError(
            "Improperly formatted .json file [{}]".format(DRIVER_NAME))

    # identify config file type and create the object
    if config_check.is_experiment(cfg):
        logging.info("Detected an Experiment config file",
                     extra={'oname': DRIVER_NAME})
        obj = Experiment()
    elif config_check.is_selection(cfg):
        logging.info("Detected a Selection config file",
                     extra={'oname': DRIVER_NAME})
        obj = Selection()
    elif config_check.is_seqlib(cfg):
        seqlib_type = config_check.seqlib_type(cfg)
        logging.info("Detected a %s config file", seqlib_type,
                     extra={'oname': DRIVER_NAME})
        if seqlib_type == "BarcodeSeqLib":
            obj = BarcodeSeqLib()
        elif seqlib_type == "BcidSeqLib":
            obj = BcidSeqLib()
        elif seqlib_type == "BcvSeqLib":
            obj = BcvSeqLib()
        elif seqlib_type == "BasicSeqLib":
            obj = BasicSeqLib()
        elif seqlib_type == "OverlapSeqLib":
            obj = OverlapSeqLib()
        elif seqlib_type == "IdOnlySeqLib":
            obj = IdOnlySeqLib()
        else:
            raise ValueError("Unrecognized SeqLib type '{}' [{}]".format(
                seqlib_type, DRIVER_NAME))
    else:
        raise ValueError("Unrecognized .json config [{}]".format(DRIVER_NAME))

    # set analysis options
    obj.force_recalculate = args.force_recalculate
    obj.component_outliers = args.component_outliers
    obj.scoring_method = args.scoring_method
    obj.logr_method = args.logr_method
    obj.plots_requested = args.plots_requested
    obj.tsv_requested = args.tsv_requested

    if args.output_dir_override is not None:
        obj.output_dir_override = True
        obj.output_dir = args.output_dir_override
    else:
        obj.output_dir_override = False

    if args.sfmap_aa_file is not None:
        obj.plot_options = dict()
        obj.plot_options['aa_list'], obj.plot_options['aa_label_groups'] = \
            parse_aa_list(args.sfmap_aa_file)

    # configure the object
    obj.configure(cfg)

    # make sure objects are valid
    try:
        obj.validate()
    except ValueError:
        logging.exception("Invalid configuration",
                          extra={'oname': DRIVER_NAME})
    else:
        # open HDF5 files for the object and all child objects
        obj.store_open(children=True)

        # perform the analysis
        obj.calculate()

        # generate desired output
        # BUG FIX: make_plots() was previously called once unguarded and
        # then again inside the try block — plots ran twice, and an
        # unguarded plotting error aborted the run before TSV output
        try:
            obj.make_plots()
        except Exception:
            logging.exception("Calculations completed, but plotting failed.",
                              extra={'oname': DRIVER_NAME})
        try:
            obj.write_tsv()
        except Exception:
            logging.exception("Calculations completed, but TSV output failed.",
                              extra={'oname': DRIVER_NAME})

        # clean up
        obj.store_close(children=True)
def main_cmd():
    """
    Entry point for command line.

    Legacy variant of the driver: parses arguments, loads the JSON
    configuration, constructs the matching analysis object, applies the
    command-line options, then configures and validates the object.

    NOTE(review): this is a second definition of ``main_cmd`` in the same
    file; because it appears later, it shadows the version above at import
    time — confirm which one is intended and remove the other.
    """
    # build description string based on available methods
    desc_string = "Command-line driver for Enrich2." + \
        "\n\nscoring methods:\n" + \
        "\n".join([" {:22}{}".format(k, v)
                   for k, v in available_scoring_methods.items()]) + \
        "\n\nlog ratio methods:\n" + \
        "\n".join([" {:22}{}".format(k, v)
                   for k, v in available_logr_methods.items()])

    # create parser and add description
    parser = argparse.ArgumentParser(
        description=desc_string,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # add command line arguments
    parser.add_argument("config", help="JSON configuration file")
    parser.add_argument("scoring_method", help="scoring method",
                        choices=available_scoring_methods.keys())
    parser.add_argument("logr_method", help="log ratio method",
                        choices=available_logr_methods.keys())

    # add analysis options
    parser.add_argument("--log", dest="log_file", metavar="FILE",
                        help="path to log file")
    parser.add_argument("--no-plots", help="don't make plots",
                        dest="plots_requested", action="store_false",
                        default=True)
    parser.add_argument("--no-tsv", help="don't generate tsv files",
                        dest="tsv_requested", action="store_false",
                        default=True)
    parser.add_argument("--recalculate", help="force recalculation",
                        dest="force_recalculate", action="store_true",
                        default=False)
    parser.add_argument("--component-outliers",
                        help="calculate component outlier stats",
                        dest="component_outliers", action="store_true",
                        default=False)
    parser.add_argument("--output-dir",
                        help="override the config file's output directory",
                        dest="output_dir_override", metavar="DIR")

    args = parser.parse_args()

    # start the logs
    start_logging(args.log_file, logging.DEBUG)

    # read the JSON file
    # FIX: mode "U" was deprecated in Python 3 and removed in 3.11; the
    # context manager also guarantees the file handle is closed
    try:
        with open(args.config) as config_file:
            cfg = json.load(config_file)
    except IOError:
        raise IOError("Failed to open '{}' [{}]".format(
            args.config, driver_name))
    except ValueError:
        raise ValueError(
            "Improperly formatted .json file [{}]".format(driver_name))

    # identify config file type and create the object
    if config_check.is_experiment(cfg):
        logging.info("Detected an Experiment config file",
                     extra={'oname': driver_name})
        obj = Experiment()
    elif config_check.is_selection(cfg):
        logging.info("Detected a Selection config file",
                     extra={'oname': driver_name})
        obj = Selection()
    elif config_check.is_seqlib(cfg):
        seqlib_type = config_check.seqlib_type(cfg)
        logging.info("Detected a {} config file".format(seqlib_type),
                     extra={'oname': driver_name})
        # look the SeqLib subclass up by name in the module namespace
        obj = globals()[seqlib_type]()
    else:
        raise ValueError("Unrecognized .json config [{}]".format(driver_name))

    # set analysis options
    obj.force_recalculate = args.force_recalculate
    obj.component_outliers = args.component_outliers
    obj.scoring_method = args.scoring_method
    obj.logr_method = args.logr_method
    obj.plots_requested = args.plots_requested
    obj.tsv_requested = args.tsv_requested

    if args.output_dir_override is not None:
        obj.output_dir_override = True
        obj.output_dir = args.output_dir_override
    else:
        obj.output_dir_override = False

    # configure the object
    obj.configure(cfg)

    # make sure objects are valid
    # BUG FIX: "except ValueError, e" is Python 2-only syntax and is a
    # SyntaxError on Python 3; "as e" works on Python 2.6+ and 3
    try:
        obj.validate()
    except ValueError as e:
        logging.error("Invalid settings: {}".format(e),
                      extra={'oname': driver_name})