def __init__(self, prog, indent_increment=indent_increment, max_help_position=max_help_position, width=width, **kwargs):
    """Construct the formatter, forwarding the module-level layout defaults.

    The default values (``indent_increment``, ``max_help_position``,
    ``width``) are read from names defined at module scope — presumably
    configured elsewhere in this file; confirm they exist at import time.
    """
    # Delegate to the base class; keyword form keeps the mapping explicit.
    HelpFormatter.__init__(self, prog,
                           indent_increment=indent_increment,
                           max_help_position=max_help_position,
                           width=width, **kwargs)
def _split_lines(self, text, width):
    """Split help text, honoring the 'R|' raw prefix and '<br>' forced breaks."""
    if not text.startswith('R|'):
        # No raw marker: defer to the default argparse wrapping.
        return HelpFormatter._split_lines(self, text, width)
    raw_text = text[2:]
    if "<br>" in raw_text:
        # Explicit line breaks requested by the help author.
        return raw_text.split('<br>')
    # this is the RawTextHelpFormatter._split_lines
    return HelpFormatter._split_lines(self, raw_text, width)
def __init__(self, prog):
    """Initialize the help formatter.

    :param prog: The program name
    """
    # Widest option string we need to fit; help columns align to it.
    widest_flag = "--rac --ansible-runner-rotate-artifacts-count"
    # 3 here accounts for the spaces in the ljust(6) below
    HelpFormatter.__init__(
        self,
        prog=prog,
        indent_increment=1,
        max_help_position=len(widest_flag) + 3,
    )
def _format_usage(self, usage, actions, groups, prefix):
    """Render usage, prefixing the program with 'python ' off-Windows."""
    formatted = HelpFormatter._format_usage(self, usage, actions, groups,
                                            prefix)
    # Layout is "usage: <prog> <rest>"; the second token is the program name.
    parts = formatted.split(" ", 2)
    if not IS_WINDOWS:
        parts[1] = "python " + parts[1]
    return " ".join(parts)
def get_argument_parser():
    '''
    Parse the command-line arguments for this program

    :return ArchNetArgumentParser: The argument parser
    '''
    def formatter(prog):
        # Wider help layout than the argparse defaults.
        return HelpFormatter(prog, max_help_position=50, width=200)

    parser = DtnArgumentParser(prog='Dtn Simulator',
                               formatter_class=formatter,
                               description='DTN Network Simulator')
    # Required arguments for running in config file mode
    parser.add_argument('-cf', '--configfile', help='configuration file path',
                        type=str, default=None, nargs='?')
    # Optional arguments
    parser.add_argument('-v', '--validate', help='run unit tests',
                        action='store_true')
    return parser
def format_help_text(self, ctx: click.Context, formatter: HelpFormatter):
    """`format_help_text` formats the help

    Override that adds arguments to the help properly

    Args:
    - `ctx` (`click.Context`): context for click
    - `formatter` (`HelpFormatter`): formatter to use
    """
    # Emit the standard help body first, then append an Arguments section.
    click.Command.format_help_text(self, ctx, formatter)
    argument_rows = [
        ("<Source>", CLI_SOURCE_STR),
        ("<Destination>", CLI_DESTINATION_STR),
        ("<Key>", CLI_ENCODE_KEY_STR),
    ]
    with formatter.section(_("Arguments")):
        formatter.write_dl(argument_rows)
def add_subparser(cmd, msg, subparsers):
    """Register *cmd* on *subparsers*, using *msg* as description and help."""
    formatter = lambda prog: HelpFormatter(prog, max_help_position=40,
                                           width=90)
    return subparsers.add_parser(cmd,
                                 description=msg,
                                 help=msg,
                                 formatter_class=formatter)
def test_normal(self):
    """A string without the 'R|' prefix must wrap exactly like argparse does."""
    formatter = shell.SmartFormatter(None)
    raw_string = ('This is a raw string that will be split over lines '
                  'because it will go into the HelpFormatter. but This string '
                  'needs to be longer'
                  'than 80 chars to split on the lines')
    value = formatter._split_lines(raw_string, self.width)
    # SmartFormatter must fall through to the stock wrapping behavior here.
    self.assertEqual(
        value,
        HelpFormatter._split_lines(formatter, raw_string, self.width))
def populate_args(parser):
    """Attach the table-dump tool options to *parser*.

    parse option from call

    :param parser: an argparse.ArgumentParser to populate in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    # ".items()" replaces Python-2-only ".iteritems()" (removed in Python 3;
    # .items() behaves the same in both versions here). "wrte" typo fixed.
    glopts.add_argument('-t', '--table', dest='tables', metavar='',
                        action='store', nargs='+', type=str,
                        default=[str(t) for t in range(1, len(TABLE_IDX) + 1)],
                        help=('[%(default)s] what tables to show, write either '
                              'the sequence of names or indexes, according to '
                              'this list: {}').format(', '.join([
                                  '%s: %s' % (k, v)
                                  for k, v in TABLE_IDX.items()])))
    glopts.add_argument('--tmpdb', dest='tmpdb', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''if provided uses this directory to manipulate the
                        database''')
    glopts.add_argument('--tsv', dest='tsv', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''store output in tab separated format to the
                        provided path.''')
    # The original ended with `parser.add_argument_group(glopts)`, which passes
    # the already-registered group object as the *title* of a new empty group
    # and corrupts the --help output; that call is removed.
def parse_args():
    """Define and parse the command-line arguments for training."""
    formatter = lambda prog: HelpFormatter(prog, max_help_position=40)
    cli = ArgumentParser(formatter_class=formatter)
    cli.add_argument("--state-dir", metavar="PATH", type=Path, required=True,
                     help="path to directory containing model state")
    cli.add_argument("--data-dir", metavar="PATH", type=Path, required=True,
                     help="path to dataset directory")
    cli.add_argument("--output-dir", metavar="PATH", type=Path, required=True,
                     help="path to output directory")
    cli.add_argument("--batch-size", metavar="NUM", type=int, default=256,
                     help="batch sample size used during training")
    cli.add_argument("--workers", metavar="NUM", type=int, default=1,
                     help="number of processes used to load data")
    return cli.parse_args()
def populate_args(parser):
    """Attach the 'parse' tool options to *parser*.

    parse option from call

    :param parser: an argparse.ArgumentParser to populate in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    glopts.add_argument('--type', dest='type', metavar="STR", type=str,
                        default='map', choices=['map', 'sam', 'bam'],
                        help='''[%(default)s]file type to be parser, MAP
                        (GEM-mapper), SAM or BAM''')
    glopts.add_argument('--read', dest='read', metavar="INT", type=int,
                        default=None,
                        help='In case only one of the reads needs to be parsed')
    glopts.add_argument('--skip', dest='skip', action='store_true',
                        default=False,
                        help='[DEBUG] in case already mapped.')
    glopts.add_argument('--compress_input', dest='compress_input',
                        action='store_true', default=False,
                        help='''Compress input mapped files when parsing is
                        done. This is done in background, while next MAP file is
                        processed, or while reads are sorted.''')
    glopts.add_argument('--genome', dest='genome', metavar="PATH", nargs='+',
                        type=str,
                        help='''paths to file(s) with FASTA files of the
                        reference genome. If many, files will be concatenated.
                        I.e.: --fasta chr_1.fa chr_2.fa In this last case, order
                        is important or the rest of the analysis. Note: it can
                        also be the path to a previously parsed genome in pickle
                        format.''')
    glopts.add_argument('--jobids', dest='jobids', metavar="INT",
                        action='store', default=None, nargs='+', type=int,
                        help='''Use as input data generated by a job with a
                        given jobid(s). Use tadbit describe to find out which.
                        In this case one jobid can be passed per read.''')
    # The original ended with `parser.add_argument_group(glopts)`, which passes
    # the already-registered group object as the *title* of a new empty group
    # and corrupts the --help output; that call (and the commented-out
    # --qc_plot dead code) is removed.
def _format_commands(self, formatter: argparse.HelpFormatter,
                     command: Command = None) -> None:
    """Render the known commands into *formatter*, color-alternating groups."""
    prefix = self.prefixes['commands']
    if command:
        if command.group:
            prefix = self.prefixes['group']
        elif command.guesses:
            prefix = self.prefixes['guesses']
    formatter.start_section(Fore.YELLOW + prefix + Fore.RESET)

    previous_group = ''
    palette = {True: Fore.GREEN, False: Fore.BLUE}
    use_green = True
    for name, handler in self._handlers.items():
        # When guessing, only show candidate commands.
        if command and command.guesses and name not in command.guesses:
            continue
        # switch colors for every group
        group, _, _subname = name.rpartition(' ')
        if group != previous_group:
            previous_group = group
            use_green = not use_green
        formatter.add_argument(argparse.Action(
            option_strings=[palette[use_green] + name + Fore.RESET],
            dest='',
            help=handler.summary,
        ))
    formatter.end_section()
def parse_arguments() -> Dict:
    """Parse and return CLI arguments."""
    formatter = lambda prog: HelpFormatter(
        prog,
        max_help_position=30,  # characters
    )
    parser: ArgumentParser = ArgumentParser(formatter_class=formatter)
    parser.add_argument(
        "--version", "-v",
        action="store_true",
        default=False,
        help="print program version and exit",
    )

    server: ArgumentParser = parser.add_argument_group("SIP server arguments")
    server.add_argument(
        "--address",
        metavar="host",
        type=Text,
        default="127.0.0.1",
        help="server listening address (default: '127.0.0.1')",
    )
    server.add_argument(
        "--port",
        metavar="port",
        type=int,
        default=5060,
        help="server listening port (default: 5060)",
    )
    server.add_argument(
        "--worker-count",
        metavar="n",
        type=int,
        default=1,
        help=f"number of worker threads (default: 1)",
    )

    config: ArgumentParser = parser.add_argument_group("config arguments")
    default_conf_path: Text = os.path.join(os.path.curdir, "config.json")
    config.add_argument(
        "--config",
        metavar="path",
        type=Text,
        default=default_conf_path,
        help=f"configuration file path (default: '{default_conf_path}')",
    )

    args: Namespace = parser.parse_args()
    return vars(args)
def __call__(self, *a, **kw):
    """Print the available `pathmaker` layout strategies and exit(0)."""
    # Public, callable names in pathmaker are the selectable strategies.
    strategy_names = [name for name in sorted(pathmaker.__all__)
                      if not name.startswith('_')
                      and callable(getattr(pathmaker, name))]
    actions = [Action([name], help=getattr(pathmaker, name).__doc__, dest='')
               for name in strategy_names]
    formatter = HelpFormatter('')
    formatter.add_text(
        "An explicit layout strategy can be specified. This is to "
        "instruct how ExplosiveFUSE should present file entries across "
        "all archive files within its mount point. Do note that "
        "the final outcome of the layout is also influenced by the usage "
        "of the '--overwrite' and the '--omit-arcname' flags, and "
        "arguments which may associate with each of the strategies. "
        "They are specified by appending ':', followed by the value of "
        "each positional argument(s)."
    )
    formatter.start_section('Available layout strategies')
    formatter.add_arguments(actions)
    formatter.end_section()
    print(formatter.format_help())
    sys.exit(0)
def __init__(self, *args, **kwargs):
    """Build the parser with argparse's help disabled, then add a custom one.

    Passing ``add_help=None`` suppresses the help argument entirely;
    any other value gets the custom -h/--help flag below.
    """
    # `dict.has_key` was removed in Python 3; `.get(...) is None` is the
    # exact equivalent of `has_key('add_help') and kwargs['add_help'] is None`.
    suppress = kwargs.get('add_help', False) is None
    kwargs.update({
        'add_help': False,
        'formatter_class': lambda prog: HelpFormatter(prog,
                                                      max_help_position=30),
    })
    ArgumentParser.__init__(self, *args, **kwargs)
    if not (suppress or self.add_help):
        self.add_help_argument("-h", "--help", action="store_true",
                               help="Show this help message")
def parse_args():
    """Define and parse the command-line arguments for the clustering run."""
    formatter = lambda prog: HelpFormatter(prog, max_help_position=40)
    cli = ArgumentParser(formatter_class=formatter)
    cli.add_argument("--data-dir", metavar="PATH", type=Path, required=True,
                     help="path to dataset directory")
    cli.add_argument("--output-dir", metavar="PATH", type=Path, required=True,
                     help="path to output directory")
    cli.add_argument("--restart-dir", metavar="PATH", type=Path,
                     help="path to directory containing model state")
    cli.add_argument("--batch-size", metavar="NUM", type=int, default=256,
                     help="batch sample size used during training")
    cli.add_argument("--clusters", metavar="NUM", type=int, default=2,
                     help="number of target clusters")
    cli.add_argument("--components", metavar="NUM", type=int, default=256,
                     help="number of PCA components to use")
    cli.add_argument("--repetitions", metavar="NUM", type=int, default=1,
                     help="number of times to repeat the training to collect "
                          "statistics")
    cli.add_argument("--workers", metavar="NUM", type=int, default=1,
                     help="number of processes used to load data")
    return cli.parse_args()
def populate_args(parser):
    """Attach the 'describe' tool options to *parser*.

    parse option from call

    :param parser: an argparse.ArgumentParser to populate in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    # "wrte" typo fixed in the user-facing help text below.
    glopts.add_argument('-t', '--table', dest='tables', metavar='',
                        action='store', nargs='+', type=str,
                        default=[str(t) for t in range(1, 10)],
                        help='''[%(default)s] what tables to show, write either
                        the sequence of names or indexes, according to this
                        list: 1: paths, 2: jobs, 3: mapped_outputs,
                        4: mapped_inputs, 5: parsed_outputs,
                        6: intersection_outputs, 7: filter_outputs,
                        8: normalize_outputs, 9: segment_outputs''')
    glopts.add_argument('--tmp', dest='tmp', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''if provided uses this directory to manipulate the
                        database''')
    # The original ended with `parser.add_argument_group(glopts)`, which passes
    # the already-registered group object as the *title* of a new empty group
    # and corrupts the --help output; that call is removed.
def __init__(self, *args, **kwargs):
    """
    If the "bootarg_prefix" keyword argument is set, it's assumed that all
    bootargs will start with that prefix.

    "require_prefix" is a bool:
        False: accept the argument with or without the prefix.
        True: ignore the argument without the prefix. (default)
    """
    help_width = get_help_width()
    self._boot_arg = dict()
    self.deprecated_bootargs = []
    # Options consumed here must be popped so ArgumentParser never sees them.
    self.bootarg_prefix = kwargs.pop("bootarg_prefix", "")
    self.require_prefix = kwargs.pop("require_prefix", True)
    make_formatter = lambda prog: HelpFormatter(
        prog, max_help_position=LEFT_PADDING, width=help_width)
    ArgumentParser.__init__(self, description=DESCRIPTION,
                            formatter_class=make_formatter,
                            *args, **kwargs)
def populate_args(parser):
    """Attach the 'clean' tool options to *parser*.

    parse option from call

    :param parser: an argparse.ArgumentParser to populate in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    glopts.add_argument('-j', '--jobids', dest='jobids', metavar="INT",
                        action='store', default=None, nargs='+', type=int,
                        help='jobids of the files and entries to be removed')
    glopts.add_argument('--delete', dest='delete', action="store_true",
                        default=False,
                        help='delete files, otherwise only DB entries.')
    glopts.add_argument('--compress', dest='compress', action="store_true",
                        default=False,
                        help='compress files and update paths accordingly')
    # The original ended with `parser.add_argument_group(glopts)`, which passes
    # the already-registered group object as the *title* of a new empty group
    # and corrupts the --help output; that call is removed.
def default_formatter(prog):
    """Build and return a stock argparse help formatter for *prog*."""
    formatter = HelpFormatter(prog)
    return formatter
def populate_args(parser):
    """Attach the matrix extraction/plotting options to *parser*.

    parse option from call
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    required_grp = parser.add_argument_group('Required options')
    general_grp = parser.add_argument_group('General options')
    filter_grp = parser.add_argument_group('Read filtering options')
    norm_grp = parser.add_argument_group('Normalization options')
    output_grp = parser.add_argument_group('Output options')
    plot_grp = parser.add_argument_group('Plotting options')

    required_grp.add_argument('-w', '--workdir', dest='workdir',
                              metavar="PATH", action='store', default=None,
                              type=str, required=True,
                              help='''path to working directory (generated with
                              the tool tadbit mapper)''')
    general_grp.add_argument('--noX', help='no display server (X screen)',
                             dest='nox', action='store_true')
    required_grp.add_argument('-r', '--resolution', dest='reso', metavar="INT",
                              action='store', default=None, type=int,
                              required=True,
                              help='''resolution at which to output matrices''')
    general_grp.add_argument('--bam', dest='bam', metavar="PATH",
                             action='store', default=None, type=str,
                             help='''path to a TADbit-generated BAM file with
                             all reads (other wise the tool will guess from the
                             working directory database)''')
    general_grp.add_argument('-j', '--jobid', dest='jobid', metavar="INT",
                             action='store', default=None, type=int,
                             help='''Use as input data generated by a job with a
                             given jobid.
                             Use tadbit describe to find out which.''')
    general_grp.add_argument('--force', dest='force', action='store_true',
                             default=False,
                             help='overwrite previously run job')
    general_grp.add_argument('-q', '--quiet', dest='quiet',
                             action='store_true', default=False,
                             help='remove all messages')
    general_grp.add_argument('--tmpdb', dest='tmpdb', action='store',
                             default=None, metavar='PATH', type=str,
                             help='''if provided uses this directory to
                             manipulate the database''')
    general_grp.add_argument('--nchunks', dest='nchunks', action='store',
                             default=None, type=int,
                             help='''maximum number of chunks into which to
                             cut the BAM''')
    general_grp.add_argument("-C", "--cpus", dest="cpus", type=int,
                             default=cpu_count(),
                             help='''[%(default)s] Maximum number of CPU cores
                             available in the execution host. If higher than 1,
                             tasks with multi-threading capabilities will
                             enabled (if 0 all available) cores will be
                             used''')
    output_grp.add_argument('--matrix', dest='matrix', action='store_true',
                            default=False,
                            help='''Write text matrix in multiple columns.
                            By defaults matrices are written in BED-like format
                            (also only way to get a raw matrix with all values
                            including the ones in masked columns).''')
    output_grp.add_argument('--rownames', dest='row_names',
                            action='store_true', default=False,
                            help='To store row names in the output text matrix.')
    plot_grp.add_argument('--plot', dest='plot', action='store_true',
                          default=False,
                          help='Plot matrix in desired format.')
    plot_grp.add_argument('--force_plot', dest='force_plot',
                          action='store_true', default=False,
                          help=('Force plotting even with demoniacally big '
                                'matrices (more than 5000x5000, or 1500x1500'
                                'with interactive option).'))
    output_grp.add_argument('--only_plot', dest='only_plot',
                            action='store_true', default=False,
                            help='[%(default)s] Skip writing matrix in text '
                                 'format.')
    output_grp.add_argument('-i', '--interactive', dest='interactive',
                            action='store_true', default=False,
                            help='''[%(default)s] Open matplotlib interactive
                            plot (nothing will be saved).''')
    plot_grp.add_argument('--triangular', dest='triangular',
                          action='store_true', default=False,
                          help='''[%(default)s] represents only half matrix.
                          Note that this also results in truly vectorial images
                          of matrix.''')
    plot_grp.add_argument('--xtick_rotation', dest='xtick_rotation',
                          default=-25, type=int,
                          help='''[%(default)s] x-tick rotation''')
    plot_grp.add_argument('--cmap', dest='cmap', action='store',
                          default='viridis',
                          help='[%(default)s] Matplotlib color map to use.')
    plot_grp.add_argument('--bad_color', dest='bad_color', action='store',
                          default='white',
                          help='''[%(default)s] Matplotlib color to use on bins
                          filtered out (only used with normalized matrices, not
                          raw).''')
    plot_grp.add_argument('--format', dest='format', action='store',
                          default='png',
                          help='[%(default)s] plot file format.')
    plot_grp.add_argument('--zrange', dest='zrange', action='store',
                          default=None,
                          help='''Range, in log2 scale of the color scale.
                          i.e.: --zrange=-2,2''')
    plot_grp.add_argument('--figsize', dest='figsize', action='store',
                          default=None,
                          help='''Range, in log2 scale of the color scale.
                          default for triangular matrices: --figsize=16,10 and
                          for square matrices: --figsize=16,14''')
    output_grp.add_argument('-c', '--coord', dest='coord1', metavar='',
                            default=None,
                            help='''Coordinate of the region to retrieve. By
                            default all genome, arguments can be either one
                            chromosome name, or the coordinate in the form:
                            "-c chr3:110000000-120000000"''')
    output_grp.add_argument('-c2', '--coord2', dest='coord2', metavar='',
                            default=None,
                            help='''Coordinate of a second region to retrieve
                            the matrix in the intersection with the first
                            region.''')
    norm_grp.add_argument('--biases', dest='biases', metavar="PATH",
                          action='store', default=None, type=str,
                          help='''path to file with pre-calculated biases by
                          columns''')
    norm_grp.add_argument('--norm', dest='normalizations', metavar="STR",
                          action='store', default=['raw'], type=str,
                          nargs='+',
                          choices=['norm', 'decay', 'raw', 'raw&decay'],
                          help='''[%(default)s] normalization(s) to apply.
                          Choices are: [%(choices)s]''')
    filter_grp.add_argument('-F', '--filter', dest='filter', nargs='+',
                            type=int, metavar='INT',
                            default=[1, 2, 3, 4, 6, 7, 9, 10],
                            choices=range(0, 11),
                            help=("""[%(default)s] Use filters to define a set
                                  os valid pair of reads e.g.:
                                  '--apply 1 2 3 4 8 9 10'. Where these
                                  numbers""" +
                                  "correspond to: 0: nothing, %s" % (', '.join(
                                      ['%2d: %15s' % (k, MASKED[k]['name'])
                                       for k in MASKED]))))
    output_grp.add_argument('--only_txt', dest='only_txt', action='store_true',
                            default=False,
                            help='Save only text file for matrices, not images')
    filter_grp.add_argument('--valid', dest='only_valid', action='store_true',
                            default=False,
                            help='input BAM file contains only valid pairs '
                                 '(already filtered).')
def _mk_usage(self, parser: ArgumentParser) -> literal_block:
    """Render *parser*'s usage message as a docutils literal block."""
    usage_width = self.options.get("usage_width", 100)
    parser.formatter_class = lambda prog: HelpFormatter(prog,
                                                        width=usage_width)
    raw_lines = parser.format_usage()[len("usage: "):].splitlines()
    # Re-indent continuation lines so they line up under the program name.
    pad = " " * (len(parser.prog) + 1)
    aligned = [line if idx == 0 else f"{pad}{line.lstrip()}"
               for idx, line in enumerate(raw_lines)]
    return literal_block("", Text("\n".join(aligned)))
def populate_args(parser):
    """Attach the 'merge' tool options to *parser*.

    parse option from call

    :param parser: an argparse.ArgumentParser to populate in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str, required=True,
                        help='''path to a new output folder''')
    glopts.add_argument('-w1', '--workdir1', dest='workdir1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory of the first HiC data
                        sample to merge''')
    glopts.add_argument('-w2', '--workdir2', dest='workdir2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory of the second HiC
                        data sample to merge''')
    glopts.add_argument('--bed1', dest='bed1', metavar="PATH", action='store',
                        default=None, type=str,
                        help='''path to the first TADbit-generated BED file
                        with filtered reads (other wise the tool will guess
                        from the working directory database)''')
    glopts.add_argument('--bed2', dest='bed2', metavar="PATH", action='store',
                        default=None, type=str,
                        help='''path to the second TADbit-generated BED file
                        with filtered reads (other wise the tool will guess
                        from the working directory database)''')
    glopts.add_argument('-r', '--resolution', dest='reso', metavar="INT",
                        action='store', default=None, type=int,
                        help='''resolution at which to do the comparison,
                        and generate the matrices.''')
    glopts.add_argument('--skip_comparison', dest='skip_comparison',
                        action='store_true', default=False,
                        help='''skip the comparison between replicates
                        (faster).''')
    glopts.add_argument('--skip_merge', dest='skip_merge',
                        action='store_true', default=False,
                        help='''skip the merge of replicates (faster).''')
    glopts.add_argument('--perc_zeros', dest='perc_zeros', metavar="FLOAT",
                        action='store', default=95, type=float,
                        help=('[%(default)s%%] maximum percentage of zeroes '
                              'allowed per column.'))
    # BUG FIX: this option was declared with dest='resolution', so the chosen
    # normalization silently landed in opts.resolution; dest now matches the
    # option name.
    glopts.add_argument('--normalization', dest='normalization',
                        metavar="STR", action='store', default='ICE',
                        nargs='+', type=str, choices=['ICE', 'EXP'],
                        help='''[%(default)s] normalization(s) to apply.
                        Order matters.''')
    glopts.add_argument('--save', dest='save', metavar="STR", action='store',
                        default='genome', nargs='+', type=str,
                        choices=['genome', 'chromosomes'],
                        help='''[%(default)s] save genomic or chromosomic
                        matrix.''')
    glopts.add_argument('--jobid1', dest='jobid1', metavar="INT",
                        action='store', default=None, type=int,
                        help='''Use as input data generated by a job with a
                        given jobid. Use tadbit describe to find out
                        which.''')
    glopts.add_argument('--jobid2', dest='jobid2', metavar="INT",
                        action='store', default=None, type=int,
                        help='''Use as input data generated by a job with a
                        given jobid. Use tadbit describe to find out
                        which.''')
    glopts.add_argument('--force', dest='force', action='store_true',
                        default=False,
                        help='overwrite previously run job')
    glopts.add_argument('--norm', dest='norm', action='store_true',
                        default=False,
                        help='compare normalized matrices')
    glopts.add_argument('--bad_cols1', dest='bad_co1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with bad columns''')
    glopts.add_argument('--biases1', dest='biases1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with precalculated biases by
                        columns''')
    glopts.add_argument('--bad_cols2', dest='bad_co2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with bad columns''')
    glopts.add_argument('--biases2', dest='biases2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with precalculated biases by
                        columns''')
    glopts.add_argument('--tmpdb', dest='tmpdb', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''if provided uses this directory to manipulate
                        the database''')
    # The original ended with `parser.add_argument_group(glopts)`, which passes
    # the already-registered group object as the *title* of a new empty group
    # and corrupts the --help output; that call is removed.
def get_options():
    """ parse option from call """
    # Build the CLI; the lambda widens the default argparse help layout.
    parser = ArgumentParser(usage="%(prog)s [options] [--cfg CONFIG_PATH]",
                            formatter_class=lambda prog: HelpFormatter(
                                prog, width=95, max_help_position=27))
    glopts = parser.add_argument_group('General arguments')
    optimo = parser.add_argument_group('Optimization of IMP arguments')
    modelo = parser.add_argument_group('Modeling with optimal IMP arguments')
    parser.add_argument('--usage', dest='usage', action="store_true",
                        default=False,
                        help='''show detailed usage documentation, with examples
                        and exit''')
    parser.add_argument('--cfg', dest='cfg', metavar="PATH", action='store',
                        default=None, type=str,
                        help='path to a configuration file with predefined ' +
                        'parameters')
    parser.add_argument('--optimize_only', dest='optimize_only', default=False,
                        action='store_true',
                        help='do the optimization of the region and exit')
    parser.add_argument('--ncpus', dest='ncpus', metavar="INT", default=1,
                        type=int, help='[%(default)s] Number of CPUs to use')

    #########################################
    # GENERAL
    glopts.add_argument('--root_path', dest='root_path', metavar="PATH",
                        default='', type=str,
                        help=('path to search for data files (just pass file name' +
                              'in "data")'))
    glopts.add_argument('--data', dest='data', metavar="PATH", nargs='+',
                        type=str,
                        help='''path to file(s) with Hi-C data matrix. If many,
                        experiments will be summed up. I.e.: --data
                        replicate_1.txt replicate_2.txt''')
    glopts.add_argument('--xname', dest='xname', metavar="STR", nargs='+',
                        default=[], type=str,
                        help='''[file name] experiment name(s). Use same order
                        as data.''')
    glopts.add_argument('--norm', dest='norm', metavar="PATH", nargs='+',
                        type=str,
                        help='path to file(s) with normalizedHi-C data matrix.')
    glopts.add_argument('--crm', dest='crm', metavar="NAME",
                        help='chromosome name')
    glopts.add_argument('--beg', dest='beg', metavar="INT", type=float,
                        default=None,
                        help='genomic coordinate from which to start modeling')
    glopts.add_argument('--end', dest='end', metavar="INT", type=float,
                        help='genomic coordinate where to end modeling')
    glopts.add_argument('--res', dest='res', metavar="INT", type=int,
                        help='resolution of the Hi-C experiment')
    glopts.add_argument('--outdir', dest='outdir', metavar="PATH",
                        default=None, help='out directory for results')

    #########################################
    # MODELING
    modelo.add_argument('--nmodels_mod', dest='nmodels_mod', metavar="INT",
                        default='5000', type=int,
                        help=('[%(default)s] number of models to generate for' +
                              ' modeling'))
    modelo.add_argument('--nkeep_mod', dest='nkeep_mod', metavar="INT",
                        default='1000', type=int,
                        help=('[%(default)s] number of models to keep for ' +
                              'modeling'))

    #########################################
    # OPTIMIZATION
    optimo.add_argument('--maxdist', action='store', metavar="LIST",
                        default='400', dest='maxdist',
                        help='range of numbers for maxdist' +
                        ', i.e. 400:1000:100 -- or just a number')
    optimo.add_argument('--upfreq', dest='upfreq', metavar="LIST",
                        default='0',
                        help='range of numbers for upfreq' +
                        ', i.e. 0:1.2:0.3 -- or just a number')
    optimo.add_argument('--lowfreq', dest='lowfreq', metavar="LIST",
                        default='0',
                        help='range of numbers for lowfreq' +
                        ', i.e. -1.2:0:0.3 -- or just a number')
    optimo.add_argument('--scale', dest='scale', metavar="LIST",
                        default="0.01",
                        help='[%(default)s] range of numbers to be test as ' +
                        'optimal scale value, i.e. 0.005:0.01:0.001 -- Can ' +
                        'also pass only one number')
    optimo.add_argument('--dcutoff', dest='dcutoff', metavar="LIST",
                        default="2",
                        help='[%(default)s] range of numbers to be test as ' +
                        'optimal distance cutoff parameter (distance, in ' +
                        'number of beads, from which to consider 2 beads as ' +
                        'being close), i.e. 1:5:0.5 -- Can also pass only one' +
                        ' number')
    optimo.add_argument('--nmodels_opt', dest='nmodels_opt', metavar="INT",
                        default='500', type=int,
                        help='[%(default)s] number of models to generate for ' +
                        'optimization')
    optimo.add_argument('--nkeep_opt', dest='nkeep_opt', metavar="INT",
                        default='100', type=int,
                        help='[%(default)s] number of models to keep for ' +
                        'optimization')
    # NOTE(review): these calls pass already-attached group objects as the
    # *title* of new (empty) groups — looks like a long-standing no-op/bug;
    # left as-is to preserve the exact --help output.
    parser.add_argument_group(optimo)
    parser.add_argument_group(modelo)

    opts = parser.parse_args()

    if opts.usage:
        # Python 2 print statement: show the module docstring, then quit.
        print __doc__
        exit()

    log = '\tSummary of arguments:\n'
    # merger opts with CFG file and write summary
    # Flatten sys.argv into bare key/value tokens ("--key=val" -> ['key',
    # 'val']) so we can tell which options were set on the command line.
    args = reduce(lambda x, y: x + y,
                  [i.strip('-').split('=') for i in sys.argv])
    new_opts = {}
    if opts.cfg:
        # Parse "key = value" lines from the config file; '#' starts a comment.
        for line in open(opts.cfg):
            if not '=' in line:
                continue
            if line.startswith('#'):
                continue
            key, value = line.split('#')[0].strip().split('=')
            key = key.strip()
            value = value.strip()
            if value == 'True':
                value = True
            elif value == 'False':
                value = False
            elif key in ['data', 'norm', 'xname']:
                # List-valued options accumulate across config lines.
                new_opts.setdefault(key, []).extend(value.split())
                continue
            new_opts[key] = value
    # bad key in configuration file
    for bad_k in set(new_opts.keys()) - set(opts.__dict__.keys()):
        sys.stderr.write('WARNING: parameter "%s" not recognized' % (bad_k))
    # Precedence: command line > config file > argparse defaults.
    for key in sorted(opts.__dict__.keys()):
        if key in args:
            log += ' * Command setting %13s to %s\n' % (key,
                                                        opts.__dict__[key])
        elif key in new_opts:
            opts.__dict__[key] = new_opts[key]
            log += ' - Config. setting %13s to %s\n' % (key, new_opts[key])
        else:
            log += ' o Default setting %13s to %s\n' % (key,
                                                        opts.__dict__[key])

    # Required-value validation; NOTE(review): `if not x` also rejects 0,
    # which would make beg=0 look "missing" — confirm intended.
    if not opts.data and not opts.norm:
        sys.stderr.write('MISSING data')
        exit(parser.print_help())
    if not opts.outdir:
        sys.stderr.write('MISSING outdir')
        exit(parser.print_help())
    if not opts.crm:
        sys.stderr.write('MISSING crm NAME')
        exit(parser.print_help())
    if not opts.beg:
        sys.stderr.write('MISSING beg COORDINATE')
        exit(parser.print_help())
    if not opts.end:
        sys.stderr.write('MISSING end COORDINATE')
        exit(parser.print_help())
    if not opts.res:
        sys.stderr.write('MISSING resolution')
        exit(parser.print_help())
    if not opts.maxdist:
        sys.stderr.write('MISSING maxdist')
        exit(parser.print_help())
    if not opts.lowfreq:
        sys.stderr.write('MISSING lowfreq')
        exit(parser.print_help())
    if not opts.upfreq:
        sys.stderr.write('MISSING upfreq')
        exit(parser.print_help())

    # groups for TAD detection
    # Pad whichever of data/norm was not given so both lists match in length.
    if not opts.data:
        opts.data = [None] * len(opts.norm)
    else:
        opts.norm = [None] * len(opts.data)

    # this options should stay as this now
    # opts.scale = '0.01'

    # switch to number
    opts.nmodels_mod = int(opts.nmodels_mod)
    opts.nkeep_mod = int(opts.nkeep_mod)
    opts.nmodels_opt = int(opts.nmodels_opt)
    opts.nkeep_opt = int(opts.nkeep_opt)
    opts.ncpus = int(opts.ncpus)
    opts.res = int(opts.res)

    # TODO: UNDER TEST
    opts.container = None  #['cylinder', 1000, 5000, 100]

    # do the divisinon to bins
    opts.beg = int(float(opts.beg) / opts.res)
    opts.end = int(float(opts.end) / opts.res)
    if opts.end - opts.beg <= 2:
        raise Exception('"beg" and "end" parameter should be given in ' +
                        'genomic coordinates, not bin')

    # Create out-directory
    name = '{0}_{1}_{2}'.format(opts.crm, opts.beg, opts.end)
    if not os.path.exists(os.path.join(opts.outdir, name)):
        os.makedirs(os.path.join(opts.outdir, name))

    # write log
    if opts.optimize_only:
        log_format = '[OPTIMIZATION {0}_{1}_{2}_{3}_{4}] %(message)s'.format(
            opts.maxdist, opts.upfreq, opts.lowfreq, opts.scale, opts.dcutoff)
    else:
        log_format = '[DEFAULT] %(message)s'
    # Fall back to a secondary log file if the first cannot be opened.
    try:
        logging.basicConfig(filename=os.path.join(opts.outdir, name,
                                                  name + '.log'),
                            level=logging.INFO, format=log_format)
    except IOError:
        logging.basicConfig(filename=os.path.join(opts.outdir, name,
                                                  name + '.log2'),
                            level=logging.INFO, format=log_format)
    logging.getLogger().addHandler(logging.StreamHandler())
    logging.info(('\n' + log_format.replace(' %(message)s', '')).join(
        log.split('\n')))

    # update path to Hi-C data adding root directory
    if opts.root_path and opts.data[0]:
        for i in xrange(len(opts.data)):
            logging.info(os.path.join(opts.root_path, opts.data[i]))
            opts.data[i] = os.path.join(opts.root_path, opts.data[i])
    # update path to Hi-C norm adding root directory
    if opts.root_path and opts.norm[0]:
        for i in xrange(len(opts.norm)):
            logging.info(os.path.join(opts.root_path, opts.norm[i]))
            opts.norm[i] = os.path.join(opts.root_path, opts.norm[i])
    return opts
def populate_args(parser):
    """Register the modeling command-line options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` populated in place
    """
    # wide formatter so long option help stays on few lines
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('--job_list', dest='job_list', action='store_true',
                        default=False,
                        help='generate a file with a list of jobs to be run in '
                        'a cluster')
    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str, required=True,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    glopts.add_argument('--optimize', dest='optimize', default=False,
                        action="store_true",
                        help='''optimization run, store less info about models''')
    glopts.add_argument('--rand', dest='rand', metavar="INT",
                        type=str, default='1',
                        help='''[%(default)s] random initial number. NOTE:
                        when running single model at the time, should be
                        different for each run''')
    glopts.add_argument('--skip', dest='skip', action='store_true',
                        default=False,
                        help='[DEBUG] in case already mapped.')
    glopts.add_argument('--crm', dest='crm', metavar="NAME",
                        help='chromosome name')
    glopts.add_argument('--beg', dest='beg', metavar="INT", type=float,
                        default=None,
                        help='genomic coordinate from which to start modeling')
    glopts.add_argument('--end', dest='end', metavar="INT", type=float,
                        help='genomic coordinate where to end modeling')
    glopts.add_argument('-r', '--reso', dest='reso', metavar="INT", type=int,
                        help='resolution of the Hi-C experiment')
    glopts.add_argument('--input_matrix', dest='matrix', metavar="PATH",
                        type=str,
                        help='''In case input was not generated with the TADbit
                        tools''')
    glopts.add_argument('--nmodels_run', dest='nmodels_run', metavar="INT",
                        default=None, type=int,
                        help='[ALL] number of models to run with this call')
    glopts.add_argument('--nmodels', dest='nmodels', metavar="INT",
                        default=5000, type=int,
                        help=('[%(default)s] number of models to generate for' +
                              ' modeling'))
    glopts.add_argument('--nkeep', dest='nkeep', metavar="INT",
                        default=1000, type=int,
                        help=('[%(default)s] number of models to keep for ' +
                              'modeling'))
    glopts.add_argument('--perc_zero', dest='perc_zero', metavar="FLOAT",
                        type=float, default=90)
    # LIST-style options accept either a single value or a colon range
    glopts.add_argument('--maxdist', action='store', metavar="LIST",
                        default='400', dest='maxdist',
                        help='range of numbers for maxdist' +
                        ', i.e. 400:1000:100 -- or just a number')
    glopts.add_argument('--upfreq', dest='upfreq', metavar="LIST",
                        default='0',
                        help='range of numbers for upfreq' +
                        ', i.e. 0:1.2:0.3 -- or just a number')
    glopts.add_argument('--lowfreq', dest='lowfreq', metavar="LIST",
                        default='0',
                        help='range of numbers for lowfreq' +
                        ', i.e. -1.2:0:0.3 -- or just a number')
    glopts.add_argument('--scale', dest='scale', metavar="LIST",
                        default="0.01",
                        help='[%(default)s] range of numbers to be test as ' +
                        'optimal scale value, i.e. 0.005:0.01:0.001 -- Can ' +
                        'also pass only one number')
    glopts.add_argument('--dcutoff', dest='dcutoff', metavar="LIST",
                        default="2",
                        help='[%(default)s] range of numbers to be test as ' +
                        'optimal distance cutoff parameter (distance, in ' +
                        'number of beads, from which to consider 2 beads as ' +
                        'being close), i.e. 1:5:0.5 -- Can also pass only one' +
                        ' number')
    glopts.add_argument("-C", "--cpu", dest="cpus", type=int, default=1,
                        help='''[%(default)s] Maximum number of CPU cores
                        available in the execution host. If higher than 1,
                        tasks with multi-threading capabilities will enabled
                        (if 0 all available) cores will be used''')

    # NOTE(review): glopts was already attached by add_argument_group above;
    # this extra call only creates an empty group titled with the object repr.
    parser.add_argument_group(glopts)
def populate_args(parser): """ parse option from call """ masked = { 1: { 'name': 'self-circle' }, 2: { 'name': 'dangling-end' }, 3: { 'name': 'error' }, 4: { 'name': 'extra dangling-end' }, 5: { 'name': 'too close from RES' }, 6: { 'name': 'too short' }, 7: { 'name': 'too large' }, 8: { 'name': 'over-represented' }, 9: { 'name': 'duplicated' }, 10: { 'name': 'random breaks' } } parser.formatter_class = lambda prog: HelpFormatter( prog, width=95, max_help_position=27) glopts = parser.add_argument_group('General options') glopts.add_argument('--force', dest='force', action='store_true', default=False, help='overwrite previously run job') glopts.add_argument('--resume', dest='resume', action='store_true', default=False, help='use filters of previously run job') glopts.add_argument( '--apply', dest='apply', nargs='+', type=int, metavar='INT', default=[1, 2, 3, 4, 6, 7, 8, 9, 10], choices=range(1, 11), help=( """[%(default)s] Use filters to define a set os valid pair of reads e.g.: '--apply 1 2 3 4 6 7 8 9'. Where these numbers""" + "correspond to: %s" % (', '.join(['%2d: %15s' % (k, masked[k]['name']) for k in masked])))) glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH", action='store', default=None, type=str, help='''path to working directory (generated with the tool tadbit mapper)''') glopts.add_argument( '--pathids', dest='pathids', metavar="INT", action='store', default=None, nargs='+', type=int, help='''Use as input data generated by a job under a given pathids. Use tadbit describe to find out which. Needs one PATHid per read, first for read 1, second for read 2.''') parser.add_argument_group(glopts)
def _split_lines(self, text, width): # this is the RawTextHelpFormatter._split_lines if text.startswith('R|'): return text[2:].splitlines() return HelpFormatter._split_lines(self, text, width)
def populate_args(parser):
    """Register the mapped-reads parsing options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` populated in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')
    # options for reads mapped with tools other than TADbit
    notadbit = parser.add_argument_group('Mapped outside TADbit options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    glopts.add_argument('--type', dest='type', metavar="STR",
                        type=str, default='map', choices=['map', 'sam', 'bam'],
                        help='''[%(default)s] file type to be parsed, MAP
                        (GEM-mapper), SAM or BAM''')
    glopts.add_argument('--read', dest='read', metavar="INT",
                        type=int, default=None,
                        help='In case only one of the reads needs to be parsed')
    notadbit.add_argument('--mapped1', dest='mapped1', metavar="PATHs",
                          action='store', nargs='+', default=None, type=str,
                          help='''paths to mapped bam files (first read-end)''')
    notadbit.add_argument('--mapped2', dest='mapped2', metavar="PATHs",
                          action='store', nargs='+', default=None, type=str,
                          help='''paths to mapped bam files (second read-end)''')
    notadbit.add_argument('--renz', dest='renz', metavar="STR", type=str,
                          help='''restriction enzyme name''')
    glopts.add_argument('--filter_chrom', dest='filter_chrom',
                        default="^(chr)?[A-Za-z]?[0-9]{0,3}[XVI]{0,3}(?:ito)?[A-Z-a-z]?(_dna)?$",
                        help='''default: --filter_chrom "%(default)s", regexp to
                        consider only chromosome names passing''')
    glopts.add_argument('--skip', dest='skip', action='store_true',
                        default=False,
                        help='[DEBUG] in case already mapped.')
    glopts.add_argument('--compress_input', dest='compress_input',
                        action='store_true', default=False,
                        help='''Compress input mapped files when parsing is
                        done. This is done in background, while next MAP file is
                        processed, or while reads are sorted.''')
    glopts.add_argument('--tmpdb', dest='tmpdb', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''if provided uses this directory to manipulate the
                        database''')
    glopts.add_argument('--genome', dest='genome', metavar="PATH", nargs='+',
                        type=str,
                        help='''paths to file(s) with FASTA files of the
                        reference genome. If many, files will be concatenated.
                        I.e.: --genome chr_1.fa chr_2.fa
                        In this last case, order is important or the rest of the
                        analysis. Note: it can also be the path to a previously
                        parsed genome in pickle format.''')
    glopts.add_argument('--jobids', dest='jobids', metavar="INT",
                        action='store', default=None, nargs='+', type=int,
                        help='''Use as input data generated by a job with a given
                        jobid(s). Use tadbit describe to find out which.
                        In this case one jobid can be passed per read.''')
    glopts.add_argument('--noX', action='store_true',
                        help='no display server (X screen)')

    parser.add_argument_group(glopts)
def _split_lines(self, text, width): print(text) if text.startswith('CF|'): return text[3:].splitlines() return HelpFormatter._split_lines(self, text, width)
def _split_lines(self, text, width): if text.startswith('R|'): return text[2:].splitlines() # this is the RawTextHelpFormatter._split_lines return HelpFormatter._split_lines(self, text, width)
def populate_args(parser):
    """Register the replicate merge/compare command-line options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` populated in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to a new output folder''')
    glopts.add_argument('-w1', '--workdir1', dest='workdir1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory of the first HiC data
                        sample to merge''')
    glopts.add_argument('-w2', '--workdir2', dest='workdir2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to working directory of the second HiC data
                        sample to merge''')
    glopts.add_argument('--bam1', dest='bam1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to the first TADbit-generated BAM file with
                        all reads (otherwise the tool will guess from the
                        working directory database)''')
    glopts.add_argument('--noX', action='store_true',
                        help='no display server (X screen)')
    glopts.add_argument('--bam2', dest='bam2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to the second TADbit-generated BAM file with
                        all reads (otherwise the tool will guess from the
                        working directory database)''')
    glopts.add_argument("-C", "--cpus", dest="cpus", type=int,
                        default=cpu_count(),
                        help='''[%(default)s] Maximum number of CPU cores
                        available in the execution host. If higher than 1,
                        tasks with multi-threading capabilities will enabled
                        (if 0 all available) cores will be used''')
    glopts.add_argument('-r', '--resolution', dest='reso', metavar="INT",
                        action='store', default=None, type=int,
                        help='''resolution at which to do the comparison,
                        and generate the matrices.''')
    glopts.add_argument('--skip_comparison', dest='skip_comparison',
                        action='store_true', default=False,
                        help='''skip the comparison between replicates (faster).
                        Comparisons are performed at 3 levels 1- comparing first
                        diagonals of each experiment (and generating SCC score
                        and standard deviation see
                        https://doi.org/10.1101/gr.220640.117) 2- Comparing the
                        first eigenvectors of input experiments 3- Generates
                        reproducibility score using function from
                        https://doi.org/10.1093/bioinformatics/btx152''')
    glopts.add_argument('--skip_merge', dest='skip_merge',
                        action='store_true', default=False,
                        help='''skip the merge of replicates (faster).''')
    glopts.add_argument('--save', dest='save', metavar="STR", action='store',
                        default='genome', nargs='+', type=str,
                        choices=['genome', 'chromosomes'],
                        help='''[%(default)s] save genomic or chromosomic matrix.''')
    glopts.add_argument('--jobid1', dest='jobid1', metavar="INT",
                        action='store', default=None, type=int,
                        help='''Use as input data generated by a job with a given
                        jobid. Use tadbit describe to find out which.''')
    glopts.add_argument('--jobid2', dest='jobid2', metavar="INT",
                        action='store', default=None, type=int,
                        help='''Use as input data generated by a job with a given
                        jobid. Use tadbit describe to find out which.''')
    glopts.add_argument('--force', dest='force', action='store_true',
                        default=False,
                        help='overwrite previously run job')
    glopts.add_argument('--norm', dest='norm', action='store_true',
                        default=False,
                        help='compare normalized matrices')
    glopts.add_argument('--biases1', dest='biases1', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with precalculated biases by
                        columns''')
    glopts.add_argument('--biases2', dest='biases2', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to file with precalculated biases by
                        columns''')
    # help fixes: 'set of' (was 'set os'), example now names --filter (was
    # --apply), and a space restored between 'numbers' and 'correspond'
    glopts.add_argument('--filter', dest='filter', nargs='+', type=int,
                        metavar='INT', default=[1, 2, 3, 4, 6, 7, 9, 10],
                        choices=list(range(1, 11)),
                        help=("""[%(default)s] Use filters to define a set of
                        valid pair of reads e.g.:
                        '--filter 1 2 3 4 8 9 10'. Where these numbers """ +
                              "correspond to: %s" % (', '.join(
                                  ['%2d: %15s' % (k, MASKED[k]['name'])
                                   for k in MASKED]))))
    glopts.add_argument('--samtools', dest='samtools', metavar="PATH",
                        action='store', default='samtools', type=str,
                        help='''path samtools binary''')
    glopts.add_argument('--tmpdb', dest='tmpdb', action='store', default=None,
                        metavar='PATH', type=str,
                        help='''if provided uses this directory to manipulate the
                        database''')

    parser.add_argument_group(glopts)
from argparse import ArgumentParser from argparse import HelpFormatter arg_desc = ''' Prepare, submit and check jobs to NAF HTCondor batch system. The configuration file must have a particular name: btag_eff_<balgo>_<bwp>_<mode>_<year>_<trg>.cfg balgo: btag algorithm, e.g. deepjet bwp : btag working point, e.g. medium mode : fh for fullhadronic, sl for semileptonic year : year of data trg : trg when trigger is used, notrg when no trigger is used ''' parser = ArgumentParser(prog='submit_btag.py', formatter_class=lambda prog: HelpFormatter(prog,indent_increment=6,max_help_position=80,width=280), description=arg_desc,add_help=True) parser.add_argument('--exe' , dest='exe' , required= True , help='Executable') parser.add_argument('--cfg' , dest='cfg' , required= True , help='Configuration file') parser.add_argument('--label' , dest='label' , required= True , help='Unique label to identify the submission') parser.add_argument('--samples' , dest='samples' , required= True , help='File containing samples to be processed') parser.add_argument('--btagweight', dest='btagweight' , action='store_true' , help='apply btag weight') parser.add_argument('--submit' , dest='submit' , action='store_true' , help='submit jobs') parser.add_argument('--status' , dest='status' , action='store_true' , help='status of the jobs') parser.add_argument('--resubmit' , dest='resubmit' , action='store_true' , help='resubmit failed jobs') parser.add_argument('--hadd' , dest='hadd' , action='store_true' , help='hadd root files') args = parser.parse_args() import os, sys import getpass
def populate_args(parser):
    """Register the matrix-normalization command-line options on *parser*.

    :param parser: an ``argparse.ArgumentParser`` populated in place
    """
    parser.formatter_class = lambda prog: HelpFormatter(
        prog, width=95, max_help_position=27)

    glopts = parser.add_argument_group('General options')

    glopts.add_argument('-w', '--workdir', dest='workdir', metavar="PATH",
                        action='store', default=None, type=str, required=True,
                        help='''path to working directory (generated with the
                        tool tadbit mapper)''')
    glopts.add_argument('--bed', dest='bed', metavar="PATH",
                        action='store', default=None, type=str,
                        help='''path to a TADbit-generated BED file with
                        filtered reads (otherwise the tool will guess from the
                        working directory database)''')
    glopts.add_argument('-r', '--resolution', dest='reso', metavar="INT",
                        action='store', default=None, type=int, required=True,
                        help='''resolution at which to output matrices''')
    glopts.add_argument('--perc_zeros', dest='perc_zeros', metavar="FLOAT",
                        action='store', default=95, type=float,
                        help=('[%(default)s%%] maximum percentage of zeroes '
                              'allowed per column.'))
    # BUG FIX: dest was 'resolution', which mis-filed the chosen
    # normalization under opts.resolution (the resolution itself already
    # lives in opts.reso); store it under opts.normalization instead.
    glopts.add_argument('--normalization', dest='normalization', metavar="STR",
                        action='store', default='ICE', nargs='+', type=str,
                        choices=['ICE', 'EXP'],
                        help='''[%(default)s] normalization(s) to apply.
                        Order matters.''')
    glopts.add_argument('--factor', dest='factor', metavar="NUM",
                        action='store', default=1, type=float,
                        help='''[%(default)s] target mean value of a cell after
                        normalization (can be used to weight experiments before
                        merging)''')
    glopts.add_argument('--save', dest='save', metavar="STR", action='store',
                        default='genome', nargs='+', type=str,
                        choices=['genome', 'chromosomes'],
                        help='''[%(default)s] save genomic or chromosomic matrix.''')
    glopts.add_argument('-j', '--jobid', dest='jobid', metavar="INT",
                        action='store', default=None, type=int,
                        help='''Use as input data generated by a job with a given
                        jobid. Use tadbit describe to find out which.''')
    glopts.add_argument('--keep', dest='keep', action='store',
                        default=['intra', 'genome'], nargs='+',
                        choices=['intra', 'inter', 'genome'],
                        help='''%(default)s Matrices to save, choices are
                        "intra" to keep intra-chromosomal matrices, "inter" to
                        keep inter-chromosomal matrices and "genome", to keep
                        genomic matrices.''')
    glopts.add_argument('--only_txt', dest='only_txt', action='store_true',
                        default=False,
                        help='Save only text file for matrices, not images')
    glopts.add_argument('--fast_filter', dest='fast_filter',
                        action='store_true', default=False,
                        help='only filter according to the percentage of zero count')
    glopts.add_argument('--force', dest='force', action='store_true',
                        default=False,
                        help='overwrite previously run job')

    parser.add_argument_group(glopts)
def add_usage(self, usage, actions, groups, prefix=None): HelpFormatter.add_usage(self, usage, actions, groups, prefix="Usage: ")